blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a690d3e3c7eeede902404ea982afe32d49a6f713 | 2b7d422e78c188923158a2a0780a99eca960e746 | /usr/lib/python2.7/dist-packages/mercurial/match.py | 425798d0afc4652d7f9b71d6ed924ee8a177b6af | [
"Python-2.0"
] | permissive | sroberti/VREP-Sandbox | 4fd6839cd85ac01aa0f2617b5d6e28440451b913 | 44f7d42494654357b6524aefeb79d7e30599c01d | refs/heads/master | 2022-12-24T14:56:10.155484 | 2019-04-18T15:11:54 | 2019-04-18T15:11:54 | 180,481,713 | 0 | 1 | null | 2022-12-14T18:05:19 | 2019-04-10T02:00:14 | C++ | UTF-8 | Python | false | false | 34,493 | py | # match.py - filename matching
#
# Copyright 2008, 2009 Matt Mackall <mpm@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import, print_function
import copy
import os
import re
from .i18n import _
from . import (
error,
pathutil,
util,
)
allpatternkinds = ('re', 'glob', 'path', 'relglob', 'relpath', 'relre',
'listfile', 'listfile0', 'set', 'include', 'subinclude',
'rootfilesin')
cwdrelativepatternkinds = ('relpath', 'glob')
propertycache = util.propertycache
def _rematcher(regex):
    '''Compile *regex* with the best available regexp engine and return its
    match callable.'''
    compiled = util.re.compile(regex)
    # facebook's re2 bindings expose a slightly faster test_match; fall
    # back to the standard match method when it is absent.
    return getattr(compiled, 'test_match', compiled.match)
def _expandsets(kindpats, ctx, listsubrepos):
    '''Returns the kindpats list with the 'set' patterns expanded.

    Returns a (fset, other) pair: fset is the set of filenames produced by
    evaluating every 'set' (fileset) pattern against ctx, and other is the
    remaining kindpats untouched.  Requires a ctx when any 'set' pattern is
    present.'''
    fset = set()
    other = []
    for kind, pat, source in kindpats:
        if kind == 'set':
            if not ctx:
                raise error.ProgrammingError("fileset expression with no "
                                             "context")
            s = ctx.getfileset(pat)
            fset.update(s)
            if listsubrepos:
                # Also evaluate the fileset inside each subrepo and prefix
                # the results with the subrepo path.
                for subpath in ctx.substate:
                    s = ctx.sub(subpath).getfileset(pat)
                    fset.update(subpath + '/' + f for f in s)
            continue
        other.append((kind, pat, source))
    return fset, other
def _expandsubinclude(kindpats, root):
    '''Returns the list of subinclude matcher args and the kindpats without the
    subincludes in it.

    Each entry of the returned relmatchers list is a (prefix, matcherargs)
    pair: prefix is the path (relative to root, '/'-terminated) under which
    the subinclude file applies, and matcherargs are the arguments to build
    a matcher from that file lazily.'''
    relmatchers = []
    other = []
    for kind, pat, source in kindpats:
        if kind == 'subinclude':
            # Resolve the pattern file path relative to the file that
            # referenced it (source), not relative to the repo root.
            sourceroot = pathutil.dirname(util.normpath(source))
            pat = util.pconvert(pat)
            path = pathutil.join(sourceroot, pat)
            newroot = pathutil.dirname(path)
            matcherargs = (newroot, '', [], ['include:%s' % path])
            prefix = pathutil.canonpath(root, root, newroot)
            if prefix:
                prefix += '/'
            relmatchers.append((prefix, matcherargs))
        else:
            other.append((kind, pat, source))
    return relmatchers, other
def _kindpatsalwaysmatch(kindpats):
""""Checks whether the kindspats match everything, as e.g.
'relpath:.' does.
"""
for kind, pat, source in kindpats:
if pat != '' or kind not in ['relpath', 'glob']:
return False
return True
def match(root, cwd, patterns=None, include=None, exclude=None, default='glob',
          exact=False, auditor=None, ctx=None, listsubrepos=False, warn=None,
          badfn=None, icasefs=False):
    """build an object to match a set of file patterns
    arguments:
    root - the canonical root of the tree you're matching against
    cwd - the current working directory, if relevant
    patterns - patterns to find
    include - patterns to include (unless they are excluded)
    exclude - patterns to exclude (even if they are included)
    default - if a pattern in patterns has no explicit type, assume this one
    exact - patterns are actually filenames (include/exclude still apply)
    warn - optional function used for printing warnings
    badfn - optional bad() callback for this matcher instead of the default
    icasefs - make a matcher for wdir on case insensitive filesystems, which
    normalizes the given patterns to the case in the filesystem
    a pattern is one of:
    'glob:<glob>' - a glob relative to cwd
    're:<regexp>' - a regular expression
    'path:<path>' - a path relative to repository root, which is matched
    recursively
    'rootfilesin:<path>' - a path relative to repository root, which is
    matched non-recursively (will not match subdirectories)
    'relglob:<glob>' - an unrooted glob (*.c matches C files in all dirs)
    'relpath:<path>' - a path relative to cwd
    'relre:<regexp>' - a regexp that needn't match the start of a name
    'set:<fileset>' - a fileset expression
    'include:<path>' - a file of patterns to read and include
    'subinclude:<path>' - a file of patterns to match against files under
    the same directory
    '<something>' - a pattern of the specified default type
    """
    normalize = _donormalize
    if icasefs:
        # Wrap the normalizer so patterns are folded to the case actually
        # present in the dirstate (case-insensitive filesystem support).
        if exact:
            raise error.ProgrammingError("a case-insensitive exact matcher "
                                         "doesn't make sense")
        dirstate = ctx.repo().dirstate
        dsnormalize = dirstate.normalize
        def normalize(patterns, default, root, cwd, auditor, warn):
            kp = _donormalize(patterns, default, root, cwd, auditor, warn)
            kindpats = []
            for kind, pats, source in kp:
                if kind not in ('re', 'relre'): # regex can't be normalized
                    p = pats
                    pats = dsnormalize(pats)
                    # Preserve the original to handle a case only rename.
                    if p != pats and p in dirstate:
                        kindpats.append((kind, p, source))
                kindpats.append((kind, pats, source))
            return kindpats
    # Build the primary matcher from the positional patterns (or an
    # always-matcher when there are none).
    if exact:
        m = exactmatcher(root, cwd, patterns, badfn)
    elif patterns:
        kindpats = normalize(patterns, default, root, cwd, auditor, warn)
        if _kindpatsalwaysmatch(kindpats):
            m = alwaysmatcher(root, cwd, badfn, relativeuipath=True)
        else:
            m = patternmatcher(root, cwd, kindpats, ctx=ctx,
                               listsubrepos=listsubrepos, badfn=badfn)
    else:
        # It's a little strange that no patterns means to match everything.
        # Consider changing this to match nothing (probably using nevermatcher).
        m = alwaysmatcher(root, cwd, badfn)
    # -I patterns further restrict the match; -X patterns remove from it.
    if include:
        kindpats = normalize(include, 'glob', root, cwd, auditor, warn)
        im = includematcher(root, cwd, kindpats, ctx=ctx,
                            listsubrepos=listsubrepos, badfn=None)
        m = intersectmatchers(m, im)
    if exclude:
        kindpats = normalize(exclude, 'glob', root, cwd, auditor, warn)
        em = includematcher(root, cwd, kindpats, ctx=ctx,
                            listsubrepos=listsubrepos, badfn=None)
        m = differencematcher(m, em)
    return m
def exact(root, cwd, files, badfn=None):
    '''Convenience constructor: a matcher that matches exactly *files*.'''
    return exactmatcher(root, cwd, files, badfn=badfn)
def always(root, cwd):
    '''Convenience constructor: a matcher that matches everything.'''
    return alwaysmatcher(root, cwd)
def never(root, cwd):
    '''Convenience constructor: a matcher that matches nothing.'''
    return nevermatcher(root, cwd)
def badmatch(match, badfn):
    """Make a copy of the given matcher, replacing its bad method with the given
    one.

    The original matcher is left untouched; only the shallow copy carries
    the new callback.
    """
    newmatch = copy.copy(match)
    newmatch.bad = badfn
    return newmatch
def _donormalize(patterns, default, root, cwd, auditor, warn):
    '''Convert 'kind:pat' from the patterns list to tuples with kind and
    normalized and rooted patterns and with listfiles expanded.

    Returns a list of (kind, pattern, source) tuples.  'listfile' and
    'include' kinds are expanded recursively; 're'/'relre' are passed
    through untouched since a regex cannot be path-normalized.'''
    kindpats = []
    for kind, pat in [_patsplit(p, default) for p in patterns]:
        if kind in cwdrelativepatternkinds:
            # 'relpath'/'glob' are interpreted relative to cwd.
            pat = pathutil.canonpath(root, cwd, pat, auditor)
        elif kind in ('relglob', 'path', 'rootfilesin'):
            pat = util.normpath(pat)
        elif kind in ('listfile', 'listfile0'):
            try:
                files = util.readfile(pat)
                if kind == 'listfile0':
                    # NUL-separated list (e.g. from `find -print0`).
                    files = files.split('\0')
                else:
                    files = files.splitlines()
                files = [f for f in files if f]
            except EnvironmentError:
                raise error.Abort(_("unable to read file list (%s)") % pat)
            # Recurse so the listed patterns get the same normalization;
            # the listfile path is recorded as each entry's source.
            for k, p, source in _donormalize(files, default, root, cwd,
                                             auditor, warn):
                kindpats.append((k, p, pat))
            continue
        elif kind == 'include':
            try:
                fullpath = os.path.join(root, util.localpath(pat))
                includepats = readpatternfile(fullpath, warn)
                for k, p, source in _donormalize(includepats, default,
                                                 root, cwd, auditor, warn):
                    kindpats.append((k, p, source or pat))
            except error.Abort as inst:
                # NOTE(review): inst[0] indexes the exception args --
                # Python 2 only idiom (this file targets python2.7).
                raise error.Abort('%s: %s' % (pat, inst[0]))
            except IOError as inst:
                if warn:
                    warn(_("skipping unreadable pattern file '%s': %s\n") %
                         (pat, inst.strerror))
                continue
        # else: re or relre - which cannot be normalized
        kindpats.append((kind, pat, ''))
    return kindpats
class basematcher(object):
    '''Base class for all matchers: provides the callable protocol,
    iteration over explicit files, ui path conversion helpers, and default
    (conservative) implementations of the optimization queries.'''
    def __init__(self, root, cwd, badfn=None, relativeuipath=True):
        # root: canonical repo root; cwd: directory that relative ui paths
        # are computed against.  badfn, when given, overrides bad().
        self._root = root
        self._cwd = cwd
        if badfn is not None:
            self.bad = badfn
        self._relativeuipath = relativeuipath
    def __call__(self, fn):
        # m(f) is shorthand for m.matchfn(f).
        return self.matchfn(fn)
    def __iter__(self):
        # Iterating a matcher yields its explicitly listed files.
        for f in self._files:
            yield f
    # Callbacks related to how the matcher is used by dirstate.walk.
    # Subscribers to these events must monkeypatch the matcher object.
    def bad(self, f, msg):
        '''Callback from dirstate.walk for each explicit file that can't be
        found/accessed, with an error message.'''
    # If an explicitdir is set, it will be called when an explicitly listed
    # directory is visited.
    explicitdir = None
    # If an traversedir is set, it will be called when a directory discovered
    # by recursive traversal is visited.
    traversedir = None
    def abs(self, f):
        '''Convert a repo path back to path that is relative to the root of the
        matcher.'''
        return f
    def rel(self, f):
        '''Convert repo path back to path that is relative to cwd of matcher.'''
        return util.pathto(self._root, self._cwd, f)
    def uipath(self, f):
        '''Convert repo path to a display path. If patterns or -I/-X were used
        to create this matcher, the display path will be relative to cwd.
        Otherwise it is relative to the root of the repo.'''
        return (self._relativeuipath and self.rel(f)) or self.abs(f)
    @propertycache
    def _files(self):
        # Lazily computed list of explicit files; subclasses override.
        return []
    def files(self):
        '''Explicitly listed files or patterns or roots:
        if no patterns or .always(): empty list,
        if exact: list exact files,
        if not .anypats(): list all files and dirs,
        else: optimal roots'''
        return self._files
    @propertycache
    def _fileset(self):
        # Set view of _files for O(1) membership tests in exact().
        return set(self._files)
    def exact(self, f):
        '''Returns True if f is in .files().'''
        return f in self._fileset
    def matchfn(self, f):
        # Default predicate matches nothing; subclasses supply the real one.
        return False
    def visitdir(self, dir):
        '''Decides whether a directory should be visited based on whether it
        has potential matches in it or one of its subdirectories. This is
        based on the match's primary, included, and excluded patterns.
        Returns the string 'all' if the given directory and all subdirectories
        should be visited. Otherwise returns True or False indicating whether
        the given directory should be visited.
        '''
        return True
    def always(self):
        '''Matcher will match everything and .files() will be empty --
        optimization might be possible.'''
        return False
    def isexact(self):
        '''Matcher will match exactly the list of files in .files() --
        optimization might be possible.'''
        return False
    def prefix(self):
        '''Matcher will match the paths in .files() recursively --
        optimization might be possible.'''
        return False
    def anypats(self):
        '''None of .always(), .isexact(), and .prefix() is true --
        optimizations will be difficult.'''
        return not self.always() and not self.isexact() and not self.prefix()
class alwaysmatcher(basematcher):
    '''Matches everything.'''
    def __init__(self, root, cwd, badfn=None, relativeuipath=False):
        super(alwaysmatcher, self).__init__(root, cwd, badfn,
                                            relativeuipath=relativeuipath)
    def always(self):
        return True
    def matchfn(self, f):
        # Every file matches.
        return True
    def visitdir(self, dir):
        # Every directory and all of its subdirectories should be visited.
        return 'all'
    def __repr__(self):
        return '<alwaysmatcher>'
class nevermatcher(basematcher):
    '''Matches nothing.'''
    def __init__(self, root, cwd, badfn=None):
        super(nevermatcher, self).__init__(root, cwd, badfn)
    # It's a little weird to say that the nevermatcher is an exact matcher
    # or a prefix matcher, but it seems to make sense to let callers take
    # fast paths based on either. There will be no exact matches, nor any
    # prefixes (files() returns []), so fast paths iterating over them should
    # be efficient (and correct).
    def isexact(self):
        return True
    def prefix(self):
        return True
    def visitdir(self, dir):
        # No directory can contain a match.
        return False
    def __repr__(self):
        return '<nevermatcher>'
class patternmatcher(basematcher):
    '''Matches filenames against a compiled set of kindpats (the primary,
    command-line patterns).  The regex is anchored with '$' so patterns
    must match the full filename.'''
    def __init__(self, root, cwd, kindpats, ctx=None, listsubrepos=False,
                 badfn=None):
        super(patternmatcher, self).__init__(root, cwd, badfn)
        self._files = _explicitfiles(kindpats)
        self._prefix = _prefix(kindpats)
        self._pats, self.matchfn = _buildmatch(ctx, kindpats, '$', listsubrepos,
                                               root)
    @propertycache
    def _dirs(self):
        # All parent directories of the explicit files, plus the root.
        return set(util.dirs(self._fileset)) | {'.'}
    def visitdir(self, dir):
        if self._prefix and dir in self._fileset:
            # Prefix patterns match everything beneath their roots.
            return 'all'
        return ('.' in self._fileset or
                dir in self._fileset or
                dir in self._dirs or
                any(parentdir in self._fileset
                    for parentdir in util.finddirs(dir)))
    def prefix(self):
        return self._prefix
    def __repr__(self):
        return ('<patternmatcher patterns=%r>' % self._pats)
class includematcher(basematcher):
    '''Matcher built from -I/-X style include patterns.  Unlike
    patternmatcher, its regex suffix '(?:/|$)' lets a directory pattern
    match everything below it.'''
    def __init__(self, root, cwd, kindpats, ctx=None, listsubrepos=False,
                 badfn=None):
        super(includematcher, self).__init__(root, cwd, badfn)
        self._pats, self.matchfn = _buildmatch(ctx, kindpats, '(?:/|$)',
                                               listsubrepos, root)
        self._prefix = _prefix(kindpats)
        roots, dirs = _rootsanddirs(kindpats)
        # roots are directories which are recursively included.
        self._roots = set(roots)
        # dirs are directories which are non-recursively included.
        self._dirs = set(dirs)
    def visitdir(self, dir):
        if self._prefix and dir in self._roots:
            return 'all'
        return ('.' in self._roots or
                dir in self._roots or
                dir in self._dirs or
                any(parentdir in self._roots
                    for parentdir in util.finddirs(dir)))
    def __repr__(self):
        return ('<includematcher includes=%r>' % self._pats)
class exactmatcher(basematcher):
    '''Matches the input files exactly. They are interpreted as paths, not
    patterns (so no kind-prefixes).
    '''
    def __init__(self, root, cwd, files, badfn=None):
        super(exactmatcher, self).__init__(root, cwd, badfn)
        if isinstance(files, list):
            self._files = files
        else:
            self._files = list(files)
    # matchfn is simple set membership in the explicit file list.
    matchfn = basematcher.exact
    @propertycache
    def _dirs(self):
        # Parent directories of the listed files, plus the root.
        return set(util.dirs(self._fileset)) | {'.'}
    def visitdir(self, dir):
        return dir in self._dirs
    def isexact(self):
        return True
    def __repr__(self):
        return ('<exactmatcher files=%r>' % self._files)
class differencematcher(basematcher):
    '''Composes two matchers by matching if the first matches and the second
    does not.
    The second matcher's non-matching-attributes (root, cwd, bad, explicitdir,
    traversedir) are ignored.
    '''
    def __init__(self, m1, m2):
        super(differencematcher, self).__init__(m1._root, m1._cwd)
        self._m1 = m1
        self._m2 = m2
        # Delegate the dirstate.walk callbacks to m1.
        self.bad = m1.bad
        self.explicitdir = m1.explicitdir
        self.traversedir = m1.traversedir
    def matchfn(self, f):
        return self._m1(f) and not self._m2(f)
    @propertycache
    def _files(self):
        if self.isexact():
            return [f for f in self._m1.files() if self(f)]
        # If m1 is not an exact matcher, we can't easily figure out the set of
        # files, because its files() are not always files. For example, if
        # m1 is "path:dir" and m2 is "rootfileins:.", we don't
        # want to remove "dir" from the set even though it would match m2,
        # because the "dir" in m1 may not be a file.
        return self._m1.files()
    def visitdir(self, dir):
        if self._m2.visitdir(dir) == 'all':
            # m2 excludes this entire subtree, so nothing here can match.
            return False
        return bool(self._m1.visitdir(dir))
    def isexact(self):
        return self._m1.isexact()
    def __repr__(self):
        return ('<differencematcher m1=%r, m2=%r>' % (self._m1, self._m2))
def intersectmatchers(m1, m2):
    '''Composes two matchers by matching if both of them match.
    The second matcher's non-matching-attributes (root, cwd, bad, explicitdir,
    traversedir) are ignored.
    '''
    if m1 is None or m2 is None:
        # Intersecting with "no matcher" is a no-op.
        return m1 or m2
    if m1.always():
        # m1 matches everything, so the intersection behaves like m2 --
        # but it must keep m1's callbacks and path-conversion behavior.
        m = copy.copy(m2)
        # TODO: Consider encapsulating these things in a class so there's only
        # one thing to copy from m1.
        m.bad = m1.bad
        m.explicitdir = m1.explicitdir
        m.traversedir = m1.traversedir
        m.abs = m1.abs
        m.rel = m1.rel
        m._relativeuipath |= m1._relativeuipath
        return m
    if m2.always():
        m = copy.copy(m1)
        m._relativeuipath |= m2._relativeuipath
        return m
    return intersectionmatcher(m1, m2)
class intersectionmatcher(basematcher):
    '''Matches a file only when both component matchers match it.
    Callbacks (bad, explicitdir, traversedir) are taken from m1.'''
    def __init__(self, m1, m2):
        super(intersectionmatcher, self).__init__(m1._root, m1._cwd)
        self._m1 = m1
        self._m2 = m2
        self.bad = m1.bad
        self.explicitdir = m1.explicitdir
        self.traversedir = m1.traversedir
    @propertycache
    def _files(self):
        if self.isexact():
            # Filter the exact matcher's file list through the other one.
            m1, m2 = self._m1, self._m2
            if not m1.isexact():
                m1, m2 = m2, m1
            return [f for f in m1.files() if m2(f)]
        # It neither m1 nor m2 is an exact matcher, we can't easily intersect
        # the set of files, because their files() are not always files. For
        # example, if intersecting a matcher "-I glob:foo.txt" with matcher of
        # "path:dir2", we don't want to remove "dir2" from the set.
        return self._m1.files() + self._m2.files()
    def matchfn(self, f):
        return self._m1(f) and self._m2(f)
    def visitdir(self, dir):
        visit1 = self._m1.visitdir(dir)
        if visit1 == 'all':
            return self._m2.visitdir(dir)
        # bool() because visit1=True + visit2='all' should not be 'all'
        return bool(visit1 and self._m2.visitdir(dir))
    def always(self):
        return self._m1.always() and self._m2.always()
    def isexact(self):
        return self._m1.isexact() or self._m2.isexact()
    def __repr__(self):
        return ('<intersectionmatcher m1=%r, m2=%r>' % (self._m1, self._m2))
class subdirmatcher(basematcher):
    """Adapt a matcher to work on a subdirectory only.
    The paths are remapped to remove/insert the path as needed:
    >>> from . import pycompat
    >>> m1 = match(b'root', b'', [b'a.txt', b'sub/b.txt'])
    >>> m2 = subdirmatcher(b'sub', m1)
    >>> bool(m2(b'a.txt'))
    False
    >>> bool(m2(b'b.txt'))
    True
    >>> bool(m2.matchfn(b'a.txt'))
    False
    >>> bool(m2.matchfn(b'b.txt'))
    True
    >>> m2.files()
    ['b.txt']
    >>> m2.exact(b'b.txt')
    True
    >>> util.pconvert(m2.rel(b'b.txt'))
    'sub/b.txt'
    >>> def bad(f, msg):
    ...     print(pycompat.sysstr(b"%s: %s" % (f, msg)))
    >>> m1.bad = bad
    >>> m2.bad(b'x.txt', b'No such file')
    sub/x.txt: No such file
    >>> m2.abs(b'c.txt')
    'sub/c.txt'
    """
    def __init__(self, path, matcher):
        super(subdirmatcher, self).__init__(matcher._root, matcher._cwd)
        self._path = path
        self._matcher = matcher
        self._always = matcher.always()
        # Keep only the wrapped matcher's files under path, stripped of the
        # "path/" prefix.
        self._files = [f[len(path) + 1:] for f in matcher._files
                       if f.startswith(path + "/")]
        # If the parent repo had a path to this subrepo and the matcher is
        # a prefix matcher, this submatcher always matches.
        if matcher.prefix():
            self._always = any(f == path for f in matcher._files)
    def bad(self, f, msg):
        self._matcher.bad(self._path + "/" + f, msg)
    def abs(self, f):
        return self._matcher.abs(self._path + "/" + f)
    def rel(self, f):
        return self._matcher.rel(self._path + "/" + f)
    def uipath(self, f):
        return self._matcher.uipath(self._path + "/" + f)
    def matchfn(self, f):
        # Some information is lost in the superclass's constructor, so we
        # can not accurately create the matching function for the subdirectory
        # from the inputs. Instead, we override matchfn() and visitdir() to
        # call the original matcher with the subdirectory path prepended.
        return self._matcher.matchfn(self._path + "/" + f)
    def visitdir(self, dir):
        if dir == '.':
            dir = self._path
        else:
            dir = self._path + "/" + dir
        return self._matcher.visitdir(dir)
    def always(self):
        return self._always
    def prefix(self):
        return self._matcher.prefix() and not self._always
    def __repr__(self):
        return ('<subdirmatcher path=%r, matcher=%r>' %
                (self._path, self._matcher))
class unionmatcher(basematcher):
    """A matcher that is the union of several matchers.
    The non-matching-attributes (root, cwd, bad, explicitdir, traversedir) are
    taken from the first matcher.
    """
    def __init__(self, matchers):
        m1 = matchers[0]
        super(unionmatcher, self).__init__(m1._root, m1._cwd)
        self.explicitdir = m1.explicitdir
        self.traversedir = m1.traversedir
        self._matchers = matchers
    def matchfn(self, f):
        # A file matches if any component matcher matches it.
        for match in self._matchers:
            if match(f):
                return True
        return False
    def visitdir(self, dir):
        r = False
        for m in self._matchers:
            v = m.visitdir(dir)
            if v == 'all':
                # One matcher wants the whole subtree; that dominates.
                return v
            r |= v
        return r
    def __repr__(self):
        return ('<unionmatcher matchers=%r>' % self._matchers)
def patkind(pattern, default=None):
    '''If pattern is 'kind:pat' with a known kind, return kind.'''
    kind, _pat = _patsplit(pattern, default)
    return kind
def _patsplit(pattern, default):
    """Split a string into the optional pattern kind prefix and the actual
    pattern.

    Returns (kind, pattern-without-prefix) when the prefix is a known kind,
    otherwise (default, original-pattern)."""
    kind, sep, rest = pattern.partition(':')
    if sep and kind in allpatternkinds:
        return kind, rest
    return default, pattern
def _globre(pat):
    r'''Convert an extended glob string to a regexp string.
    >>> from . import pycompat
    >>> def bprint(s):
    ...     print(pycompat.sysstr(s))
    >>> bprint(_globre(br'?'))
    .
    >>> bprint(_globre(br'*'))
    [^/]*
    >>> bprint(_globre(br'**'))
    .*
    >>> bprint(_globre(br'**/a'))
    (?:.*/)?a
    >>> bprint(_globre(br'a/**/b'))
    a\/(?:.*/)?b
    >>> bprint(_globre(br'[a*?!^][^b][!c]'))
    [a*?!^][\^b][^c]
    >>> bprint(_globre(br'{a,b}'))
    (?:a|b)
    >>> bprint(_globre(br'.\*\?'))
    \.\*\?
    '''
    # Hand-written single-pass scanner over the glob.  i is the cursor,
    # group tracks open '{...}' alternation groups.
    i, n = 0, len(pat)
    res = ''
    group = 0
    escape = util.re.escape
    def peek():
        # One-character lookahead; reads the enclosing i (never writes it).
        return i < n and pat[i:i + 1]
    while i < n:
        c = pat[i:i + 1]
        i += 1
        if c not in '*?[{},\\':
            # Ordinary character: escape it for the regex engine.
            res += escape(c)
        elif c == '*':
            if peek() == '*':
                # '**' crosses directory boundaries; '**/' optionally so.
                i += 1
                if peek() == '/':
                    i += 1
                    res += '(?:.*/)?'
                else:
                    res += '.*'
            else:
                # Single '*' stops at '/'.
                res += '[^/]*'
        elif c == '?':
            res += '.'
        elif c == '[':
            # Character class: scan ahead for the closing ']'.  An
            # unterminated '[' is treated as a literal bracket.
            j = i
            if j < n and pat[j:j + 1] in '!]':
                j += 1
            while j < n and pat[j:j + 1] != ']':
                j += 1
            if j >= n:
                res += '\\['
            else:
                stuff = pat[i:j].replace('\\','\\\\')
                i = j + 1
                if stuff[0:1] == '!':
                    # Glob negation '!' becomes regex negation '^'.
                    stuff = '^' + stuff[1:]
                elif stuff[0:1] == '^':
                    stuff = '\\' + stuff
                res = '%s[%s]' % (res, stuff)
        elif c == '{':
            group += 1
            res += '(?:'
        elif c == '}' and group:
            res += ')'
            group -= 1
        elif c == ',' and group:
            # Inside '{a,b}', commas separate alternatives.
            res += '|'
        elif c == '\\':
            p = peek()
            if p:
                i += 1
                res += escape(p)
            else:
                res += escape(c)
        else:
            res += escape(c)
    return res
def _regex(kind, pat, globsuffix):
    '''Convert a (normalized) pattern of any kind into a regular expression.
    globsuffix is appended to the regexp of globs.'''
    if not pat:
        # Empty pattern matches everything (empty regex prefix).
        return ''
    if kind == 're':
        return pat
    if kind in ('path', 'relpath'):
        if pat == '.':
            return ''
        # Match the path itself or anything beneath it.
        return util.re.escape(pat) + '(?:/|$)'
    if kind == 'rootfilesin':
        if pat == '.':
            escaped = ''
        else:
            # Pattern is a directory name.
            escaped = util.re.escape(pat) + '/'
        # Anything after the pattern must be a non-directory.
        return escaped + '[^/]+$'
    if kind == 'relglob':
        # Unrooted glob: allow any directory prefix.
        return '(?:|.*/)' + _globre(pat) + globsuffix
    if kind == 'relre':
        if pat.startswith('^'):
            return pat
        return '.*' + pat
    # Default: 'glob' (and any other kind) is compiled as a rooted glob.
    return _globre(pat) + globsuffix
def _buildmatch(ctx, kindpats, globsuffix, listsubrepos, root):
    '''Return regexp string and a matcher function for kindpats.
    globsuffix is appended to the regexp of globs.'''
    matchfuncs = []
    subincludes, kindpats = _expandsubinclude(kindpats, root)
    if subincludes:
        # Matchers for subinclude files are built lazily, one per prefix,
        # and cached in submatchers.
        submatchers = {}
        def matchsubinclude(f):
            for prefix, matcherargs in subincludes:
                if f.startswith(prefix):
                    mf = submatchers.get(prefix)
                    if mf is None:
                        mf = match(*matcherargs)
                        submatchers[prefix] = mf
                    # Strip the prefix before delegating to the submatcher.
                    if mf(f[len(prefix):]):
                        return True
            return False
        matchfuncs.append(matchsubinclude)
    # 'set' (fileset) patterns become a plain membership test.
    fset, kindpats = _expandsets(kindpats, ctx, listsubrepos)
    if fset:
        matchfuncs.append(fset.__contains__)
    regex = ''
    if kindpats:
        regex, mf = _buildregexmatch(kindpats, globsuffix)
        matchfuncs.append(mf)
    if len(matchfuncs) == 1:
        return regex, matchfuncs[0]
    else:
        # Several sources of matches: a file matches if any of them does.
        return regex, lambda f: any(mf(f) for mf in matchfuncs)
def _buildregexmatch(kindpats, globsuffix):
    """Build a match function from a list of kinds and kindpats,
    return regexp string and a matcher function."""
    try:
        regex = '(?:%s)' % '|'.join([_regex(k, p, globsuffix)
                                     for (k, p, s) in kindpats])
        if len(regex) > 20000:
            # Pre-empt regex engines with small pattern-size limits.
            raise OverflowError
        return regex, _rematcher(regex)
    except OverflowError:
        # We're using a Python with a tiny regex engine and we
        # made it explode, so we'll divide the pattern list in two
        # until it works
        l = len(kindpats)
        if l < 2:
            raise
        regexa, a = _buildregexmatch(kindpats[:l//2], globsuffix)
        regexb, b = _buildregexmatch(kindpats[l//2:], globsuffix)
        # NOTE(review): the combined (too-long) regex string is returned
        # here even though matching is actually done by the two halves.
        return regex, lambda s: a(s) or b(s)
    except re.error:
        # Recompile each pattern on its own to pinpoint the invalid one.
        for k, p, s in kindpats:
            try:
                _rematcher('(?:%s)' % _regex(k, p, globsuffix))
            except re.error:
                if s:
                    raise error.Abort(_("%s: invalid pattern (%s): %s") %
                                      (s, k, p))
                else:
                    raise error.Abort(_("invalid pattern (%s): %s") % (k, p))
        raise error.Abort(_("invalid pattern"))
def _patternrootsanddirs(kindpats):
'''Returns roots and directories corresponding to each pattern.
This calculates the roots and directories exactly matching the patterns and
returns a tuple of (roots, dirs) for each. It does not return other
directories which may also need to be considered, like the parent
directories.
'''
r = []
d = []
for kind, pat, source in kindpats:
if kind == 'glob': # find the non-glob prefix
root = []
for p in pat.split('/'):
if '[' in p or '{' in p or '*' in p or '?' in p:
break
root.append(p)
r.append('/'.join(root) or '.')
elif kind in ('relpath', 'path'):
r.append(pat or '.')
elif kind in ('rootfilesin',):
d.append(pat or '.')
else: # relglob, re, relre
r.append('.')
return r, d
def _roots(kindpats):
    '''Returns root directories to match recursively from the given patterns.'''
    # Only the recursive roots are needed; discard the exact dirs.
    return _patternrootsanddirs(kindpats)[0]
def _rootsanddirs(kindpats):
    '''Returns roots and exact directories from patterns.
    roots are directories to match recursively, whereas exact directories should
    be matched non-recursively. The returned (roots, dirs) tuple will also
    include directories that need to be implicitly considered as either, such as
    parent directories.
    >>> _rootsanddirs(
    ...     [(b'glob', b'g/h/*', b''), (b'glob', b'g/h', b''),
    ...      (b'glob', b'g*', b'')])
    (['g/h', 'g/h', '.'], ['g', '.'])
    >>> _rootsanddirs(
    ...     [(b'rootfilesin', b'g/h', b''), (b'rootfilesin', b'', b'')])
    ([], ['g/h', '.', 'g', '.'])
    >>> _rootsanddirs(
    ...     [(b'relpath', b'r', b''), (b'path', b'p/p', b''),
    ...      (b'path', b'', b'')])
    (['r', 'p/p', '.'], ['p', '.'])
    >>> _rootsanddirs(
    ...     [(b'relglob', b'rg*', b''), (b're', b're/', b''),
    ...      (b'relre', b'rr', b'')])
    (['.', '.', '.'], ['.'])
    '''
    r, d = _patternrootsanddirs(kindpats)
    # Append the parents as non-recursive/exact directories, since they must be
    # scanned to get to either the roots or the other exact directories.
    d.extend(util.dirs(d))
    d.extend(util.dirs(r))
    # util.dirs() does not include the root directory, so add it manually
    d.append('.')
    return r, d
def _explicitfiles(kindpats):
    '''Returns the potential explicit filenames from the patterns.
    >>> _explicitfiles([(b'path', b'foo/bar', b'')])
    ['foo/bar']
    >>> _explicitfiles([(b'rootfilesin', b'foo/bar', b'')])
    []
    '''
    # Drop the pattern kinds that can only ever name directories
    # ('rootfilesin'); the roots of the rest may be explicit files.
    return _roots([kp for kp in kindpats if kp[0] != 'rootfilesin'])
def _prefix(kindpats):
'''Whether all the patterns match a prefix (i.e. recursively)'''
for kind, pat, source in kindpats:
if kind not in ('path', 'relpath'):
return False
return True
_commentre = None
def readpatternfile(filepath, warn, sourceinfo=False):
    '''parse a pattern file, returning a list of
    patterns. These patterns should be given to compile()
    to be validated and converted into a match function.
    trailing white space is dropped.
    the escape character is backslash.
    comments start with #.
    empty lines are skipped.
    lines can be of the following formats:
    syntax: regexp # defaults following lines to non-rooted regexps
    syntax: glob   # defaults following lines to non-rooted globs
    re:pattern     # non-rooted regular expression
    glob:pattern   # non-rooted glob
    pattern        # pattern of the current default type
    if sourceinfo is set, returns a list of tuples:
    (pattern, lineno, originalline). This is useful to debug ignore patterns.
    '''
    syntaxes = {'re': 'relre:', 'regexp': 'relre:', 'glob': 'relglob:',
                'include': 'include', 'subinclude': 'subinclude'}
    # Default syntax until a 'syntax:' line says otherwise.
    syntax = 'relre:'
    patterns = []
    fp = open(filepath, 'rb')
    for lineno, line in enumerate(util.iterfile(fp), start=1):
        if "#" in line:
            global _commentre
            if not _commentre:
                _commentre = util.re.compile(br'((?:^|[^\\])(?:\\\\)*)#.*')
            # remove comments prefixed by an even number of escapes
            m = _commentre.search(line)
            if m:
                line = line[:m.end(1)]
            # fixup properly escaped comments that survived the above
            line = line.replace("\\#", "#")
        line = line.rstrip()
        if not line:
            continue
        if line.startswith('syntax:'):
            # Switch the default syntax for the following lines.
            s = line[7:].strip()
            try:
                syntax = syntaxes[s]
            except KeyError:
                if warn:
                    warn(_("%s: ignoring invalid syntax '%s'\n") %
                         (filepath, s))
            continue
        # A per-line kind prefix (e.g. 'glob:pat') overrides the default.
        # NOTE(review): iteritems is Python 2 only (file targets python2.7).
        linesyntax = syntax
        for s, rels in syntaxes.iteritems():
            if line.startswith(rels):
                linesyntax = rels
                line = line[len(rels):]
                break
            elif line.startswith(s+':'):
                linesyntax = rels
                line = line[len(s) + 1:]
                break
        if sourceinfo:
            patterns.append((linesyntax + line, lineno, line))
        else:
            patterns.append(linesyntax + line)
    fp.close()
    return patterns
| [
"samuel.p.roberti@gmail.com"
] | samuel.p.roberti@gmail.com |
f990494489bde1a8610d3c197a3d453e56275a20 | b47c136e077f5100478338280495193a8ab81801 | /Lights/adafruit-circuitpython-bundle-6.x-mpy-20210310/examples/fxas21002c_simpletest.py | 03e33d75201c976b1b9d57b78db301d55e193ae0 | [
"Apache-2.0"
] | permissive | IanSMoyes/SpiderPi | 22cd8747cc389f674cc8d95f32b4d86f9b7b2d8e | cc3469980ae87b92d0dc43c05dbd579f0fa8c4b1 | refs/heads/master | 2023-03-20T22:30:23.362137 | 2021-03-12T17:37:33 | 2021-03-12T17:37:33 | 339,555,949 | 16 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,196 | py | # SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT
# Simple demo of the FXAS21002C gyroscope.
# Will print the gyroscope values every second.
import time
import board
import busio
import adafruit_fxas21002c
# Initialize I2C bus and device.
i2c = busio.I2C(board.SCL, board.SDA)
sensor = adafruit_fxas21002c.FXAS21002C(i2c)
# Optionally create the sensor with a different gyroscope range (the
# default is 250 DPS, but you can use 500, 1000, or 2000 DPS values):
# sensor = adafruit_fxas21002c.FXAS21002C(i2c, gyro_range=adafruit_fxas21002c.GYRO_RANGE_500DPS)
# sensor = adafruit_fxas21002c.FXAS21002C(i2c, gyro_range=adafruit_fxas21002c.GYRO_RANGE_1000DPS)
# sensor = adafruit_fxas21002c.FXAS21002C(i2c, gyro_range=adafruit_fxas21002c.GYRO_RANGE_2000DPS)
# Main loop will read the gyroscope values every second and print them out.
while True:
    # Read gyroscope (x, y, z rotation rates).
    gyro_x, gyro_y, gyro_z = sensor.gyroscope
    # Print values formatted to three decimal places.
    print(
        "Gyroscope (radians/s): ({0:0.3f}, {1:0.3f}, {2:0.3f})".format(
            gyro_x, gyro_y, gyro_z
        )
    )
    # Delay for a second.
    time.sleep(1.0)
| [
"ians.moyes@gmail.com"
] | ians.moyes@gmail.com |
2543d81a8f1e8d62cca1d44ab2baf964f4eeb4e7 | cd486d096d2c92751557f4a97a4ba81a9e6efebd | /16/addons/script.icechannel.extn.common/plugins/tvandmovies/g2g_mvs.py | 63c69da9c356ca9af9df9f71a21c8688c0011255 | [] | no_license | bopopescu/firestick-loader-kodi-data | 2f8cb72b9da67854b64aa76f720bdad6d4112926 | e4d7931d8f62c94f586786cd8580108b68d3aa40 | refs/heads/master | 2022-04-28T11:14:10.452251 | 2020-05-01T03:12:13 | 2020-05-01T03:12:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,459 | py | '''
g2g.cm # OdrtKapH2dNRpVHxhBtg
Copyright (C) 2013
'''
from entertainment.plugnplay.interfaces import MovieSource
#from entertainment.plugnplay.interfaces import CustomSettings
from entertainment.plugnplay import Plugin
from entertainment import common
import os
from entertainment.xgoogle.search import GoogleSearch
import xbmc
import xbmcgui
class g2g(MovieSource):
    # Scraper plugin for the g2gfmmovies.com site (Python 2 / Kodi addon).
    # Registered with the plugin framework via the MovieSource interface.
    implements = [MovieSource]
    name = "g2g"
    display_name = "g2g.cm"
    base_url = 'http://g2gfmmovies.com/'
    #img='https://raw.githubusercontent.com/Coolwavexunitytalk/images/92bed8a40419803f31f90e2268956db50d306997/flixanity.png'
    source_enabled_by_default = 'true'
    cookie_file = os.path.join(common.cookies_path, 'g2glogin.cookie')
    icon = common.notify_icon
    '''
    def __init__(self):
        xml = '<settings>\n'
        xml += '<category label="Account">\n'
        xml += '<setting id="tv_user" type="text" label="Email" default="Enter your noobroom email" />\n'
        xml += '<setting id="tv_pwd" type="text" option="hidden" label="Password" default="xunity" />'
        xml += '<setting label="Premium account will allow for 1080 movies and the TV Shows section" type="lsep" />\n'
        xml += '<setting id="premium" type="bool" label="Enable Premium account" default="false" />\n'
        xml += '</category>\n'
        xml += '</settings>\n'
        self.CreateSettings(self.name, self.display_name, xml)
    '''
    def GetFileHosts(self, url, list, lock, message_queue,type):
        # Scrape one title page and register every hosted stream link found.
        # `type` selects the regex: movie pages embed the host name via a
        # document.writeln() call, TV pages expose it in a version_host span.
        import re
        from entertainment.net import Net
        net = Net(cached=False)
        print '################################'
        print url
        content = net.http_GET(url).content
        if type == 'movies':
            r='class="movie_version_link"> <a href="(.+?)".+?document.writeln\(\'(.+?)\'\)'
        else:
            r='class="movie_version_link"> <a href="(.+?)".+?version_host">(.+?)<'
        match=re.compile(r,re.DOTALL).findall(content)
        for item_url ,HOST in match:
            # Quality is always reported as 'DVD'; host name is upper-cased.
            self.AddFileHost(list, 'DVD', item_url,host=HOST.upper())
    def GetFileHostsForContent(self, title, name, year, season, episode, type, list, lock, message_queue):
        # Search the site for `name` and fan out to GetFileHosts for each hit.
        # Movies are disambiguated by `year`; TV episodes by rebuilding the
        # season/episode URL from the show's base URL.
        from entertainment.net import Net
        import re
        #net = Net(cached=False)
        name = self.CleanTextForSearch(name)
        import urllib
        name = name.lower()
        net = Net(cached=False)
        if type == 'movies':
            title = self.CleanTextForSearch(title)
            name = self.CleanTextForSearch(name)
            URL= self.base_url+'?type=movie&keywords=%s' %name.replace(' ','+')
            content = net.http_GET(URL).content
            match =re.compile('href="(.+?)" target="_blank"><img class="image" src=".+?" alt="(.+?)"').findall(content)
            for item_url , name in match:
                if year in name:
                    #print item_url
                    self.GetFileHosts(item_url, list, lock, message_queue,type)
        elif type == 'tv_episodes':
            title = self.CleanTextForSearch(title)
            name = self.CleanTextForSearch(name)
            URL= self.base_url+'?type=tv&keywords=%s' %name.replace(' ','+')
            content = net.http_GET(URL).content
            match =re.compile('href="(.+?)" target="_blank"><img class="image" src=".+?" alt="(.+?)"').findall(content)
            for url , NAME in match:
                if name.lower() in self.CleanTextForSearch(NAME.lower()):
                    # Rewrite the show URL into a specific episode URL.
                    url=url.replace('-online.html','')
                    item_url=url+'-season-%s-episode-%s-online.html' % (season,episode)
                    self.GetFileHosts(item_url, list, lock, message_queue,type)
    def Resolve(self, url):
        # Fetch the intermediate page, pull the base64-encoded target from the
        # `&url=...&` query fragment, and hand it to istream for resolution.
        from entertainment.net import Net
        import re
        net = Net(cached=False)
        import base64
        print url
        content = net.http_GET(url).content
        URL=base64.b64decode(re.compile('&url=(.+?)&').findall(content)[0])
        #print '###############################'
        #print URL
        from entertainment import istream
        play_url = istream.ResolveUrl(URL)
        #print play_url
        return play_url
| [
"esc0rtd3w@gmail.com"
] | esc0rtd3w@gmail.com |
75e958e7d869c291cb288b7e42ace08e1c1f7532 | 7356a31baa4e7d42756fc44f46eeccc399d7f038 | /electrumx/lib/coins.py | bcf7b4967ffc387a51e47411c599bafb8695c976 | [
"MIT"
] | permissive | zcore-dev/electrumx-server-mue | 06e7c4fc9277d1835e907f8150cc3027c964ae13 | d082c70c629c8b5dd8f9a94762fd1f2300a8ba20 | refs/heads/master | 2020-08-21T22:37:49.474821 | 2019-10-04T19:55:05 | 2019-10-04T19:55:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 95,352 | py | # Copyright (c) 2016-2017, Neil Booth
# Copyright (c) 2017, the ElectrumX authors
#
# All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''Module providing coin abstraction.
Anything coin-specific should go in this file and be subclassed where
necessary for appropriate handling.
'''
from collections import namedtuple
import re
import struct
from decimal import Decimal
from hashlib import sha256
from functools import partial
import electrumx.lib.util as util
from electrumx.lib.hash import Base58, hash160, double_sha256, hash_to_hex_str
from electrumx.lib.hash import HASHX_LEN, hex_str_to_hash
from electrumx.lib.script import ScriptPubKey, OpCodes
import electrumx.lib.tx as lib_tx
import electrumx.lib.tx_dash as lib_tx_dash
import electrumx.server.block_processor as block_proc
import electrumx.server.daemon as daemon
from electrumx.server.session import (ElectrumX, DashElectrumX,
MonetaryUnitElectrumX, SmartCashElectrumX, AuxPoWElectrumX)
# A parsed block: the raw bytes, the header bytes, and the deserialized txs.
Block = namedtuple("Block", "raw header transactions")
# Cached locally because hashX_from_script tests it on every output script.
OP_RETURN = OpCodes.OP_RETURN
class CoinError(Exception):
    '''Exception raised for coin-related errors, e.g. an unknown
    coin/network combination, a bad daemon URL, an unexpected genesis
    block hash, or an invalid address.'''
class Coin(object):
    '''Base class of coin hierarchy.

    Subclasses override the class attributes below to describe a
    specific chain (serialisation version bytes, genesis hash, RPC
    port, peers, ...) and may override the header/script methods for
    chains with non-standard consensus rules.
    '''
    # Maximum reorg depth the server is prepared to handle.
    REORG_LIMIT = 200
    # Not sure if these are coin-specific
    RPC_URL_REGEX = re.compile('.+@(\\[[0-9a-fA-F:]+\\]|[^:]+)(:[0-9]+)?')
    # Smallest units per coin (used by decimal_value below).
    VALUE_PER_COIN = 100000000
    CHUNK_SIZE = 2016
    # Size in bytes of a header without any extension (e.g. AuxPoW data).
    BASIC_HEADER_SIZE = 80
    # True when every header has the same size, so static_header_offset
    # can compute file offsets as height * size.
    STATIC_BLOCK_HEADERS = True
    SESSIONCLS = ElectrumX
    DEFAULT_MAX_SEND = 1000000
    DESERIALIZER = lib_tx.Deserializer
    DAEMON = daemon.Daemon
    BLOCK_PROCESSOR = block_proc.BlockProcessor
    # Field names and struct layout of the 80-byte Bitcoin-style header.
    HEADER_VALUES = ('version', 'prev_block_hash', 'merkle_root', 'timestamp',
                     'bits', 'nonce')
    HEADER_UNPACK = struct.Struct('< I 32s 32s I I I').unpack_from
    MEMPOOL_HISTOGRAM_REFRESH_SECS = 500
    # Base58 address / WIF version bytes (Bitcoin mainnet defaults).
    P2PKH_VERBYTE = bytes.fromhex("00")
    P2SH_VERBYTES = [bytes.fromhex("05")]
    # Placeholder xpub/xprv version bytes; real values come from subclasses.
    XPUB_VERBYTES = bytes('????', 'utf-8')
    XPRV_VERBYTES = bytes('????', 'utf-8')
    WIF_BYTE = bytes.fromhex("80")
    ENCODE_CHECK = Base58.encode_check
    DECODE_CHECK = Base58.decode_check
    GENESIS_HASH = ('000000000019d6689c085ae165831e93'
                    '4ff763ae46a2a6c172b3f1b60a8ce26f')
    # Peer discovery
    PEER_DEFAULT_PORTS = {'t': '50001', 's': '50002'}
    PEERS = []
    CRASH_CLIENT_VER = None
    BLACKLIST_URL = None

    @classmethod
    def lookup_coin_class(cls, name, net):
        '''Return a coin class given name and network.

        Raise an exception if unrecognised.'''
        req_attrs = ['TX_COUNT', 'TX_COUNT_HEIGHT', 'TX_PER_BLOCK']
        for coin in util.subclasses(Coin):
            if (coin.NAME.lower() == name.lower() and
                    coin.NET.lower() == net.lower()):
                coin_req_attrs = req_attrs.copy()
                # Concrete coins must define the TX_* statistics used for
                # sync-progress estimation.
                missing = [attr for attr in coin_req_attrs
                           if not hasattr(coin, attr)]
                if missing:
                    raise CoinError('coin {} missing {} attributes'
                                    .format(name, missing))
                return coin
        raise CoinError('unknown coin {} and network {} combination'
                        .format(name, net))

    @classmethod
    def sanitize_url(cls, url):
        '''Normalise a daemon URL: strip whitespace and trailing slashes,
        append the default RPC port when none is given, and default the
        scheme to http.  Raises CoinError for a malformed URL.'''
        # Remove surrounding ws and trailing /s
        url = url.strip().rstrip('/')
        match = cls.RPC_URL_REGEX.match(url)
        if not match:
            raise CoinError('invalid daemon URL: "{}"'.format(url))
        if match.groups()[1] is None:
            url += ':{:d}'.format(cls.RPC_PORT)
        if not url.startswith('http://') and not url.startswith('https://'):
            url = 'http://' + url
        return url + '/'

    @classmethod
    def genesis_block(cls, block):
        '''Check the Genesis block is the right one for this coin.

        Return the block less its unspendable coinbase.
        '''
        header = cls.block_header(block, 0)
        header_hex_hash = hash_to_hex_str(cls.header_hash(header))
        if header_hex_hash != cls.GENESIS_HASH:
            raise CoinError('genesis block has hash {} expected {}'
                            .format(header_hex_hash, cls.GENESIS_HASH))
        return header + bytes(1)

    @classmethod
    def hashX_from_script(cls, script):
        '''Returns a hashX from a script, or None if the script is provably
        unspendable so the output can be dropped.
        '''
        if script and script[0] == OP_RETURN:
            return None
        return sha256(script).digest()[:HASHX_LEN]

    @staticmethod
    def lookup_xverbytes(verbytes):
        '''Return a (is_xpub, coin_class) pair given xpub/xprv verbytes.'''
        # Order means BTC testnet will override NMC testnet
        for coin in util.subclasses(Coin):
            if verbytes == coin.XPUB_VERBYTES:
                return True, coin
            if verbytes == coin.XPRV_VERBYTES:
                return False, coin
        raise CoinError('version bytes unrecognised')

    @classmethod
    def address_to_hashX(cls, address):
        '''Return a hashX given a coin address.'''
        return cls.hashX_from_script(cls.pay_to_address_script(address))

    @classmethod
    def P2PKH_address_from_hash160(cls, hash160):
        '''Return a P2PKH address given a public key.'''
        assert len(hash160) == 20
        return cls.ENCODE_CHECK(cls.P2PKH_VERBYTE + hash160)

    @classmethod
    def P2PKH_address_from_pubkey(cls, pubkey):
        '''Return a coin address given a public key.'''
        return cls.P2PKH_address_from_hash160(hash160(pubkey))

    @classmethod
    def P2SH_address_from_hash160(cls, hash160):
        '''Return a coin address given a hash160.'''
        assert len(hash160) == 20
        return cls.ENCODE_CHECK(cls.P2SH_VERBYTES[0] + hash160)

    @classmethod
    def hash160_to_P2PKH_script(cls, hash160):
        '''Return the standard P2PKH output script for a hash160.'''
        return ScriptPubKey.P2PKH_script(hash160)

    @classmethod
    def hash160_to_P2PKH_hashX(cls, hash160):
        '''Return the hashX of the P2PKH script for a hash160.'''
        return cls.hashX_from_script(cls.hash160_to_P2PKH_script(hash160))

    @classmethod
    def pay_to_address_script(cls, address):
        '''Return a pubkey script that pays to a pubkey hash.

        Pass the address (either P2PKH or P2SH) in base58 form.
        '''
        raw = cls.DECODE_CHECK(address)
        # Require version byte(s) plus hash160.
        verbyte = -1
        verlen = len(raw) - 20
        if verlen > 0:
            verbyte, hash160 = raw[:verlen], raw[verlen:]
        if verbyte == cls.P2PKH_VERBYTE:
            return cls.hash160_to_P2PKH_script(hash160)
        if verbyte in cls.P2SH_VERBYTES:
            return ScriptPubKey.P2SH_script(hash160)
        raise CoinError('invalid address: {}'.format(address))

    @classmethod
    def privkey_WIF(cls, privkey_bytes, compressed):
        '''Return the private key encoded in Wallet Import Format.'''
        payload = bytearray(cls.WIF_BYTE) + privkey_bytes
        if compressed:
            # Trailing 0x01 marks a compressed-pubkey WIF key.
            payload.append(0x01)
        return cls.ENCODE_CHECK(payload)

    @classmethod
    def header_hash(cls, header):
        '''Given a header return hash'''
        return double_sha256(header)

    @classmethod
    def header_prevhash(cls, header):
        '''Given a header return previous hash'''
        return header[4:36]

    @classmethod
    def static_header_offset(cls, height):
        '''Given a header height return its offset in the headers file.

        If header sizes change at some point, this is the only code
        that needs updating.'''
        assert cls.STATIC_BLOCK_HEADERS
        return height * cls.BASIC_HEADER_SIZE

    @classmethod
    def static_header_len(cls, height):
        '''Given a header height return its length.'''
        return (cls.static_header_offset(height + 1)
                - cls.static_header_offset(height))

    @classmethod
    def block_header(cls, block, height):
        '''Returns the block header given a block and its height.'''
        return block[:cls.static_header_len(height)]

    @classmethod
    def block(cls, raw_block, height):
        '''Return a Block namedtuple given a raw block and its height.'''
        header = cls.block_header(raw_block, height)
        txs = cls.DESERIALIZER(raw_block, start=len(header)).read_tx_block()
        return Block(raw_block, header, txs)

    @classmethod
    def decimal_value(cls, value):
        '''Return the number of standard coin units as a Decimal given a
        quantity of smallest units.

        For example 1 BTC is returned for 100 million satoshis.
        '''
        return Decimal(value) / cls.VALUE_PER_COIN

    @classmethod
    def warn_old_client_on_tx_broadcast(cls, _client_ver):
        '''Return a warning string for vulnerable client versions, or
        False when no warning is needed.  Overridden by some subclasses.'''
        return False
class AuxPowMixin(object):
    '''Mixin for coins whose headers carry AuxPoW (merge-mining) data
    after the basic 80-byte header.'''
    STATIC_BLOCK_HEADERS = False
    DESERIALIZER = lib_tx.DeserializerAuxPow
    SESSIONCLS = AuxPoWElectrumX
    # AuxPoW headers are significantly larger, so the DEFAULT_MAX_SEND from
    # Bitcoin is insufficient. In Namecoin mainnet, 5 MB wasn't enough to
    # sync, while 10 MB worked fine.
    DEFAULT_MAX_SEND = 10000000

    @classmethod
    def header_hash(cls, header):
        '''Given a header return hash'''
        # Only the basic header is hashed; the AuxPoW payload is excluded.
        return double_sha256(header[:cls.BASIC_HEADER_SIZE])

    @classmethod
    def block_header(cls, block, height):
        '''Return the AuxPow block header bytes'''
        deserializer = cls.DESERIALIZER(block)
        return deserializer.read_header(cls.BASIC_HEADER_SIZE)
class EquihashMixin(object):
    '''Mixin for Equihash-based coins: 140-byte basic header (with a
    reserved field) followed by a variable-length solution.'''
    STATIC_BLOCK_HEADERS = False
    BASIC_HEADER_SIZE = 140     # Excluding Equihash solution
    DESERIALIZER = lib_tx.DeserializerEquihash
    HEADER_VALUES = ('version', 'prev_block_hash', 'merkle_root', 'reserved',
                     'timestamp', 'bits', 'nonce')
    HEADER_UNPACK = struct.Struct('< I 32s 32s 32s I I 32s').unpack_from

    @classmethod
    def block_header(cls, block, height):
        '''Return the block header bytes'''
        deserializer = cls.DESERIALIZER(block)
        return deserializer.read_header(cls.BASIC_HEADER_SIZE)
class ScryptMixin(object):
    '''Mixin for coins that hash headers with scrypt for low versions.'''
    DESERIALIZER = lib_tx.DeserializerTxTime
    # Lazily-created scrypt hash function; populated on first use so the
    # scrypt package is only required when a scrypt coin is served.
    HEADER_HASH = None

    @classmethod
    def header_hash(cls, header):
        '''Given a header return the hash.'''
        if cls.HEADER_HASH is None:
            import scrypt
            cls.HEADER_HASH = lambda x: scrypt.hash(x, x, 1024, 1, 1, 32)
        version, = util.unpack_le_uint32_from(header)
        if version > 6:
            # Later header versions fall back to the next class in the
            # MRO's header_hash (presumably sha256d — confirm per coin).
            return super().header_hash(header)
        else:
            return cls.HEADER_HASH(header)
class KomodoMixin(object):
    '''Mixin with Komodo-family address version bytes and the Zcash-style
    transaction deserializer.'''
    P2PKH_VERBYTE = bytes.fromhex("3C")
    P2SH_VERBYTES = [bytes.fromhex("55")]
    WIF_BYTE = bytes.fromhex("BC")
    GENESIS_HASH = ('027e3758c3a65b12aa1046462b486d0a'
                    '63bfa1beae327897f56c5cfb7daaae71')
    DESERIALIZER = lib_tx.DeserializerZcash
class BitcoinMixin(object):
    '''Shared mainnet parameters for the Bitcoin-derived chains below.'''
    SHORTNAME = "BTC"
    NET = "mainnet"
    XPUB_VERBYTES = bytes.fromhex("0488b21e")
    XPRV_VERBYTES = bytes.fromhex("0488ade4")
    RPC_PORT = 8332
class NameMixin(object):
    @staticmethod
    def find_end_position_of_name(script, length):
        """Return the byte offset just past the first *length* operations
        of *script*.

        Mirrors the opcode walk in Script.get_ops: every push opcode is
        skipped together with its payload bytes.  Raises IndexError when
        a push claims more payload than the script contains.
        """
        pos = 0
        for _ in range(length):
            opcode = script[pos]
            pos += 1
            if opcode > OpCodes.OP_PUSHDATA4:
                # Not a push opcode; it carries no payload.
                continue
            # Determine the payload length for this push opcode.
            if opcode < OpCodes.OP_PUSHDATA1:
                payload_len = opcode
            elif opcode == OpCodes.OP_PUSHDATA1:
                payload_len = script[pos]
                pos += 1
            elif opcode == OpCodes.OP_PUSHDATA2:
                payload_len, = struct.unpack('<H', script[pos: pos + 2])
                pos += 2
            else:
                payload_len, = struct.unpack('<I', script[pos: pos + 4])
                pos += 4
            if pos + payload_len > len(script):
                raise IndexError
            pos += payload_len
        return pos
class HOdlcoin(Coin):
    '''HOdlcoin mainnet chain parameters (note the 88-byte header).'''
    NAME = "HOdlcoin"
    SHORTNAME = "HODLC"
    NET = "mainnet"
    BASIC_HEADER_SIZE = 88
    P2PKH_VERBYTE = bytes.fromhex("28")
    WIF_BYTE = bytes.fromhex("a8")
    GENESIS_HASH = ('008872e5582924544e5c707ee4b839bb'
                    '82c28a9e94e917c94b40538d5658c04b')
    DESERIALIZER = lib_tx.DeserializerSegWit
    TX_COUNT = 258858
    TX_COUNT_HEIGHT = 382138
    TX_PER_BLOCK = 5
class BitcoinSV(BitcoinMixin, Coin):
    '''Bitcoin SV mainnet chain parameters and default peers.'''
    NAME = "BitcoinSV"
    SHORTNAME = "BSV"
    TX_COUNT = 267318795
    TX_COUNT_HEIGHT = 557037
    TX_PER_BLOCK = 400
    PEERS = [
        'electrumx.bitcoinsv.io s',
        'satoshi.vision.cash s',
        'sv.usebsv.com s t',
        'sv.jochen-hoenicke.de s t',
        'sv.satoshi.io s t',
    ]
class BitcoinCash(BitcoinMixin, Coin):
    '''Bitcoin Cash (ABC) mainnet chain parameters.'''
    NAME = "BitcoinCashABC" # Some releases later remove the ABC suffix
    SHORTNAME = "BCH"
    TX_COUNT = 265479628
    TX_COUNT_HEIGHT = 556592
    TX_PER_BLOCK = 400
    PEERS = [
        'bch.imaginary.cash s t',
        'electroncash.dk s t',
        'wallet.satoshiscoffeehouse.com s t',
    ]
    # BCH uses canonical (lexicographic) transaction ordering.
    BLOCK_PROCESSOR = block_proc.LTORBlockProcessor

    @classmethod
    def warn_old_client_on_tx_broadcast(cls, client_ver):
        '''Warn Electron Cash clients older than 3.3.4 after a broadcast.'''
        if client_ver < (3, 3, 4):
            return ('<br/><br/>'
                    'Your transaction was successfully broadcast.<br/><br/>'
                    'However, you are using a VULNERABLE version of Electron Cash.<br/>'
                    'Download the latest version from this web site ONLY:<br/>'
                    'https://electroncash.org/'
                    '<br/><br/>')
        return False
class BitcoinSegwit(BitcoinMixin, Coin):
    '''Bitcoin Core (segwit-enabled) mainnet chain parameters.'''
    NAME = "BitcoinSegwit"
    DESERIALIZER = lib_tx.DeserializerSegWit
    MEMPOOL_HISTOGRAM_REFRESH_SECS = 120
    TX_COUNT = 318337769
    TX_COUNT_HEIGHT = 524213
    TX_PER_BLOCK = 1400
    CRASH_CLIENT_VER = (3, 2, 3)
    BLACKLIST_URL = 'https://electrum.org/blacklist.json'
    PEERS = [
        'E-X.not.fyi s t',
        'electrum.vom-stausee.de s t',
        'electrum.hsmiths.com s t',
        'helicarrier.bauerj.eu s t',
        'hsmiths4fyqlw5xw.onion s t',
        'ozahtqwp25chjdjd.onion s t',
        'electrum.hodlister.co s',
        'electrum3.hodlister.co s',
        'btc.usebsv.com s50006',
        'fortress.qtornado.com s443 t',
        'ecdsa.net s110 t',
        'e2.keff.org s t',
        'currentlane.lovebitco.in s t',
        'electrum.jochen-hoenicke.de s50005 t50003',
        'vps5.hsmiths.com s',
    ]

    @classmethod
    def warn_old_client_on_tx_broadcast(cls, client_ver):
        '''Warn Electrum clients older than 3.3.3 after a broadcast.'''
        if client_ver < (3, 3, 3):
            return ('<br/><br/>'
                    'Your transaction was successfully broadcast.<br/><br/>'
                    'However, you are using a VULNERABLE version of Electrum.<br/>'
                    'Download the new version from the usual place:<br/>'
                    'https://electrum.org/'
                    '<br/><br/>')
        return False
class BitcoinGold(EquihashMixin, BitcoinMixin, Coin):
    '''Bitcoin Gold mainnet: Equihash headers from FORK_HEIGHT onwards.'''
    CHUNK_SIZE = 252
    NAME = "BitcoinGold"
    SHORTNAME = "BTG"
    FORK_HEIGHT = 491407
    P2PKH_VERBYTE = bytes.fromhex("26")
    P2SH_VERBYTES = [bytes.fromhex("17")]
    DESERIALIZER = lib_tx.DeserializerEquihashSegWit
    TX_COUNT = 265026255
    TX_COUNT_HEIGHT = 499923
    TX_PER_BLOCK = 50
    REORG_LIMIT = 1000
    RPC_PORT = 8332
    PEERS = [
        'electrumx-eu.bitcoingold.org s50002 t50001',
        'electrumx-us.bitcoingold.org s50002 t50001'
    ]

    @classmethod
    def header_hash(cls, header):
        '''Given a header return hash'''
        # The header embeds its height at offset 68 (Equihash layout).
        height, = util.unpack_le_uint32_from(header, 68)
        if height >= cls.FORK_HEIGHT:
            return double_sha256(header)
        else:
            # Pre-fork headers are hashed in the legacy 80-byte layout,
            # skipping the reserved field.
            return double_sha256(header[:68] + header[100:112])
class BitcoinGoldTestnet(BitcoinGold):
    '''Bitcoin Gold testnet chain parameters.'''
    FORK_HEIGHT = 1
    SHORTNAME = "TBTG"
    XPUB_VERBYTES = bytes.fromhex("043587CF")
    XPRV_VERBYTES = bytes.fromhex("04358394")
    P2PKH_VERBYTE = bytes.fromhex("6F")
    P2SH_VERBYTES = [bytes.fromhex("C4")]
    WIF_BYTE = bytes.fromhex("EF")
    TX_COUNT = 0
    TX_COUNT_HEIGHT = 1
    NET = 'testnet'
    RPC_PORT = 18332
    GENESIS_HASH = ('00000000e0781ebe24b91eedc293adfe'
                    'a2f557b53ec379e78959de3853e6f9f6')
    PEERS = [
        'test-node1.bitcoingold.org s50002',
        'test-node2.bitcoingold.org s50002',
        'test-node3.bitcoingold.org s50002'
    ]
class BitcoinGoldRegtest(BitcoinGold):
    '''Bitcoin Gold regtest chain parameters.'''
    FORK_HEIGHT = 2000
    SHORTNAME = "TBTG"
    XPUB_VERBYTES = bytes.fromhex("043587CF")
    XPRV_VERBYTES = bytes.fromhex("04358394")
    P2PKH_VERBYTE = bytes.fromhex("6F")
    P2SH_VERBYTES = [bytes.fromhex("C4")]
    WIF_BYTE = bytes.fromhex("EF")
    TX_COUNT = 0
    TX_COUNT_HEIGHT = 1
    NET = 'regtest'
    RPC_PORT = 18444
    GENESIS_HASH = ('0f9188f13cb7b2c71f2a335e3a4fc328'
                    'bf5beb436012afca590b1a11466e2206')
    PEERS = []
class BitcoinDiamond(BitcoinSegwit, Coin):
    '''Bitcoin Diamond mainnet chain parameters.'''
    NAME = "BitcoinDiamond"
    SHORTNAME = "BCD"
    TX_VERSION = 12
    TX_COUNT = 274277819
    TX_COUNT_HEIGHT = 498678
    TX_PER_BLOCK = 50
    REORG_LIMIT = 1000
    PEERS = []
    # 1e7 smallest units per coin, unlike the base class's 1e8.
    VALUE_PER_COIN = 10000000
    DESERIALIZER = lib_tx.DeserializerBitcoinDiamondSegWit
class Emercoin(NameMixin, Coin):
    '''Emercoin mainnet: merged-mining-capable headers plus name-script
    handling (name data is stripped before hashing output scripts).'''
    NAME = "Emercoin"
    SHORTNAME = "EMC"
    NET = "mainnet"
    XPUB_VERBYTES = bytes.fromhex("0488b21e")
    XPRV_VERBYTES = bytes.fromhex("0488ade4")
    P2PKH_VERBYTE = bytes.fromhex("21")
    P2SH_VERBYTES = [bytes.fromhex("5c")]
    GENESIS_HASH = ('00000000bcccd459d036a588d1008fce'
                    '8da3754b205736f32ddfd35350e84c2d')
    TX_COUNT = 217380620
    TX_COUNT_HEIGHT = 464000
    TX_PER_BLOCK = 1700
    VALUE_PER_COIN = 1000000
    RPC_PORT = 6662
    DESERIALIZER = lib_tx.DeserializerEmercoin
    PEERS = []

    @classmethod
    def block_header(cls, block, height):
        '''Returns the block header given a block and its height.'''
        deserializer = cls.DESERIALIZER(block)
        # Merged-mined blocks need the deserializer to find the header end.
        if deserializer.is_merged_block():
            return deserializer.read_header(cls.BASIC_HEADER_SIZE)
        return block[:cls.static_header_len(height)]

    @classmethod
    def header_hash(cls, header):
        '''Given a header return hash'''
        return double_sha256(header[:cls.BASIC_HEADER_SIZE])

    @classmethod
    def hashX_from_script(cls, script):
        '''Return a hashX of the script with any name prefix stripped.'''
        address_script = cls.address_script_from_script(script)
        return super().hashX_from_script(address_script)

    @classmethod
    def address_script_from_script(cls, script):
        '''Strip a leading name operation (NAME_NEW / NAME_UPDATE /
        NAME_DELETE) from *script*, returning the plain address script.
        Returns the script unchanged if it carries no name operation or
        cannot be parsed.'''
        from electrumx.lib.script import _match_ops, Script, ScriptError
        try:
            ops = Script.get_ops(script)
        except ScriptError:
            return script
        match = _match_ops
        # Name opcodes
        OP_NAME_NEW = OpCodes.OP_1
        OP_NAME_UPDATE = OpCodes.OP_2
        OP_NAME_DELETE = OpCodes.OP_3
        # Opcode sequences for name operations
        # Script structure: https://git.io/fjuRu
        NAME_NEW_OPS = [OP_NAME_NEW, OpCodes.OP_DROP, -1, -1,
                        OpCodes.OP_2DROP, -1, OpCodes.OP_DROP]
        NAME_UPDATE_OPS = [OP_NAME_UPDATE, OpCodes.OP_DROP, -1, -1,
                           OpCodes.OP_2DROP, -1, OpCodes.OP_DROP]
        NAME_DELETE_OPS = [OP_NAME_DELETE, OpCodes.OP_DROP, -1,
                           OpCodes.OP_DROP]
        name_script_op_count = None
        # Detect name operations; determine count of opcodes.
        for name_ops in [NAME_NEW_OPS, NAME_UPDATE_OPS, NAME_DELETE_OPS]:
            if match(ops[:len(name_ops)], name_ops):
                name_script_op_count = len(name_ops)
                break
        if name_script_op_count is None:
            return script
        name_end_pos = cls.find_end_position_of_name(script, name_script_op_count)
        # Strip the name data to yield the address script
        address_script = script[name_end_pos:]
        return address_script
class BitcoinTestnetMixin(object):
    '''Shared parameters for the Bitcoin testnet variants below.'''
    SHORTNAME = "XTN"
    NET = "testnet"
    XPUB_VERBYTES = bytes.fromhex("043587cf")
    XPRV_VERBYTES = bytes.fromhex("04358394")
    P2PKH_VERBYTE = bytes.fromhex("6f")
    P2SH_VERBYTES = [bytes.fromhex("c4")]
    WIF_BYTE = bytes.fromhex("ef")
    GENESIS_HASH = ('000000000933ea01ad0ee984209779ba'
                    'aec3ced90fa3f408719526f8d77f4943')
    REORG_LIMIT = 8000
    TX_COUNT = 12242438
    TX_COUNT_HEIGHT = 1035428
    TX_PER_BLOCK = 21
    RPC_PORT = 18332
    PEER_DEFAULT_PORTS = {'t': '51001', 's': '51002'}
class BitcoinSVTestnet(BitcoinTestnetMixin, Coin):
    '''Bitcoin Testnet for Bitcoin SV daemons.'''
    NAME = "BitcoinSV"
    PEERS = [
        'electrontest.cascharia.com t51001 s51002',
    ]
class BitcoinSVScalingTestnet(BitcoinSVTestnet):
    '''Bitcoin SV scaling test network parameters.'''
    NET = "scalingtest"
    PEERS = [
        'stn-server.electrumsv.io t51001 s51002',
    ]
    TX_COUNT = 2015
    TX_COUNT_HEIGHT = 5711
    TX_PER_BLOCK = 5000
class BitcoinCashTestnet(BitcoinTestnetMixin, Coin):
    '''Bitcoin Testnet for Bitcoin Cash daemons.'''
    NAME = "BitcoinCashABC"
    PEERS = [
        'bch0.kister.net t s',
        'testnet.imaginary.cash t50001 s50002',
        'blackie.c3-soft.com t60001 s60002',
    ]
    # BCH uses canonical (lexicographic) transaction ordering.
    BLOCK_PROCESSOR = block_proc.LTORBlockProcessor

    @classmethod
    def warn_old_client_on_tx_broadcast(cls, client_ver):
        '''Warn Electron Cash clients older than 3.3.4 after a broadcast.'''
        if client_ver < (3, 3, 4):
            return ('<br/><br/>'
                    'Your transaction was successfully broadcast.<br/><br/>'
                    'However, you are using a VULNERABLE version of Electron Cash.<br/>'
                    'Download the latest version from this web site ONLY:<br/>'
                    'https://electroncash.org/'
                    '<br/><br/>')
        return False
class BitcoinSVRegtest(BitcoinSVTestnet):
    '''Bitcoin SV regtest chain parameters.'''
    NET = "regtest"
    GENESIS_HASH = ('0f9188f13cb7b2c71f2a335e3a4fc328'
                    'bf5beb436012afca590b1a11466e2206')
    PEERS = []
    TX_COUNT = 1
    TX_COUNT_HEIGHT = 1
class BitcoinSegwitTestnet(BitcoinTestnetMixin, Coin):
    '''Bitcoin Testnet for Core bitcoind >= 0.13.1.'''
    NAME = "BitcoinSegwit"
    DESERIALIZER = lib_tx.DeserializerSegWit
    CRASH_CLIENT_VER = (3, 2, 3)
    PEERS = [
        'testnet.hsmiths.com t53011 s53012',
        'hsmithsxurybd7uh.onion t53011 s53012',
        'testnet.qtornado.com s t',
        'testnet1.bauerj.eu t50001 s50002',
        'tn.not.fyi t55001 s55002',
        'bitcoin.cluelessperson.com s t',
    ]

    @classmethod
    def warn_old_client_on_tx_broadcast(cls, client_ver):
        '''Warn Electrum clients older than 3.3.3 after a broadcast.'''
        if client_ver < (3, 3, 3):
            return ('<br/><br/>'
                    'Your transaction was successfully broadcast.<br/><br/>'
                    'However, you are using a VULNERABLE version of Electrum.<br/>'
                    'Download the new version from the usual place:<br/>'
                    'https://electrum.org/'
                    '<br/><br/>')
        return False
class BitcoinSegwitRegtest(BitcoinSegwitTestnet):
    '''Bitcoin Core regtest chain parameters.'''
    NAME = "BitcoinSegwit"
    NET = "regtest"
    GENESIS_HASH = ('0f9188f13cb7b2c71f2a335e3a4fc328'
                    'bf5beb436012afca590b1a11466e2206')
    PEERS = []
    TX_COUNT = 1
    TX_COUNT_HEIGHT = 1
class BitcoinNolnet(BitcoinCash):
    '''Bitcoin Unlimited nolimit testnet.'''
    NET = "nolnet"
    GENESIS_HASH = ('0000000057e31bd2066c939a63b7b862'
                    '3bd0f10d8c001304bdfc1a7902ae6d35')
    PEERS = []
    REORG_LIMIT = 8000
    TX_COUNT = 583589
    TX_COUNT_HEIGHT = 8617
    TX_PER_BLOCK = 50
    RPC_PORT = 28332
    PEER_DEFAULT_PORTS = {'t': '52001', 's': '52002'}
class Litecoin(Coin):
    '''Litecoin mainnet chain parameters and default peers.'''
    NAME = "Litecoin"
    SHORTNAME = "LTC"
    NET = "mainnet"
    XPUB_VERBYTES = bytes.fromhex("0488b21e")
    XPRV_VERBYTES = bytes.fromhex("0488ade4")
    P2PKH_VERBYTE = bytes.fromhex("30")
    # Both the newer "M" and legacy "3" P2SH version bytes are accepted.
    P2SH_VERBYTES = [bytes.fromhex("32"), bytes.fromhex("05")]
    WIF_BYTE = bytes.fromhex("b0")
    GENESIS_HASH = ('12a765e31ffd4059bada1e25190f6e98'
                    'c99d9714d334efa41a195a7e7e04bfe2')
    DESERIALIZER = lib_tx.DeserializerSegWit
    TX_COUNT = 8908766
    TX_COUNT_HEIGHT = 1105256
    TX_PER_BLOCK = 10
    RPC_PORT = 9332
    REORG_LIMIT = 800
    PEERS = [
        'ex.lug.gs s444',
        'electrum-ltc.bysh.me s t',
        'electrum-ltc.ddns.net s t',
        'electrum-ltc.wilv.in s t',
        'electrum.cryptomachine.com p1000 s t',
        'electrum.ltc.xurious.com s t',
        'eywr5eubdbbe2laq.onion s50008 t50007',
    ]
class LitecoinTestnet(Litecoin):
    '''Litecoin testnet chain parameters.'''
    SHORTNAME = "XLT"
    NET = "testnet"
    XPUB_VERBYTES = bytes.fromhex("043587cf")
    XPRV_VERBYTES = bytes.fromhex("04358394")
    P2PKH_VERBYTE = bytes.fromhex("6f")
    P2SH_VERBYTES = [bytes.fromhex("3a"), bytes.fromhex("c4")]
    WIF_BYTE = bytes.fromhex("ef")
    GENESIS_HASH = ('4966625a4b2851d9fdee139e56211a0d'
                    '88575f59ed816ff5e6a63deb4e3e29a0')
    TX_COUNT = 21772
    TX_COUNT_HEIGHT = 20800
    TX_PER_BLOCK = 2
    RPC_PORT = 19332
    REORG_LIMIT = 4000
    PEER_DEFAULT_PORTS = {'t': '51001', 's': '51002'}
    PEERS = [
        'electrum-ltc.bysh.me s t',
        'electrum.ltc.xurious.com s t',
    ]
class LitecoinRegtest(LitecoinTestnet):
    '''Litecoin regtest chain parameters.'''
    NET = "regtest"
    GENESIS_HASH = ('530827f38f93b43ed12af0b3ad25a288'
                    'dc02ed74d6d7857862df51fc56c416f9')
    PEERS = []
    TX_COUNT = 1
    TX_COUNT_HEIGHT = 1
class BitcoinCashRegtest(BitcoinTestnetMixin, Coin):
    '''Bitcoin Cash regtest chain parameters.'''
    NAME = "BitcoinCashABC" # Some releases later remove the ABC suffix
    NET = "regtest"
    PEERS = []
    GENESIS_HASH = ('0f9188f13cb7b2c71f2a335e3a4fc328'
                    'bf5beb436012afca590b1a11466e2206')
    TX_COUNT = 1
    TX_COUNT_HEIGHT = 1
    BLOCK_PROCESSOR = block_proc.LTORBlockProcessor
class Viacoin(AuxPowMixin, Coin):
    '''Viacoin mainnet: AuxPoW headers with segwit transactions.'''
    NAME = "Viacoin"
    SHORTNAME = "VIA"
    NET = "mainnet"
    P2PKH_VERBYTE = bytes.fromhex("47")
    P2SH_VERBYTES = [bytes.fromhex("21")]
    WIF_BYTE = bytes.fromhex("c7")
    GENESIS_HASH = ('4e9b54001f9976049830128ec0331515'
                    'eaabe35a70970d79971da1539a400ba1')
    TX_COUNT = 113638
    TX_COUNT_HEIGHT = 3473674
    TX_PER_BLOCK = 30
    RPC_PORT = 5222
    REORG_LIMIT = 5000
    DESERIALIZER = lib_tx.DeserializerAuxPowSegWit
    PEERS = [
        'vialectrum.bitops.me s t',
        'server.vialectrum.org s t',
        'vialectrum.viacoin.net s t',
        'viax1.bitops.me s t',
    ]
class ViacoinTestnet(Viacoin):
    '''Viacoin testnet chain parameters.'''
    SHORTNAME = "TVI"
    NET = "testnet"
    P2PKH_VERBYTE = bytes.fromhex("7f")
    P2SH_VERBYTES = [bytes.fromhex("c4")]
    WIF_BYTE = bytes.fromhex("ff")
    GENESIS_HASH = ('00000007199508e34a9ff81e6ec0c477'
                    'a4cccff2a4767a8eee39c11db367b008')
    RPC_PORT = 25222
    REORG_LIMIT = 2500
    PEER_DEFAULT_PORTS = {'t': '51001', 's': '51002'}
    PEERS = [
        'vialectrum.bysh.me s t',
    ]
class ViacoinTestnetSegWit(ViacoinTestnet):
    '''Viacoin testnet served with the segwit deserializer.'''
    NET = "testnet-segwit"
    DESERIALIZER = lib_tx.DeserializerSegWit
# Source: https://github.com/GravityCoinOfficial/GravityCoin/
class GravityCoin(Coin):
    '''GravityCoin mainnet chain parameters.'''
    NAME = "GravityCoin"
    SHORTNAME = "GXX"
    NET = "mainnet"
    XPUB_VERBYTES = bytes.fromhex("0488b21e")
    XPRV_VERBYTES = bytes.fromhex("0488ade4")
    P2PKH_VERBYTE = bytes.fromhex("28")
    P2SH_VERBYTES = [bytes.fromhex("0a")]
    WIF_BYTE = bytes.fromhex("d2")
    GENESIS_HASH = ('322bad477efb4b33fa4b1f0b2861eaf543c61068da9898a95062fdb02ada486f')
    TX_COUNT = 446050
    TX_COUNT_HEIGHT = 547346
    TX_PER_BLOCK = 2
    PEER_DEFAULT_PORTS = {'t': '50001', 's': '50002'}
    RPC_PORT = 29200
    REORG_LIMIT = 5000
    PEERS = []
# Source: https://github.com/BitcoinZeroOfficial/bitcoinzero
class Bitcoinzero(Coin):
    '''Bitcoinzero mainnet chain parameters.'''
    NAME = "Bitcoinzero"
    SHORTNAME = "BZX"
    TX_COUNT = 43798
    TX_COUNT_HEIGHT = 44
    TX_PER_BLOCK = 576
    NET = "mainnet"
    # NOTE(review): this GENESIS_HASH is identical to GravityCoin's above —
    # confirm against the upstream repo that it is not a copy-paste error.
    GENESIS_HASH = '322bad477efb4b33fa4b1f0b2861eaf543c61068da9898a95062fdb02ada486f'
    XPUB_VERBYTES = bytes.fromhex("0488b21e")
    XPRV_VERBYTES = bytes.fromhex("0488ade4")
    P2PKH_VERBYTE = bytes.fromhex("4b")
    P2SH_VERBYTES = [bytes.fromhex("22")]
    WIF_BYTE = bytes.fromhex("d2")
    RPC_PORT = 29202
    REORG_LIMIT = 5000
    PEERS = []
class Unitus(Coin):
    '''Unitus mainnet chain parameters.'''
    NAME = "Unitus"
    SHORTNAME = "UIS"
    NET = "mainnet"
    XPUB_VERBYTES = bytes.fromhex("0488B21E")
    XPRV_VERBYTES = bytes.fromhex("0488ADE4")
    P2PKH_VERBYTE = bytes.fromhex("44")
    P2SH_VERBYTES = [bytes.fromhex("0A")]
    WIF_BYTE = bytes.fromhex("84")
    GENESIS_HASH = ('d8a2b2439d013a59f3bfc626a33487a3'
                    'd7d27e42a3c9e0b81af814cd8e592f31')
    DESERIALIZER = lib_tx.DeserializerSegWit
    TX_COUNT = 3484561
    TX_COUNT_HEIGHT = 1697605
    TX_PER_BLOCK = 3
    RPC_PORT = 50604
    REORG_LIMIT = 2000
    PEERS = [
        'electrumx.unituscurrency.com s t',
    ]
# Source: namecoin.org
class Namecoin(NameMixin, AuxPowMixin, Coin):
    '''Namecoin mainnet: AuxPoW headers plus name-operation scripts.

    Name operations are split off output scripts; the address part is
    indexed normally and the name part is indexed separately in a
    normalized form (see split_name_script).
    '''
    NAME = "Namecoin"
    SHORTNAME = "NMC"
    NET = "mainnet"
    XPUB_VERBYTES = bytes.fromhex("d7dd6370")
    XPRV_VERBYTES = bytes.fromhex("d7dc6e31")
    P2PKH_VERBYTE = bytes.fromhex("34")
    P2SH_VERBYTES = [bytes.fromhex("0d")]
    WIF_BYTE = bytes.fromhex("e4")
    GENESIS_HASH = ('000000000062b72c5e2ceb45fbc8587e'
                    '807c155b0da735e6483dfba2f0a9c770')
    TX_COUNT = 4415768
    TX_COUNT_HEIGHT = 329065
    TX_PER_BLOCK = 10
    RPC_PORT = 8336
    PEERS = [
        'ex.lug.gs s446',
        'luggscoqbymhvnkp.onion t82',
        'ulrichard.ch s50006 t50005',
    ]
    BLOCK_PROCESSOR = block_proc.NamecoinBlockProcessor

    @classmethod
    def split_name_script(cls, script):
        '''Return a (name_op_script, address_script) pair for *script*.

        name_op_script is None when the script carries no name operation
        (or cannot be parsed); otherwise it is a normalized NAME_UPDATE
        script built from the name field, terminated with OP_RETURN so it
        is never mistaken for a spendable output.'''
        from electrumx.lib.script import _match_ops, Script, ScriptError
        try:
            ops = Script.get_ops(script)
        except ScriptError:
            return None, script
        match = _match_ops
        # Name opcodes
        OP_NAME_NEW = OpCodes.OP_1
        OP_NAME_FIRSTUPDATE = OpCodes.OP_2
        OP_NAME_UPDATE = OpCodes.OP_3
        # Opcode sequences for name operations
        NAME_NEW_OPS = [OP_NAME_NEW, -1, OpCodes.OP_2DROP]
        NAME_FIRSTUPDATE_OPS = [OP_NAME_FIRSTUPDATE, -1, -1, -1,
                                OpCodes.OP_2DROP, OpCodes.OP_2DROP]
        NAME_UPDATE_OPS = [OP_NAME_UPDATE, -1, -1, OpCodes.OP_2DROP,
                           OpCodes.OP_DROP]
        name_script_op_count = None
        name_pushdata = None
        # Detect name operations; determine count of opcodes.
        # Also extract the name field -- we might use that for something in a
        # future version.
        if match(ops[:len(NAME_NEW_OPS)], NAME_NEW_OPS):
            name_script_op_count = len(NAME_NEW_OPS)
        elif match(ops[:len(NAME_FIRSTUPDATE_OPS)], NAME_FIRSTUPDATE_OPS):
            name_script_op_count = len(NAME_FIRSTUPDATE_OPS)
            name_pushdata = ops[1]
        elif match(ops[:len(NAME_UPDATE_OPS)], NAME_UPDATE_OPS):
            name_script_op_count = len(NAME_UPDATE_OPS)
            name_pushdata = ops[1]
        if name_script_op_count is None:
            return None, script
        name_end_pos = cls.find_end_position_of_name(script, name_script_op_count)
        # Strip the name data to yield the address script
        address_script = script[name_end_pos:]
        if name_pushdata is None:
            return None, address_script
        # Build the normalized name-op script: NAME_UPDATE <name> <empty>
        # 2DROP DROP RETURN.
        normalized_name_op_script = bytearray()
        normalized_name_op_script.append(OP_NAME_UPDATE)
        normalized_name_op_script.extend(Script.push_data(name_pushdata[1]))
        normalized_name_op_script.extend(Script.push_data(bytes([])))
        normalized_name_op_script.append(OpCodes.OP_2DROP)
        normalized_name_op_script.append(OpCodes.OP_DROP)
        normalized_name_op_script.append(OpCodes.OP_RETURN)
        return bytes(normalized_name_op_script), address_script

    @classmethod
    def hashX_from_script(cls, script):
        '''Return a hashX of the script with any name prefix stripped.'''
        _name_op_script, address_script = cls.split_name_script(script)
        return super().hashX_from_script(address_script)

    @classmethod
    def address_from_script(cls, script):
        '''Return the address for the script's address part, if any.'''
        _name_op_script, address_script = cls.split_name_script(script)
        return super().address_from_script(address_script)

    @classmethod
    def name_hashX_from_script(cls, script):
        '''Return a hashX of the normalized name-op script, or None when
        the script carries no name operation.'''
        name_op_script, _address_script = cls.split_name_script(script)
        if name_op_script is None:
            return None
        return super().hashX_from_script(name_op_script)
class NamecoinTestnet(Namecoin):
    '''Namecoin testnet chain parameters.'''
    NAME = "Namecoin"
    SHORTNAME = "XNM"
    NET = "testnet"
    P2PKH_VERBYTE = bytes.fromhex("6f")
    P2SH_VERBYTES = [bytes.fromhex("c4")]
    WIF_BYTE = bytes.fromhex("ef")
    GENESIS_HASH = ('00000007199508e34a9ff81e6ec0c477'
                    'a4cccff2a4767a8eee39c11db367b008')
class Dogecoin(AuxPowMixin, Coin):
    '''Dogecoin mainnet: AuxPoW (merge-mined) chain parameters.'''
    NAME = "Dogecoin"
    SHORTNAME = "DOGE"
    NET = "mainnet"
    XPUB_VERBYTES = bytes.fromhex("02facafd")
    XPRV_VERBYTES = bytes.fromhex("02fac398")
    P2PKH_VERBYTE = bytes.fromhex("1e")
    P2SH_VERBYTES = [bytes.fromhex("16")]
    WIF_BYTE = bytes.fromhex("9e")
    GENESIS_HASH = ('1a91e3dace36e2be3bf030a65679fe82'
                    '1aa1d6ef92e7c9902eb318182c355691')
    TX_COUNT = 27583427
    TX_COUNT_HEIGHT = 1604979
    TX_PER_BLOCK = 20
    REORG_LIMIT = 2000
class DogecoinTestnet(Dogecoin):
    '''Dogecoin testnet constant overrides.'''
    NAME = "Dogecoin"
    SHORTNAME = "XDT"
    NET = "testnet"
    P2PKH_VERBYTE = bytes.fromhex("71")
    P2SH_VERBYTES = [bytes.fromhex("c4")]
    WIF_BYTE = bytes.fromhex("f1")
    GENESIS_HASH = ('bb0a78264637406b6360aad926284d54'
                    '4d7049f45189db5664f3c4d07350559e')
# Source: https://github.com/motioncrypto/motion
class Motion(Coin):
    '''Motion mainnet; Dash-style daemon/sessions with x16r proof of work.'''
    NAME = "Motion"
    SHORTNAME = "XMN"
    NET = "mainnet"
    XPUB_VERBYTES = bytes.fromhex("0488B21E")
    XPRV_VERBYTES = bytes.fromhex("0488ADE4")
    GENESIS_HASH = ('000001e9dc60dd2618e91f7b90141349'
                    '22c374496b61c1a272519b1c39979d78')
    P2PKH_VERBYTE = bytes.fromhex("32")
    P2SH_VERBYTES = [bytes.fromhex("12")]
    TX_COUNT_HEIGHT = 54353
    TX_COUNT = 92701
    TX_PER_BLOCK = 4
    RPC_PORT = 3385
    SESSIONCLS = DashElectrumX
    DAEMON = daemon.DashDaemon

    @classmethod
    def header_hash(cls, header):
        '''Given a header return the hash.'''
        # Requires the third-party x16r_hash module at runtime.
        import x16r_hash
        return x16r_hash.getPoWHash(header)
# Source: https://github.com/dashpay/dash
class Dash(Coin):
    '''Dash mainnet; x11 proof of work with Dash-specific daemon,
    session class and transaction deserializer.'''
    NAME = "Dash"
    SHORTNAME = "DASH"
    NET = "mainnet"
    XPUB_VERBYTES = bytes.fromhex("02fe52cc")
    XPRV_VERBYTES = bytes.fromhex("02fe52f8")
    GENESIS_HASH = ('00000ffd590b1485b3caadc19b22e637'
                    '9c733355108f107a430458cdf3407ab6')
    P2PKH_VERBYTE = bytes.fromhex("4c")
    P2SH_VERBYTES = [bytes.fromhex("10")]
    WIF_BYTE = bytes.fromhex("cc")
    TX_COUNT_HEIGHT = 569399
    TX_COUNT = 2157510
    TX_PER_BLOCK = 4
    RPC_PORT = 9998
    PEERS = [
        'electrum.dash.org s t',
        'electrum.masternode.io s t',
        'electrum-drk.club s t',
        'dashcrypto.space s t',
        'electrum.dash.siampm.com s t',
        'wl4sfwq2hwxnodof.onion s t',
    ]
    SESSIONCLS = DashElectrumX
    DAEMON = daemon.DashDaemon
    DESERIALIZER = lib_tx_dash.DeserializerDash

    @classmethod
    def header_hash(cls, header):
        '''Given a header return the hash.'''
        # Requires the third-party x11_hash module at runtime.
        import x11_hash
        return x11_hash.getPoWHash(header)
class DashTestnet(Dash):
    '''Dash testnet constant overrides.'''
    SHORTNAME = "tDASH"
    NET = "testnet"
    XPUB_VERBYTES = bytes.fromhex("3a805837")
    XPRV_VERBYTES = bytes.fromhex("3a8061a0")
    GENESIS_HASH = ('00000bafbc94add76cb75e2ec9289483'
                    '7288a481e5c005f6563d91623bf8bc2c')
    P2PKH_VERBYTE = bytes.fromhex("8c")
    P2SH_VERBYTES = [bytes.fromhex("13")]
    WIF_BYTE = bytes.fromhex("ef")
    TX_COUNT_HEIGHT = 101619
    TX_COUNT = 132681
    TX_PER_BLOCK = 1
    RPC_PORT = 19998
    PEER_DEFAULT_PORTS = {'t': '51001', 's': '51002'}
    PEERS = [
        'electrum.dash.siampm.com s t',
        'dasht.random.re s54002 t54001',
    ]
class Argentum(AuxPowMixin, Coin):
    '''Argentum mainnet constants; uses aux-PoW headers.'''
    NAME = "Argentum"
    SHORTNAME = "ARG"
    NET = "mainnet"
    P2PKH_VERBYTE = bytes.fromhex("17")
    WIF_BYTE = bytes.fromhex("97")
    GENESIS_HASH = ('88c667bc63167685e4e4da058fffdfe8'
                    'e007e5abffd6855de52ad59df7bb0bb2')
    TX_COUNT = 2263089
    TX_COUNT_HEIGHT = 2050260
    TX_PER_BLOCK = 2000
    RPC_PORT = 13581
class ArgentumTestnet(Argentum):
    '''Argentum testnet constant overrides.'''
    SHORTNAME = "XRG"
    NET = "testnet"
    P2PKH_VERBYTE = bytes.fromhex("6f")
    P2SH_VERBYTES = [bytes.fromhex("c4")]
    WIF_BYTE = bytes.fromhex("ef")
    REORG_LIMIT = 2000
class DigiByte(Coin):
    '''DigiByte mainnet constants; SegWit transaction deserializer.'''
    NAME = "DigiByte"
    SHORTNAME = "DGB"
    NET = "mainnet"
    P2PKH_VERBYTE = bytes.fromhex("1E")
    GENESIS_HASH = ('7497ea1b465eb39f1c8f507bc877078f'
                    'e016d6fcb6dfad3a64c98dcc6e1e8496')
    DESERIALIZER = lib_tx.DeserializerSegWit
    TX_COUNT = 1046018
    TX_COUNT_HEIGHT = 1435000
    TX_PER_BLOCK = 1000
    RPC_PORT = 12022
class DigiByteTestnet(DigiByte):
    '''DigiByte testnet constant overrides.'''
    NET = "testnet"
    P2PKH_VERBYTE = bytes.fromhex("6f")
    P2SH_VERBYTES = [bytes.fromhex("c4")]
    WIF_BYTE = bytes.fromhex("ef")
    GENESIS_HASH = ('b5dca8039e300198e5fe7cd23bdd1728'
                    'e2a444af34c447dbd0916fa3430a68c2')
    RPC_PORT = 15022
    REORG_LIMIT = 2000
class FairCoin(Coin):
    '''FairCoin mainnet; custom 108-byte header layout (payload hash and
    creator id instead of bits/nonce).'''
    NAME = "FairCoin"
    SHORTNAME = "FAIR"
    NET = "mainnet"
    P2PKH_VERBYTE = bytes.fromhex("5f")
    P2SH_VERBYTES = [bytes.fromhex("24")]
    WIF_BYTE = bytes.fromhex("df")
    GENESIS_HASH = ('beed44fa5e96150d95d56ebd5d262578'
                    '1825a9407a5215dd7eda723373a0a1d7')
    BASIC_HEADER_SIZE = 108
    HEADER_VALUES = ('version', 'prev_block_hash', 'merkle_root',
                     'payload_hash', 'timestamp', 'creatorId')
    HEADER_UNPACK = struct.Struct('< I 32s 32s 32s I I').unpack_from
    TX_COUNT = 505
    TX_COUNT_HEIGHT = 470
    TX_PER_BLOCK = 1
    RPC_PORT = 40405
    PEER_DEFAULT_PORTS = {'t': '51811', 's': '51812'}
    PEERS = [
        'electrum.faircoin.world s',
        'electrumfair.punto0.org s',
    ]

    @classmethod
    def block(cls, raw_block, height):
        '''Return a Block namedtuple given a raw block and its height.'''
        if height > 0:
            return super().block(raw_block, height)
        else:
            # The genesis block is returned with an empty tx list.
            return Block(raw_block, cls.block_header(raw_block, height), [])
class Zcash(EquihashMixin, Coin):
    '''Zcash mainnet; Equihash headers, two-byte address version prefixes.'''
    NAME = "Zcash"
    SHORTNAME = "ZEC"
    NET = "mainnet"
    P2PKH_VERBYTE = bytes.fromhex("1CB8")
    P2SH_VERBYTES = [bytes.fromhex("1CBD")]
    GENESIS_HASH = ('00040fe8ec8471911baa1db1266ea15d'
                    'd06b4a8a5c453883c000b031973dce08')
    DESERIALIZER = lib_tx.DeserializerZcash
    TX_COUNT = 329196
    TX_COUNT_HEIGHT = 68379
    TX_PER_BLOCK = 5
    RPC_PORT = 8232
    REORG_LIMIT = 800
class ZcashTestnet(Zcash):
    '''Zcash testnet constant overrides.'''
    SHORTNAME = "TAZ"
    NET = "testnet"
    P2PKH_VERBYTE = bytes.fromhex("1D25")
    P2SH_VERBYTES = [bytes.fromhex("1CBA")]
    WIF_BYTE = bytes.fromhex("EF")
    GENESIS_HASH = ('05a60a92d99d85997cce3b87616c089f'
                    '6124d7342af37106edc76126334a2c38')
    TX_COUNT = 242312
    TX_COUNT_HEIGHT = 321685
    TX_PER_BLOCK = 2
    RPC_PORT = 18232
class SnowGem(EquihashMixin, Coin):
    '''SnowGem mainnet; Equihash headers, Zcash-style deserializer.'''
    NAME = "SnowGem"
    SHORTNAME = "XSG"
    NET = "mainnet"
    P2PKH_VERBYTE = bytes.fromhex("1C28")
    P2SH_VERBYTES = [bytes.fromhex("1C2D")]
    GENESIS_HASH = ('00068b35729d9d2b0c294ff1fe9af009'
                    '4740524311a131de40e7f705e4c29a5b')
    DESERIALIZER = lib_tx.DeserializerZcash
    TX_COUNT = 1680878
    TX_COUNT_HEIGHT = 627250
    TX_PER_BLOCK = 2
    RPC_PORT = 16112
    REORG_LIMIT = 800
    CHUNK_SIZE = 200
class BitcoinZ(EquihashMixin, Coin):
    '''BitcoinZ mainnet; Equihash headers, Zcash-style deserializer.'''
    NAME = "BitcoinZ"
    SHORTNAME = "BTCZ"
    NET = "mainnet"
    P2PKH_VERBYTE = bytes.fromhex("1CB8")
    P2SH_VERBYTES = [bytes.fromhex("1CBD")]
    GENESIS_HASH = ('f499ee3d498b4298ac6a64205b8addb7'
                    'c43197e2a660229be65db8a4534d75c1')
    DESERIALIZER = lib_tx.DeserializerZcash
    TX_COUNT = 171976
    TX_COUNT_HEIGHT = 81323
    TX_PER_BLOCK = 3
    RPC_PORT = 1979
    REORG_LIMIT = 800
class Hush(EquihashMixin, Coin):
    '''Hush mainnet; Equihash headers, Zcash-style deserializer.'''
    NAME = "Hush"
    SHORTNAME = "HUSH"
    NET = "mainnet"
    P2PKH_VERBYTE = bytes.fromhex("1CB8")
    P2SH_VERBYTES = [bytes.fromhex("1CBD")]
    GENESIS_HASH = ('0003a67bc26fe564b75daf11186d3606'
                    '52eb435a35ba3d9d3e7e5d5f8e62dc17')
    DESERIALIZER = lib_tx.DeserializerZcash
    TX_COUNT = 329196
    TX_COUNT_HEIGHT = 68379
    TX_PER_BLOCK = 5
    RPC_PORT = 8822
    REORG_LIMIT = 800
class ZelCash(EquihashMixin, Coin):
    '''ZelCash mainnet; Equihash headers, Zcash-style deserializer.'''
    NAME = "ZelCash"
    SHORTNAME = "ZEL"
    NET = "mainnet"
    P2PKH_VERBYTE = bytes.fromhex("1CB8")
    P2SH_VERBYTES = [bytes.fromhex("1CBD")]
    GENESIS_HASH = ('00052461a5006c2e3b74ce48992a0869'
                    '5607912d5604c3eb8da25749b0900444')
    DESERIALIZER = lib_tx.DeserializerZcash
    TX_COUNT = 450539
    TX_COUNT_HEIGHT = 167114
    TX_PER_BLOCK = 3
    RPC_PORT = 16124
    REORG_LIMIT = 800
class Zclassic(EquihashMixin, Coin):
    '''Zclassic mainnet; Equihash headers, Zcash-style deserializer.'''
    NAME = "Zclassic"
    SHORTNAME = "ZCL"
    NET = "mainnet"
    P2PKH_VERBYTE = bytes.fromhex("1CB8")
    P2SH_VERBYTES = [bytes.fromhex("1CBD")]
    GENESIS_HASH = ('0007104ccda289427919efc39dc9e4d4'
                    '99804b7bebc22df55f8b834301260602')
    DESERIALIZER = lib_tx.DeserializerZcash
    TX_COUNT = 329196
    TX_COUNT_HEIGHT = 68379
    TX_PER_BLOCK = 5
    RPC_PORT = 8023
    REORG_LIMIT = 800
class Koto(Coin):
    '''Koto mainnet; Zcash-style deserializer and two-byte version prefixes.'''
    NAME = "Koto"
    SHORTNAME = "KOTO"
    NET = "mainnet"
    P2PKH_VERBYTE = bytes.fromhex("1836")
    P2SH_VERBYTES = [bytes.fromhex("183B")]
    GENESIS_HASH = ('6d424c350729ae633275d51dc3496e16'
                    'cd1b1d195c164da00f39c499a2e9959e')
    DESERIALIZER = lib_tx.DeserializerZcash
    TX_COUNT = 158914
    TX_COUNT_HEIGHT = 67574
    TX_PER_BLOCK = 3
    RPC_PORT = 8432
    REORG_LIMIT = 800
    PEERS = [
        'fr.kotocoin.info s t',
        'electrum.kotocoin.info s t',
    ]
class KotoTestnet(Koto):
    '''Koto testnet constant overrides.'''
    SHORTNAME = "TOKO"
    NET = "testnet"
    P2PKH_VERBYTE = bytes.fromhex("18A4")
    P2SH_VERBYTES = [bytes.fromhex("1839")]
    WIF_BYTE = bytes.fromhex("EF")
    GENESIS_HASH = ('bf84afbde20c2d213b68b231ddb585ab'
                    '616ef7567226820f00d9b397d774d2f0')
    TX_COUNT = 91144
    TX_COUNT_HEIGHT = 89662
    TX_PER_BLOCK = 1
    RPC_PORT = 18432
    PEER_DEFAULT_PORTS = {'t': '51001', 's': '51002'}
    PEERS = [
        'testnet.kotocoin.info s t',
    ]
class Komodo(KomodoMixin, EquihashMixin, Coin):
    '''Komodo mainnet; address/genesis constants come from KomodoMixin.'''
    NAME = "Komodo"
    SHORTNAME = "KMD"
    NET = "mainnet"
    TX_COUNT = 693629
    TX_COUNT_HEIGHT = 491777
    TX_PER_BLOCK = 2
    RPC_PORT = 7771
    REORG_LIMIT = 800
    PEERS = []
class Monaize(KomodoMixin, EquihashMixin, Coin):
    '''Monaize mainnet; address/genesis constants come from KomodoMixin.'''
    NAME = "Monaize"
    SHORTNAME = "MNZ"
    NET = "mainnet"
    TX_COUNT = 256
    TX_COUNT_HEIGHT = 128
    TX_PER_BLOCK = 2
    RPC_PORT = 14337
    REORG_LIMIT = 800
    PEERS = []
class Einsteinium(Coin):
    '''Einsteinium mainnet constants; SegWit transaction deserializer.'''
    NAME = "Einsteinium"
    SHORTNAME = "EMC2"
    NET = "mainnet"
    P2PKH_VERBYTE = bytes.fromhex("21")
    WIF_BYTE = bytes.fromhex("b0")
    GENESIS_HASH = ('4e56204bb7b8ac06f860ff1c845f03f9'
                    '84303b5b97eb7b42868f714611aed94b')
    DESERIALIZER = lib_tx.DeserializerSegWit
    TX_COUNT = 2087559
    TX_COUNT_HEIGHT = 1358517
    TX_PER_BLOCK = 2
    RPC_PORT = 41879
    REORG_LIMIT = 2000
class Blackcoin(ScryptMixin, Coin):
    '''Blackcoin mainnet; scrypt header hashing, legacy RPC daemon.'''
    NAME = "Blackcoin"
    SHORTNAME = "BLK"
    NET = "mainnet"
    P2PKH_VERBYTE = bytes.fromhex("19")
    P2SH_VERBYTES = [bytes.fromhex("55")]
    WIF_BYTE = bytes.fromhex("99")
    GENESIS_HASH = ('000001faef25dec4fbcf906e6242621d'
                    'f2c183bf232f263d0ba5b101911e4563')
    DAEMON = daemon.LegacyRPCDaemon
    TX_COUNT = 4594999
    TX_COUNT_HEIGHT = 1667070
    TX_PER_BLOCK = 3
    RPC_PORT = 15715
    REORG_LIMIT = 5000
class Bitbay(ScryptMixin, Coin):
    '''Bitbay mainnet; scrypt header hashing.'''
    NAME = "Bitbay"
    SHORTNAME = "BAY"
    NET = "mainnet"
    P2PKH_VERBYTE = bytes.fromhex("19")
    P2SH_VERBYTES = [bytes.fromhex("55")]
    WIF_BYTE = bytes.fromhex("99")
    GENESIS_HASH = ('0000075685d3be1f253ce777174b1594'
                    '354e79954d2a32a6f77fe9cba00e6467')
    TX_COUNT = 4594999
    TX_COUNT_HEIGHT = 1667070
    TX_PER_BLOCK = 3
    RPC_PORT = 19914
    REORG_LIMIT = 5000
class DeepOnion(Coin):
    '''DeepOnion mainnet; x13 proof of work, tx-time deserializer,
    legacy RPC daemon.'''
    NAME = "DeepOnion"
    SHORTNAME = "ONION"
    NET = "mainnet"
    P2PKH_VERBYTE = bytes.fromhex("1F")
    P2SH_VERBYTES = [bytes.fromhex("4E")]
    WIF_BYTE = bytes.fromhex("9f")
    GENESIS_HASH = ('000004e29458ef4f2e0abab544737b07'
                    '344e6ff13718f7c2d12926166db07b5e')
    DESERIALIZER = lib_tx.DeserializerTxTime
    DAEMON = daemon.LegacyRPCDaemon
    TX_COUNT = 1194707
    TX_COUNT_HEIGHT = 530000
    TX_PER_BLOCK = 2
    RPC_PORT = 18580
    REORG_LIMIT = 200
    XPUB_VERBYTES = bytes.fromhex("0488B21E")
    XPRV_VERBYTES = bytes.fromhex("0488ADE4")
    PEERS = []

    @classmethod
    def header_hash(cls, header):
        '''
        Given a header return the hash for DeepOnion.
        Need to download `x13_hash` module
        Source code: https://github.com/MaruCoinOfficial/x13-hash
        '''
        import x13_hash
        return x13_hash.getPoWHash(header)
class Peercoin(Coin):
    '''Peercoin mainnet; tx-time deserializer and fixed fee estimates
    (daemon has no estimatefee support).'''
    NAME = "Peercoin"
    SHORTNAME = "PPC"
    NET = "mainnet"
    P2PKH_VERBYTE = bytes.fromhex("37")
    P2SH_VERBYTES = [bytes.fromhex("75")]
    WIF_BYTE = bytes.fromhex("b7")
    GENESIS_HASH = ('0000000032fe677166d54963b62a4677'
                    'd8957e87c508eaa4fd7eb1c880cd27e3')
    DESERIALIZER = lib_tx.DeserializerTxTime
    DAEMON = daemon.FakeEstimateFeeDaemon
    ESTIMATE_FEE = 0.01
    RELAY_FEE = 0.01
    TX_COUNT = 1207356
    TX_COUNT_HEIGHT = 306425
    TX_PER_BLOCK = 4
    RPC_PORT = 9902
    REORG_LIMIT = 5000
    PEERS = [
        "electrum.peercoinexplorer.net s"
    ]
    # Peercoin amounts have six decimal places, not eight.
    VALUE_PER_COIN = 1000000
class PeercoinTestnet(Peercoin):
    '''Peercoin testnet constant overrides.'''
    NAME = "PeercoinTestnet"
    SHORTNAME = "tPPC"
    NET = "testnet"
    P2PKH_VERBYTE = bytes.fromhex("6f")
    P2SH_VERBYTES = [bytes.fromhex("c4")]
    WIF_BYTE = bytes.fromhex("ef")
    GENESIS_HASH = ('00000001f757bb737f6596503e17cd17'
                    'b0658ce630cc727c0cca81aec47c9f06')
    ESTIMATE_FEE = 0.001
class Trezarcoin(Coin):
    '''Trezarcoin mainnet; blake2s header hashing with a distinct
    genesis-hash function.'''
    NAME = "Trezarcoin"
    SHORTNAME = "TZC"
    NET = "mainnet"
    # Six decimal places per coin.
    VALUE_PER_COIN = 1000000
    XPUB_VERBYTES = bytes.fromhex("0488B21E")
    XPRV_VERBYTES = bytes.fromhex("0488ADE4")
    P2PKH_VERBYTE = bytes.fromhex("42")
    P2SH_VERBYTES = [bytes.fromhex("08")]
    WIF_BYTE = bytes.fromhex("c2")
    GENESIS_HASH = ('24502ba55d673d2ee9170d83dae2d1ad'
                    'b3bfb4718e4f200db9951382cc4f6ee6')
    DESERIALIZER = lib_tx.DeserializerTrezarcoin
    HEADER_HASH = lib_tx.DeserializerTrezarcoin.blake2s
    HEADER_HASH_GEN = lib_tx.DeserializerTrezarcoin.blake2s_gen
    BASIC_HEADER_SIZE = 80
    TX_COUNT = 742886
    TX_COUNT_HEIGHT = 643128
    TX_PER_BLOCK = 2
    RPC_PORT = 17299
    REORG_LIMIT = 2000
    PEERS = [
        'electrumx1.trezarcoin.com s t',
    ]

    @classmethod
    def genesis_block(cls, block):
        '''Check the Genesis block is the right one for this coin.

        Return the block less its unspendable coinbase.
        '''
        header = cls.block_header(block, 0)
        # The genesis hash uses HEADER_HASH_GEN, not the regular hash.
        header_hex_hash = cls.HEADER_HASH_GEN(header)
        if header_hex_hash != cls.GENESIS_HASH:
            raise CoinError('genesis block has hash {} expected {}'
                            .format(header_hex_hash, cls.GENESIS_HASH))
        # Header plus a single zero byte.
        return header + bytes(1)

    @classmethod
    def header_hash(cls, header):
        '''Given a header return the hash.'''
        return cls.HEADER_HASH(header)
class Reddcoin(Coin):
    '''Reddcoin mainnet constants with its own transaction deserializer.'''
    NAME = "Reddcoin"
    SHORTNAME = "RDD"
    NET = "mainnet"
    P2PKH_VERBYTE = bytes.fromhex("3d")
    WIF_BYTE = bytes.fromhex("bd")
    GENESIS_HASH = ('b868e0d95a3c3c0e0dadc67ee587aaf9'
                    'dc8acbf99e3b4b3110fad4eb74c1decc')
    DESERIALIZER = lib_tx.DeserializerReddcoin
    TX_COUNT = 5413508
    TX_COUNT_HEIGHT = 1717382
    TX_PER_BLOCK = 3
    RPC_PORT = 45443
class TokenPay(ScryptMixin, Coin):
    '''TokenPay mainnet; scrypt header hashing, legacy RPC daemon.'''
    NAME = "TokenPay"
    SHORTNAME = "TPAY"
    NET = "mainnet"
    P2PKH_VERBYTE = bytes.fromhex("41")
    P2SH_VERBYTES = [bytes.fromhex("7e")]
    WIF_BYTE = bytes.fromhex("b3")
    GENESIS_HASH = ('000008b71ab32e585a23f0de642dc113'
                    '740144e94c0ece047751e9781f953ae9')
    DESERIALIZER = lib_tx.DeserializerTokenPay
    DAEMON = daemon.LegacyRPCDaemon
    TX_COUNT = 147934
    TX_COUNT_HEIGHT = 73967
    TX_PER_BLOCK = 100
    RPC_PORT = 8800
    REORG_LIMIT = 500
    XPUB_VERBYTES = bytes.fromhex("0488B21E")
    XPRV_VERBYTES = bytes.fromhex("0488ADE4")
    PEERS = [
        "electrum-us.tpay.ai s",
        "electrum-eu.tpay.ai s",
    ]
class Vertcoin(Coin):
    '''Vertcoin mainnet constants; SegWit transaction deserializer.'''
    NAME = "Vertcoin"
    SHORTNAME = "VTC"
    NET = "mainnet"
    XPUB_VERBYTES = bytes.fromhex("0488B21E")
    XPRV_VERBYTES = bytes.fromhex("0488ADE4")
    P2PKH_VERBYTE = bytes.fromhex("47")
    GENESIS_HASH = ('4d96a915f49d40b1e5c2844d1ee2dccb'
                    '90013a990ccea12c492d22110489f0c4')
    DESERIALIZER = lib_tx.DeserializerSegWit
    TX_COUNT = 2383423
    TX_COUNT_HEIGHT = 759076
    TX_PER_BLOCK = 3
    RPC_PORT = 5888
    REORG_LIMIT = 1000
class Monacoin(Coin):
    '''Monacoin mainnet; SegWit deserializer, two accepted P2SH version
    bytes, and a peer blacklist URL.'''
    NAME = "Monacoin"
    SHORTNAME = "MONA"
    NET = "mainnet"
    XPUB_VERBYTES = bytes.fromhex("0488B21E")
    XPRV_VERBYTES = bytes.fromhex("0488ADE4")
    P2PKH_VERBYTE = bytes.fromhex("32")
    P2SH_VERBYTES = [bytes.fromhex("37"), bytes.fromhex("05")]
    WIF_BYTE = bytes.fromhex("B0")
    GENESIS_HASH = ('ff9f1c0116d19de7c9963845e129f9ed'
                    '1bfc0b376eb54fd7afa42e0d418c8bb6')
    DESERIALIZER = lib_tx.DeserializerSegWit
    TX_COUNT = 2568580
    TX_COUNT_HEIGHT = 1029766
    TX_PER_BLOCK = 2
    RPC_PORT = 9402
    REORG_LIMIT = 1000
    BLACKLIST_URL = 'https://electrum-mona.org/blacklist.json'
    PEERS = [
        'electrumx.tamami-foundation.org s t',
        'electrumx2.tamami-foundation.org s t',
        'electrumx3.tamami-foundation.org s t',
        'electrumx2.monacoin.nl s t',
        'electrumx3.monacoin.nl s t',
        'electrumx1.monacoin.ninja s t',
        'electrumx2.monacoin.ninja s t',
        'electrumx2.movsign.info s t',
        'electrum-mona.bitbank.cc s t',
        'ri7rzlmdaf4eqbza.onion s t',
    ]
class MonacoinTestnet(Monacoin):
    '''Monacoin testnet constant overrides.'''
    SHORTNAME = "XMN"
    NET = "testnet"
    XPUB_VERBYTES = bytes.fromhex("043587CF")
    XPRV_VERBYTES = bytes.fromhex("04358394")
    P2PKH_VERBYTE = bytes.fromhex("6F")
    P2SH_VERBYTES = [bytes.fromhex("75"), bytes.fromhex("C4")]
    WIF_BYTE = bytes.fromhex("EF")
    GENESIS_HASH = ('a2b106ceba3be0c6d097b2a6a6aacf9d'
                    '638ba8258ae478158f449c321061e0b2')
    TX_COUNT = 83602
    TX_COUNT_HEIGHT = 83252
    TX_PER_BLOCK = 1
    RPC_PORT = 19402
    REORG_LIMIT = 1000
    PEER_DEFAULT_PORTS = {'t': '51001', 's': '51002'}
    PEERS = [
        'electrumx1.testnet.monacoin.ninja s t',
        'electrumx1.testnet.monacoin.nl s t',
    ]
class Crown(AuxPowMixin, Coin):
    '''Crown mainnet; aux-PoW headers.  Note only P2SH version bytes are
    overridden here; P2PKH comes from the base class.'''
    NAME = "Crown"
    SHORTNAME = "CRW"
    NET = "mainnet"
    XPUB_VERBYTES = bytes.fromhex("0488b21e")
    XPRV_VERBYTES = bytes.fromhex("0488ade4")
    P2SH_VERBYTES = [bytes.fromhex("1c")]
    GENESIS_HASH = ('0000000085370d5e122f64f4ab19c686'
                    '14ff3df78c8d13cb814fd7e69a1dc6da')
    TX_COUNT = 13336629
    TX_COUNT_HEIGHT = 1268206
    TX_PER_BLOCK = 10
    RPC_PORT = 9341
    REORG_LIMIT = 1000
    PEERS = [
        'sgp-crwseed.crowndns.info s t',
        'blr-crwseed.crowndns.info s t',
        'sfo-crwseed.crowndns.info s t',
        'nyc-crwseed.crowndns.info s t',
        'ams-crwseed.crowndns.info s t',
        'tor-crwseed.crowndns.info s t',
        'lon-crwseed.crowndns.info s t',
        'fra-crwseed.crowndns.info s t',
    ]
class Fujicoin(Coin):
    '''Fujicoin mainnet constants; SegWit transaction deserializer.'''
    NAME = "Fujicoin"
    SHORTNAME = "FJC"
    NET = "mainnet"
    XPUB_VERBYTES = bytes.fromhex("0488b21e")
    XPRV_VERBYTES = bytes.fromhex("0488ade4")
    P2PKH_VERBYTE = bytes.fromhex("24")
    P2SH_VERBYTES = [bytes.fromhex("10")]
    WIF_BYTE = bytes.fromhex("a4")
    GENESIS_HASH = ('adb6d9cfd74075e7f91608add4bd2a2e'
                    'a636f70856183086842667a1597714a0')
    DESERIALIZER = lib_tx.DeserializerSegWit
    TX_COUNT = 170478
    TX_COUNT_HEIGHT = 1521676
    TX_PER_BLOCK = 1
    RPC_PORT = 3776
    REORG_LIMIT = 1000
class Neblio(ScryptMixin, Coin):
    '''Neblio mainnet; scrypt header hashing.'''
    NAME = "Neblio"
    SHORTNAME = "NEBL"
    NET = "mainnet"
    XPUB_VERBYTES = bytes.fromhex("0488b21e")
    XPRV_VERBYTES = bytes.fromhex("0488ade4")
    P2PKH_VERBYTE = bytes.fromhex("35")
    P2SH_VERBYTES = [bytes.fromhex("70")]
    GENESIS_HASH = ('7286972be4dbc1463d256049b7471c25'
                    '2e6557e222cab9be73181d359cd28bcc')
    TX_COUNT = 23675
    TX_COUNT_HEIGHT = 22785
    TX_PER_BLOCK = 1
    RPC_PORT = 6326
    REORG_LIMIT = 1000
class Bitzeny(Coin):
    '''Bitzeny mainnet; yespower-0.5 proof of work, SegWit deserializer.'''
    NAME = "Bitzeny"
    SHORTNAME = "ZNY"
    NET = "mainnet"
    XPUB_VERBYTES = bytes.fromhex("0488b21e")
    XPRV_VERBYTES = bytes.fromhex("0488ade4")
    P2PKH_VERBYTE = bytes.fromhex("51")
    GENESIS_HASH = ('000009f7e55e9e3b4781e22bd87a7cfa'
                    '4acada9e4340d43ca738bf4e9fb8f5ce')
    DESERIALIZER = lib_tx.DeserializerSegWit
    TX_COUNT = 1408733
    TX_COUNT_HEIGHT = 1015115
    TX_PER_BLOCK = 1
    RPC_PORT = 9252
    REORG_LIMIT = 1000

    @classmethod
    def header_hash(cls, header):
        '''Given a header return the hash.'''
        # Requires the third-party zny_yespower_0_5 module at runtime.
        import zny_yespower_0_5
        return zny_yespower_0_5.getPoWHash(header)
class CanadaeCoin(AuxPowMixin, Coin):
    '''CanadaeCoin mainnet; aux-PoW headers, fixed fee estimates.'''
    NAME = "CanadaeCoin"
    SHORTNAME = "CDN"
    NET = "mainnet"
    XPUB_VERBYTES = bytes.fromhex("0488b21e")
    XPRV_VERBYTES = bytes.fromhex("0488ade4")
    P2PKH_VERBYTE = bytes.fromhex("1C")
    WIF_BYTE = bytes.fromhex("9c")
    GENESIS_HASH = ('863626dadaef221e2e2f30ff3dacae44'
                    'cabdae9e0028058072181b3fb675d94a')
    ESTIMATE_FEE = 0.0001
    RELAY_FEE = 0.0001
    DAEMON = daemon.FakeEstimateFeeDaemon
    TX_COUNT = 3455905
    TX_COUNT_HEIGHT = 3645419
    TX_PER_BLOCK = 1
    RPC_PORT = 34330
    REORG_LIMIT = 1000
class Denarius(Coin):
    '''Denarius mainnet; tribus proof of work, tx-time deserializer,
    fixed fee estimates.'''
    NAME = "Denarius"
    SHORTNAME = "DNR"
    NET = "mainnet"
    XPUB_VERBYTES = bytes.fromhex("0488b21e")
    XPRV_VERBYTES = bytes.fromhex("0488ade4")
    P2PKH_VERBYTE = bytes.fromhex("1E")  # Address starts with a D
    P2SH_VERBYTES = [bytes.fromhex("5A")]
    WIF_BYTE = bytes.fromhex("9E")  # WIF starts with a 6
    GENESIS_HASH = ('00000d5dbbda01621cfc16bbc1f9bf32'
                    '64d641a5dbf0de89fd0182c2c4828fcd')
    DESERIALIZER = lib_tx.DeserializerTxTime
    TX_COUNT = 4230
    RPC_PORT = 32339
    ESTIMATE_FEE = 0.00001
    RELAY_FEE = 0.00001
    DAEMON = daemon.FakeEstimateFeeDaemon
    TX_COUNT_HEIGHT = 306187
    TX_PER_BLOCK = 4000

    @classmethod
    def header_hash(cls, header):
        '''Given a header return the hash.'''
        # Requires the third-party tribus_hash module at runtime.
        import tribus_hash
        return tribus_hash.getPoWHash(header)
class DenariusTestnet(Denarius):
    '''Denarius testnet constant overrides.'''
    NET = "testnet"
    XPUB_VERBYTES = bytes.fromhex("043587cf")
    XPRV_VERBYTES = bytes.fromhex("04358394")
    P2PKH_VERBYTE = bytes.fromhex("12")
    P2SH_VERBYTES = [bytes.fromhex("74")]
    WIF_BYTE = bytes.fromhex("ef")
    GENESIS_HASH = ('000086bfe8264d241f7f8e5393f74778'
                    '4b8ca2aa98bdd066278d590462a4fdb4')
    RPC_PORT = 32338
    REORG_LIMIT = 2000
class Sibcoin(Dash):
    '''Sibcoin mainnet; Dash-derived but with x11-gost proof of work.'''
    NAME = "Sibcoin"
    SHORTNAME = "SIB"
    NET = "mainnet"
    XPUB_VERBYTES = bytes.fromhex("0488b21e")
    XPRV_VERBYTES = bytes.fromhex("0488ade4")
    P2PKH_VERBYTE = bytes.fromhex("3F")
    P2SH_VERBYTES = [bytes.fromhex("28")]
    WIF_BYTE = bytes.fromhex("80")
    GENESIS_HASH = ('00000c492bf73490420868bc577680bf'
                    'c4c60116e7e85343bc624787c21efa4c')
    DAEMON = daemon.DashDaemon
    TX_COUNT = 1000
    TX_COUNT_HEIGHT = 10000
    TX_PER_BLOCK = 1
    RPC_PORT = 1944
    REORG_LIMIT = 1000
    PEERS = []

    @classmethod
    def header_hash(cls, header):
        '''
        Given a header return the hash for sibcoin.
        Need to download `x11_gost_hash` module
        Source code: https://github.com/ivansib/x11_gost_hash
        '''
        import x11_gost_hash
        return x11_gost_hash.getPoWHash(header)
class Chips(Coin):
    '''Chips mainnet constants; SegWit transaction deserializer.'''
    NAME = "Chips"
    SHORTNAME = "CHIPS"
    NET = "mainnet"
    P2PKH_VERBYTE = bytes.fromhex("3c")
    P2SH_VERBYTES = [bytes.fromhex("55")]
    WIF_BYTE = bytes.fromhex("bc")
    GENESIS_HASH = ('0000006e75f6aa0efdbf7db03132aa4e'
                    '4d0c84951537a6f5a7c39a0a9d30e1e7')
    DESERIALIZER = lib_tx.DeserializerSegWit
    TX_COUNT = 145290
    TX_COUNT_HEIGHT = 318637
    TX_PER_BLOCK = 2
    RPC_PORT = 57776
    REORG_LIMIT = 800
class Feathercoin(Coin):
    '''Feathercoin mainnet constants; SegWit transaction deserializer.'''
    NAME = "Feathercoin"
    SHORTNAME = "FTC"
    NET = "mainnet"
    XPUB_VERBYTES = bytes.fromhex("0488BC26")
    XPRV_VERBYTES = bytes.fromhex("0488DAEE")
    P2PKH_VERBYTE = bytes.fromhex("0E")
    WIF_BYTE = bytes.fromhex("8E")
    GENESIS_HASH = ('12a765e31ffd4059bada1e25190f6e98'
                    'c99d9714d334efa41a195a7e7e04bfe2')
    DESERIALIZER = lib_tx.DeserializerSegWit
    TX_COUNT = 3170843
    TX_COUNT_HEIGHT = 1981777
    TX_PER_BLOCK = 2
    RPC_PORT = 9337
    REORG_LIMIT = 2000
    PEERS = [
        'electrumx-ch-1.feathercoin.ch s t',
    ]
class UFO(Coin):
    '''UniformFiscalObject mainnet constants; SegWit deserializer.'''
    NAME = "UniformFiscalObject"
    SHORTNAME = "UFO"
    NET = "mainnet"
    XPUB_VERBYTES = bytes.fromhex("0488B21E")
    XPRV_VERBYTES = bytes.fromhex("0488ADE4")
    P2PKH_VERBYTE = bytes.fromhex("1B")
    P2SH_VERBYTES = [bytes.fromhex("44")]
    WIF_BYTE = bytes.fromhex("9B")
    GENESIS_HASH = ('ba1d39b4928ab03d813d952daf65fb77'
                    '97fcf538a9c1b8274f4edc8557722d13')
    DESERIALIZER = lib_tx.DeserializerSegWit
    TX_COUNT = 1608926
    TX_COUNT_HEIGHT = 1300154
    TX_PER_BLOCK = 2
    RPC_PORT = 9888
    REORG_LIMIT = 2000
    PEERS = [
        'electrumx1.ufobject.com s t',
    ]
class Newyorkcoin(AuxPowMixin, Coin):
    '''Newyorkcoin mainnet constants; uses aux-PoW headers.'''
    NAME = "Newyorkcoin"
    SHORTNAME = "NYC"
    NET = "mainnet"
    P2PKH_VERBYTE = bytes.fromhex("3c")
    P2SH_VERBYTES = [bytes.fromhex("16")]
    WIF_BYTE = bytes.fromhex("bc")
    GENESIS_HASH = ('5597f25c062a3038c7fd815fe46c67de'
                    'dfcb3c839fbc8e01ed4044540d08fe48')
    TX_COUNT = 5161944
    TX_COUNT_HEIGHT = 3948743
    TX_PER_BLOCK = 2
    REORG_LIMIT = 2000
class NewyorkcoinTestnet(Newyorkcoin):
    '''Newyorkcoin testnet constant overrides.'''
    SHORTNAME = "tNYC"
    NET = "testnet"
    P2PKH_VERBYTE = bytes.fromhex("71")
    P2SH_VERBYTES = [bytes.fromhex("c4")]
    WIF_BYTE = bytes.fromhex("f1")
    GENESIS_HASH = ('24463e4d3c625b0a9059f309044c2cf0'
                    'd7e196cf2a6ecce901f24f681be33c8f')
    TX_COUNT = 5161944
    TX_COUNT_HEIGHT = 3948743
    TX_PER_BLOCK = 2
    REORG_LIMIT = 2000
class Bitcore(BitcoinMixin, Coin):
    '''Bitcore mainnet; Bitcoin-derived constants with SegWit support.'''
    NAME = "Bitcore"
    SHORTNAME = "BTX"
    P2PKH_VERBYTE = bytes.fromhex("03")
    P2SH_VERBYTES = [bytes.fromhex("7D")]
    DESERIALIZER = lib_tx.DeserializerSegWit
    GENESIS_HASH = ('604148281e5c4b7f2487e5d03cd60d8e'
                    '6f69411d613f6448034508cea52e9574')
    TX_COUNT = 126979
    TX_COUNT_HEIGHT = 126946
    TX_PER_BLOCK = 2
    RPC_PORT = 8556
    PEERS = [
        'ele1.bitcore.cc s t',
        'ele2.bitcore.cc s t',
        'ele3.bitcore.cc s t',
        'ele4.bitcore.cc s t'
    ]
class GameCredits(Coin):
    '''GameCredits mainnet constants; SegWit transaction deserializer.'''
    NAME = "GameCredits"
    SHORTNAME = "GAME"
    NET = "mainnet"
    P2PKH_VERBYTE = bytes.fromhex("26")
    WIF_BYTE = bytes.fromhex("a6")
    GENESIS_HASH = ('91ec5f25ee9a0ffa1af7d4da4db9a552'
                    '228dd2dc77cdb15b738be4e1f55f30ee')
    DESERIALIZER = lib_tx.DeserializerSegWit
    TX_COUNT = 316796
    TX_COUNT_HEIGHT = 2040250
    TX_PER_BLOCK = 2
    RPC_PORT = 40001
    REORG_LIMIT = 1000
class Machinecoin(Coin):
    '''Machinecoin mainnet; SegWit deserializer, two accepted P2SH
    version bytes.'''
    NAME = "Machinecoin"
    SHORTNAME = "MAC"
    NET = "mainnet"
    XPUB_VERBYTES = bytes.fromhex("0488b21e")
    XPRV_VERBYTES = bytes.fromhex("0488ade4")
    P2PKH_VERBYTE = bytes.fromhex("32")
    P2SH_VERBYTES = [bytes.fromhex("26"), bytes.fromhex("05")]
    WIF_BYTE = bytes.fromhex("b2")
    GENESIS_HASH = ('6a1f879bcea5471cbfdee1fd0cb2ddcc'
                    '4fed569a500e352d41de967703e83172')
    DESERIALIZER = lib_tx.DeserializerSegWit
    TX_COUNT = 137641
    TX_COUNT_HEIGHT = 513020
    TX_PER_BLOCK = 2
    RPC_PORT = 40332
    REORG_LIMIT = 800
class BitcoinAtom(Coin):
    '''BitcoinAtom mainnet; post-fork headers carry 4 extra flag bytes
    and proof-of-stake blocks hash with the flag bytes appended.'''
    NAME = "BitcoinAtom"
    SHORTNAME = "BCA"
    NET = "mainnet"
    P2PKH_VERBYTE = bytes.fromhex("17")
    P2SH_VERBYTES = [bytes.fromhex("0a")]
    STATIC_BLOCK_HEADERS = False
    DESERIALIZER = lib_tx.DeserializerBitcoinAtom
    # Post-fork headers are the basic 80 bytes plus a 4-byte flags field.
    HEADER_SIZE_POST_FORK = 84
    BLOCK_PROOF_OF_STAKE = 0x01
    BLOCK_PROOF_OF_STAKE_FLAGS = b'\x01\x00\x00\x00'
    TX_COUNT = 295158744
    TX_COUNT_HEIGHT = 589197
    TX_PER_BLOCK = 10
    RPC_PORT = 9136
    REORG_LIMIT = 5000

    @classmethod
    def header_hash(cls, header):
        '''Given a header return hash'''
        header_to_be_hashed = header[:cls.BASIC_HEADER_SIZE]
        # New block header format has some extra flags in the end
        if len(header) == cls.HEADER_SIZE_POST_FORK:
            flags, = util.unpack_le_uint32_from(header, len(header) - 4)
            # Proof-of-stake blocks have special serialization: the flag
            # bytes are appended before hashing.  (Original comment said
            # "proof of work", but the flag tested is BLOCK_PROOF_OF_STAKE.)
            if flags & cls.BLOCK_PROOF_OF_STAKE != 0:
                header_to_be_hashed += cls.BLOCK_PROOF_OF_STAKE_FLAGS
        return double_sha256(header_to_be_hashed)

    @classmethod
    def block_header(cls, block, height):
        '''Return the block header bytes'''
        deserializer = cls.DESERIALIZER(block)
        return deserializer.read_header(height, cls.BASIC_HEADER_SIZE)
class Decred(Coin):
    '''Decred mainnet; blake256 header hashing, blake256d base58
    checksums, 180-byte headers with staking fields, and a custom
    daemon and block processor.'''
    NAME = "Decred"
    SHORTNAME = "DCR"
    NET = "mainnet"
    XPUB_VERBYTES = bytes.fromhex("02fda926")
    XPRV_VERBYTES = bytes.fromhex("02fda4e8")
    P2PKH_VERBYTE = bytes.fromhex("073f")
    P2SH_VERBYTES = [bytes.fromhex("071a")]
    WIF_BYTE = bytes.fromhex("22de")
    GENESIS_HASH = ('298e5cc3d985bfe7f81dc135f360abe0'
                    '89edd4396b86d2de66b0cef42b21d980')
    BASIC_HEADER_SIZE = 180
    HEADER_HASH = lib_tx.DeserializerDecred.blake256
    DESERIALIZER = lib_tx.DeserializerDecred
    DAEMON = daemon.DecredDaemon
    BLOCK_PROCESSOR = block_proc.DecredBlockProcessor
    # Base58Check here uses double-blake256, not double-sha256.
    ENCODE_CHECK = partial(Base58.encode_check,
                           hash_fn=lib_tx.DeserializerDecred.blake256d)
    DECODE_CHECK = partial(Base58.decode_check,
                           hash_fn=lib_tx.DeserializerDecred.blake256d)
    HEADER_VALUES = ('version', 'prev_block_hash', 'merkle_root', 'stake_root',
                     'vote_bits', 'final_state', 'voters', 'fresh_stake',
                     'revocations', 'pool_size', 'bits', 'sbits',
                     'block_height', 'size', 'timestamp', 'nonce',
                     'extra_data', 'stake_version')
    HEADER_UNPACK = struct.Struct(
        '< i 32s 32s 32s H 6s H B B I I Q I I I I 32s I').unpack_from
    TX_COUNT = 4629388
    TX_COUNT_HEIGHT = 260628
    TX_PER_BLOCK = 17
    REORG_LIMIT = 1000
    RPC_PORT = 9109

    @classmethod
    def header_hash(cls, header):
        '''Given a header return the hash.'''
        return cls.HEADER_HASH(header)

    @classmethod
    def block(cls, raw_block, height):
        '''Return a Block namedtuple given a raw block and its height.'''
        if height > 0:
            return super().block(raw_block, height)
        else:
            # The genesis block is returned with an empty tx list.
            return Block(raw_block, cls.block_header(raw_block, height), [])
class DecredTestnet(Decred):
    '''Decred testnet constant overrides.'''
    SHORTNAME = "tDCR"
    NET = "testnet"
    XPUB_VERBYTES = bytes.fromhex("043587d1")
    XPRV_VERBYTES = bytes.fromhex("04358397")
    P2PKH_VERBYTE = bytes.fromhex("0f21")
    P2SH_VERBYTES = [bytes.fromhex("0efc")]
    WIF_BYTE = bytes.fromhex("230e")
    GENESIS_HASH = (
        'a649dce53918caf422e9c711c858837e08d626ecfcd198969b24f7b634a49bac')
    BASIC_HEADER_SIZE = 180
    ALLOW_ADVANCING_ERRORS = True
    TX_COUNT = 217380620
    TX_COUNT_HEIGHT = 464000
    TX_PER_BLOCK = 1800
    REORG_LIMIT = 1000
    RPC_PORT = 19109
class Axe(Dash):
    '''Axe mainnet; Dash-derived, x11 proof of work.'''
    NAME = "Axe"
    SHORTNAME = "AXE"
    NET = "mainnet"
    XPUB_VERBYTES = bytes.fromhex("02fe52cc")
    XPRV_VERBYTES = bytes.fromhex("02fe52f8")
    P2PKH_VERBYTE = bytes.fromhex("37")
    P2SH_VERBYTES = [bytes.fromhex("10")]
    WIF_BYTE = bytes.fromhex("cc")
    GENESIS_HASH = ('00000c33631ca6f2f61368991ce2dc03'
                    '306b5bb50bf7cede5cfbba6db38e52e6')
    SESSIONCLS = DashElectrumX
    DAEMON = daemon.DashDaemon
    DESERIALIZER = lib_tx_dash.DeserializerDash
    TX_COUNT = 18405
    TX_COUNT_HEIGHT = 30237
    TX_PER_BLOCK = 1
    RPC_PORT = 9337
    REORG_LIMIT = 1000
    PEERS = []

    @classmethod
    def header_hash(cls, header):
        '''
        Given a header return the hash for AXE.
        Need to download `axe_hash` module
        Source code: https://github.com/AXErunners/axe_hash
        '''
        # NOTE(review): docstring mentions axe_hash but the code imports
        # x11_hash -- presumably the algorithms match; confirm.
        import x11_hash
        return x11_hash.getPoWHash(header)
class Xuez(Coin):
    '''Xuez mainnet; xevan proof of work over 112-byte headers (version-1
    headers hash only the first 80 bytes).'''
    NAME = "Xuez"
    SHORTNAME = "XUEZ"
    NET = "mainnet"
    XPUB_VERBYTES = bytes.fromhex("022d2533")
    XPRV_VERBYTES = bytes.fromhex("0221312b")
    P2PKH_VERBYTE = bytes.fromhex("48")
    P2SH_VERBYTES = [bytes.fromhex("12")]
    WIF_BYTE = bytes.fromhex("d4")
    GENESIS_HASH = ('000000e1febc39965b055e8e0117179a'
                    '4d18e24e7aaa0c69864c4054b4f29445')
    TX_COUNT = 30000
    TX_COUNT_HEIGHT = 15000
    TX_PER_BLOCK = 1
    RPC_PORT = 41799
    REORG_LIMIT = 1000
    BASIC_HEADER_SIZE = 112
    PEERS = []

    @classmethod
    def header_hash(cls, header):
        '''
        Given a header return the hash for Xuez.
        Need to download `xevan_hash` module
        Source code: https://github.com/xuez/xuez
        '''
        version, = util.unpack_le_uint32_from(header)
        import xevan_hash
        if version == 1:
            # Version-1 headers hash only the basic 80 bytes.
            return xevan_hash.getPoWHash(header[:80])
        else:
            return xevan_hash.getPoWHash(header)
# Source: https://github.com/odinblockchain/odin
class Odin(Coin):
    '''ODIN mainnet; headers grew from 80 to 112 bytes at height
    HDR_V4_HEIGHT, and pre-v4 headers hash with quark.'''
    NAME = "ODIN"
    SHORTNAME = "ODIN"
    NET = "mainnet"
    XPUB_VERBYTES = bytes.fromhex("27561872")
    XPRV_VERBYTES = bytes.fromhex("27256746")
    P2PKH_VERBYTE = bytes.fromhex("73")
    P2SH_VERBYTES = [bytes.fromhex("39")]
    WIF_BYTE = bytes.fromhex("8a")
    GENESIS_HASH = ('31ca29566549e444cf227a0e2e067aed'
                    '847c2acc541d3bbf9ca1ae89f4fd57d7')
    TX_COUNT = 340000
    TX_COUNT_HEIGHT = 340000
    TX_PER_BLOCK = 2
    RPC_PORT = 22101
    REORG_LIMIT = 100
    BASIC_HEADER_SIZE = 80
    # V4 headers are larger and start at a fixed height.
    HDR_V4_SIZE = 112
    HDR_V4_HEIGHT = 143447
    HDR_V4_START_OFFSET = HDR_V4_HEIGHT * BASIC_HEADER_SIZE
    SESSIONCLS = DashElectrumX
    DAEMON = daemon.DashDaemon

    @classmethod
    def static_header_offset(cls, height):
        '''Return the file offset of the header at the given height,
        accounting for the header-size change at HDR_V4_HEIGHT.'''
        assert cls.STATIC_BLOCK_HEADERS
        if height >= cls.HDR_V4_HEIGHT:
            relative_v4_offset = (height - cls.HDR_V4_HEIGHT) * cls.HDR_V4_SIZE
            return cls.HDR_V4_START_OFFSET + relative_v4_offset
        else:
            return height * cls.BASIC_HEADER_SIZE

    @classmethod
    def header_hash(cls, header):
        '''Hash v4+ headers with the default algorithm, earlier versions
        with quark.'''
        version, = util.unpack_le_uint32_from(header)
        if version >= 4:
            return super().header_hash(header)
        else:
            import quark_hash
            return quark_hash.getPoWHash(header)
class Pac(Coin):
    '''PAC mainnet; Dash-style daemon/sessions with x11 proof of work.'''
    NAME = "PAC"
    SHORTNAME = "PAC"
    NET = "mainnet"
    XPUB_VERBYTES = bytes.fromhex("0488B21E")
    XPRV_VERBYTES = bytes.fromhex("0488ADE4")
    GENESIS_HASH = ('00000354655ff039a51273fe61d3b493'
                    'bd2897fe6c16f732dbc4ae19f04b789e')
    P2PKH_VERBYTE = bytes.fromhex("37")
    P2SH_VERBYTES = [bytes.fromhex("0A")]
    WIF_BYTE = bytes.fromhex("CC")
    TX_COUNT_HEIGHT = 14939
    TX_COUNT = 23708
    TX_PER_BLOCK = 2
    RPC_PORT = 7111
    PEERS = [
        'electrum.paccoin.io s t',
        'electro-pac.paccoin.io s t'
    ]
    SESSIONCLS = DashElectrumX
    DAEMON = daemon.DashDaemon
    ESTIMATE_FEE = 0.00001
    RELAY_FEE = 0.00001

    @classmethod
    def header_hash(cls, header):
        '''Given a header return the hash.'''
        import x11_hash
        return x11_hash.getPoWHash(header)
class PacTestnet(Pac):
    '''PAC testnet constant overrides.'''
    SHORTNAME = "tPAC"
    NET = "testnet"
    XPUB_VERBYTES = bytes.fromhex("043587CF")
    XPRV_VERBYTES = bytes.fromhex("04358394")
    GENESIS_HASH = ('00000da63bd9478b655ef6bf1bf76cd9'
                    'af05202ab68643f9091e049b2b5280ed')
    P2PKH_VERBYTE = bytes.fromhex("78")
    P2SH_VERBYTES = [bytes.fromhex("0E")]
    WIF_BYTE = bytes.fromhex("EF")
    TX_COUNT_HEIGHT = 16275
    TX_COUNT = 16275
    TX_PER_BLOCK = 1
    RPC_PORT = 17111
class Zcoin(Coin):
    '''Zcoin mainnet; supports both legacy 80-byte headers and MTP
    (Merkle Tree Proof) headers which carry extra data after the
    basic header.'''
    NAME = "Zcoin"
    SHORTNAME = "XZC"
    NET = "mainnet"
    P2PKH_VERBYTE = bytes.fromhex("52")
    P2SH_VERBYTES = [bytes.fromhex("07")]
    WIF_BYTE = bytes.fromhex("d2")
    GENESIS_HASH = ('4381deb85b1b2c9843c222944b616d99'
                    '7516dcbd6a964e1eaf0def0830695233')
    TX_COUNT = 667154
    TX_COUNT_HEIGHT = 100266
    TX_PER_BLOCK = 4000  # 2000 for 1MB block
    IRC_PREFIX = None
    RPC_PORT = 8888
    REORG_LIMIT = 5000
    PEER_DEFAULT_PORTS = {'t': '50001', 's': '50002'}
    # MTP headers append extra fields plus a large proof data section.
    MTP_HEADER_EXTRA_SIZE = 100
    MTP_HEADER_DATA_SIZE = 198864
    MTP_HEADER_DATA_START = Coin.BASIC_HEADER_SIZE + MTP_HEADER_EXTRA_SIZE
    MTP_HEADER_DATA_END = MTP_HEADER_DATA_START + MTP_HEADER_DATA_SIZE
    STATIC_BLOCK_HEADERS = False
    DAEMON = daemon.ZcoinMtpDaemon
    DESERIALIZER = lib_tx.DeserializerZcoin
    PEERS = [
        'electrum.polispay.com'
    ]

    @classmethod
    def is_mtp(cls, header):
        '''Return non-zero if the header's version field has the MTP bit
        (0x1000) set.

        Accepts either a hex string or raw bytes; raises TypeError for
        any other type.
        '''
        from electrumx.lib.util import unpack_le_uint32_from, hex_to_bytes
        if isinstance(header, str):
            nVersion, = unpack_le_uint32_from(hex_to_bytes(header[0:4*2]))
        elif isinstance(header, bytes):
            nVersion, = unpack_le_uint32_from(header[0:4])
        else:
            # BUG FIX: raising a string is invalid in Python 3; it raised
            # "TypeError: exceptions must derive from BaseException" and
            # lost the intended message.  Raise a proper TypeError instead.
            raise TypeError("Cannot handle the passed type")
        return nVersion & 0x1000

    @classmethod
    def block_header(cls, block, height):
        '''Return the header bytes of a raw block, including the MTP
        extra fields for MTP blocks.'''
        sz = cls.BASIC_HEADER_SIZE
        if cls.is_mtp(block):
            sz += cls.MTP_HEADER_EXTRA_SIZE
        return block[:sz]

    @classmethod
    def header_hash(cls, header):
        '''Given a header return the hash (double-SHA256 over the basic
        header, extended by the MTP extra fields when present).'''
        sz = cls.BASIC_HEADER_SIZE
        if cls.is_mtp(header):
            sz += cls.MTP_HEADER_EXTRA_SIZE
        return double_sha256(header[:sz])
class ZcoinTestnet(Zcoin):
    '''Zcoin testnet constant overrides.'''
    SHORTNAME = "tXZC"
    NET = "testnet"
    XPUB_VERBYTES = bytes.fromhex("043587cf")
    XPRV_VERBYTES = bytes.fromhex("04358394")
    P2PKH_VERBYTE = bytes.fromhex("41")
    P2SH_VERBYTES = [bytes.fromhex("b2")]
    WIF_BYTE = bytes.fromhex("b9")
    GENESIS_HASH = '1e3487fdb1a7d46dac3e8f3e58339c6e' \
                   'ff54abf6aef353485f3ed64250a35e89'
    REORG_LIMIT = 8000
    RPC_PORT = 18888
class GINCoin(Coin):
    """GINCoin mainnet."""
    NAME = "GINCoin"
    SHORTNAME = "GIN"
    NET = "mainnet"
    XPUB_VERBYTES = bytes.fromhex("0488B21E")
    XPRV_VERBYTES = bytes.fromhex("0488ADE4")
    GENESIS_HASH = ('00000cd6bde619b2c3b23ad2e384328a'
                    '450a37fa28731debf748c3b17f91f97d')
    P2PKH_VERBYTE = bytes.fromhex("37")
    P2SH_VERBYTES = [bytes.fromhex("38")]
    WIF_BYTE = bytes.fromhex("3c")
    TX_COUNT_HEIGHT = 225000
    TX_COUNT = 470784
    TX_PER_BLOCK = 4
    RPC_PORT = 10211
    PEERS = [
        'electrum.polispay.com'
    ]
    SESSIONCLS = DashElectrumX
    DAEMON = daemon.DashDaemon

    # The main lyra2z_hash python package doesn't seem to work.
    # Tested and working with: https://github.com/LapoLab/lyra2z-py
    @classmethod
    def header_hash(cls, header):
        """Given a header return the hash.

        The PoW algorithm changed twice (neoscrypt -> lyra2z -> x16rt);
        the correct one is selected by the block timestamp at offset 68.
        """
        timestamp = util.unpack_le_uint32_from(header, 68)[0]
        if timestamp > 1550246400:
            import x16rt_hash
            return x16rt_hash.getPoWHash(header)
        elif timestamp > 1525651200:
            import lyra2z_hash
            return lyra2z_hash.getPoWHash(header)
        import neoscrypt
        return neoscrypt.getPoWHash(header)
class Polis(Coin):
    """Polis mainnet (Dash-family: uses the Dash session class and daemon)."""
    NAME = "Polis"
    SHORTNAME = "POLIS"
    NET = "mainnet"
    XPUB_VERBYTES = bytes.fromhex("03E25D7E")
    XPRV_VERBYTES = bytes.fromhex("03E25945")
    GENESIS_HASH = ('000009701eb781a8113b1af1d814e2f0'
                    '60f6408a2c990db291bc5108a1345c1e')
    P2PKH_VERBYTE = bytes.fromhex("37")
    P2SH_VERBYTES = [bytes.fromhex("38")]
    WIF_BYTE = bytes.fromhex("3c")
    TX_COUNT_HEIGHT = 280600
    TX_COUNT = 635415
    TX_PER_BLOCK = 4
    RPC_PORT = 24127
    PEERS = [
        'electrum.polispay.com'
    ]
    SESSIONCLS = DashElectrumX
    DAEMON = daemon.DashDaemon

    @classmethod
    def header_hash(cls, header):
        '''Given a header return the hash.'''
        import x11_hash
        return x11_hash.getPoWHash(header)
class MNPCoin(Coin):
    """MNPCoin mainnet (Dash-family; quark PoW)."""
    NAME = "MNPCoin"
    SHORTNAME = "MNP"
    NET = "mainnet"
    XPUB_VERBYTES = bytes.fromhex("0488B21E")
    XPRV_VERBYTES = bytes.fromhex("0488ADE4")
    GENESIS_HASH = ('00000924036c67d803ce606ded814312'
                    '7e62fa2111dd3b063880a1067c69ccb1')
    P2PKH_VERBYTE = bytes.fromhex("32")
    P2SH_VERBYTES = [bytes.fromhex("35")]
    WIF_BYTE = bytes.fromhex("37")
    TX_COUNT_HEIGHT = 248000
    TX_COUNT = 506447
    TX_PER_BLOCK = 4
    RPC_PORT = 13373
    PEERS = [
        'electrum.polispay.com'
    ]
    SESSIONCLS = DashElectrumX
    DAEMON = daemon.DashDaemon

    @classmethod
    def header_hash(cls, header):
        '''Given a header return the hash.'''
        import quark_hash
        return quark_hash.getPoWHash(header)
class ColossusXT(Coin):
    """ColossusXT mainnet.

    Headers grow from 80 to 112 bytes at block HDR_V5_HEIGHT, so static
    header offsets must account for both sizes.
    """
    NAME = "ColossusXT"
    SHORTNAME = "COLX"
    NET = "mainnet"
    XPUB_VERBYTES = bytes.fromhex("0488B21E")
    XPRV_VERBYTES = bytes.fromhex("0488ADE4")
    GENESIS_HASH = ('a0ce8206c908357008c1b9a8ba2813af'
                    'f0989ca7f72d62b14e652c55f02b4f5c')
    P2PKH_VERBYTE = bytes.fromhex("1E")
    P2SH_VERBYTES = [bytes.fromhex("0D")]
    WIF_BYTE = bytes.fromhex("D4")
    TX_COUNT_HEIGHT = 356500
    BASIC_HEADER_SIZE = 80
    HDR_V5_HEIGHT = 500000
    HDR_V5_SIZE = 112
    # Byte offset of the first v5 header in the static headers file.
    HDR_V5_START_OFFSET = HDR_V5_HEIGHT * BASIC_HEADER_SIZE
    TX_COUNT = 761041
    TX_PER_BLOCK = 4
    RPC_PORT = 51473
    PEERS = [
        'electrum.polispay.com'
    ]
    SESSIONCLS = DashElectrumX
    DAEMON = daemon.DashDaemon

    @classmethod
    def static_header_offset(cls, height):
        """Return the byte offset of the header for *height*, handling the
        80-byte -> 112-byte header size change at HDR_V5_HEIGHT."""
        assert cls.STATIC_BLOCK_HEADERS
        if height >= cls.HDR_V5_HEIGHT:
            relative_v4_offset = (height - cls.HDR_V5_HEIGHT) * cls.HDR_V5_SIZE
            return cls.HDR_V5_START_OFFSET + relative_v4_offset
        else:
            return height * cls.BASIC_HEADER_SIZE

    @classmethod
    def header_hash(cls, header):
        """Given a header return the hash: double SHA-256 for version >= 5,
        quark PoW hash for older headers."""
        version, = util.unpack_le_uint32_from(header)
        if version >= 5:
            return super().header_hash(header)
        else:
            import quark_hash
            return quark_hash.getPoWHash(header)
class Minexcoin(EquihashMixin, Coin):
    """Minexcoin mainnet (Equihash; 209-byte headers including the solution)."""
    NAME = "Minexcoin"
    SHORTNAME = "MNX"
    NET = "mainnet"
    P2PKH_VERBYTE = bytes.fromhex("4b")
    GENESIS_HASH = ('490a36d9451a55ed197e34aca7414b35'
                    'd775baa4a8e896f1c577f65ce2d214cb')
    STATIC_BLOCK_HEADERS = True
    BASIC_HEADER_SIZE = 209
    # Header size without the Equihash solution appended.
    HEADER_SIZE_NO_SOLUTION = 140
    TX_COUNT = 327963
    TX_COUNT_HEIGHT = 74495
    TX_PER_BLOCK = 5
    RPC_PORT = 8022
    CHUNK_SIZE = 960
    PEERS = [
        'electrumx.xpresit.net s t',
        'elex01-ams.turinex.eu s t',
        'eu.minexpool.nl s t'
    ]

    @classmethod
    def block_header(cls, block, height):
        '''Return the block header bytes'''
        deserializer = cls.DESERIALIZER(block)
        return deserializer.read_header(cls.HEADER_SIZE_NO_SOLUTION)
class Groestlcoin(Coin):
    """Groestlcoin mainnet.

    Uses the Groestl hash both for headers and for base58 checksums,
    so ENCODE_CHECK/DECODE_CHECK are overridden with grshash.
    """
    NAME = "Groestlcoin"
    SHORTNAME = "GRS"
    NET = "mainnet"
    XPUB_VERBYTES = bytes.fromhex("0488b21e")
    XPRV_VERBYTES = bytes.fromhex("0488ade4")
    P2PKH_VERBYTE = bytes.fromhex("24")
    GENESIS_HASH = ('00000ac5927c594d49cc0bdb81759d0d'
                    'a8297eb614683d3acb62f0703b639023')
    DESERIALIZER = lib_tx.DeserializerGroestlcoin
    TX_COUNT = 115900
    TX_COUNT_HEIGHT = 1601528
    TX_PER_BLOCK = 5
    RPC_PORT = 1441
    BLACKLIST_URL = 'https://groestlcoin.org/blacklist.json'
    PEERS = [
        'electrum1.groestlcoin.org s t',
        'electrum2.groestlcoin.org s t',
        '6brsrbiinpc32tfc.onion t',
        'xkj42efxrcy6vbfw.onion t',
    ]

    # Plain function (not a classmethod): at class scope it is passed to
    # partial() below, and cls.grshash(header) also works in Python 3
    # because attribute access on the class returns the plain function.
    def grshash(data):
        import groestlcoin_hash
        return groestlcoin_hash.getHash(data, len(data))

    @classmethod
    def header_hash(cls, header):
        '''Given a header return the hash.'''
        return cls.grshash(header)

    ENCODE_CHECK = partial(Base58.encode_check, hash_fn=grshash)
    DECODE_CHECK = partial(Base58.decode_check, hash_fn=grshash)
class GroestlcoinTestnet(Groestlcoin):
    """Groestlcoin testnet."""
    SHORTNAME = "TGRS"
    NET = "testnet"
    XPUB_VERBYTES = bytes.fromhex("043587cf")
    XPRV_VERBYTES = bytes.fromhex("04358394")
    P2PKH_VERBYTE = bytes.fromhex("6f")
    P2SH_VERBYTES = [bytes.fromhex("c4")]
    WIF_BYTE = bytes.fromhex("ef")
    GENESIS_HASH = ('000000ffbb50fc9898cdd36ec163e6ba'
                    '23230164c0052a28876255b7dcf2cd36')
    RPC_PORT = 17766
    PEERS = [
        'electrum-test1.groestlcoin.org s t',
        'electrum-test2.groestlcoin.org s t',
        '7frvhgofuf522b5i.onion t',
        'aocojvqcybdoxekv.onion t',
    ]
class Pivx(Coin):
    """PIVX mainnet.

    Headers grow from 80 to 112 bytes at block HDR_V4_HEIGHT, so static
    header offsets must account for both sizes.
    """
    NAME = "Pivx"
    SHORTNAME = "PIVX"
    NET = "mainnet"
    XPUB_VERBYTES = bytes.fromhex("022D2533")
    XPRV_VERBYTES = bytes.fromhex("0221312B")
    P2PKH_VERBYTE = bytes.fromhex("1e")
    P2SH_VERBYTES = [bytes.fromhex("0d")]
    WIF_BYTE = bytes.fromhex("d4")
    GENESIS_HASH = ('0000041e482b9b9691d98eefb4847340'
                    '5c0b8ec31b76df3797c74a78680ef818')
    BASIC_HEADER_SIZE = 80
    HDR_V4_SIZE = 112
    HDR_V4_HEIGHT = 863787
    # Byte offset of the first v4 header in the static headers file.
    HDR_V4_START_OFFSET = HDR_V4_HEIGHT * BASIC_HEADER_SIZE
    TX_COUNT = 2930206
    TX_COUNT_HEIGHT = 1299212
    TX_PER_BLOCK = 2
    RPC_PORT = 51473

    @classmethod
    def static_header_offset(cls, height):
        """Return the byte offset of the header for *height*, handling the
        80-byte -> 112-byte header size change at HDR_V4_HEIGHT."""
        assert cls.STATIC_BLOCK_HEADERS
        if height >= cls.HDR_V4_HEIGHT:
            relative_v4_offset = (height - cls.HDR_V4_HEIGHT) * cls.HDR_V4_SIZE
            return cls.HDR_V4_START_OFFSET + relative_v4_offset
        else:
            return height * cls.BASIC_HEADER_SIZE

    @classmethod
    def header_hash(cls, header):
        """Given a header return the hash: double SHA-256 for version >= 4,
        quark PoW hash for older headers."""
        version, = util.unpack_le_uint32_from(header)
        if version >= 4:
            return super().header_hash(header)
        else:
            import quark_hash
            return quark_hash.getPoWHash(header)
class PivxTestnet(Pivx):
    """PIVX testnet."""
    SHORTNAME = "tPIVX"
    NET = "testnet"
    XPUB_VERBYTES = bytes.fromhex("3a8061a0")
    XPRV_VERBYTES = bytes.fromhex("3a805837")
    P2PKH_VERBYTE = bytes.fromhex("8B")
    P2SH_VERBYTES = [bytes.fromhex("13")]
    WIF_BYTE = bytes.fromhex("EF")
    GENESIS_HASH = (
        '0000041e482b9b9691d98eefb48473405c0b8ec31b76df3797c74a78680ef818')
    BASIC_HEADER_SIZE = 80
    HDR_V4_SIZE = 112
    HDR_V4_HEIGHT = 863787
    HDR_V4_START_OFFSET = HDR_V4_HEIGHT * BASIC_HEADER_SIZE
    TX_COUNT = 2157510
    TX_COUNT_HEIGHT = 569399
    TX_PER_BLOCK = 2
    RPC_PORT = 51472
class MonetaryUnit(Coin):
    """MonetaryUnit mainnet."""
    NAME = "MonetaryUnit"
    SHORTNAME = "MUE"
    NET = "mainnet"
    XPUB_VERBYTES = bytes.fromhex("022d2533")
    XPRV_VERBYTES = bytes.fromhex("0221312b")
    P2PKH_VERBYTE = bytes.fromhex("10")
    P2SH_VERBYTES = [bytes.fromhex("4c")]
    WIF_BYTE = bytes.fromhex("7e")
    GENESIS_HASH = (
        '0b58ed450b3819ca54ab0054c4d220ca4f887d21c9e55d2a333173adf76d987f')
    DAEMON = daemon.MonetaryUnitDaemon
    TX_COUNT = 569399
    TX_COUNT_HEIGHT = 2157510
    TX_PER_BLOCK = 2
    RPC_PORT = 29947
    REORG_LIMIT = 1000
    SESSIONCLS = MonetaryUnitElectrumX
    PEERS = []
class EXOS(Coin):
    """EXOS mainnet."""
    NAME = "EXOS"
    SHORTNAME = "EXOS"
    NET = "mainnet"
    XPUB_VERBYTES = bytes.fromhex("0488b21e")
    XPRV_VERBYTES = bytes.fromhex("0488ade4")
    GENESIS_HASH = ('00000036090a68c523471da7a4f0f958'
                    'c1b4403fef74a003be7f71877699cab7')
    P2PKH_VERBYTE = bytes.fromhex("1C")
    # BUG FIX: attribute was misspelled "P2SH_VERBYTE" (singular); every
    # other coin in this file declares "P2SH_VERBYTES", so the misspelled
    # attribute was never picked up.
    P2SH_VERBYTES = [bytes.fromhex("57")]
    WIF_BYTE = bytes.fromhex("9C")
    RPC_PORT = 4561
    TX_COUNT = 1000
    TX_COUNT_HEIGHT = 10000
    TX_PER_BLOCK = 4
    DAEMON = daemon.PreLegacyRPCDaemon
    DESERIALIZER = lib_tx.DeserializerTxTime

    @classmethod
    def header_hash(cls, header):
        '''Given a header return the hash.

        Version <= 2 headers hash to the (class's own) genesis hash.
        '''
        version, = util.unpack_le_uint32_from(header)
        if version > 2:
            return double_sha256(header)
        else:
            # Use cls, not the hard-coded EXOS class, so subclasses fall
            # back to their own genesis hash.
            return hex_str_to_hash(cls.GENESIS_HASH)
class EXOSTestnet(EXOS):
    """EXOS testnet."""
    SHORTNAME = "tEXOS"
    NET = "testnet"
    XPUB_VERBYTES = bytes.fromhex("043587cf")
    XPRV_VERBYTES = bytes.fromhex("04358394")
    GENESIS_HASH = ('0000059bb2c2048493efcb0f1a034972'
                    'b3ce4089d54c93b69aaab212fb369887')
    P2PKH_VERBYTE = bytes.fromhex("4B")
    # BUG FIX: attribute was misspelled "P2SH_VERBYTE" (singular); the name
    # used throughout this file is "P2SH_VERBYTES".
    P2SH_VERBYTES = [bytes.fromhex("CE")]
    WIF_BYTE = bytes.fromhex("CB")
    RPC_PORT = 14561

    @classmethod
    def header_hash(cls, header):
        '''Given a header return the hash.

        Version <= 2 headers hash to the (class's own) genesis hash.
        '''
        version, = util.unpack_le_uint32_from(header)
        if version > 2:
            return double_sha256(header)
        else:
            # Use cls rather than a hard-coded class reference.
            return hex_str_to_hash(cls.GENESIS_HASH)
class SmartCash(Coin):
    """SmartCash mainnet.

    Uses Keccak both for headers and for base58 checksums, so
    ENCODE_CHECK/DECODE_CHECK are overridden.
    """
    NAME = "SmartCash"
    SHORTNAME = "SMART"
    NET = "mainnet"
    P2PKH_VERBYTE = bytes.fromhex("3f")
    P2SH_VERBYTES = [bytes.fromhex("12")]
    WIF_BYTE = bytes.fromhex("bf")
    GENESIS_HASH = ('000007acc6970b812948d14ea5a0a13d'
                    'b0fdd07d5047c7e69101fa8b361e05a4')
    DESERIALIZER = lib_tx.DeserializerSmartCash
    RPC_PORT = 9679
    REORG_LIMIT = 5000
    TX_COUNT = 1115016
    TX_COUNT_HEIGHT = 541656
    TX_PER_BLOCK = 1
    ENCODE_CHECK = partial(Base58.encode_check,
                           hash_fn=lib_tx.DeserializerSmartCash.keccak)
    DECODE_CHECK = partial(Base58.decode_check,
                           hash_fn=lib_tx.DeserializerSmartCash.keccak)
    HEADER_HASH = lib_tx.DeserializerSmartCash.keccak
    DAEMON = daemon.SmartCashDaemon
    SESSIONCLS = SmartCashElectrumX

    @classmethod
    def header_hash(cls, header):
        '''Given a header return the hash.'''
        return cls.HEADER_HASH(header)
class NIX(Coin):
    """NIX mainnet (SegWit)."""
    NAME = "NIX"
    SHORTNAME = "NIX"
    NET = "mainnet"
    XPUB_VERBYTES = bytes.fromhex("0488b21e")
    XPRV_VERBYTES = bytes.fromhex("0488ade4")
    P2PKH_VERBYTE = bytes.fromhex("26")
    P2SH_VERBYTES = [bytes.fromhex("35")]
    GENESIS_HASH = ('dd28ad86def767c3cfc34267a950d871'
                    'fc7462bc57ea4a929fc3596d9b598e41')
    DESERIALIZER = lib_tx.DeserializerSegWit
    TX_COUNT = 114240
    TX_COUNT_HEIGHT = 87846
    TX_PER_BLOCK = 3
    RPC_PORT = 6215
    REORG_LIMIT = 1000
class NIXTestnet(NIX):
    """NIX testnet."""
    SHORTNAME = "tNIX"
    NET = "testnet"
    XPUB_VERBYTES = bytes.fromhex("0488b21e")
    XPRV_VERBYTES = bytes.fromhex("0488ade4")
    GENESIS_HASH = ('dd28ad86def767c3cfc34267a950d871'
                    'fc7462bc57ea4a929fc3596d9b598e41')
    P2PKH_VERBYTE = bytes.fromhex("01")
    # BUG FIX: attribute was misspelled "P2SH_VERBYTE" (singular); the name
    # used throughout this file (and by the NIX base class) is "P2SH_VERBYTES".
    P2SH_VERBYTES = [bytes.fromhex("03")]
    RPC_PORT = 16215
    DESERIALIZER = lib_tx.DeserializerSegWit
class Noir(Coin):
    """Noir mainnet.

    NOTE(review): no P2PKH_VERBYTE is declared here, unlike the other coins
    in this file — presumably the Coin base-class default applies; confirm.
    """
    NAME = "Noir"
    SHORTNAME = "NOR"
    NET = "mainnet"
    XPUB_VERBYTES = bytes.fromhex("0488b21e")
    XPRV_VERBYTES = bytes.fromhex("0488ade4")
    P2SH_VERBYTES = [bytes.fromhex("07")]
    WIF_BYTE = bytes.fromhex("D0")
    GENESIS_HASH = ('23911212a525e3d149fcad6c559c8b17'
                    'f1e8326a272a75ff9bb315c8d96433ef')
    RPC_PORT = 8825
    TX_COUNT = 586369
    TX_COUNT_HEIGHT = 379290
    TX_PER_BLOCK = 5
class BitcoinPlus(Coin):
    """BitcoinPlus mainnet (x13 PoW; timestamped transactions)."""
    NAME = "BitcoinPlus"
    SHORTNAME = "XBC"
    NET = "mainnet"
    XPUB_VERBYTES = bytes.fromhex("0488B21E")
    XPRV_VERBYTES = bytes.fromhex("0488ADE4")
    P2PKH_VERBYTE = bytes.fromhex("19")
    P2SH_VERBYTES = [bytes.fromhex("55")]
    WIF_BYTE = bytes.fromhex("99")
    GENESIS_HASH = ('0000005f6a28e686f641c616e56182d1'
                    'b43afbe08a223f23bda23cdf9d55b882')
    DESERIALIZER = lib_tx.DeserializerTxTime
    DAEMON = daemon.LegacyRPCDaemon
    TX_COUNT = 1479247
    TX_COUNT_HEIGHT = 749740
    TX_PER_BLOCK = 2
    RPC_PORT = 8885
    REORG_LIMIT = 2000

    @classmethod
    def header_hash(cls, header):
        '''Given a header return the hash.'''
        import x13_hash
        return x13_hash.getPoWHash(header)
class Myriadcoin(AuxPowMixin, Coin):
    """Myriadcoin mainnet (AuxPoW + SegWit)."""
    NAME = "Myriadcoin"
    SHORTNAME = "XMY"
    NET = "mainnet"
    XPUB_VERBYTES = bytes.fromhex("0488b21e")
    XPRV_VERBYTES = bytes.fromhex("0488ade4")
    P2PKH_VERBYTE = bytes.fromhex("32")
    P2SH_VERBYTES = [bytes.fromhex("09")]
    WIF_BYTE = bytes.fromhex("b2")
    GENESIS_HASH = ('00000ffde4c020b5938441a0ea3d314b'
                    'f619eff0b38f32f78f7583cffa1ea485')
    DESERIALIZER = lib_tx.DeserializerAuxPowSegWit
    TX_COUNT = 1976629
    TX_COUNT_HEIGHT = 2580356
    TX_PER_BLOCK = 20
    REORG_LIMIT = 2000
    RPC_PORT = 10889
class MyriadcoinTestnet(Myriadcoin):
    """Myriadcoin testnet."""
    NAME = "Myriadcoin"
    SHORTNAME = "XMT"
    NET = "testnet"
    XPUB_VERBYTES = bytes.fromhex("043587cf")
    XPRV_VERBYTES = bytes.fromhex("04358394")
    P2PKH_VERBYTE = bytes.fromhex("58")
    P2SH_VERBYTES = [bytes.fromhex("bc")]
    WIF_BYTE = bytes.fromhex("ef")
    GENESIS_HASH = ('0000017ce2a79c8bddafbbe47c004aa9'
                    '2b20678c354b34085f62b762084b9788')
class Sparks(Coin):
    """Sparks mainnet (Dash-family; neoscrypt PoW)."""
    NAME = "Sparks"
    SHORTNAME = "SPK"
    NET = "mainnet"
    XPUB_VERBYTES = bytes.fromhex("0488B21E")
    XPRV_VERBYTES = bytes.fromhex("0488ADE4")
    GENESIS_HASH = ('00000a5c6ddfaac5097218560d5b92d4'
                    '16931cfeba1abf10c81d1d6a232fc8ea')
    P2PKH_VERBYTE = bytes.fromhex("26")
    P2SH_VERBYTES = [bytes.fromhex("0A")]
    WIF_BYTE = bytes.fromhex("C6")
    TX_COUNT_HEIGHT = 117400
    TX_COUNT = 162310
    TX_PER_BLOCK = 4
    RPC_PORT = 8818
    SESSIONCLS = DashElectrumX
    DAEMON = daemon.DashDaemon

    @classmethod
    def header_hash(cls, header):
        """Given a header return the neoscrypt PoW hash."""
        import neoscrypt
        return neoscrypt.getPoWHash(header)
# Source: https://github.com/LIMXTEC/BitSend
class Bitsend(Coin):
    """Bitsend mainnet.

    The PoW algorithm switched from x11 to xevan at XEVAN_TIMESTAMP.
    """
    NAME = "Bitsend"
    SHORTNAME = "BSD"
    NET = "mainnet"
    XPUB_VERBYTES = bytes.fromhex("0488B21E")
    XPRV_VERBYTES = bytes.fromhex("0488ADE4")
    P2PKH_VERBYTE = bytes.fromhex("66")
    WIF_BYTE = bytes.fromhex("cc")
    GENESIS_HASH = ('0000012e1b8843ac9ce8c18603658eaf'
                    '8895f99d3f5e7e1b7b1686f35e3c087a')
    TX_COUNT = 974672
    TX_COUNT_HEIGHT = 586022
    TX_PER_BLOCK = 2
    RPC_PORT = 8800
    REORG_LIMIT = 1000
    DESERIALIZER = lib_tx.DeserializerSegWit
    XEVAN_TIMESTAMP = 1477958400
    PEERS = [
        'ele1.bitsend.cc s t',
        '51.15.121.233 s t'
    ]

    @classmethod
    def header_hash(cls, header):
        """Given a header return the hash, selecting the PoW algorithm by
        the block timestamp at offset 68."""
        timestamp, = util.unpack_le_uint32_from(header, 68)
        if timestamp > cls.XEVAN_TIMESTAMP:
            import xevan_hash
            return xevan_hash.getPoWHash(header)
        else:
            import x11_hash
            return x11_hash.getPoWHash(header)

    @classmethod
    def genesis_block(cls, block):
        """Verify *block* is the genesis block and return its header plus a
        zero byte (standing in for the transaction count)."""
        header = cls.block_header(block, 0)
        header_hex_hash = hash_to_hex_str(cls.header_hash(header))
        if header_hex_hash != cls.GENESIS_HASH:
            raise CoinError('genesis block has hash {} expected {}'
                            .format(header_hex_hash, cls.GENESIS_HASH))
        return header + bytes(1)
class Ritocoin(Coin):
    """Ritocoin mainnet (x21s PoW)."""
    NAME = "Ritocoin"
    SHORTNAME = "RITO"
    NET = "mainnet"
    XPUB_VERBYTES = bytes.fromhex("0534E7CA")
    XPRV_VERBYTES = bytes.fromhex("05347EAC")
    P2PKH_VERBYTE = bytes.fromhex("19")
    P2SH_VERBYTES = [bytes.fromhex("69")]
    GENESIS_HASH = ('00000075e344bdf1c0e433f453764b18'
                    '30a7aa19b2a5213e707502a22b779c1b')
    DESERIALIZER = lib_tx.DeserializerSegWit
    TX_COUNT = 1188090
    TX_COUNT_HEIGHT = 296030
    TX_PER_BLOCK = 3
    RPC_PORT = 8766
    REORG_LIMIT = 55
    PEERS = [
        'electrum-rito.minermore.com s t'
    ]

    @classmethod
    def header_hash(cls, header):
        '''Given a header return the hash.'''
        import x21s_hash
        return x21s_hash.getPoWHash(header)
class Ravencoin(Coin):
    """Ravencoin mainnet (x16r PoW)."""
    NAME = "Ravencoin"
    SHORTNAME = "RVN"
    NET = "mainnet"
    XPUB_VERBYTES = bytes.fromhex("0488B21E")
    XPRV_VERBYTES = bytes.fromhex("0488ADE4")
    P2PKH_VERBYTE = bytes.fromhex("3C")
    P2SH_VERBYTES = [bytes.fromhex("7A")]
    GENESIS_HASH = ('0000006b444bc2f2ffe627be9d9e7e7a'
                    '0730000870ef6eb6da46c8eae389df90')
    DESERIALIZER = lib_tx.DeserializerSegWit
    TX_COUNT = 3911020
    TX_COUNT_HEIGHT = 602000
    TX_PER_BLOCK = 4
    RPC_PORT = 8766
    REORG_LIMIT = 55
    PEERS = [
        'rvn.satoshi.org.uk s t',
        'electrum-rvn.minermore.com s t',
        '153.126.197.243 s t'
    ]

    @classmethod
    def header_hash(cls, header):
        '''Given a header return the hash.'''
        import x16r_hash
        return x16r_hash.getPoWHash(header)
class RavencoinTestnet(Ravencoin):
    """Ravencoin testnet."""
    NET = "testnet"
    XPUB_VERBYTES = bytes.fromhex("043587CF")
    XPRV_VERBYTES = bytes.fromhex("04358394")
    P2PKH_VERBYTE = bytes.fromhex("6F")
    P2SH_VERBYTES = [bytes.fromhex("C4")]
    WIF_BYTE = bytes.fromhex("EF")
    GENESIS_HASH = ('000000ecfc5e6324a079542221d00e10'
                    '362bdc894d56500c414060eea8a3ad5a')
    TX_COUNT = 108085
    TX_COUNT_HEIGHT = 60590
    TX_PER_BLOCK = 4
    RPC_PORT = 18766
    PEER_DEFAULT_PORTS = {'t': '50003', 's': '50004'}
    REORG_LIMIT = 55
    PEERS = [
        'rvn.satoshi.org.uk s t'
    ]
class Bolivarcoin(Coin):
    """Bolivarcoin mainnet (Dash-family; x11 PoW)."""
    NAME = "Bolivarcoin"
    SHORTNAME = "BOLI"
    NET = "mainnet"
    XPUB_VERBYTES = bytes.fromhex("0488B21E")
    XPRV_VERBYTES = bytes.fromhex("0488ADE4")
    P2PKH_VERBYTE = bytes.fromhex("55")
    P2SH_VERBYTES = [bytes.fromhex("05")]
    WIF_BYTE = bytes.fromhex("D5")
    GENESIS_HASH = ('00000e4fc293a1912b9d73cbb8d8f727'
                    '0007a7d84382f1370661e65d5d57b1f6')
    TX_COUNT = 1082515
    TX_COUNT_HEIGHT = 540410
    TX_PER_BLOCK = 10
    RPC_PORT = 3563
    REORG_LIMIT = 800
    PEERS = []
    SESSIONCLS = DashElectrumX
    DAEMON = daemon.DashDaemon

    @classmethod
    def header_hash(cls, header):
        '''Given a header return the hash.'''
        import x11_hash
        return x11_hash.getPoWHash(header)
class Onixcoin(Coin):
    """Onixcoin mainnet (Dash-family; x11 PoW)."""
    NAME = "Onixcoin"
    SHORTNAME = "ONX"
    NET = "mainnet"
    XPUB_VERBYTES = bytes.fromhex("0488B21E")
    XPRV_VERBYTES = bytes.fromhex("0488ADE4")
    P2PKH_VERBYTE = bytes.fromhex("4B")
    GENESIS_HASH = ('000007140b7a6ca0b64965824f5731f6'
                    'e86daadf19eb299033530b1e61236e43')
    TX_COUNT = 431808
    TX_COUNT_HEIGHT = 321132
    TX_PER_BLOCK = 10
    RPC_PORT = 41019
    REORG_LIMIT = 800
    PEERS = []
    SESSIONCLS = DashElectrumX
    DAEMON = daemon.DashDaemon

    @classmethod
    def header_hash(cls, header):
        '''Given a header return the hash.'''
        import x11_hash
        return x11_hash.getPoWHash(header)
class Electra(Coin):
    """Electra mainnet."""
    NAME = "Electra"
    SHORTNAME = "ECA"
    NET = "mainnet"
    XPUB_VERBYTES = bytes.fromhex("0488b21e")
    XPRV_VERBYTES = bytes.fromhex("0488ade4")
    P2PKH_VERBYTE = bytes.fromhex("21")
    P2SH_VERBYTES = [bytes.fromhex("28")]
    WIF_BYTE = bytes.fromhex("A1")
    GENESIS_HASH = ('00000f98da995de0ef1665c7d3338687'
                    '923c1199230a44ecbdb5cec9306e4f4e')
    RPC_PORT = 5788
    TX_COUNT = 615729
    TX_COUNT_HEIGHT = 205243
    TX_PER_BLOCK = 3
    REORG_LIMIT = 100
    DESERIALIZER = lib_tx.DeserializerElectra

    @classmethod
    def header_hash(cls, header):
        '''Given a header return the hash.

        Version 8 headers use double SHA-256; all others use nist5.
        '''
        version, = util.unpack_le_uint32_from(header)
        import nist5_hash
        if version != 8:
            return nist5_hash.getPoWHash(header)
        else:
            return double_sha256(header)
class ECCoin(Coin):
    """ECCoin mainnet (scrypt PoW)."""
    NAME = "ECCoin"
    SHORTNAME = "ECC"
    NET = "mainnet"
    DESERIALIZER = lib_tx.DeserializerECCoin
    XPUB_VERBYTES = bytes.fromhex("0488b21e")
    XPRV_VERBYTES = bytes.fromhex("0488ade4")
    P2PKH_VERBYTE = bytes.fromhex("21")
    P2SH_VERBYTES = [bytes.fromhex("08")]
    GENESIS_HASH = ('a60ac43c88dbc44b826cf315352a8a7b373d2af8b6e1c4c4a0638859c5e9ecd1')
    TX_COUNT = 4661197
    TX_COUNT_HEIGHT = 2114846
    TX_PER_BLOCK = 10
    VALUE_PER_COIN = 1000000
    RPC_PORT = 19119

    @classmethod
    def header_hash(cls, header):
        """Given a header return the scrypt PoW hash (N=1024, r=1, p=1)."""
        # Requires the scrypt python module (pip install scrypt).
        import scrypt
        return scrypt.hash(header, header, 1024, 1, 1, 32)
class Bellcoin(Coin):
    """Bellcoin mainnet (yespower PoW)."""
    NAME = "Bellcoin"
    SHORTNAME = "BELL"
    NET = "mainnet"
    XPUB_VERBYTES = bytes.fromhex("0488b21e")
    XPRV_VERBYTES = bytes.fromhex("0488ade4")
    P2PKH_VERBYTE = bytes.fromhex("19")
    P2SH_VERBYTES = [bytes.fromhex("55")]
    WIF_BYTE = bytes.fromhex("80")
    GENESIS_HASH = ('000008f3b6bd10c2d03b06674a006b8d'
                    '9731f6cb58179ef1eee008cee2209603')
    DESERIALIZER = lib_tx.DeserializerSegWit
    TX_COUNT = 264129
    TX_COUNT_HEIGHT = 219574
    TX_PER_BLOCK = 5
    RPC_PORT = 25252
    REORG_LIMIT = 1000
    PEERS = [
        'bell.electrumx.japanesecoin-pool.work s t',
        'bell.streetcrypto7.com s t',
    ]

    @classmethod
    def header_hash(cls, header):
        '''Given a header return the hash.'''
        import bell_yespower
        return bell_yespower.getPoWHash(header)
| [
"sotblad@monetaryunit.org"
] | sotblad@monetaryunit.org |
35ee20b58a930dc5be88bf349a6be48e6d7f59ea | 986769755b642932cbbf4b3f1022ff04980014cd | /WhatManager2/urls.py | 911fedfad0903f35b437e5a36f7179c3b98c8ad7 | [
"MIT"
] | permissive | jimrollenhagen/WhatManager2 | 47d4bba79dc9c80a1f91576b334235f419da3160 | 832b04396f23c19764557b7ccb6d563035d424ec | refs/heads/master | 2020-12-28T22:46:02.411169 | 2014-09-02T02:53:44 | 2014-09-02T02:53:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 888 | py | from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()

# Root URL configuration: each prefix delegates to an app-level urlconf.
urlpatterns = patterns(
    '',
    url(r'^admin/', include(admin.site.urls)),
    url(r'^', include('home.urls')),
    url(r'^json/', include('what_json.urls')),
    url(r'^download/', include('download.urls')),
    url(r'^user/', include('login.urls')),
    url(r'^queue/', include('queue.urls')),
    url(r'^profile/', include('what_profile.urls')),
    url(r'^player/', include('player.urls')),
    url(r'^allmusic/', include('allmusic.urls')),
    url(r'^torrent_list/', include('torrent_list.urls')),
    url(r'^transcode/', include('what_transcode.urls')),
    # NOTE(review): three urlconfs share the 'books/' prefix; Django tries
    # them in order, so 'books.urls' shadows overlapping 'bibliotik.urls'
    # patterns — confirm this ordering is intentional.
    url(r'^books/', include('books.urls')),
    url(r'^books/', include('bibliotik.urls')),
    url(r'^books/bibliotik/json/', include('bibliotik_json.urls')),
    url(r'^userscript/', include('userscript.urls')),
)
| [
"ivailo@karamanolev.com"
] | ivailo@karamanolev.com |
7564e367b36c449b7e12686ed0ab34031b201f69 | 2b96be128373ddd61945bf8b34af832844867b20 | /errormsg.py | 5c4ab463a62c612c60074f07580c20d63b351711 | [] | no_license | novaliu86/apparatus3-seq | 330f7039ec1404cc3badfbd88a3f0a0e396e8421 | 3f7bae71e9844444f2b354fc3c5f5455ca67b2e4 | refs/heads/master | 2021-01-17T22:30:06.239255 | 2013-10-04T23:33:29 | 2013-10-04T23:33:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 723 | py |
from Tkinter import *
import tkMessageBox
def box(title, msg):
    """Pop up a modal Tk error dialog with the given title and message.

    The root window is withdrawn so only the dialog itself is visible.
    """
    root = Tk()
    root.wm_withdraw()
    tkMessageBox.showerror(title=title, message=msg, parent=root)
# Demo entry point; the commented-out lines below are the author's
# experiments with dialog positioning, kept for reference.
if __name__ == "__main__":
    box("error","Error Mesagge")
##window = Tk()
##window.wm_withdraw()
#print window.geometry()
#message at x:0,y:0
#window.geometry("500x500+100+100")#remember its .geometry("WidthxHeight(+or-)X(+or-)Y")
##tkMessageBox.showerror(title="error",message="Error Message",parent=window)
#centre screen message
#window.geometry("1x1+"+str(window.winfo_screenwidth()/2)+"+"+str(window.winfo_screenheight()/2))
#print window.geometry()
#tkMessageBox.showinfo(title="Greetings", message="Hello World!")
| [
"pmd323@gmail.com"
] | pmd323@gmail.com |
ef4c4c1f3068800382a2dbdbacfe2b987fe76892 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_156/505.py | 22a5a74feb5b7fe6de6eacca91094c15de6f4929 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,565 | py | # ___ ______________________________________________ ___ #
#|_/*| |*\_|#
#|_/*| Google Code Jam - "Hello World" |*\_|#
#|_/*| 10.04.2015 - the end |*\_|#
#|_/*| Qualification |*\_|#
#|_/*|______________________________________________|*\_|#
#| |#
#| Denis Werner - denis@nobbd.de |#
#|______________________________________________________|#
# #
import math
# Python 2 script: Google Code Jam "Infinite House of Pancakes" solver.
# Reads an input file, greedily decides the best plate-split threshold per
# case, and writes "Case #N: rounds" lines to <filename>.solved.
filename = "B-large.in"
filename = "B-small.in"
filename = "B-small-attempt6.in"
lines = ()
with open(filename) as file:
    lines = file.read().splitlines()
number_of_sets = int(lines[0])
with open(filename+".solved","w") as outputfile:
    for i in range(0,number_of_sets):
        eaters = int(lines[i*2+1])
        plates = map(int,lines[i*2+2].split(" "))
        solved = False
        rounds = 0
        # Eating the tallest stack directly is always an upper bound.
        max_init = max(plates)
        print " "
        print "###### ROUND "+str(i+1)+" ########"
        while not solved:
            print plates
            #get max pancakes
            c = max(plates)
            # create log-list for current plates list
            log_list = [0]*(c+1)
            for li in range(0,c+1):
                log_list[li] = li
            # Score every candidate split threshold 'base' by the estimated
            # extra rounds needed to cut all taller plates down to it.
            for base in range(2,c):
                #print "base:"+str(base)
                for pi in range(0,len(plates)):
                    current_p = plates[pi]
                    #print "pi: "+str(current_p)
                    if current_p > base:
                        #cur_log = int(math.log(current_p,base))
                        cur_log = float(current_p)/base
                        #print "log: "+str(cur_log)
                        log_list[base] += max(1,cur_log-1)
            print "log list: " + str(log_list)
            log_list = log_list[2:]
            if log_list:
                # Pick the largest threshold with the minimal total cost.
                new_best = len(log_list) - log_list[::-1].index(min(log_list)) + 1
                #best_split = log_list.index(min(log_list)) + 2
                best_split = new_best
                print "Best split till: " + str(best_split)
                #print "new: " + str(new_best)
                print "max: "+str(c)
                print c==best_split
                if c == best_split:
                    # No split beats just eating; finish in best_split rounds.
                    solved = True
                    rounds += best_split
                else:
                    # Split every plate taller than the threshold once.
                    for pi in range(0,len(plates)):
                        p = plates[pi]
                        if p > best_split:
                            print str(p)+" splitted."
                            rounds += 1
                            plates.append(best_split)
                            plates[pi] -= best_split
            else:
                rounds += c
                solved = True
                print "Solved no log list"
            # Never report worse than the trivial no-split strategy.
            if rounds > max_init:
                print "rounds bigger then max"
                rounds = max_init
        # number of plates with > p/2 pancakes < p/2 then split
        line = "Case #"+str(i+1)+": "+str(int(rounds))
        print line
        outputfile.write(line+"\n")
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
c84bb7cc50aed8f41a750faecaf4c15d1962f258 | 554090526e41ab16f927e5fcb772e119923d6080 | /three_charts.py | f43d1dbcd5b0ae57c779948590b52519a681fae4 | [] | no_license | s2t2/charts-gallery-py | f95a151995b9b72ccc1e1823a6d3689a21d5489a | 64abd69be3cd4fcbd7732eb1f4e0ac4f1715bae7 | refs/heads/master | 2020-04-21T05:58:24.546261 | 2019-02-06T21:20:08 | 2019-02-06T21:20:08 | 169,354,429 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,452 | py | # three_charts.py
#
# CHART 1 (PIE)
#

# Market-share breakdown by company (shares sum to 1.0).
pie_data = [
    {"company": "Company X", "market_share": 0.55},
    {"company": "Company Y", "market_share": 0.30},
    {"company": "Company Z", "market_share": 0.15}
]

print("----------------")
print("GENERATING PIE CHART...")
print(pie_data) # TODO: create a pie chart based on the pie_data

#
# CHART 2 (LINE)
#

# Daily stock prices (USD) over an eight-day window.
line_data = [
    {"date": "2019-01-01", "stock_price_usd": 100.00},
    {"date": "2019-01-02", "stock_price_usd": 101.01},
    {"date": "2019-01-03", "stock_price_usd": 120.20},
    {"date": "2019-01-04", "stock_price_usd": 107.07},
    {"date": "2019-01-05", "stock_price_usd": 142.42},
    {"date": "2019-01-06", "stock_price_usd": 135.35},
    {"date": "2019-01-07", "stock_price_usd": 160.60},
    {"date": "2019-01-08", "stock_price_usd": 162.62},
]

print("----------------")
print("GENERATING LINE GRAPH...")
print(line_data) # TODO: create a line graph based on the line_data

#
# CHART 3 (HORIZONTAL BAR)
#

# Viewer counts per movie genre.
bar_data = [
    {"genre": "Thriller", "viewers": 123456},
    {"genre": "Mystery", "viewers": 234567},
    {"genre": "Sci-Fi", "viewers": 987654},
    {"genre": "Fantasy", "viewers": 876543},
    {"genre": "Documentary", "viewers": 283105},
    {"genre": "Action", "viewers": 544099},
    {"genre": "Romantic Comedy", "viewers": 121212}
]

print("----------------")
print("GENERATING BAR CHART...")
print(bar_data) # TODO: create a horizontal bar chart based on the bar_data
| [
"s2t2@users.noreply.github.com"
] | s2t2@users.noreply.github.com |
b194d0f0623ac12adb20f646bd9719a895c6fa03 | 4292312541c9f13cb501be3ade936ff156e80fbe | /proso_models/api_test.py | d4f16107e4ddf2a2129437b51615a2a0a0822049 | [
"MIT"
] | permissive | thanhtd91/proso-apps | 995ce5a327a93a6c77ebbe21297c3c18bde92711 | 58c95ebb4da1207de8972237c383489575ce2a20 | refs/heads/master | 2022-09-23T05:03:51.276549 | 2020-03-19T22:02:12 | 2020-03-19T22:02:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,757 | py | from django.conf import settings
from django.core.management import call_command
from proso.django.test import TestCase
from proso_models.models import Item
from proso_flashcards.models import Term, Flashcard, Category, Context
import json
class PracticeAPITest(TestCase):
    """Integration tests for the /models/practice/ question-serving API."""

    fixtures = [
        'test_common_data.yaml',
        'test_models_data.yaml',
        'test_flashcards_data.yaml',
        'test_testapp_data.yaml'
    ]

    def setUp(self):
        # Index the fixture objects by (identifier, language) for lookups
        # in the tests below, then populate item types via management commands.
        self._categories = dict([((c.identifier, c.lang), c) for c in Category.objects.all()])
        self._contexts = dict([((c.identifier, c.lang), c) for c in Context.objects.all()])
        self._terms = dict([((t.identifier, t.lang), t) for t in Term.objects.all()])
        self._flashcards = dict([((f.identifier, f.lang), f) for f in Flashcard.objects.select_related('term', 'context').all()])
        call_command('find_item_types')
        call_command('fill_item_types')

    def test_language(self):
        """Flashcards, terms and options are returned in the requested
        language (falling back to settings.LANGUAGE_CODE when omitted)."""
        for lang in [None, 'cs', 'en']:
            if lang is not None:
                content = self._get_practice(language=lang)
            else:
                content = self._get_practice()
                lang = settings.LANGUAGE_CODE[:2]
            for question in content['data']:
                flashcard = question['payload']
                self.assertEqual(flashcard['lang'], lang, 'The flashcard has an expected language.')
                self.assertEqual(flashcard['term']['lang'], lang, 'The term has an expected language.')
                for option in flashcard.get('options', []):
                    self.assertEqual(option['lang'], lang, 'The option flashcard has an expected language.')
                    self.assertEqual(option['term']['lang'], lang, 'The option term has an expected language.')

    def test_limit(self):
        """The 'limit' parameter controls how many questions come back."""
        for limit in [1, 5, 10]:
            content = self._get_practice(language='en', limit=limit)
            self.assertEqual(len(content['data']), limit, 'There is proper number of questions.')

    def test_categories(self):
        """Category filters restrict questions to terms under those categories."""
        for category_name, term_type_name in [('world', 'state'), ('cz', 'city'), ('africa', 'state')]:
            practice_filter = '[["category/{}"], ["category/{}"]]'.format(term_type_name, category_name)
            content = self._get_practice(language='en', filter=practice_filter, limit=100)
            for question in content['data']:
                term = self._terms[question['payload']['term']['identifier'], 'en']
                term_categories = Item.objects.get_parents_graph([term.item_id])[term.item_id]
                category = self._categories[category_name, 'en']
                term_type = self._categories[term_type_name, 'en']
                self.assertTrue({term_type.item_id, category.item_id}.issubset(term_categories), "The term has expected categories.")

    def test_avoid(self):
        """Item ids passed via 'avoid' never appear among served questions."""
        avoid = list(map(lambda f: f.item_id, [f for f in list(self._flashcards.values()) if f.lang == 'en']))[:-10]
        content = self._get_practice(language='en', avoid=json.dumps(avoid), limit=10)
        found = [q['payload']['item_id'] for q in content['data']]
        self.assertEqual(set(found) & set(avoid), set(), "There is no flashcard with avoided id.")

    def _get_practice(self, **kwargs):
        """GET /models/practice/ with the given query params; assert a 200
        response with at least one question and return the parsed JSON."""
        kwargs_str = '&'.join(['%s=%s' % (key_val[0], key_val[1]) for key_val in list(kwargs.items())])
        url = '/models/practice/?%s' % kwargs_str
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200, 'The status code is OK, url: %s' % url)
        content = json.loads(response.content.decode("utf-8"))
        self.assertGreater(len(content['data']), 0, 'There is at least one question, url: %s' % url)
        return content
| [
"jan.papousek@gmail.com"
] | jan.papousek@gmail.com |
e398a0c0124acbf11bc286459c836a7fd9b5c0dd | 62039bcca548f2b974d13e7ef17d78ba39cf8010 | /tests/test_ncbi_entrez_annotations.py | 692eeea55b1f89ca6b915c5827721165681c9d79 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | camiloaruiz/goatools | 8d48ef34d13df15fcc738aba2dcbe67032cdf5e3 | 3da97251ccb6c5e90b616c3f625513f8aba5aa10 | refs/heads/master | 2020-03-20T07:40:22.899604 | 2018-08-07T19:59:34 | 2018-08-07T19:59:34 | 137,287,908 | 0 | 0 | BSD-2-Clause | 2018-06-14T00:56:11 | 2018-06-14T00:56:10 | null | UTF-8 | Python | false | false | 3,653 | py | """Tests downloading and reading of the GO annotation file from NCBI Gene.
python test_NCBI_Entrez_annotations.py
"""
__copyright__ = "Copyright (C) 2016-2018, DV Klopfenstein, H Tang. All rights reserved."
__author__ = "DV Klopfenstein"
import sys
from collections import defaultdict
from goatools.associations import get_assoc_ncbi_taxids
from goatools.test_data.genes_NCBI_9606_ProteinCoding import GeneID2nt as GeneID2nt_hsa
from goatools.test_data.genes_NCBI_7227_ProteinCoding import GeneID2nt as GeneID2nt_dme
def test_ncbi_gene2go(log=sys.stdout):
    """Return GO associations to Entrez GeneIDs. Download if necessary.

    Example report generated with Feb 22, 2013 download of:
    NCBI Gene tables and associations in gene2go

    49672 items found in gene2go from NCBI's ftp server

    taxid GOs GeneIDs Description
    ----- ------ ------- -----------
    10090 16,807 18,971 all DNA items
    7227 7,022 12,019 all DNA items
    7227 6,956 10,590 76% GO coverage of 13,919 protein-coding genes
    9606 16,299 18,680 all DNA items
    9606 16,296 18,253 87% GO coverage of 20,913 protein-coding genes
    """
    # Get associations for human(9606), mouse(10090), and fly(7227)
    # (optional) multi-level dictionary separate associations by taxid
    taxid2asscs = defaultdict(lambda: defaultdict(lambda: defaultdict(set)))
    # Simple dictionary containing id2gos
    taxids = [9606, 10090, 7227]
    id2gos = get_assoc_ncbi_taxids(taxids, taxid2asscs=taxid2asscs, loading_bar=None)
    log.write(" {N} items found in gene2go from NCBI's ftp server\n".format(N=len(id2gos)))
    # Protein-coding gene tables are only bundled for human and fly.
    taxid2pc = {9606:GeneID2nt_hsa, 7227:GeneID2nt_dme}
    # Report findings
    log.write(" taxid GOs GeneIDs Description\n")
    log.write(" ----- ------ ------- -----------\n")
    for taxid, asscs in taxid2asscs.items():
        num_gene2gos_all = len(asscs['GeneID2GOs'])
        num_go2genes_all = len(asscs['GO2GeneIDs'])
        log.write(" {TAXID:>6} {N:>6,} {M:>7,} all DNA items\n".format(
            TAXID=taxid, N=num_go2genes_all, M=num_gene2gos_all))
        # Basic check to ensure gene2go was downloaded and data was returned.
        assert num_gene2gos_all > 11000
        assert num_go2genes_all > 6000
        if taxid in taxid2pc.keys():
            rpt_coverage(taxid, asscs, taxid2pc[taxid], log)
def rpt_coverage(taxid, asscs, pc2nt, log):
"""Calculate and report GO coverage on protein-coding genes.
Example report generated with Feb 22, 2013 download of:
NCBI Gene tables and associations in gene2go
taxid GOs GeneIDs Description
----- ------ ------- -----------
7227 6,956 10,590 76% GO coverage of 13,919 protein-coding genes
9606 16,296 18,253 87% GO coverage of 20,913 protein-coding genes
"""
# List of all protein-coding genes have GO terms associated with them
geneid2gos = asscs['GeneID2GOs']
pcgene_w_gos = set(geneid2gos.keys()).intersection(set(pc2nt.keys()))
num_pcgene_w_gos = len(pcgene_w_gos)
num_pc_genes = len(pc2nt)
perc_cov = 100.0*num_pcgene_w_gos/num_pc_genes
# Get list of GOs associated with protein-coding genes
gos_pcgenes = set()
for geneid in pcgene_w_gos:
gos_pcgenes |= geneid2gos[geneid]
txt = " {TAXID:>6} {N:>6,} {M:>7,} {COV:2.0f}% GO coverage of {TOT:,} protein-coding genes\n"
log.write(txt.format(
TAXID=taxid, N=len(gos_pcgenes), M=num_pcgene_w_gos, COV=perc_cov, TOT=num_pc_genes))
if __name__ == '__main__':
test_ncbi_gene2go()
| [
"dvklopfenstein@users.noreply.github.com"
] | dvklopfenstein@users.noreply.github.com |
a9b1343d26aab1a47f18de34c95953d66bfe7238 | 50e2012ecea8307e278d1132ca0094adb940aff2 | /lib/review/my_sort/xuan_ze_sort.py | 2d147534f7a757a5ad6e0191d111ab11785b0a36 | [] | no_license | Lewescaiyong/my_library | 6689cae2db4aaa980b4bd5ed9f21691eefbff2fe | 35d0d29097823ccef74fa29ca8756a7f59ceeb78 | refs/heads/master | 2020-11-25T09:20:56.484275 | 2019-12-17T10:58:20 | 2019-12-17T10:58:20 | 228,593,219 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 337 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
a_list = range(30)
random.shuffle(a_list)
print a_list
# 使用选择排序对列表进行排序
for i in range(len(a_list) - 1):
for j in range(i + 1, len(a_list)):
if a_list[i] > a_list[j]:
a_list[i], a_list[j] = a_list[j], a_list[i]
print a_list
| [
"1351153527@qq.com"
] | 1351153527@qq.com |
d14de9dd1f4ecedfcd933de9d92811fb01d16fe3 | 35fd40fbc4cfa46272c4031b9ca0cb88572e3fa4 | /xmonitor/common/scripts/utils.py | 2d365bad42cc5429e98be796e44ca4910938b1aa | [
"Apache-2.0"
] | permissive | froyobin/xmonitor | 3d662541387226a4ff1c18ef450fdc77a769d0b8 | 092dcaa01f834353ffd8dd3c40edf9e97543bfe8 | refs/heads/master | 2020-12-23T22:33:15.758127 | 2016-06-30T06:18:05 | 2016-06-30T06:18:05 | 62,284,213 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,574 | py | # Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
__all__ = [
'get_task',
'unpack_task_input',
'set_base_image_properties',
'validate_location_uri',
'get_image_data_iter',
]
from oslo_log import log as logging
from six.moves import urllib
from xmonitor.common import exception
from xmonitor.i18n import _, _LE
LOG = logging.getLogger(__name__)
def get_task(task_repo, task_id):
"""Gets a TaskProxy object.
:param task_repo: TaskRepo object used to perform DB operations
:param task_id: ID of the Task
"""
task = None
try:
task = task_repo.get(task_id)
except exception.NotFound:
msg = _LE('Task not found for task_id %s') % task_id
LOG.exception(msg)
return task
def unpack_task_input(task):
"""Verifies and returns valid task input dictionary.
:param task: Task domain object
"""
task_input = task.task_input
# NOTE: until we support multiple task types, we just check for
# input fields related to 'import task'.
for key in ["import_from", "import_from_format", "image_properties"]:
if key not in task_input:
msg = _("Input does not contain '%(key)s' field") % {"key": key}
raise exception.Invalid(msg)
return task_input
def set_base_image_properties(properties=None):
"""Sets optional base properties for creating Image.
:param properties: Input dict to set some base properties
"""
if isinstance(properties, dict) and len(properties) == 0:
# TODO(nikhil): We can make these properties configurable while
# implementing the pipeline logic for the scripts. The below shown
# are placeholders to show that the scripts work on 'devstack'
# environment.
properties['disk_format'] = 'qcow2'
properties['container_format'] = 'bare'
def validate_location_uri(location):
"""Validate location uri into acceptable format.
:param location: Location uri to be validated
"""
if not location:
raise exception.BadStoreUri(_('Invalid location: %s') % location)
elif location.startswith(('http://', 'https://')):
return location
# NOTE: file type uri is being avoided for security reasons,
# see LP bug #942118 #1400966.
elif location.startswith(("file:///", "filesystem:///")):
msg = _("File based imports are not allowed. Please use a non-local "
"source of image data.")
# NOTE: raise BadStoreUri and let the encompassing block save the error
# msg in the task.message.
raise exception.BadStoreUri(msg)
else:
# TODO(nikhil): add other supported uris
supported = ['http', ]
msg = _("The given uri is not valid. Please specify a "
"valid uri from the following list of supported uri "
"%(supported)s") % {'supported': supported}
raise urllib.error.URLError(msg)
def get_image_data_iter(uri):
"""Returns iterable object either for local file or uri
:param uri: uri (remote or local) to the datasource we want to iterate
Validation/sanitization of the uri is expected to happen before we get
here.
"""
# NOTE(flaper87): This is safe because the input uri is already
# verified before the task is created.
if uri.startswith("file://"):
uri = uri.split("file://")[-1]
# NOTE(flaper87): The caller of this function expects to have
# an iterable object. FileObjects in python are iterable, therefore
# we are returning it as is.
# The file descriptor will be eventually cleaned up by the garbage
# collector once its ref-count is dropped to 0. That is, when there
# wont be any references pointing to this file.
#
# We're not using StringIO or other tools to avoid reading everything
# into memory. Some images may be quite heavy.
return open(uri, "r")
return urllib.request.urlopen(uri)
| [
"froyo.bin@gmail.com"
] | froyo.bin@gmail.com |
659ce6eedcc37d786b1fbb227a329de04172d815 | 7981914523b28c54576ce548ec4c326314a997cf | /setup.py | 39858b2cb46006739ec3dd881ee76b1df57430a3 | [] | no_license | geyang/tf_logger | 83168499c0acb45890ef68ffce2528848e574ab2 | e68f7d19e014d2d7878513c276802db4aa37d8d2 | refs/heads/master | 2021-09-12T13:13:25.920198 | 2018-04-17T04:35:40 | 2018-04-17T04:35:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 982 | py | from os import path
from setuptools import setup
with open(path.join(path.abspath(path.dirname(__file__)), 'README'), encoding='utf-8') as f:
long_description = f.read()
with open(path.join(path.abspath(path.dirname(__file__)), 'VERSION'), encoding='utf-8') as f:
version = f.read()
setup(name="tf_logger",
description="A print and debugging utility that makes your error printouts look nice",
long_description=long_description,
version=version,
url="https://github.com/episodeyang/tf_logger",
author="Ge Yang",
author_email="yangge1987@gmail.com",
license=None,
keywords=["tf_logger", "tensorflow", "logging", "debug", "debugging"],
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Science/Research",
"Programming Language :: Python :: 3"
],
packages=["tf_logger"],
install_requires=["typing", "tensorflow", "numpy", "termcolor", "pprint"]
)
| [
"yangge1987@gmail.com"
] | yangge1987@gmail.com |
ccae434857ea91044d3f302e99af40d91f834838 | ec6cb8542c8ed962d24ca32fc1f060ef63fdfea7 | /第一阶段/review_month01/month01/day10/demo03.py | 4912dc1a0e7e42d288f7093cb349fbabc706c428 | [] | no_license | singerdo/songers | 27859a4ff704318d149b2aa6613add407d88bb5d | 9c5dcd80c6772272c933b06c156b33058cbd3ce4 | refs/heads/master | 2022-04-16T11:00:11.002138 | 2020-04-18T07:15:16 | 2020-04-18T07:15:16 | 256,686,903 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 590 | py | """
实例成员: 对象.成员名
实例变量
对象.变量名
实例方法
对象.方法名称()
"""
# 全局变量
a = 10
def func01():
# 局部变量
b = 20
class MyClass:
def __init__(self, c):
# 实例变量
self.c = c
def func02(self):
pass
mc01 = MyClass(30)
print(mc01.c)
print(mc01.__dict__)# 系统提供的,可以获取对象所有实例变量 {'c': 30}
# mc01.d = 40 # 实例变量(不可取)
mc01.func02() # 通过对象地址访问实例方法
# MyClass.func02(mc01) # 不建议 | [
"569593546@qq.com"
] | 569593546@qq.com |
202548ecc73466cb75e0fc08f83fc70b4de52c7f | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_194/ch36_2019_04_04_17_41_44_521451.py | 41f5c5173746f4ebf02bbff3c65de5db745362a3 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 265 | py | def eh_primo(n):
primo = True
divisor = 2
if n < 2:
primo = False
elif n == 2:
primo = True
else:
while divisor < n:
if n % divisor == 0:
primo = False
divisor += 1
return primo | [
"you@example.com"
] | you@example.com |
268bb911d88b9c496bb99a9f29b74403225a2e3d | 6413fe58b04ac2a7efe1e56050ad42d0e688adc6 | /tempenv/lib/python3.7/site-packages/plotly/validators/scattercarpet/line/_width.py | ee5a9364a2f3fac047fec5cebbf518dc64a31c27 | [
"MIT"
] | permissive | tytechortz/Denver_temperature | 7f91e0ac649f9584147d59193568f6ec7efe3a77 | 9d9ea31cd7ec003e8431dcbb10a3320be272996d | refs/heads/master | 2022-12-09T06:22:14.963463 | 2019-10-09T16:30:52 | 2019-10-09T16:30:52 | 170,581,559 | 1 | 0 | MIT | 2022-06-21T23:04:21 | 2019-02-13T21:22:53 | Python | UTF-8 | Python | false | false | 544 | py | import _plotly_utils.basevalidators
class WidthValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name='width', parent_name='scattercarpet.line', **kwargs
):
super(WidthValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
anim=kwargs.pop('anim', True),
edit_type=kwargs.pop('edit_type', 'style'),
min=kwargs.pop('min', 0),
role=kwargs.pop('role', 'style'),
**kwargs
)
| [
"jmswank7@gmail.com"
] | jmswank7@gmail.com |
49f358698f83b5d59960551bec1a1439c00161b0 | 614271299ef8145ad40d7ff197897b1a5d598bea | /slackchannel2pdf/settings.py | cb39f8fcfec46ca3eb9dc13659ec6ce215026ad4 | [
"MIT"
] | permissive | 17500mph/slackchannel2pdf | 14498685cdb8d9ac4bdc586948560e8adbd1151a | 2848dfaaffbf9a5255c6dbe87dcc1e90d062b820 | refs/heads/master | 2023-03-04T16:49:28.638394 | 2021-02-21T11:28:16 | 2021-02-21T11:28:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,832 | py | """Defines all global settings incl. from configuration files"""
from ast import literal_eval
import configparser
from pathlib import Path
_FILE_NAME_BASE = "slackchannel2pdf"
_CONF_FILE_NAME = f"{_FILE_NAME_BASE}.ini"
_LOG_FILE_NAME = f"{_FILE_NAME_BASE}.log"
_DEFAULTS_PATH = Path(__file__).parent
def _configparser_convert_str(x):
result = literal_eval(x)
if not isinstance(result, str):
raise configparser.ParsingError(f"Needs to be a string type: {x}")
return result
def config_parser(
defaults_path: Path, home_path: Path = None, cwd_path: Path = None
) -> configparser.ConfigParser:
parser = configparser.ConfigParser(converters={"str": _configparser_convert_str})
config_file_paths = [defaults_path / _CONF_FILE_NAME]
if home_path:
config_file_paths.append(home_path / _CONF_FILE_NAME)
if cwd_path:
config_file_paths.append(cwd_path / _CONF_FILE_NAME)
found = parser.read(config_file_paths)
if not found:
raise RuntimeError("Can not find a configuration file anywhere")
return parser
_my_config = config_parser(
defaults_path=_DEFAULTS_PATH, home_path=Path.home(), cwd_path=Path.cwd()
)
# style and layout settings for PDF
PAGE_UNITS_DEFAULT = "mm"
FONT_FAMILY_DEFAULT = "NotoSans"
FONT_FAMILY_MONO_DEFAULT = "NotoSansMono"
PAGE_ORIENTATION_DEFAULT = _my_config.getstr("pdf", "page_orientation")
PAGE_FORMAT_DEFAULT = _my_config.getstr("pdf", "page_format")
FONT_SIZE_NORMAL = _my_config.getint("pdf", "font_size_normal")
FONT_SIZE_LARGE = _my_config.getint("pdf", "font_size_large")
FONT_SIZE_SMALL = _my_config.getint("pdf", "font_size_small")
LINE_HEIGHT_DEFAULT = _my_config.getint("pdf", "line_height_default")
LINE_HEIGHT_SMALL = _my_config.getint("pdf", "line_height_small")
MARGIN_LEFT = _my_config.getint("pdf", "margin_left")
TAB_WIDTH = _my_config.getint("pdf", "tab_width")
# locale
FALLBACK_LOCALE = _my_config.getstr("locale", "fallback_locale")
# slack
MINUTES_UNTIL_USERNAME_REPEATS = _my_config.getint(
"slack", "minutes_until_username_repeats"
)
MAX_MESSAGES_PER_CHANNEL = _my_config.getint("slack", "max_messages_per_channel")
SLACK_PAGE_LIMIT = _my_config.getint("slack", "slack_page_limit")
def setup_logging(config: configparser.ConfigParser) -> None:
config_logging = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"console": {"format": "[%(levelname)s] %(message)s"},
"file": {"format": "%(asctime)s [%(levelname)s] %(name)s: %(message)s"},
},
"handlers": {
"console": {
"level": config.getstr("logging", "console_log_level"),
"formatter": "console",
"class": "logging.StreamHandler",
"stream": "ext://sys.stdout", # Default is stderr
}
},
"loggers": {
"": { # root logger
"handlers": ["console"],
"level": "DEBUG",
"propagate": False,
},
},
}
# add log file if configured
log_file_enabled = config.getboolean("logging", "log_file_enabled", fallback=False)
if log_file_enabled:
file_log_path_full = config.getstr("logging", "log_file_path", fallback=None)
filename = (
Path(file_log_path_full) / _LOG_FILE_NAME
if file_log_path_full
else _LOG_FILE_NAME
)
config_logging["handlers"]["file"] = {
"level": config.getstr("logging", "file_log_level"),
"formatter": "file",
"class": "logging.FileHandler",
"filename": filename,
"mode": "a",
}
config_logging["loggers"][""]["handlers"].append("file")
return config_logging
DEFAULT_LOGGING = setup_logging(_my_config)
| [
"erik.kalkoken@gmail.com"
] | erik.kalkoken@gmail.com |
7b74499dbf42c018aec13de219672e212ca40a5a | 8c917dc4810e2dddf7d3902146280a67412c65ea | /v_11/masa_project/branches/common/budget_custom_report/wizard/budget_main.py | 5ae0836ad9cdc04d75cfc1505aa9649e63578f1a | [] | no_license | musabahmed/baba | d0906e03c1bbd222d3950f521533f3874434b993 | 0b997095c260d58b026440967fea3a202bef7efb | refs/heads/master | 2021-10-09T02:37:32.458269 | 2018-12-20T06:00:00 | 2018-12-20T06:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,678 | py | # -*- coding: utf-8 -*-
from odoo import models, fields, api , _
from datetime import date, datetime, timedelta
from odoo.exceptions import UserError, ValidationError
####################################### Budget Custom Reports ##################################################################
class BudgetReportMain(models.TransientModel):
_name = 'budget.custom.report.main'
_inherit = 'budget.custom.report'
report_show = fields.Selection([('sum', 'Summation'),
('details', 'Details')],default ='sum')
report_type = fields.Selection([('cost_center', 'By Cost Centers'),
('bud_position', 'By Budgetry Position')],
required=1, default='cost_center')
budgetry_position_show = fields.Selection([('without_analytic','Only Budgetry Positons'),
('with_analytic','With Analytics')],default='without_analytic')
def print_report(self,data):
if self.date_from > self.date_to:
raise ValidationError(_('Start Date must be equal to or less than Date To'))
# starter filter ^_^
data = data
#Get all filter in data Dict
data.update({'report_type': self.report_type})
data.update({'report_show': self.report_show})
data.update({'budget_type': self.budget_type})
data.update({'date_from': self.date_from})
data.update({'date_to': self.date_to})
#read_group filters and pass it to all functions we need
filters = [('date_from', '>=', self.date_from),
('date_to', '<=', self.date_to),
('general_budget_id.type', '=', self.budget_type)
]
data.update({'filters': filters})
#read_group fields , pass it to all functions that have read_group
budget_fields = ['general_budget_id', 'general_budget_id.code', 'analytic_account_id', 'planned_amount',
'practical_amount', 'total_operation', 'transfer_amount', 'confirm','residual','percentage',
'deviation']
data.update({'fields': budget_fields})
if self.report_type == 'cost_center':
#if user not select any analytic then select all analytics
if len(self.mapped('analytic_account_ids')) == 0:
analytic_ids = self.env['account.analytic.account'].search([],order='code').ids
else:
tuple_analytic_ids = tuple(self.mapped('analytic_account_ids').ids)
analytic_ids = tuple([line.id for line in self.env['account.analytic.account'].search([('id','child_of',tuple_analytic_ids)])])
data.update({'analytic_ids':analytic_ids})
elif self.report_type == 'bud_position':
#budgetry_position_type
data.update({'budgetry_position_show': self.budgetry_position_show})
# if user not select any Budgetary then select all Budgetaries
if len(self.mapped('budgetry_position_ids')) == 0:
budgetary_ids = self.env['crossovered.budget.lines'].search([]).ids
else:
tuple_budgetary_ids = tuple(self.mapped('budgetry_position_ids').ids)
budgetary_ids = tuple([line.id for line in self.env['crossovered.budget.lines'].search(
[('id', 'in', tuple_budgetary_ids)])])
data.update({'budgetary_ids': budgetary_ids})
if self.budgetry_position_show == 'with_analytic':
# if user not select any analytic then select all analytics
if len(self.mapped('analytic_account_ids')) == 0:
analytic_ids = self.env['account.analytic.account'].search([], order='code').ids
else:
tuple_analytic_ids = tuple(self.mapped('analytic_account_ids').ids)
analytic_ids = tuple([line.id for line in self.env['account.analytic.account'].search(
[('id', 'child_of', tuple_analytic_ids)])])
data.update({'analytic_ids': analytic_ids})
return self.env.ref('budget_custom_report.action_budget_custom_report').with_context(landscape=True).report_action(
self, data=data)
class budgetCustomReport(models.AbstractModel):
_name = 'report.budget_custom_report.budget_main_report_tamplate'
@api.model
def get_report_values(self, docids, data=None):
return {
'data': data,
'get':self.env['budget.custom.report'],
'current_model': self.env['budget.custom.report.main']
}
| [
"bakry@exp-sa.com"
] | bakry@exp-sa.com |
bd7dadc54b85e08c31e8ca417b8a7925d903e09a | 5dc7dc7e33122e8c588eb6e13f23bf032c704d2e | /scripts/transfer_from_TEXT_to_SQLITE.py | d149f8918fe194ad0b74376c404e553546a1a822 | [
"Apache-2.0"
] | permissive | brianr747/platform | a3319e84858345e357c1fa9a3916f92122775b30 | 84b1bd90fc2e35a51f32156a8d414757664b4b4f | refs/heads/master | 2022-01-23T16:06:26.855556 | 2022-01-12T18:13:22 | 2022-01-12T18:13:22 | 184,085,670 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 493 | py | """
Script that transfers all series from the TEXT database to the SQLite database.
Note: the TEXT database series to have the correct ticker_full as the column header.
"""
import econ_platform_core
import econ_platform.start
econ_platform_core.start_log()
econ_platform_core.Databases['SQLITE'].LogSQL = True
ticker_list = econ_platform_core.Databases['TEXT'].GetAllValidSeriesTickers()
for ticker in ticker_list:
econ_platform_core.Databases.TransferSeries(ticker, 'TEXT', 'SQLITE')
| [
"brianr747@gmail.com"
] | brianr747@gmail.com |
b020c98bdd919c3fa176f6133cb896944293d497 | 61f9553eedc2ec936ea87f06da5b986091e3b8ff | /workspace/buildout-cache/eggs/z3c.form-3.0.2-py2.7.egg/z3c/form/tests/test_doc.py | f5a7184d64deb6152f01ed8826dfdc78c10e1f9a | [] | no_license | gruhter/gso | 47880b055455cc99d63eec72498048c857e7831b | c0eb949f8a06aab6b97329d51a6d046e2fc0a653 | refs/heads/master | 2016-09-01T18:28:05.589620 | 2015-05-14T19:38:18 | 2015-05-14T19:38:18 | 35,579,298 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,194 | py | ##############################################################################
#
# Copyright (c) 2007 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""z3c.form Test Module"""
import doctest
import itertools
import re
import unittest
from zope.testing import renormalizing
from z3c.form import testing
# This package will allways provide z3c.pt for it's test setup.
# The Z3CPT_AVAILABLE hickup is usefull if someone will run the z3c.form tests
# in his own project and doesn't use z3c.pt.
try:
import z3c.pt
import z3c.ptcompat
Z3CPT_AVAILABLE = True
except ImportError:
Z3CPT_AVAILABLE = False
try:
import zope.app.container
except ImportError:
ADDING_AVAILABLE = False
else:
ADDING_AVAILABLE = True
def test_suite():
flags = \
doctest.NORMALIZE_WHITESPACE | \
doctest.ELLIPSIS | \
doctest.IGNORE_EXCEPTION_DETAIL
if Z3CPT_AVAILABLE:
setups = (testing.setUpZPT, testing.setUpZ3CPT)
else:
setups = (testing.setUpZPT, )
tests = ((
doctest.DocFileSuite(
'../form.txt',
setUp=setUp, tearDown=testing.tearDown,
optionflags=flags, checker=testing.outputChecker,
),
doctest.DocFileSuite(
'../action.txt',
setUp=setUp, tearDown=testing.tearDown,
optionflags=flags, checker=testing.outputChecker,
),
doctest.DocFileSuite(
'../datamanager.txt',
setUp=setUp, tearDown=testing.tearDown,
optionflags=flags, checker=testing.outputChecker,
),
doctest.DocFileSuite(
'../field.txt',
setUp=setUp, tearDown=testing.tearDown,
optionflags=flags, checker=testing.outputChecker,
),
doctest.DocFileSuite(
'../contentprovider.txt',
setUp=setUp, tearDown=testing.tearDown,
optionflags=flags, checker=testing.outputChecker,
),
doctest.DocFileSuite(
'../validator.txt',
setUp=setUp, tearDown=testing.tearDown,
optionflags=flags, checker=testing.outputChecker,
),
doctest.DocFileSuite(
'../error.txt',
setUp=setUp, tearDown=testing.tearDown,
optionflags=flags, checker=testing.outputChecker,
),
doctest.DocFileSuite(
'../widget.txt',
setUp=setUp, tearDown=testing.tearDown,
optionflags=flags, checker=testing.outputChecker,
),
doctest.DocFileSuite(
'../button.txt',
setUp=setUp, tearDown=testing.tearDown,
optionflags=flags, checker=testing.outputChecker,
),
doctest.DocFileSuite(
'../zcml.txt',
setUp=setUp, tearDown=testing.tearDown,
optionflags=flags, checker=testing.outputChecker,
),
doctest.DocFileSuite(
'../testing.txt',
setUp=setUp, tearDown=testing.tearDown,
optionflags=flags, checker=testing.outputChecker,
),
doctest.DocFileSuite(
'../converter.txt',
setUp=setUp, tearDown=testing.tearDown,
optionflags=flags, checker=renormalizing.RENormalizing([
(re.compile(
r"(invalid literal for int\(\)) with base 10: '(.*)'"),
r'\1: \2'),
(re.compile(
r"Decimal\('(.*)'\)"),
r'Decimal("\1")'),
]) + testing.outputChecker
),
doctest.DocFileSuite(
'../group.txt',
setUp=setUp, tearDown=testing.tearDown,
optionflags=flags, checker=testing.outputChecker,
),
doctest.DocFileSuite(
'../subform.txt',
setUp=setUp, tearDown=testing.tearDown,
optionflags=flags, checker=testing.outputChecker,
),
doctest.DocFileSuite(
'../util.txt',
setUp=setUp, tearDown=testing.tearDown,
optionflags=flags, checker=testing.outputChecker,
),
doctest.DocFileSuite(
'../hint.txt',
setUp=setUp, tearDown=testing.tearDown,
optionflags=flags, checker=testing.outputChecker,
))
for setUp in setups)
if ADDING_AVAILABLE:
tests = itertools.chain(tests, ((
doctest.DocFileSuite(
'../adding.txt',
setUp=setUp, tearDown=testing.tearDown,
optionflags=flags, checker=testing.outputChecker,
),)
for setUp in setups))
return unittest.TestSuite(itertools.chain(*tests))
| [
"gso@abv.bg"
] | gso@abv.bg |
e07fd9c9d4cb47e3567dbc210bfade4565653d47 | bed837cdf2cc0302b435c65f20140601c0b6ca71 | /pandas/tests/series/test_constructors.py | c5783779c67c812362a015186edf330497658976 | [
"BSD-3-Clause",
"LicenseRef-scancode-other-permissive",
"BSD-2-Clause"
] | permissive | gustavodemari/pandas | e670b99cc3de81c2c658dc60022128d6db0fc1c3 | 2ce897f99b82b756926806a35473e3db0f3f4822 | refs/heads/master | 2020-11-30T13:03:02.879501 | 2016-02-12T03:10:46 | 2016-02-12T03:10:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,248 | py | # coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
from numpy import nan
import numpy as np
import numpy.ma as ma
import pandas as pd
from pandas import Index, Series, isnull, date_range, period_range
from pandas.core.index import MultiIndex
from pandas.tseries.index import Timestamp, DatetimeIndex
import pandas.core.common as com
import pandas.lib as lib
from pandas.compat import lrange, range, zip, OrderedDict, long
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesConstructors(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_scalar_conversion(self):
# Pass in scalar is disabled
scalar = Series(0.5)
self.assertNotIsInstance(scalar, float)
# coercion
self.assertEqual(float(Series([1.])), 1.0)
self.assertEqual(int(Series([1.])), 1)
self.assertEqual(long(Series([1.])), 1)
def test_TimeSeries_deprecation(self):
# deprecation TimeSeries, #10890
with tm.assert_produces_warning(FutureWarning):
pd.TimeSeries(1, index=date_range('20130101', periods=3))
    def test_constructor(self):
        """Exercise the core Series construction paths and basic invariants."""
        # Recognize TimeSeries
        # ``is_time_series`` is deprecated, hence the FutureWarning guards
        # around every access below.
        with tm.assert_produces_warning(FutureWarning):
            self.assertTrue(self.ts.is_time_series)
        self.assertTrue(self.ts.index.is_all_dates)
        # Pass in Series
        derived = Series(self.ts)
        with tm.assert_produces_warning(FutureWarning):
            self.assertTrue(derived.is_time_series)
        self.assertTrue(derived.index.is_all_dates)
        self.assertTrue(tm.equalContents(derived.index, self.ts.index))
        # Ensure new index is not created
        # (identity check: re-wrapping must reuse the same Index object).
        self.assertEqual(id(self.ts.index), id(derived.index))
        # Mixed type Series
        # str + NaN falls back to object dtype; the NaN is kept as-is.
        mixed = Series(['hello', np.NaN], index=[0, 1])
        self.assertEqual(mixed.dtype, np.object_)
        self.assertIs(mixed[1], np.NaN)
        with tm.assert_produces_warning(FutureWarning):
            self.assertFalse(self.empty.is_time_series)
        self.assertFalse(self.empty.index.is_all_dates)
        with tm.assert_produces_warning(FutureWarning):
            self.assertFalse(Series({}).is_time_series)
        self.assertFalse(Series({}).index.is_all_dates)
        # A 2-D ndarray cannot be coerced into a (1-D) Series.
        self.assertRaises(Exception, Series, np.random.randn(3, 3),
                          index=np.arange(3))
        # The ``name`` attribute must survive re-wrapping in a new Series.
        mixed.name = 'Series'
        rs = Series(mixed).name
        xp = 'Series'
        self.assertEqual(rs, xp)
        # raise on MultiIndex GH4187
        m = MultiIndex.from_arrays([[1, 2], [3, 4]])
        self.assertRaises(NotImplementedError, Series, m)
def test_constructor_empty(self):
empty = Series()
empty2 = Series([])
# the are Index() and RangeIndex() which don't compare type equal
# but are just .equals
assert_series_equal(empty, empty2, check_index_type=False)
empty = Series(index=lrange(10))
empty2 = Series(np.nan, index=lrange(10))
assert_series_equal(empty, empty2)
def test_constructor_series(self):
index1 = ['d', 'b', 'a', 'c']
index2 = sorted(index1)
s1 = Series([4, 7, -5, 3], index=index1)
s2 = Series(s1, index=index2)
assert_series_equal(s2, s1.sort_index())
def test_constructor_iterator(self):
expected = Series(list(range(10)), dtype='int64')
result = Series(range(10), dtype='int64')
assert_series_equal(result, expected)
def test_constructor_generator(self):
gen = (i for i in range(10))
result = Series(gen)
exp = Series(lrange(10))
assert_series_equal(result, exp)
gen = (i for i in range(10))
result = Series(gen, index=lrange(10, 20))
exp.index = lrange(10, 20)
assert_series_equal(result, exp)
def test_constructor_map(self):
# GH8909
m = map(lambda x: x, range(10))
result = Series(m)
exp = Series(lrange(10))
assert_series_equal(result, exp)
m = map(lambda x: x, range(10))
result = Series(m, index=lrange(10, 20))
exp.index = lrange(10, 20)
assert_series_equal(result, exp)
def test_constructor_categorical(self):
cat = pd.Categorical([0, 1, 2, 0, 1, 2], ['a', 'b', 'c'],
fastpath=True)
res = Series(cat)
self.assertTrue(res.values.equals(cat))
    def test_constructor_maskedarray(self):
        """Masked arrays: masked slots become missing values per dtype.

        For each dtype (float, int, bool, datetime64[ns]) the pattern is:
        fully-masked input -> all-missing Series, then progressively unmask
        elements in place and check the resulting values and dtype.  Note the
        masked-array ``data`` is mutated between assertions, so statement
        order matters.
        """
        # float: masked entries surface as NaN, dtype stays float.
        data = ma.masked_all((3, ), dtype=float)
        result = Series(data)
        expected = Series([nan, nan, nan])
        assert_series_equal(result, expected)
        data[0] = 0.0
        data[2] = 2.0
        index = ['a', 'b', 'c']
        result = Series(data, index=index)
        expected = Series([0.0, nan, 2.0], index=index)
        assert_series_equal(result, expected)
        data[1] = 1.0
        result = Series(data, index=index)
        expected = Series([0.0, 1.0, 2.0], index=index)
        assert_series_equal(result, expected)
        # int: any remaining mask forces upcast to float (NaN needs float);
        # only a fully unmasked array keeps int dtype.
        data = ma.masked_all((3, ), dtype=int)
        result = Series(data)
        expected = Series([nan, nan, nan], dtype=float)
        assert_series_equal(result, expected)
        data[0] = 0
        data[2] = 2
        index = ['a', 'b', 'c']
        result = Series(data, index=index)
        expected = Series([0, nan, 2], index=index, dtype=float)
        assert_series_equal(result, expected)
        data[1] = 1
        result = Series(data, index=index)
        expected = Series([0, 1, 2], index=index, dtype=int)
        assert_series_equal(result, expected)
        # bool: a mask forces object dtype (bool cannot hold NaN);
        # fully unmasked keeps bool dtype.
        data = ma.masked_all((3, ), dtype=bool)
        result = Series(data)
        expected = Series([nan, nan, nan], dtype=object)
        assert_series_equal(result, expected)
        data[0] = True
        data[2] = False
        index = ['a', 'b', 'c']
        result = Series(data, index=index)
        expected = Series([True, nan, False], index=index, dtype=object)
        assert_series_equal(result, expected)
        data[1] = True
        result = Series(data, index=index)
        expected = Series([True, True, False], index=index, dtype=bool)
        assert_series_equal(result, expected)
        # datetime64[ns]: masked entries become NaT (tslib.iNaT sentinel),
        # dtype stays M8[ns] throughout.
        from pandas import tslib
        data = ma.masked_all((3, ), dtype='M8[ns]')
        result = Series(data)
        expected = Series([tslib.iNaT, tslib.iNaT, tslib.iNaT], dtype='M8[ns]')
        assert_series_equal(result, expected)
        data[0] = datetime(2001, 1, 1)
        data[2] = datetime(2001, 1, 3)
        index = ['a', 'b', 'c']
        result = Series(data, index=index)
        expected = Series([datetime(2001, 1, 1), tslib.iNaT,
                           datetime(2001, 1, 3)], index=index, dtype='M8[ns]')
        assert_series_equal(result, expected)
        data[1] = datetime(2001, 1, 2)
        result = Series(data, index=index)
        expected = Series([datetime(2001, 1, 1), datetime(2001, 1, 2),
                           datetime(2001, 1, 3)], index=index, dtype='M8[ns]')
        assert_series_equal(result, expected)
def test_constructor_default_index(self):
s = Series([0, 1, 2])
assert_almost_equal(s.index, np.arange(3))
def test_constructor_corner(self):
df = tm.makeTimeDataFrame()
objs = [df, df]
s = Series(objs, index=[0, 1])
tm.assertIsInstance(s, Series)
def test_constructor_sanitize(self):
s = Series(np.array([1., 1., 8.]), dtype='i8')
self.assertEqual(s.dtype, np.dtype('i8'))
s = Series(np.array([1., 1., np.nan]), copy=True, dtype='i8')
self.assertEqual(s.dtype, np.dtype('f8'))
def test_constructor_pass_none(self):
s = Series(None, index=lrange(5))
self.assertEqual(s.dtype, np.float64)
s = Series(None, index=lrange(5), dtype=object)
self.assertEqual(s.dtype, np.object_)
# GH 7431
# inference on the index
s = Series(index=np.array([None]))
expected = Series(index=Index([None]))
assert_series_equal(s, expected)
def test_constructor_cast(self):
self.assertRaises(ValueError, Series, ['a', 'b', 'c'], dtype=float)
def test_constructor_dtype_nocast(self):
# 1572
s = Series([1, 2, 3])
s2 = Series(s, dtype=np.int64)
s2[1] = 5
self.assertEqual(s[1], 5)
def test_constructor_datelike_coercion(self):
    """GH 9477: dtype=object must suppress datetime-like inference."""
    # incorrectly inferring datetimelike when object dtype is
    # explicitly specified
    s = Series([Timestamp('20130101'), 'NOV'], dtype=object)
    self.assertEqual(s.iloc[0], Timestamp('20130101'))
    self.assertEqual(s.iloc[1], 'NOV')
    self.assertTrue(s.dtype == object)
    # the dtype was being reset on the slicing and re-inferred to datetime
    # even though the blocks are mixed
    belly = '216 3T19'.split()
    wing1 = '2T15 4H19'.split()
    wing2 = '416 4T20'.split()
    mat = pd.to_datetime('2016-01-22 2019-09-07'.split())
    df = pd.DataFrame(
        {'wing1': wing1,
         'wing2': wing2,
         'mat': mat}, index=belly)
    # row selection mixes datetime and string columns -> must stay object
    result = df.loc['3T19']
    self.assertTrue(result.dtype == object)
    result = df.loc['216']
    self.assertTrue(result.dtype == object)
def test_constructor_dtype_datetime64(self):
    """Construction, coercion, and NaT handling for datetime64[ns] Series."""
    import pandas.tslib as tslib
    s = Series(tslib.iNaT, dtype='M8[ns]', index=lrange(5))
    self.assertTrue(isnull(s).all())
    # in theory this should be all nulls, but since
    # we are not specifying a dtype is ambiguous
    s = Series(tslib.iNaT, index=lrange(5))
    self.assertFalse(isnull(s).all())
    s = Series(nan, dtype='M8[ns]', index=lrange(5))
    self.assertTrue(isnull(s).all())
    s = Series([datetime(2001, 1, 2, 0, 0), tslib.iNaT], dtype='M8[ns]')
    self.assertTrue(isnull(s[1]))
    self.assertEqual(s.dtype, 'M8[ns]')
    s = Series([datetime(2001, 1, 2, 0, 0), nan], dtype='M8[ns]')
    self.assertTrue(isnull(s[1]))
    self.assertEqual(s.dtype, 'M8[ns]')
    # GH3416
    dates = [
        np.datetime64(datetime(2013, 1, 1)),
        np.datetime64(datetime(2013, 1, 2)),
        np.datetime64(datetime(2013, 1, 3)),
    ]
    s = Series(dates)
    self.assertEqual(s.dtype, 'M8[ns]')
    # setting NaN must not change the dtype
    s.ix[0] = np.nan
    self.assertEqual(s.dtype, 'M8[ns]')
    # invalid astypes (only ns resolution is supported)
    for t in ['s', 'D', 'us', 'ms']:
        self.assertRaises(TypeError, s.astype, 'M8[%s]' % t)
    # GH3414 related
    self.assertRaises(TypeError, lambda x: Series(
        Series(dates).astype('int') / 1000000, dtype='M8[ms]'))
    self.assertRaises(TypeError,
                      lambda x: Series(dates, dtype='datetime64'))
    # invalid dates can be held as object
    result = Series([datetime(2, 1, 1)])
    self.assertEqual(result[0], datetime(2, 1, 1, 0, 0))
    result = Series([datetime(3000, 1, 1)])
    self.assertEqual(result[0], datetime(3000, 1, 1, 0, 0))
    # don't mix types
    result = Series([Timestamp('20130101'), 1], index=['a', 'b'])
    self.assertEqual(result['a'], Timestamp('20130101'))
    self.assertEqual(result['b'], 1)
    # GH6529
    # coerce datetime64 non-ns properly
    dates = date_range('01-Jan-2015', '01-Dec-2015', freq='M')
    values2 = dates.view(np.ndarray).astype('datetime64[ns]')
    expected = Series(values2, dates)
    for dtype in ['s', 'D', 'ms', 'us', 'ns']:
        values1 = dates.view(np.ndarray).astype('M8[{0}]'.format(dtype))
        result = Series(values1, dates)
        assert_series_equal(result, expected)
    # leave datetime.date alone
    dates2 = np.array([d.date() for d in dates.to_pydatetime()],
                      dtype=object)
    series1 = Series(dates2, dates)
    self.assert_numpy_array_equal(series1.values, dates2)
    self.assertEqual(series1.dtype, object)
    # these will correctly infer a datetime
    s = Series([None, pd.NaT, '2013-08-05 15:30:00.000001'])
    self.assertEqual(s.dtype, 'datetime64[ns]')
    s = Series([np.nan, pd.NaT, '2013-08-05 15:30:00.000001'])
    self.assertEqual(s.dtype, 'datetime64[ns]')
    s = Series([pd.NaT, None, '2013-08-05 15:30:00.000001'])
    self.assertEqual(s.dtype, 'datetime64[ns]')
    s = Series([pd.NaT, np.nan, '2013-08-05 15:30:00.000001'])
    self.assertEqual(s.dtype, 'datetime64[ns]')
    # tz-aware (UTC and other tz's)
    # GH 8411
    dr = date_range('20130101', periods=3)
    self.assertTrue(Series(dr).iloc[0].tz is None)
    dr = date_range('20130101', periods=3, tz='UTC')
    self.assertTrue(str(Series(dr).iloc[0].tz) == 'UTC')
    dr = date_range('20130101', periods=3, tz='US/Eastern')
    self.assertTrue(str(Series(dr).iloc[0].tz) == 'US/Eastern')
    # non-convertible
    s = Series([1479596223000, -1479590, pd.NaT])
    self.assertTrue(s.dtype == 'object')
    self.assertTrue(s[2] is pd.NaT)
    self.assertTrue('NaT' in str(s))
    # if we passed a NaT it remains
    s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), pd.NaT])
    self.assertTrue(s.dtype == 'object')
    self.assertTrue(s[2] is pd.NaT)
    self.assertTrue('NaT' in str(s))
    # if we passed a nan it remains
    s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), np.nan])
    self.assertTrue(s.dtype == 'object')
    self.assertTrue(s[2] is np.nan)
    self.assertTrue('NaN' in str(s))
def test_constructor_with_datetime_tz(self):
    """GH 8260: support the tz-aware datetime64[ns, tz] extension dtype."""
    # 8260
    # support datetime64 with tz
    dr = date_range('20130101', periods=3, tz='US/Eastern')
    s = Series(dr)
    self.assertTrue(s.dtype.name == 'datetime64[ns, US/Eastern]')
    self.assertTrue(s.dtype == 'datetime64[ns, US/Eastern]')
    self.assertTrue(com.is_datetime64tz_dtype(s.dtype))
    self.assertTrue('datetime64[ns, US/Eastern]' in str(s))
    # export: .values drops the tz and returns naive UTC wall times
    result = s.values
    self.assertIsInstance(result, np.ndarray)
    self.assertTrue(result.dtype == 'datetime64[ns]')
    self.assertTrue(dr.equals(pd.DatetimeIndex(result).tz_localize(
        'UTC').tz_convert(tz=s.dt.tz)))
    # indexing returns tz-aware Timestamps
    result = s.iloc[0]
    self.assertEqual(result, Timestamp('2013-01-01 00:00:00-0500',
                                       tz='US/Eastern', offset='D'))
    result = s[0]
    self.assertEqual(result, Timestamp('2013-01-01 00:00:00-0500',
                                       tz='US/Eastern', offset='D'))
    result = s[Series([True, True, False], index=s.index)]
    assert_series_equal(result, s[0:2])
    result = s.iloc[0:1]
    assert_series_equal(result, Series(dr[0:1]))
    # concat round-trips the dtype
    result = pd.concat([s.iloc[0:1], s.iloc[1:]])
    assert_series_equal(result, s)
    # astype
    result = s.astype(object)
    expected = Series(DatetimeIndex(s._values).asobject)
    assert_series_equal(result, expected)
    result = Series(s.values).dt.tz_localize('UTC').dt.tz_convert(s.dt.tz)
    assert_series_equal(result, s)
    # astype - datetime64[ns, tz]
    result = Series(s.values).astype('datetime64[ns, US/Eastern]')
    assert_series_equal(result, s)
    result = Series(s.values).astype(s.dtype)
    assert_series_equal(result, s)
    result = s.astype('datetime64[ns, CET]')
    expected = Series(date_range('20130101 06:00:00', periods=3, tz='CET'))
    assert_series_equal(result, expected)
    # short str
    self.assertTrue('datetime64[ns, US/Eastern]' in str(s))
    # formatting with NaT
    result = s.shift()
    self.assertTrue('datetime64[ns, US/Eastern]' in str(result))
    self.assertTrue('NaT' in str(result))
    # long str (truncated repr keeps the dtype footer)
    t = Series(date_range('20130101', periods=1000, tz='US/Eastern'))
    self.assertTrue('datetime64[ns, US/Eastern]' in str(t))
    result = pd.DatetimeIndex(s, freq='infer')
    tm.assert_index_equal(result, dr)
    # inference: uniform tz -> tz dtype; mixed tz -> object
    s = Series([pd.Timestamp('2013-01-01 13:00:00-0800', tz='US/Pacific'),
                pd.Timestamp('2013-01-02 14:00:00-0800', tz='US/Pacific')])
    self.assertTrue(s.dtype == 'datetime64[ns, US/Pacific]')
    self.assertTrue(lib.infer_dtype(s) == 'datetime64')
    s = Series([pd.Timestamp('2013-01-01 13:00:00-0800', tz='US/Pacific'),
                pd.Timestamp('2013-01-02 14:00:00-0800', tz='US/Eastern')])
    self.assertTrue(s.dtype == 'object')
    self.assertTrue(lib.infer_dtype(s) == 'datetime')
    # with all NaT
    s = Series(pd.NaT, index=[0, 1], dtype='datetime64[ns, US/Eastern]')
    expected = Series(pd.DatetimeIndex(['NaT', 'NaT'], tz='US/Eastern'))
    assert_series_equal(s, expected)
def test_constructor_periodindex(self):
    """GH 7932: a PeriodIndex put into a Series becomes object dtype."""
    # GH7932
    # converting a PeriodIndex when put in a Series
    pi = period_range('20130101', periods=5, freq='D')
    s = Series(pi)
    expected = Series(pi.asobject)
    assert_series_equal(s, expected)
def test_constructor_dict(self):
    """Dict data is reindexed to the given index, filling NaN for misses."""
    d = {'a': 0., 'b': 1., 'c': 2.}
    result = Series(d, index=['b', 'c', 'd', 'a'])
    expected = Series([1, 2, nan, 0], index=['b', 'c', 'd', 'a'])
    assert_series_equal(result, expected)
    # same rule with a PeriodIndex: keys not in the dict become NaN
    pidx = tm.makePeriodIndex(100)
    d = {pidx[0]: 0, pidx[1]: 1}
    result = Series(d, index=pidx)
    expected = Series(np.nan, pidx)
    expected.ix[0] = 0
    expected.ix[1] = 1
    assert_series_equal(result, expected)
def test_constructor_dict_multiindex(self):
    """Tuple keys produce a MultiIndex; mixed-length keys fall back to flat."""
    check = lambda result, expected: tm.assert_series_equal(
        result, expected, check_dtype=True, check_series_type=True)
    d = {('a', 'a'): 0., ('b', 'a'): 1., ('b', 'c'): 2.}
    _d = sorted(d.items())
    ser = Series(d)
    expected = Series([x[1] for x in _d],
                      index=MultiIndex.from_tuples([x[0] for x in _d]))
    check(ser, expected)
    # adding a non-tuple key prevents tupleization -> plain object Index
    d['z'] = 111.
    _d.insert(0, ('z', d['z']))
    ser = Series(d)
    expected = Series([x[1] for x in _d], index=Index(
        [x[0] for x in _d], tupleize_cols=False))
    ser = ser.reindex(index=expected.index)
    check(ser, expected)
def test_constructor_dict_timedelta_index(self):
    """GH 12169: dict data with a TimedeltaIndex index must align, not NaN."""
    # GH #12169 : Resample category data with timedelta index
    # construct Series from dict as data and TimedeltaIndex as index
    # will result NaN in result Series data
    expected = Series(
        data=['A', 'B', 'C'],
        index=pd.to_timedelta([0, 10, 20], unit='s')
    )
    result = Series(
        data={pd.to_timedelta(0, unit='s'): 'A',
              pd.to_timedelta(10, unit='s'): 'B',
              pd.to_timedelta(20, unit='s'): 'C'},
        index=pd.to_timedelta([0, 10, 20], unit='s')
    )
    # this should work
    assert_series_equal(result, expected)
def test_constructor_subclass_dict(self):
    """A dict subclass constructs the same Series as a plain dict."""
    data = tm.TestSubDict((x, 10.0 * x) for x in range(10))
    series = Series(data)
    refseries = Series(dict(compat.iteritems(data)))
    assert_series_equal(refseries, series)
def test_constructor_dict_datetime64_index(self):
    """GH 9456: np.datetime64, datetime and Timestamp keys behave alike."""
    # GH 9456
    dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']
    values = [42544017.198965244, 1234565, 40512335.181958228, -1]

    def create_data(constructor):
        # build {constructor(date_str): value} for each fixture date
        return dict(zip((constructor(x) for x in dates_as_str), values))

    data_datetime64 = create_data(np.datetime64)
    data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))
    data_Timestamp = create_data(Timestamp)
    expected = Series(values, (Timestamp(x) for x in dates_as_str))
    result_datetime64 = Series(data_datetime64)
    result_datetime = Series(data_datetime)
    result_Timestamp = Series(data_Timestamp)
    assert_series_equal(result_datetime64, expected)
    assert_series_equal(result_datetime, expected)
    assert_series_equal(result_Timestamp, expected)
def test_orderedDict_ctor(self):
    """GH 3283: an OrderedDict argument preserves its insertion order."""
    # GH3283
    import pandas
    import random
    names = ('col%s' % i for i in range(12))
    data = OrderedDict((name, random.random()) for name in names)
    s = pandas.Series(data)
    matches = s.values == list(data.values())
    self.assertTrue(all(matches))
def test_orderedDict_subclass_ctor(self):
    """GH 3283: an OrderedDict subclass also preserves insertion order."""
    # GH3283
    import pandas
    import random

    class A(OrderedDict):
        pass

    data = A([('col%s' % i, random.random()) for i in range(12)])
    s = pandas.Series(data)
    self.assertTrue(all(s.values == list(data.values())))
def test_constructor_list_of_tuples(self):
    """A list of tuples round-trips through Series unchanged."""
    pairs = [(1, 1), (2, 2), (2, 3)]
    series = Series(pairs)
    self.assertEqual(list(series), pairs)
def test_constructor_tuple_of_tuples(self):
    """A tuple of tuples round-trips through Series unchanged."""
    triples = ((1, 1), (2, 2), (2, 3))
    series = Series(triples)
    self.assertEqual(tuple(series), triples)
def test_constructor_set(self):
    """Unordered sets (and frozensets) are rejected with TypeError."""
    values = set([1, 2, 3, 4, 5])
    self.assertRaises(TypeError, Series, values)
    values = frozenset(values)
    self.assertRaises(TypeError, Series, values)
def test_fromDict(self):
    """Dict construction sorts the index and infers a common dtype."""
    data = {'a': 0, 'b': 1, 'c': 2, 'd': 3}
    series = Series(data)
    self.assertTrue(tm.is_sorted(series.index))
    # mixed value types -> object dtype
    data = {'a': 0, 'b': '1', 'c': '2', 'd': datetime.now()}
    series = Series(data)
    self.assertEqual(series.dtype, np.object_)
    data = {'a': 0, 'b': '1', 'c': '2', 'd': '3'}
    series = Series(data)
    self.assertEqual(series.dtype, np.object_)
    # explicit dtype coerces numeric strings
    data = {'a': '0', 'b': '1'}
    series = Series(data, dtype=float)
    self.assertEqual(series.dtype, np.float64)
def test_fromValue(self):
    """A scalar value is broadcast over the index with the inferred dtype."""
    nans = Series(np.NaN, index=self.ts.index)
    self.assertEqual(nans.dtype, np.float_)
    self.assertEqual(len(nans), len(self.ts))
    strings = Series('foo', index=self.ts.index)
    self.assertEqual(strings.dtype, np.object_)
    self.assertEqual(len(strings), len(self.ts))
    # a datetime scalar broadcasts to datetime64[ns]
    d = datetime.now()
    dates = Series(d, index=self.ts.index)
    self.assertEqual(dates.dtype, 'M8[ns]')
    self.assertEqual(len(dates), len(self.ts))
def test_constructor_dtype_timedelta64(self):
    """Construction, NaT mixing and inference for timedelta64[ns] Series."""
    # basic
    td = Series([timedelta(days=i) for i in range(3)])
    self.assertEqual(td.dtype, 'timedelta64[ns]')
    td = Series([timedelta(days=1)])
    self.assertEqual(td.dtype, 'timedelta64[ns]')
    td = Series([timedelta(days=1), timedelta(days=2), np.timedelta64(
        1, 's')])
    self.assertEqual(td.dtype, 'timedelta64[ns]')
    # mixed with NaT
    from pandas import tslib
    td = Series([timedelta(days=1), tslib.NaT], dtype='m8[ns]')
    self.assertEqual(td.dtype, 'timedelta64[ns]')
    td = Series([timedelta(days=1), np.nan], dtype='m8[ns]')
    self.assertEqual(td.dtype, 'timedelta64[ns]')
    td = Series([np.timedelta64(300000000), pd.NaT], dtype='m8[ns]')
    self.assertEqual(td.dtype, 'timedelta64[ns]')
    # improved inference
    # GH5689
    td = Series([np.timedelta64(300000000), pd.NaT])
    self.assertEqual(td.dtype, 'timedelta64[ns]')
    td = Series([np.timedelta64(300000000), tslib.iNaT])
    self.assertEqual(td.dtype, 'timedelta64[ns]')
    td = Series([np.timedelta64(300000000), np.nan])
    self.assertEqual(td.dtype, 'timedelta64[ns]')
    td = Series([pd.NaT, np.timedelta64(300000000)])
    self.assertEqual(td.dtype, 'timedelta64[ns]')
    td = Series([np.timedelta64(1, 's')])
    self.assertEqual(td.dtype, 'timedelta64[ns]')
    # these are frequency conversion astypes
    # for t in ['s', 'D', 'us', 'ms']:
    #    self.assertRaises(TypeError, td.astype, 'm8[%s]' % t)
    # valid astype
    td.astype('int64')
    # invalid casting
    self.assertRaises(TypeError, td.astype, 'int32')

    # this is an invalid casting
    def f():
        Series([timedelta(days=1), 'foo'], dtype='m8[ns]')
    self.assertRaises(Exception, f)
    # leave as object here
    td = Series([timedelta(days=i) for i in range(3)] + ['foo'])
    self.assertEqual(td.dtype, 'object')
    # these will correctly infer a timedelta
    s = Series([None, pd.NaT, '1 Day'])
    self.assertEqual(s.dtype, 'timedelta64[ns]')
    s = Series([np.nan, pd.NaT, '1 Day'])
    self.assertEqual(s.dtype, 'timedelta64[ns]')
    s = Series([pd.NaT, None, '1 Day'])
    self.assertEqual(s.dtype, 'timedelta64[ns]')
    s = Series([pd.NaT, np.nan, '1 Day'])
    self.assertEqual(s.dtype, 'timedelta64[ns]')
| [
"jeff@reback.net"
] | jeff@reback.net |
68a763f7ac69f4b4787488d915ee751df8e07af7 | a913d347c5a46fd7ff28415dfebe9b10829fdef9 | /tests/test_puzzle.py | d6a4847a136bbbf2416b8eac474fb403a4a2fe16 | [] | no_license | RZachLamberty/logicpuzzlesolver | 6d9ba414e549b13396573ea0d875dae8f592c9b9 | b8a93e5bda76bb8e3afc66ef902c42b06c532e8c | refs/heads/master | 2021-01-10T16:11:38.742828 | 2015-12-30T19:42:20 | 2015-12-30T19:42:20 | 47,845,121 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,486 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module: test_puzzle.py
Author: zlamberty
Created: 2015-12-19
Description:
test the puzzle class
Usage:
<usage>
"""
import os
import pandas as pd
import unittest
import categories
import rulelist
import puzzle
# Directory holding the numbered puzzle fixture files used by the tests.
CONFIG = os.path.join(
    os.path.dirname(os.path.realpath(__file__)),
    'config'
)
# Fixture filename template, e.g. "001.rules.txt" (num zero-padded to 3).
FMT = os.path.join(CONFIG, '{num:0>3.0f}.{ftype:}.{ext:}')
class TestLogicPuzzle(unittest.TestCase):
    """Solve fixture puzzle *num* and compare against the saved solution.

    The constructor takes the puzzle number and derives all fixture paths
    from the FMT template before delegating to unittest.TestCase.
    """

    def __init__(self, num, *args, **kwargs):
        self.num = num
        # fixture paths: categories, rules (yaml + txt) and expected solution
        self.fcatyaml = FMT.format(num=self.num, ftype='categories', ext='yaml')
        self.fruleyaml = FMT.format(num=self.num, ftype='rules', ext='yaml')
        self.fruletxt = FMT.format(num=self.num, ftype='rules', ext='txt')
        self.fsolyaml = FMT.format(num=self.num, ftype='solution', ext='yaml')
        self.fsoltxt = FMT.format(num=self.num, ftype='solution', ext='txt')
        self.fsolcsv = FMT.format(num=self.num, ftype='solution', ext='csv')
        super(TestLogicPuzzle, self).__init__(*args, **kwargs)

    def setUp(self):
        """Build the puzzle from the category and rule fixtures."""
        self.c = categories.CategoriesFromYaml(self.fcatyaml)
        self.r = rulelist.RulesFromFile(self.fruletxt, self.c)
        self.p = puzzle.LogicPuzzle(self.c, self.r)

    def test_solve(self):
        """Solving must reproduce the expected CSV solution exactly."""
        self.p.solve()
        a = self.p.solution
        a = a.reset_index()
        b = pd.read_csv(self.fsolcsv)
        # BUG FIX: assertEqual on two DataFrames raises "The truth value of
        # a DataFrame is ambiguous" instead of comparing them; use pandas'
        # dedicated frame comparison helper.
        pd.testing.assert_frame_equal(a, b)
if __name__ == '__main__':
    # Running this module directly executes the unittest CLI runner.
    unittest.main()
| [
"r.zach.lamberty@gmail.com"
] | r.zach.lamberty@gmail.com |
dfc4e92940c3353a5ef279149b589cbbb49540bf | 0958c33f05f5d3922c47cccadebc9e70394b8a78 | /PowerClassification/ResultAnalysis-Test09/CompareTwoExperiments.py | 50818376fc11311b1a9bfed095e101098bca4b6b | [] | no_license | jabarragann/eeg_project_gnaut_power_band_analysis | 0b33a5ebdeffd37b64094ba01f8dbd94f9baf961 | 3f7a7183593eb54a63efcff3762fb2144a0af2df | refs/heads/master | 2021-08-10T20:47:23.266562 | 2021-07-21T02:37:53 | 2021-07-21T02:37:53 | 245,556,675 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,517 | py | import pandas as pd
from pathlib import Path
import numpy as np
import re
import matplotlib.pyplot as plt
import seaborn as sns
# sns.set_theme(style="whitegrid")
# experiment1Path = {"path":"aa16b_pyprep_complete/", "condition":"FullSet"}
# experiment2Path = {"path":"aa16_pyprep_complete/" , "condition":"SubSet"}
if __name__ =='__main__':
experiment1Path = {"path":"aa14_pyprep_complete/", "condition":"FullSet"}
experiment2Path = {"path":"aa15b_pyprep_complete/" , "condition":"SubSet"}
windowToAnalyze = 20
sizeToAnalyze = 150
rootPath = Path('./').resolve().parent / 'results' / 'EegResults' /'results_transfer9'
total = []
for exp in [experiment1Path, experiment2Path]:
p = rootPath / exp['path']
for file in p.glob('*.csv'):
windowSize = int(re.findall('(?<=dow)[0-9]+(?=s)',file.name)[0][-2:])
sampleSize = int(re.findall('(?<=Size)[0-9]+(?=s\.csv)',file.name)[0])
if windowToAnalyze == windowSize and sizeToAnalyze == sampleSize:
print(file.name, windowSize, sampleSize)
#Load data
df = pd.read_csv(file, sep = ',')
df['condition'] = exp['condition']
total.append(df)
final = pd.concat(total)
# ax = sns.boxplot(x="User", y="TestAcc", hue="condition",
# data=final, palette="Set3")
ax = sns.boxplot(y="TestAcc", x="condition",
data=final, palette="Set3")
plt.show()
x = 0 | [
"barragan@purdue.edu"
] | barragan@purdue.edu |
0ef3cfd0055514e113ce24309ed7507310fba9f9 | 3dd2d572213cdddbc4e1a47656b344bada018b61 | /server/code/game/mgr/tbox.py | 30b8f796e4868668e70901a4bf0b2ca80dc4d7a1 | [
"MIT"
] | permissive | hw233/twisted_zdzl | d7632dfd20ac6e8fcf7beecaeeda327de075359d | fb97923c503aa032b08774323cda2b2c603efe41 | refs/heads/master | 2020-03-19T09:48:13.108622 | 2016-05-05T06:51:34 | 2016-05-05T06:51:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,682 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import time
from corelib import log, RLock
from store.store import StoreObj, GameObj
from game import BaseGameMgr, Game
from game.base import common
from game.base.msg_define import (MSG_START, MSG_TBOX_FPASS,
MSG_TBOX_MPASS, MSG_TBOX_PASS, MSG_TBOX_MDIE)
from game.store import TN_P_TBOX, TN_P_TBOXNEWS
from game.base.constant import (WAITBAG_TYPE_TIMEBOX, TBOX_FREE_NUM, TBOX_FREE_NUM_V,
TBOX_COIN_NUM, TBOX_COIN_NUM_V, TBOX_COINS, TBOX_COINS_V, TBOX_KILL_LEVEL,
TBOX_KILL_LEVEL_V, PLAYER_ATTR_TBOX, PT_FT, PT_CH, PT_WAITS, REPORT_TYPE_TBOX,
REPORT_TBOX_URL, PT_VIP, TBOX_HITE_TIME,
)
from game.glog.common import COIN_TBOX_RESET
from game.base import errcode
from game.base.msg_define import MSG_RES_RELOAD
import app
# play-attribute key for the chapter field
PLAYER_TBOX_CHAPTER = 'chapter'
# monster alive/dead flags
MONSTER_DIE = 0
MONSTER_LIVE = 1
# number of monsters per chapter
MONSTER_NUM = 5
# maximum star rating for a hunt
TBOX_LEVEL_MAX = 5
# number of leaderboard slots per monster
ORDER_MAX = 3
# battle reports:
# initial (empty) leaderboard — one None slot per rank
INIT_NEWS = [None, None, None]
# indices into the player_data tuple (id, name, level)
INDEX_ID = 0
INDEX_NAME = 1
INDEX_LEVEL = 2
#防止多人同时访问
def _wrap_lock(func):
def _func(self, *args, **kw):
with self._lock:
return func(self, *args, **kw)
return _func
class GTboxNewsMgr(object):
    """Global manager for time-box battle reports and per-monster rankings."""
    # name under which this manager is exposed on the Game object
    _rpc_name_ = 'rpc_tboxnews_mgr'

    def __init__(self):
        setattr(Game, self._rpc_name_, self)
        # load persisted rankings once the game broadcasts MSG_START
        app.sub(MSG_START, self.start)
        # rankings: {(chapter_id, res_tbox_id): [1st, 2nd, 3rd]};
        # an unfilled slot holds None, e.g. [TboxNews, None, None]
        self.news = {}
        # guards concurrent ranking updates (used via @_wrap_lock)
        self._lock = RLock()

    def start(self):
        """Load every persisted ranking row into the in-memory table."""
        all_news = Game.rpc_store.load_all(TN_P_TBOXNEWS)
        for news in all_news:
            oTboxNews = TboxNews(news)
            key = (oTboxNews.data.chapter, oTboxNews.data.tbid)
            tNews = self.news.setdefault(key, INIT_NEWS[:])
            #log.debug('tews----%s %s', oTboxNews.data.ranking, key)
            # ignore stale rows ranked below the leaderboard size
            if oTboxNews.data.ranking > ORDER_MAX:
                continue
            tNews[oTboxNews.data.ranking-1] = oTboxNews
            self.news[key] = tNews

    def clear(self):
        """Delete all ranking data, both in memory and from the store."""
        store = Game.rpc_store
        for news in self.news.itervalues():
            if not news:
                continue
            for new in news:
                if not new:
                    continue
                new.delete(store)
        self.news.clear()

    @_wrap_lock
    def handle_rank(self, player_data, aChapter, aHitLevel, aFight, tResTBoxId):
        """Try to place a player's result on the (chapter, box) leaderboard.

        player_data is the (id, name, level) tuple (see INDEX_* constants).
        Returns True when the leaderboard changed.
        """
        #log.debug('handle_rank :: %s', aFight)
        if not aFight:
            return False
        key = (aChapter, tResTBoxId)
        #log.debug('achapter : %s, tResTBoxId: %s', aChapter, tResTBoxId)
        all_news = self.news.setdefault(key, INIT_NEWS[:])
        for index, news in enumerate(all_news):
            if not news:
                # empty slot: take it directly
                self._add_news(player_data, aChapter, aHitLevel, index+1, aFight, tResTBoxId)
                return True
            if player_data[INDEX_ID] == news.data.pid:
                # the player already holds a slot at or above this rank
                return False
            if not self._is_updae_rank(player_data, aHitLevel, aFight, news):
                continue
            self._update_news_rank(player_data, news, aHitLevel, aFight)
            return True
        return False

    def _is_updae_rank(self, player_data, aHitLevel, aFight, news):
        """Return True when the new result should displace *news*.

        Compared in order: higher star level wins; on ties, the lower
        player level wins; on a further tie, the lower fight score wins
        (NOTE(review): strictly-lower fight on full tie — confirm the
        intended tie-break direction).
        """
        if aHitLevel < news.data.hitLevel:
            return False
        elif aHitLevel == news.data.hitLevel:
            if player_data[INDEX_LEVEL] > news.data.level:
                return False
            elif player_data[INDEX_LEVEL] == news.data.level:
                if aFight >= news.data.fight:
                    return False
        return True

    def _add_news(self, player_data, aChapter, aHitLevel, aRank, aFight, aResTBoxId):
        """Create a ranking entry and drop it into the given 1-based slot."""
        oTboxNews = self._new_news(player_data, aChapter, aHitLevel, aRank, aFight, aResTBoxId)
        key = (aChapter, aResTBoxId)
        self.news[key][aRank-1] = oTboxNews

    def _new_news(self, player_data, aChapter, aHitLevel, aRank, aFight, aResTBoxId):
        """Build, persist and return a new TboxNews record."""
        oTboxNews = TboxNews()
        oTboxNews.data.chapter = aChapter
        oTboxNews.data.tbid = aResTBoxId
        oTboxNews.data.ranking = aRank
        oTboxNews.data.pid = player_data[INDEX_ID]
        oTboxNews.data.name = player_data[INDEX_NAME]
        oTboxNews.data.hitLevel = aHitLevel
        oTboxNews.data.level = player_data[INDEX_LEVEL]
        oTboxNews.data.fight = aFight
        # persist immediately so the ranking survives a restart
        store = Game.rpc_store
        oTboxNews.save(store)
        return oTboxNews

    def _update_news_rank(self, player_data, aTboxNews, aHitLevel, aFight):
        """Insert a new entry at *aTboxNews*'s rank and shift lower ranks down."""
        oTboxNews = self._new_news(player_data, aTboxNews.data.chapter,
                aHitLevel, aTboxNews.data.ranking, aFight, aTboxNews.data.tbid)
        key = (aTboxNews.data.chapter, aTboxNews.data.tbid)
        all_news = self.news[key]
        #log.debug('len(all_news) = %s',len(all_news))
        all_news.insert(oTboxNews.data.ranking-1, oTboxNews)
        all_len = len(all_news)
        store = Game.rpc_store
        # trim entries pushed past the last leaderboard slot
        while all_len > ORDER_MAX:
            all_len -= 1
            box_news = all_news.pop()
            if not box_news:
                continue
            store.delete(TN_P_TBOXNEWS, box_news.data.id)
        # renumber and re-save the surviving entries whose rank moved
        for index, tbox_news in enumerate(all_news):
            rank = index + 1
            if tbox_news and rank != tbox_news.data.ranking:
                tbox_news.data.ranking = rank
                store.save(TN_P_TBOXNEWS, tbox_news.to_dict())

    def sub_news(self, pid, chapter, tbid, war_news):
        """Attach a submitted battle report to the player's ranking entry.

        Returns (True, None) on success, (False, errcode) otherwise.
        """
        key = (chapter, tbid)
        #log.debug('sub_news--key = %s, len = %s', key, len(war_news))
        if not self.news.has_key(key):
            return False, errcode.EC_TBOX_NORANK
        newsList = self.news[key]
        o = None
        for o in newsList:
            if o is None:
                # slots are filled front-to-back: a None means pid is absent
                return False, errcode.EC_TBOX_NORANK
            if o.data.pid != pid:
                continue
            self.save_war_news(key, o, war_news)
            break
        if not o:
            return False, errcode.EC_TBOX_NORANK
        return True, None

    def save_war_news(self, key, news, war_news):
        """Persist a battle report file and store its id on the entry."""
        # report filename: chapter_boxid_rank_timestamp
        file = '%d_%d_%d_%d' % (key[0], key[1], news.data.ranking, int(time.time()))
        pids = [news.data.pid]
        fid = Game.rpc_report_mgr.save(REPORT_TYPE_TBOX, pids, war_news,
                url=[REPORT_TBOX_URL, file])
        news.data.fid = fid
        store = Game.rpc_store
        store.save(TN_P_TBOXNEWS, news.to_dict())

    def get_rank(self, aChapter, tbid):
        """Return the leaderboard for a (chapter, box) as client-ready dicts."""
        key = (aChapter, tbid)
        if not self.news.has_key(key):
            return False, errcode.EC_TBOX_NORANK
        send = []
        for news in self.news[key]:
            if not news:
                continue
            send.append({'rank':news.data.ranking,
                        'name':news.data.name,
                        'fid':news.data.fid})
        return True, {'ranks': send}

    def get_news(self, key, rank):
        """Return the stored report for the entry at *rank* (1-based)."""
        if self.news.has_key(key):
            tBoxNews = self.news[key][rank-1]
            # NOTE(review): TBoxNewsData declares no 'news' slot — verify
            # this attribute actually exists on the data object
            return True, tBoxNews.data.news
        return False, errcode.EC_TBOX_NORANK
def new_tboxnews_mgr():
    """Factory: create and return the global time-box report manager."""
    return GTboxNewsMgr()
class TboxMgr(BaseGameMgr):
    """Game-level manager for the time-box (monster hunting) feature."""

    def __init__(self, game):
        super(TboxMgr, self).__init__(game)
        # placeholder; replaced with a level-region lookup in lood()
        self._free_nums = 0
        self.set_active_num(0)

    def start(self):
        """Subscribe to resource reloads and load tunable settings."""
        self._game.sub(MSG_RES_RELOAD, self.lood)
        self.lood()

    def init_player_tbox(self, player):
        """Return (lazily creating) the player's PlayTbox runtime object."""
        oPTboxDatas = getattr(player.runtimes, TN_P_TBOX, None)
        if oPTboxDatas is None:
            oPTboxDatas = PlayTbox(player, self)
            oPTboxDatas.load()
            setattr(player.runtimes, TN_P_TBOX, oPTboxDatas)
        return oPTboxDatas

    def enter(self, player, aChapter):
        """Enter the time-box of the given chapter."""
        ptbox = self.init_player_tbox(player)
        return ptbox.enter(aChapter)

    def hit_end(self, player, aChapter, aFight=0, aLevel=0):
        """Finish a hunt; on success, also try to update the leaderboard."""
        ptbox = self.init_player_tbox(player)
        rs, data, tResTBoxId = ptbox.hit_end(aChapter, aLevel)
        if rs is False:
            return rs, data
        if data['sub']:
            # forward the result to the ranking manager; 'sub' tells the
            # client whether a ranking slot was actually taken
            rpc_news = self._game.rpc_tboxnews_mgr
            player_data = (player.data.id, player.data.name, player.data.level)
            rs = rpc_news.handle_rank(player_data, aChapter, aLevel, aFight, tResTBoxId)
            data['sub'] = rs
        return True, data

    def reset(self, player, aChapter):
        """Reset the chapter's monsters for the player."""
        ptbox = self.init_player_tbox(player)
        return ptbox.reset(aChapter)

    def kill(self, player, aChapter):
        """Instantly kill (sweep) the chapter's monsters."""
        ptbox = self.init_player_tbox(player)
        return ptbox.kill(aChapter)

    def get_rank(self, aChapter, tbid):
        """Return the leaderboard for a (chapter, box)."""
        return self._game.rpc_tboxnews_mgr.get_rank(aChapter, tbid)

    def get_news(self, chapter, tbid, rank):
        """Return the battle report at the given rank (1..3)."""
        # NOTE: literal 3 here mirrors ORDER_MAX
        if rank > 3 or rank < 1:
            return False, errcode.EC_VALUE
        key = (chapter, tbid)
        return self._game.rpc_tboxnews_mgr.get_news(key, rank)

    def sub_news(self, pid, chapter, tbid, war_news):
        """Submit a battle report for the player's ranking entry."""
        return self._game.rpc_tboxnews_mgr.sub_news(pid, chapter, tbid, war_news)

    def add_monster(self, player, res_tbox_id):
        """Add a monster unlocked by a task to the player's time-box.

        NOTE(review): disabled by the `if 1: return` guard below — the
        remainder is dead code kept for reference.
        """
        if 1:
            return
        tResTbox = self._game.res_mgr.tboxs.get(res_tbox_id)
        if not tResTbox:
            return False, errcode.EC_NORES
        ptbox = self.init_player_tbox(player)
        if not ptbox.p_tboxs.has_key(tResTbox.chapter):
            # first monster of the chapter: create the chapter box
            obox = self._new_box(player, ptbox, tResTbox.chapter, res_tbox_id)
            ptbox.p_tboxs[tResTbox.chapter] = obox
            return
        # append to an existing chapter box
        tPTbox = ptbox.p_tboxs[tResTbox.chapter]
        if len(tPTbox.data.tbids) >= MONSTER_NUM or res_tbox_id in tPTbox.data.tbids:
            return
        tPTbox.data.tbids.append(res_tbox_id)
        tPTbox.data.isLives.append(MONSTER_LIVE)
        tPTbox.data.levels.append(0)
        ptbox.p_tboxs[tResTbox.chapter] = tPTbox
        ptbox.update_tbox(tPTbox)
        return

    def _new_box(self, player, ptbox, chapter, res_tbox_id):
        """Build a fresh TBox for a chapter with one monster in it."""
        free_num = self.get_free_num(ptbox.p_attr[PT_VIP])
        tCoinNum = self._game.setting_mgr.setdefault(TBOX_COIN_NUM, TBOX_COIN_NUM_V)
        oTbox = TBox()
        oTbox.data.pid = player.data.id
        oTbox.data.chapter = chapter
        oTbox.data.tbids = [res_tbox_id]
        oTbox.data.isLives = [MONSTER_LIVE]
        oTbox.data.levels = [0]
        oTbox.data.re1 = free_num
        oTbox.data.re2 = tCoinNum
        return oTbox

    def get_free_num(self, vip):
        """Free reset count for a vip level, plus any event bonus.

        Assumes _free_nums (from common.make_lv_regions) is callable —
        valid only after lood() has run.
        """
        return self._free_nums(vip) + self._active_num

    def lood(self):
        """(Re)load tunables from settings. NOTE: 'lood' is a legacy typo
        for 'load'; kept because it is wired to MSG_RES_RELOAD."""
        free_nums = self._game.setting_mgr.setdefault(TBOX_FREE_NUM, TBOX_FREE_NUM_V)
        self.set_max_free_num(free_nums)

    def set_max_free_num(self, num_str):
        """Parse the per-vip-level free-reset config string."""
        self._free_nums = common.make_lv_regions(num_str)

    def set_active_num(self, num):
        """Set the event-granted extra free reset count."""
        self._active_num = num
class TBoxNewsData(StoreObj):
    """Persistent fields of one leaderboard/battle-report row."""
    __slots__ = ('id', 'chapter', 'tbid', 'ranking', 'fid', 'pid',
            'name', 'hitLevel','level', 'fight'
            )
    def init(self):
        # row id (int)
        self.id = None
        # chapter id (int)
        self.chapter = 0
        # time-box resource-table id (int)
        self.tbid = 0
        # leaderboard rank, 1-based (int)
        self.ranking = 0
        # battle-report file id (int)
        self.fid = 0
        # player id
        self.pid = 0
        # player name (str)
        self.name = ""
        # star level of the hunt (int)
        self.hitLevel = 0
        # player level (int)
        self.level = 0
        # player fight score (int)
        self.fight = 0
class TboxNews(GameObj):
    """Game object wrapper around one TBoxNewsData leaderboard row."""
    __slots__ = GameObj.__slots__
    TABLE_NAME = TN_P_TBOXNEWS
    DATA_CLS = TBoxNewsData
    def __init__(self, adict=None):
        # pure pass-through; kept for symmetry with other GameObj subclasses
        super(TboxNews, self).__init__(adict=adict)
    def update(self, adict):
        # pure pass-through override; adds no behaviour
        super(TboxNews, self).update(adict)
class TBoxData(StoreObj):
    """Persistent per-player, per-chapter time-box state."""
    __slots__ = ('id', 'pid', 'chapter', 'tbids', 'isLives', 'levels', 're1', 're2')
    def init(self):
        # row id (int)
        self.id = None
        # owning player id
        self.pid = 0
        # chapter id (int)
        self.chapter = 0
        # boss resource ids in this chapter (list)
        self.tbids = []
        # alive flags, parallel to tbids (MONSTER_LIVE / MONSTER_DIE)
        self.isLives = []
        # best star level achieved per boss, parallel to tbids
        self.levels = []
        # remaining free resets today (int)
        self.re1 = 0
        # remaining paid (coin) resets today (int)
        self.re2 = 0
class TBox(GameObj):
    """Game object wrapper around one TBoxData chapter row."""
    __slots__ = GameObj.__slots__
    TABLE_NAME = TN_P_TBOX
    DATA_CLS = TBoxData
    def __init__(self, adict=None):
        super(TBox, self).__init__(adict=adict)
    def new_tbox(self, pid, chapter, res_tboxids, re1, re2):
        """Populate this box with a chapter's full monster roster at once."""
        self.data.pid = pid
        self.data.chapter = chapter
        self.data.tbids = res_tboxids
        self.data.isLives = [MONSTER_LIVE] * MONSTER_NUM
        self.data.levels = [0] * MONSTER_NUM
        self.data.re1 = re1
        self.data.re2 = re2
    def update_tbox(self, res_tboxids):
        """Replace the roster with the full list, resetting alive flags
        and best levels (fills in monsters missing from this chapter)."""
        self.data.tbids = res_tboxids
        self.data.isLives = [MONSTER_LIVE] * MONSTER_NUM
        self.data.levels = [0] * MONSTER_NUM
class PlayTbox(object):
""" 玩家数据管理类 """
def __init__(self, player, tbox_mgr):
    """Runtime time-box state for one player."""
    self.player = player
    self.tbox_mgr = tbox_mgr
    # per-chapter boxes: {chapter_id: TBox, ...}
    self.p_tboxs = {}
    # player attribute record: {PT_* key: value, ...}
    self.p_attr = {}
    # timestamp when the last hunt finished
    self.hit_end_time = 0
def __getstate__(self):
return self.p_tboxs, self.p_attr, self.hit_end_time
def __setstate__(self, data):
self.p_tboxs, self.p_attr, self.hit_end_time = data
def uninit(self):
self.player = None
self.p_tboxs = {}
self.p_attr = {}
def load(self, player = None):
    """Load the player's chapter boxes and attribute record from the store."""
    tTBoxs = self.player._game.rpc_store.query_loads(TN_P_TBOX, dict(pid=self.player.data.id))
    for tTBox in tTBoxs:
        o = TBox(tTBox)
        self.p_tboxs[tTBox[PLAYER_TBOX_CHAPTER]] = o
    v = self.player.play_attr.get(PLAYER_ATTR_TBOX)
    if v is None:
        # first use: create and persist the initial attribute record
        v = self.make_tbox_data()
        self.player.play_attr.set(PLAYER_ATTR_TBOX, v)
    if not v.has_key(PT_VIP):
        # older records predate the vip field; backfill it
        v[PT_VIP] = self.player.data.vip
    self.p_attr = v
    self.add_monster()
def add_monster(self):
    """Create or top up the monster roster for every unlocked chapter."""
    chapter_id = self.player.data.chapter
    pid = self.player.data.id
    r1 = self.tbox_mgr.get_free_num(self.p_attr[PT_VIP])
    r2 = self.player._game.setting_mgr.setdefault(TBOX_COIN_NUM, TBOX_COIN_NUM_V)
    res_mgr = self.player._game.res_mgr
    # walk backwards from the player's current chapter down to chapter 2
    while chapter_id > 1:
        res_tbox_ids = res_mgr.tboxs_by_chapter.get(chapter_id)
        res_tbox_ids.sort()
        if self.p_tboxs.has_key(chapter_id):
            tbox = self.p_tboxs.get(chapter_id)
            # partially-filled chapter: complete its roster
            if len(tbox.data.tbids) < MONSTER_NUM:
                tbox.update_tbox(res_tbox_ids)
        else:
            res_chapter = res_mgr.chapters.get(chapter_id)
            # chapter 2 is always available; later chapters unlock once
            # their start task has been taken
            if chapter_id == 2 or res_chapter.startTid in self.player.task.tid_bm:
                oTbox = TBox()
                oTbox.new_tbox(pid, chapter_id, res_tbox_ids, r1, r2)
                self.p_tboxs[chapter_id] = oTbox
        chapter_id -= 1
def gm_change_data(self, chapter, tbids):
    """GM tool: overwrite a chapter's boss list from a comma-separated str.

    The alive-flag and best-level lists are resized to match; returns
    True on success, False when the chapter is unknown or tbids empty.
    """
    tbox = self.p_tboxs.get(chapter)
    if not tbox or not tbids:
        return False
    tbids = tbids.split(',')
    tbids = map(int, tbids)
    old_tbids_len = len(tbox.data.tbids)
    tbox.data.tbids = tbids
    new_tbids_len = len(tbids)
    if new_tbids_len == old_tbids_len:
        return True
    if new_tbids_len > old_tbids_len:
        # grow: pad the parallel lists with fresh entries
        add_num = new_tbids_len - old_tbids_len
        for i in xrange(add_num):
            tbox.data.isLives.append(MONSTER_LIVE)
            tbox.data.levels.append(0)
    else:
        # shrink: truncate the parallel lists
        tbox.data.isLives = tbox.data.isLives[:new_tbids_len]
        tbox.data.levels = tbox.data.levels[:new_tbids_len]
    return True
def make_tbox_data(self):
    """Build the initial play_attr record for the time-box feature."""
    created = int(time.time())
    record = {PT_CH: 0, PT_WAITS: [], PT_VIP: self.player.data.vip}
    record[PT_FT] = created
    return record
def save(self, store):
    """Persist every chapter box and the attribute record."""
    for tPTbox in self.p_tboxs.itervalues():
        tPTbox.save(store)
    self.player.play_attr.update_attr({PLAYER_ATTR_TBOX:self.p_attr})
def copy_from(self, player):
    """Replace this player's boxes with deep copies of another player's.

    Existing rows are deleted from the store first; copied rows get a
    fresh id and this player's pid before being persisted.
    """
    from_p_tboxs = getattr(player.runtimes, TN_P_TBOX)
    if from_p_tboxs is None:
        return
    import copy
    from_p_tboxs = copy.deepcopy(from_p_tboxs.p_tboxs)
    if self.p_tboxs:
        for o in self.p_tboxs.itervalues():
            o.delete(self.player._game.rpc_store)
        self.p_tboxs = {}
    if not from_p_tboxs and not self.p_tboxs:
        return
    for o in from_p_tboxs.itervalues():
        #log.debug('chapter1---[%s]', o.data.chapter)
        no = TBox(adict=o.data.to_dict())
        # clear the id so the store assigns a new one, and re-own the row
        no.data.id = None
        no.data.pid = self.player.data.id
        no.save(self.player._game.rpc_store)
        self.p_tboxs[no.data.chapter] = no
def handle_pass_day(self, fetch=False):
    """Daily rollover: revive monsters, refill reset counters.

    When *fetch* is False, also purge un-collected wait-bag rewards and
    reset the attribute record; returns the deleted wait-bag ids, or
    None when no day boundary was crossed. With fetch=True the pending
    rewards are kept (caller is collecting them).
    """
    # has a day boundary been crossed since the record's timestamp?
    if common.is_pass_day(self.p_attr[PT_FT]):
        # refresh every chapter box
        setting_mgr = self.player._game.setting_mgr
        tCoinNum = setting_mgr.setdefault(TBOX_COIN_NUM, TBOX_COIN_NUM_V)
        #log.debug('p_tboxs::[%s]', self.p_tboxs)
        for tbox in self.p_tboxs.itervalues():
            tbox.data.isLives = [MONSTER_LIVE]*len(tbox.data.tbids)
            tbox.data.re1 = self.tbox_mgr.get_free_num(self.player.data.vip)
            tbox.data.re2 = tCoinNum
            self.update_tbox(tbox)
        # refresh the player attribute record
        if not fetch:
            del_wids = []
            for wid in self.p_attr[PT_WAITS]:
                # drop the un-collected reward
                self.player.wait_bag.delete(wid)
                del_wids.append(wid)
            self.p_attr[PT_CH] = 0
            self.p_attr[PT_WAITS] = []
            self.p_attr[PT_FT] = int(time.time())
            self.p_attr[PT_VIP] = self.player.data.vip
            return del_wids
    return
def _handle_vip_up(self):
    """Grant the extra free resets earned by a VIP level increase.

    Compares the current vip level against the one cached in the player
    attr block and adds the free-reset delta to every chapter box.
    """
    # legacy data migration: PT_WAITS used to be stored as an int
    if isinstance(self.p_attr[PT_WAITS], int):
        self.p_attr[PT_WAITS] = []
    now_vip = self.player.data.vip
    old_vip = self.p_attr[PT_VIP]
    if now_vip == old_vip:
        return
    old_free_num = self.tbox_mgr.get_free_num(old_vip)
    now_free_num = self.tbox_mgr.get_free_num(now_vip)
    add_num = now_free_num - old_free_num
    for tbox in self.p_tboxs.itervalues():
        tbox.data.re1 += add_num
        self.update_tbox(tbox)
    self.p_attr[PT_VIP]= now_vip
def enter(self, aChapter):
    """Open the time box view for *aChapter*.

    Applies any pending VIP upgrade and day rollover first; a pending
    reward forces the view back to the chapter that produced it.

    Returns (False, errcode) or (True, msg_data) where msg_data carries
    the chapter tbox dict and the highest generated chapter id.
    """
    if not self.p_tboxs:
        return False, errcode.EC_TBOX_NOCHAPTER
    self._handle_vip_up()
    # clean up pending items that expired with the day rollover
    del_wids = self.handle_pass_day()
    data = {}
    if del_wids:
        data = self.player.pack_msg_data(del_wids=del_wids)
    # pending rewards pin the player to the chapter that produced them
    if self.p_attr[PT_CH] and self.p_attr[PT_WAITS]:
        aChapter = self.p_attr[PT_CH]
    keys = self.p_tboxs.keys()
    keys.sort()
    max_cid = keys[-1]
    # player progressed past the last generated chapter: generate more
    if self.player.data.chapter > max_cid:
        self.add_monster()
    if not self.p_tboxs.has_key(aChapter):
        aChapter = max_cid
    data.update({'tbox':self.p_tboxs[aChapter].to_dict(), 'maxc':max_cid})
    #log.debug('enter - aChapter::[%s]', aChapter)
    return True, data
def get_bids(self):
    """Return {chapter: [monster ids already beaten]} across all boxes.

    A slot counts as beaten when its recorded star level is non-zero.
    """
    result = {}
    for tbox in self.p_tboxs.itervalues():
        cleared = result.setdefault(tbox.data.chapter, [])
        for idx, star in enumerate(tbox.data.levels):
            if star:
                cleared.append(tbox.data.tbids[idx])
    return result
def hit_end(self, chapter, aLevel=0):
    """Settle a finished monster hunt in *chapter*.

    *aLevel* is the star rating earned. The next living slot (first
    non-dead one) is marked dead, its reward is rolled and parked in the
    wait bag, and the updated tbox state is returned.

    Returns (ok, errcode_or_msg_data, res_tbox_id_or_None).
    """
    #log.debug('hit_end::[%s]', chapter)
    now = common.current_time()
    use_time = now - self.hit_end_time
    self.hit_end_time = now
    # anti-cheat: reject fights that finished impossibly fast
    if use_time < TBOX_HITE_TIME:
        return False, errcode.EC_VALUE, None
    if not self.p_tboxs.has_key(chapter):
        return False, errcode.EC_TBOX_NOCHAPTER, None
    tPTbox = self.p_tboxs[chapter]
    # current slot index == number of monsters already dead
    tPlace = tPTbox.data.isLives.count(MONSTER_DIE)
    is_kill_first = tPTbox.data.levels[tPlace]  # 0 => first clear of this slot
    # only persist the star rating when it beats the previous best
    if aLevel and aLevel > tPTbox.data.levels[tPlace]:
        old_level = tPTbox.data.levels[tPlace]
        tPTbox.data.levels[tPlace] = aLevel
        self.handle_horn(tPTbox, old_level, chapter)
    tPTbox.data.isLives[tPlace] = MONSTER_DIE
    # roll the reward for this monster
    tResTBoxId = tPTbox.data.tbids[tPlace]
    tResTBox = self.player._game.res_mgr.tboxs.get(tResTBoxId)
    tRw = self.player._game.reward_mgr.get(tResTBox.rid)
    tRsTtem = tRw.reward(params=self.player.reward_params())
    # park the loot in the wait bag until the player collects it
    oWaitItem = self.player.wait_bag.add_waitItem(WAITBAG_TYPE_TIMEBOX, tRsTtem)
    # persist the tbox change
    self.update_tbox(tPTbox)
    rs = self.player.pack_msg_data(waits=[oWaitItem])
    # remember the pending item on the player attr block
    self.p_attr[PT_CH] = chapter
    self.p_attr[PT_WAITS] = [oWaitItem.data.id]
    # 'sub' tells the client whether a ranking submit is possible
    rs['sub'] = False
    if not is_kill_first:
        rs['sub'] = True
    rs['tbox'] = tPTbox.to_dict()
    #log.debug('tResTBoxId::[%s]', tResTBoxId)
    # broadcast chapter completion when the last monster fell
    if tPlace +1 == MONSTER_NUM:
        self.player.pub(MSG_TBOX_PASS, chapter)
    return True, rs, tResTBoxId
def handle_horn(self, p_tbox, old_level, chapter_id):
    """Publish world-channel announcements for chapter milestones.

    Fires MSG_TBOX_FPASS on the first full clear of the chapter (every
    slot rated, and this slot had no prior rating) and MSG_TBOX_MPASS
    when every slot is at the maximum star level.
    """
    if len(p_tbox.data.tbids) != MONSTER_NUM:
        return
    levels = p_tbox.data.levels
    # first full pass: this slot was previously unrated and now all are
    first_full_pass = (not old_level) and all(levels)
    all_max_stars = all(lv == TBOX_LEVEL_MAX for lv in levels)
    if first_full_pass:
        self.player.pub(MSG_TBOX_FPASS, chapter_id)
    if all_max_stars:
        self.player.pub(MSG_TBOX_MPASS, chapter_id)
def reset(self, chapter):
    """Revive all monsters in *chapter*, consuming a free or paid reset.

    A free reset (re1) is used first; otherwise gold is charged per dead
    monster and a paid reset (re2) is consumed. Any pending reward is
    discarded. Returns (ok, errcode_or_msg_data).
    """
    if not self.p_tboxs.has_key(chapter):
        return False, errcode.EC_TBOX_NOCHAPTER
    tPTbox = self.p_tboxs[chapter]
    if not tPTbox.data.re1 and not tPTbox.data.re2:
        return False, errcode.EC_TBOX_NORESET
    data = {}
    if tPTbox.data.re1:
        tPTbox.data.re1 -= 1
    else:
        # price scales with the remaining paid resets and the number of
        # monsters already killed this round
        tCoin2 = self._get_reset_coin(tPTbox.data.re2)
        tCoin2 = tCoin2 * tPTbox.data.isLives.count(MONSTER_DIE)
        if not self.player.cost_coin(aCoin2 = tCoin2, log_type=COIN_TBOX_RESET):
            return False, errcode.EC_COST_ERR
        tPTbox.data.re2 -= 1
        data = self.player.pack_msg_data(coin=True)
    # revive every monster and persist the box
    tPTbox.data.isLives = len(tPTbox.data.isLives) * [MONSTER_LIVE]
    self.update_tbox(tPTbox)
    # discard any reward still waiting to be fetched
    self.delete_wait()
    data.update({'tbox':tPTbox.to_dict()})
    return True, data
def delete_wait(self):
    """Drop every pending (un-fetched) reward item for this player."""
    if not self.p_attr[PT_CH]:
        return  # nothing pending
    self.p_attr[PT_CH] = 0
    for wid in self.p_attr[PT_WAITS]:
        self.player.wait_bag.delete(wid)
    self.p_attr[PT_WAITS] = []
def kill(self, aChapter):
    """Instant-kill every surviving monster in one chapter.

    Allowed only once the player has reached the configured level and
    the chapter has all MONSTER_NUM slots at the maximum star rating.
    Every victim's reward is collected into a single wait-bag item.

    Returns (True, msg_data) on success or (False, errcode) on failure.
    """
    if not self.p_tboxs.has_key(aChapter):
        return False, errcode.EC_TBOX_NOCHAPTER
    tPTbox = self.p_tboxs[aChapter]
    tResLevel = self.player._game.setting_mgr.setdefault(TBOX_KILL_LEVEL, TBOX_KILL_LEVEL_V)
    if self.player.data.level < tResLevel:
        return False, errcode.EC_NOLEVEL
    if len(tPTbox.data.tbids) != MONSTER_NUM:
        return False, errcode.EC_TBOX_KILL_NOCOND
    # the chapter must be fully max-starred before it can be insta-killed
    for i in xrange(MONSTER_NUM):
        if tPTbox.data.levels[i] != TBOX_LEVEL_MAX:
            return False, errcode.EC_TBOX_KILL_NOCOND
    kill_num = 0
    tRsTtems = []
    for i in xrange(MONSTER_NUM):
        if not tPTbox.data.isLives[i]:
            continue  # already dead, nothing to collect
        kill_num += 1
        tPTbox.data.isLives[i] = MONSTER_DIE
        tResTBoxId = tPTbox.data.tbids[i]
        tResTBox = self.player._game.res_mgr.tboxs.get(tResTBoxId)
        tRw = self.player._game.reward_mgr.get(tResTBox.rid)
        tRsTtem = tRw.reward(params=self.player.reward_params())
        tRsTtems.extend(tRsTtem)
    self.player.pub(MSG_TBOX_MDIE, kill_num)
    # stash all loot as a single pending (wait-bag) item
    oWaitItem = self.player.wait_bag.add_waitItem(WAITBAG_TYPE_TIMEBOX, tRsTtems)
    if not oWaitItem:
        return False, errcode.EC_TBOX_KILL_NOMONSTER
    # record the pending item so reset()/handle_pass_day() can clean it
    # up later (bug fix: PT_WAITS used to be set to an always-empty list,
    # leaking the wait item — see hit_end for the matching pattern)
    self.p_attr[PT_CH] = aChapter
    self.p_attr[PT_WAITS] = [oWaitItem.data.id]
    # persist the tbox change
    self.update_tbox(tPTbox)
    rs = self.player.pack_msg_data(waits=[oWaitItem])
    rs['tbox'] = tPTbox.to_dict()
    return True, rs
def _get_reset_coin(self, aNum):
    """Gold price for a paid reset, given *aNum* paid resets remaining.

    The setting is either a flat int, or a '|'-separated price ladder
    that is indexed from the end by the remaining count.
    """
    price = self.player._game.setting_mgr.setdefault(TBOX_COINS, TBOX_COINS_V)
    if isinstance(price, int):
        return price
    ladder = price.split('|')
    return int(ladder[-aNum])
def update_tbox(self, aPTbox):
    """Mark the player's time box record dirty so it gets persisted."""
    aPTbox.modify()
| [
"you@example.com"
] | you@example.com |
b541fe78bc55ab89ff27e9be669b95806c47396b | b3c17f6b3b1c5322a5bf8dd262d01a85e6de2849 | /web-api/customSite/ocr/custom_ocr_module/src/detection/ctpn/utils/rpn_msr/generate_anchors.py | 8ac5cee0f28ee718219da4393b4a31c54e874f8f | [] | no_license | arxrean/CelebRecognition | 6e65a76e984e54ef6a34e9b3dc44e0d19b79bcd6 | b202c6ef8bd6314b9c43a02b5afdbad64522f5ee | refs/heads/master | 2022-11-23T07:42:13.584305 | 2019-09-02T08:03:05 | 2019-09-02T08:03:05 | 204,247,279 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,454 | py | import numpy as np
# Build one anchor box per configured (height, width) size.
def generate_basic_anchors(sizes, base_size=16):
    """Build one anchor box per (height, width) pair in *sizes*.

    Each anchor is derived from a square base anchor of side *base_size*
    at the origin, rescaled around its centre by scale_anchor.

    Returns an int32 array of shape (len(sizes), 4) holding
    (xmin, ymin, xmax, ymax) rows.
    """
    base_anchor = np.array([0, 0, base_size - 1, base_size - 1], np.int32)
    anchors = np.zeros((len(sizes), 4), np.int32)
    # enumerate instead of a hand-maintained index counter
    for index, (h, w) in enumerate(sizes):
        anchors[index] = scale_anchor(base_anchor, h, w)
    return anchors
# Rescale the base anchor to the configured height and width around its centre.
def scale_anchor(anchor, h, w):
    """Return a copy of *anchor* resized to height *h* and width *w*.

    The box is re-centred on the original (xmin, ymin, xmax, ymax)
    anchor's centre point; the result keeps the input array's dtype.
    """
    cx = (anchor[0] + anchor[2]) * 0.5
    cy = (anchor[1] + anchor[3]) * 0.5
    out = anchor.copy()
    out[0] = cx - w / 2  # xmin
    out[1] = cy - h / 2  # ymin
    out[2] = cx + w / 2  # xmax
    out[3] = cy + h / 2  # ymax
    return out
# Generate the CTPN anchor set.
# CTPN anchors have a fixed 16px width and a range of fixed heights.
def generate_anchors(base_size=16, ratios=[0.5, 1, 2],
                     scales=2 ** np.arange(3, 6)):
    """CTPN anchor set: fixed 16px width, ten fixed heights.

    base_size/ratios/scales are retained only for signature
    compatibility with the original Faster R-CNN helper; they are
    not used by the CTPN variant.
    """
    heights = [11, 16, 23, 33, 48, 68, 97, 139, 198, 283]
    widths = [16]
    sizes = [(h, w) for h in heights for w in widths]
    return generate_basic_anchors(sizes)
if __name__ == '__main__':
    import time
    # micro-benchmark: time one anchor generation, print the result,
    # then drop into an interactive IPython shell for inspection
    t = time.time()
    a = generate_anchors()
    print(time.time() - t)
    print(a)
    from IPython import embed;
    embed()
| [
"602052254@qq.com"
] | 602052254@qq.com |
6d52867e4a517db70b6d40506b0c61cf314f3338 | cf025ea3bf079748472557304a290593c753b884 | /Algorithm/SWEA/시험문제/시험문제_2.py | 46481ac0b5ea15cf3b3aaba6f2a5c020c00f7f52 | [] | no_license | Silentsoul04/my_software_study | 7dbb035ceea74f42c7ce2051b2320f6cae75ed88 | c27d33c57f59fe5244a1041c11bbd826dd481546 | refs/heads/master | 2023-03-29T02:43:40.861045 | 2019-07-10T08:09:55 | 2019-07-10T08:09:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,662 | py | import sys
sys.stdin = open('test_2.txt','r')
from itertools import combinations
testcases = int(input())
for tc in range(testcases):
    N, M = list(map(int,input().split())) # N = row count, M = column count
    mat = [list(map(int,input().split())) for _ in range(N)]
    max_ = 0
    # brute force: s is the horizontal cut row, g1 < g2 the two vertical
    # cut columns, splitting the grid into six rectangular regions t1..t6
    for s_ in range(1, N):
        s = s_
        for g1_ in range(1, M-1):
            g1 = g1_
            for g2_ in range(g1+1, M):
                g2 = g2_
                t1, t2, t3, t4, t5, t6 = 0, 0, 0, 0, 0, 0
                for i in range(N):
                    for j in range(M):
                        if i < s and j < g1: # region 1 (top-left)
                            t1 += mat[i][j]
                        elif i < s and g1 <= j < g2: # region 2 (top-middle)
                            t2 += mat[i][j]
                        elif i < s and g2 <= j: # region 3 (top-right)
                            t3 += mat[i][j]
                        elif s <= i and j < g1: # region 4 (bottom-left)
                            t4 += mat[i][j]
                        elif s <= i and g1 <= j < g2: # region 5 (bottom-middle)
                            t5 += mat[i][j]
                        elif s <= i and g2 <= j: # region 6 (bottom-right)
                            t6 += mat[i][j]
                # for every choice of 3 regions, score = sum of pairwise
                # absolute differences of their sums; keep the maximum
                scores = list(combinations([t1, t2, t3, t4, t5, t6], 3))
                for _ in range(len(scores)):
                    score = list(combinations(scores[_], 2))
                    sc = 0
                    for __ in score:
                        a,b = __
                        c = a - b
                        sc+= abs(c)
                    scores[_] = sc
                max_ = max(max_, max(scores))
    print(f'#{tc+1} {max_}')
"pok_winter@naver.com"
] | pok_winter@naver.com |
bbb44934c8f2e091c86000447893dbfe722bdb59 | 29d0131660ab0392861df94d4a3198f963db233c | /scripts/delcomments.py | 50a8f9a01cfd2a2dc639c3c6dd5c4a538a88a0b1 | [
"MIT"
] | permissive | kalinochkind/vkbot | be7123d82063c6c0ce108e532b2798e1bde898e4 | 306a244cb15745057fd838cd7c3163f0b6754d4b | refs/heads/master | 2020-04-04T06:24:16.429204 | 2019-05-13T16:49:31 | 2019-05-13T16:49:31 | 46,568,794 | 39 | 16 | null | 2015-11-21T19:45:13 | 2015-11-20T15:06:54 | Python | UTF-8 | Python | false | false | 839 | py | import logging
import accounts
import cppbot
import log
def isBad(bot, comm):
    """Ask the C++ classifier bot whether *comm* is a blacklisted comment."""
    verdict = bot.interact('comm ' + bot.escape(comm))
    return verdict == '$blacklisted'
# noinspection PyUnusedLocal
def main(a, args):
a.timeout = 10
dm = a.delayed()
bot = cppbot.CppBot('', 0, None)
self_id = a.users.get()[0]['id']
def wall_cb(req, resp):
for post in resp['items']:
dm.wall.getComments(post_id=post['id'], count=100).walk(post_cb)
def post_cb(req, resp):
for comm in resp['items']:
if comm['from_id'] != self_id and comm.get('text') and isBad(bot, comm['text']):
dm.wall.deleteComment(comment_id=comm['id'])
log.write('_delcomments', '{}: {}'.format(comm['from_id'], comm['text']))
dm.wall.get(count=100, filter='others').walk(wall_cb)
dm.sync()
| [
"kalinochkind@gmail.com"
] | kalinochkind@gmail.com |
3db213af205e3d138da0facbb69dc6031244a12b | 8f8ac99fd3ed9ceb36778b404f6fdd0b6899d3f4 | /pyobjc-framework-CoreData/PyObjCTest/test_nspersistentstorecoordinator.py | 61d00ec508e6fe1d4afdd4ff5bebc35403d01347 | [
"MIT"
] | permissive | strogo/pyobjc | ac4201c7742eb75348328eeecb7eedf4e3458de3 | 2579c5eaf44b0c5af77ee195c417d2c65e72dfda | refs/heads/master | 2023-07-13T00:41:56.448005 | 2021-08-24T06:42:53 | 2021-08-24T06:42:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,326 | py | import CoreData
from PyObjCTools.TestSupport import TestCase, min_os_level
class TestNSPersistentStoreCoordinator(TestCase):
    """PyObjC binding tests for CoreData.NSPersistentStoreCoordinator.

    The testConstants* methods check that the framework constants exist
    with the expected Python type per minimum OS version; the testMethods*
    methods check that BOOL results and by-reference NSError** arguments
    are exposed correctly through the bridge metadata.
    """

    def testConstants(self):
        self.assertIsInstance(CoreData.NSSQLiteStoreType, str)
        self.assertIsInstance(CoreData.NSXMLStoreType, str)
        self.assertIsInstance(CoreData.NSBinaryStoreType, str)
        self.assertIsInstance(CoreData.NSInMemoryStoreType, str)
        self.assertIsInstance(CoreData.NSStoreTypeKey, str)
        self.assertIsInstance(CoreData.NSStoreUUIDKey, str)
        self.assertIsInstance(
            CoreData.NSPersistentStoreCoordinatorStoresDidChangeNotification, str
        )
        self.assertIsInstance(CoreData.NSAddedPersistentStoresKey, str)
        self.assertIsInstance(CoreData.NSRemovedPersistentStoresKey, str)
        self.assertIsInstance(CoreData.NSUUIDChangedPersistentStoresKey, str)
        self.assertIsInstance(CoreData.NSReadOnlyPersistentStoreOption, str)
        self.assertIsInstance(CoreData.NSValidateXMLStoreOption, str)

    @min_os_level("10.5")
    def testConstants10_5(self):
        self.assertIsInstance(CoreData.NSPersistentStoreTimeoutOption, str)
        self.assertIsInstance(CoreData.NSSQLitePragmasOption, str)
        self.assertIsInstance(CoreData.NSIgnorePersistentStoreVersioningOption, str)
        self.assertIsInstance(
            CoreData.NSMigratePersistentStoresAutomaticallyOption, str
        )
        self.assertIsInstance(CoreData.NSStoreModelVersionHashesKey, str)
        self.assertIsInstance(CoreData.NSStoreModelVersionIdentifiersKey, str)
        self.assertIsInstance(CoreData.NSPersistentStoreOSCompatibility, str)
        self.assertIsInstance(
            CoreData.NSPersistentStoreCoordinatorWillRemoveStoreNotification, str
        )
        self.assertIsInstance(CoreData.NSSQLiteAnalyzeOption, str)

    @min_os_level("10.6")
    def testConstants10_6(self):
        self.assertIsInstance(CoreData.NSSQLiteManualVacuumOption, str)
        self.assertIsInstance(CoreData.NSInferMappingModelAutomaticallyOption, str)
        self.assertIsInstance(CoreData.NSXMLExternalRecordType, str)
        self.assertIsInstance(CoreData.NSBinaryExternalRecordType, str)
        self.assertIsInstance(CoreData.NSExternalRecordsFileFormatOption, str)
        self.assertIsInstance(CoreData.NSExternalRecordsDirectoryOption, str)
        self.assertIsInstance(CoreData.NSExternalRecordExtensionOption, str)
        self.assertIsInstance(CoreData.NSEntityNameInPathKey, str)
        self.assertIsInstance(CoreData.NSStoreUUIDInPathKey, str)
        self.assertIsInstance(CoreData.NSStorePathKey, str)
        self.assertIsInstance(CoreData.NSModelPathKey, str)
        self.assertIsInstance(CoreData.NSObjectURIKey, str)

    @min_os_level("10.7")
    def testConstants10_7(self):
        self.assertIsInstance(CoreData.NSPersistentStoreUbiquitousContentNameKey, str)
        self.assertIsInstance(CoreData.NSPersistentStoreUbiquitousContentURLKey, str)
        self.assertIsInstance(
            CoreData.NSPersistentStoreDidImportUbiquitousContentChangesNotification, str
        )

    @min_os_level("10.8")
    def testConstants10_8(self):
        self.assertIsInstance(CoreData.NSPersistentStoreForceDestroyOption, str)

    @min_os_level("10.9")
    def testConstants10_9(self):
        # ubiquitous (iCloud) transition types are plain integers
        self.assertEqual(
            CoreData.NSPersistentStoreUbiquitousTransitionTypeAccountAdded, 1
        )
        self.assertEqual(
            CoreData.NSPersistentStoreUbiquitousTransitionTypeAccountRemoved, 2
        )
        self.assertEqual(
            CoreData.NSPersistentStoreUbiquitousTransitionTypeContentRemoved, 3
        )
        self.assertEqual(
            CoreData.NSPersistentStoreUbiquitousTransitionTypeInitialImportCompleted, 4
        )
        self.assertIsInstance(
            CoreData.NSPersistentStoreCoordinatorStoresWillChangeNotification, str
        )
        self.assertIsInstance(
            CoreData.NSPersistentStoreUbiquitousTransitionTypeKey, str
        )
        self.assertIsInstance(CoreData.NSPersistentStoreUbiquitousPeerTokenOption, str)
        self.assertIsInstance(
            CoreData.NSPersistentStoreRemoveUbiquitousMetadataOption, str
        )
        self.assertIsInstance(
            CoreData.NSPersistentStoreUbiquitousContainerIdentifierKey, str
        )
        self.assertIsInstance(
            CoreData.NSPersistentStoreRebuildFromUbiquitousContentOption, str
        )

    @min_os_level("10.12")
    def testConstants10_12(self):
        self.assertIsInstance(CoreData.NSPersistentStoreConnectionPoolMaxSizeKey, str)

    @min_os_level("10.13")
    def testConstants10_13(self):
        self.assertIsInstance(CoreData.NSPersistentHistoryTrackingKey, str)
        self.assertIsInstance(CoreData.NSBinaryStoreSecureDecodingClasses, str)
        self.assertIsInstance(
            CoreData.NSBinaryStoreInsecureDecodingCompatibilityOption, str
        )

    @min_os_level("10.14")
    def testConstants10_14(self):
        self.assertIsInstance(CoreData.NSPersistentStoreRemoteChangeNotification, str)
        self.assertIsInstance(CoreData.NSPersistentStoreURLKey, str)
        self.assertIsInstance(CoreData.NSPersistentHistoryTokenKey, str)

    @min_os_level("10.15")
    def testConstants10_15(self):
        self.assertIsInstance(
            CoreData.NSPersistentStoreRemoteChangeNotificationPostOptionKey, str
        )

    def testMethods(self):
        self.assertArgIsOut(
            CoreData.NSPersistentStoreCoordinator.addPersistentStoreWithType_configuration_URL_options_error_,
            4,
        )
        self.assertResultIsBOOL(
            CoreData.NSPersistentStoreCoordinator.removePersistentStore_error_
        )
        self.assertArgIsOut(
            CoreData.NSPersistentStoreCoordinator.removePersistentStore_error_, 1
        )
        self.assertArgIsOut(
            CoreData.NSPersistentStoreCoordinator.migratePersistentStore_toURL_options_withType_error_,
            4,
        )
        self.assertResultIsBOOL(CoreData.NSPersistentStoreCoordinator.tryLock)
        self.assertArgIsOut(
            CoreData.NSPersistentStoreCoordinator.metadataForPersistentStoreWithURL_error_,
            1,
        )

    @min_os_level("10.5")
    def testMethods10_5(self):
        self.assertArgIsOut(
            CoreData.NSPersistentStoreCoordinator.metadataForPersistentStoreOfType_URL_error_,
            2,
        )
        self.assertResultIsBOOL(
            CoreData.NSPersistentStoreCoordinator.setMetadata_forPersistentStoreOfType_URL_error_
        )
        self.assertArgIsOut(
            CoreData.NSPersistentStoreCoordinator.setMetadata_forPersistentStoreOfType_URL_error_,
            3,
        )
        self.assertResultIsBOOL(
            CoreData.NSPersistentStoreCoordinator.setURL_forPersistentStore_
        )

    @min_os_level("10.6")
    def testMethods10_6(self):
        self.assertArgIsOut(
            CoreData.NSPersistentStoreCoordinator.importStoreWithIdentifier_fromExternalRecordsDirectory_toURL_options_withType_error_,  # noqa: B950
            5,
        )

    @min_os_level("10.7")
    def testMethods10_7(self):
        self.assertArgIsOut(
            CoreData.NSPersistentStoreCoordinator.executeRequest_withContext_error_, 2
        )

    @min_os_level("10.9")
    def testMethods10_9(self):
        self.assertArgIsOut(
            CoreData.NSPersistentStoreCoordinator.setMetadata_forPersistentStoreOfType_URL_options_error_,
            4,
        )
        self.assertResultIsBOOL(
            CoreData.NSPersistentStoreCoordinator.setMetadata_forPersistentStoreOfType_URL_options_error_
        )
        self.assertArgIsOut(
            CoreData.NSPersistentStoreCoordinator.removeUbiquitousContentAndPersistentStoreAtURL_options_error_,
            2,
        )
        self.assertResultIsBOOL(
            CoreData.NSPersistentStoreCoordinator.removeUbiquitousContentAndPersistentStoreAtURL_options_error_
        )

    @min_os_level("10.10")
    def testMethods10_10(self):
        self.assertArgIsBlock(
            CoreData.NSPersistentStoreCoordinator.performBlock_, 0, b"v"
        )
        self.assertArgIsBlock(
            CoreData.NSPersistentStoreCoordinator.performBlockAndWait_, 0, b"v"
        )

    @min_os_level("10.11")
    def testMethods10_11(self):
        self.assertArgIsOut(
            CoreData.NSPersistentStoreCoordinator.destroyPersistentStoreAtURL_withType_options_error_,
            3,
        )
        self.assertResultIsBOOL(
            CoreData.NSPersistentStoreCoordinator.destroyPersistentStoreAtURL_withType_options_error_
        )
        self.assertArgIsOut(
            CoreData.NSPersistentStoreCoordinator.replacePersistentStoreAtURL_destinationOptions_withPersistentStoreFromURL_sourceOptions_storeType_error_,  # noqa: B950
            5,
        )
        self.assertResultIsBOOL(
            CoreData.NSPersistentStoreCoordinator.replacePersistentStoreAtURL_destinationOptions_withPersistentStoreFromURL_sourceOptions_storeType_error_  # noqa: B950
        )

    @min_os_level("10.12")
    def testMethods10_12(self):
        self.assertArgIsBlock(
            CoreData.NSPersistentStoreCoordinator.addPersistentStoreWithDescription_completionHandler_,
            1,
            b"v@@",
        )
| [
"ronaldoussoren@mac.com"
] | ronaldoussoren@mac.com |
a24dd232f54e280a80ade479a457b4adf4a3472f | 0effd6f590f0d6d17f080e4c41660df13ffa64a7 | /commands/explain_document.py | 24083bf2e441b972d713ec47253e5897d2c125f0 | [] | no_license | vinodpanicker/sublime-elasticsearch-client | a6b2b5992979fa30d1b01cc98d962c657851ae1d | 62b8894f2dbbc776569bc4c6ab0586f7c89dd8c7 | refs/heads/master | 2021-01-13T06:21:02.945999 | 2015-09-14T04:01:33 | 2015-09-14T04:01:33 | 48,857,093 | 1 | 0 | null | 2015-12-31T17:30:18 | 2015-12-31T17:30:18 | null | UTF-8 | Python | false | false | 485 | py | from .base import BaseCommand
class ExplainDocumentCommand(BaseCommand):
    """Sublime command: run the Elasticsearch Explain API on a document.

    When no id is supplied, the user is prompted for one and the command
    re-enters itself through ``self.run``.
    """

    command_name = "elasticsearch:explain-document"

    def run_request(self, id=None):
        if not id:
            # ask the user for the document id, then re-run the command
            self.show_input_panel('Document Id: ', '', self.run)
            return
        # explain the current buffer's query against the configured index
        return self.client.explain(
            index=self.settings.index,
            doc_type=self.settings.doc_type,
            body=self.get_text(),
            id=id,
        )
| [
"kido@knowledge-works.co.jp"
] | kido@knowledge-works.co.jp |
de613a6cd9d0c48a3facb4d8e463cfccd9f9b0d4 | a9c3db07c29a46baf4f88afe555564ed0d8dbf2e | /src/0059-spiral-matrix-ii/spiral-matrix-ii.py | c7d45edc7205ebac9381b2f8b6de35e9d65e0110 | [] | no_license | HLNN/leetcode | 86d2f5b390be9edfceadd55f68d94c78bc8b7644 | 35010d67341e6038ae4ddffb4beba4a9dba05d2a | refs/heads/master | 2023-03-13T16:44:58.901326 | 2023-03-03T00:01:05 | 2023-03-03T00:01:05 | 165,402,662 | 6 | 6 | null | null | null | null | UTF-8 | Python | false | false | 801 | py | # Given a positive integer n, generate an n x n matrix filled with elements from 1 to n2 in spiral order.
#
#
# Example 1:
#
#
# Input: n = 3
# Output: [[1,2,3],[8,9,4],[7,6,5]]
#
#
# Example 2:
#
#
# Input: n = 1
# Output: [[1]]
#
#
#
# Constraints:
#
#
# 1 <= n <= 20
#
#
class Solution:
    def generateMatrix(self, n: int) -> list[list[int]]:
        """Return an n x n matrix filled with 1..n*n in clockwise spiral order.

        Walks cell by cell, turning right whenever the next cell would be
        out of bounds or is already filled. O(n^2) time, O(1) extra space.
        (Uses builtin generics — the original annotation referenced an
        unimported `typing.List`, which raises NameError outside LeetCode.)
        """
        matrix = [[0] * n for _ in range(n)]
        # direction vectors: right, down, left, up
        dr = (0, 1, 0, -1)
        dc = (1, 0, -1, 0)
        r = c = d = 0
        for value in range(1, n * n + 1):
            matrix[r][c] = value
            nr, nc = r + dr[d], c + dc[d]
            if not (0 <= nr < n and 0 <= nc < n and matrix[nr][nc] == 0):
                # blocked: turn clockwise and step in the new direction
                d = (d + 1) % 4
                nr, nc = r + dr[d], c + dc[d]
            r, c = nr, nc
        return matrix
| [
"Huangln555@gmail.com"
] | Huangln555@gmail.com |
4e4eabaf2b55526f75580bb1803ec0d48f306489 | eb00755d9d0f2630ffdb21e3ab6685b2fbcb0d9e | /tests/bench/bench_scripts/bench_sum.py | 7f7d72c9637914d79320c2cdc9f9ffae2db9ff9b | [
"BSD-3-Clause"
] | permissive | mlangill/biom-format | aca45518c71b807cf30b0f548ad726880802a2b5 | 4cebfbdba8b6b64ff0d503df33634e3d52de1de0 | refs/heads/master | 2021-01-16T21:59:51.218830 | 2013-12-04T16:41:50 | 2013-12-04T16:41:50 | 9,486,201 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py | #!/usr/bin/env python
from sys import argv
from gzip import open as gzip_open
from biom.parse import parse_biom_table
if __name__ == '__main__':
    # usage: bench_sum.py <table.biom.gz>
    table = parse_biom_table(gzip_open(argv[1]))
    # whole-table sum; the result is deliberately discarded — this
    # script only exists to be timed by the benchmark harness
    foo = table.sum()
| [
"mcdonadt@colorado.edu"
] | mcdonadt@colorado.edu |
4f55e6834dceee351d0654d188cdf146462d96c7 | f042383cbc9f10837ebdb5b9033a0263f6a43698 | /python_modules/dagster/dagster/core/execution/execute_in_process_result.py | 501d3a953acaebf4aa2a3e49f006830814226899 | [
"Apache-2.0"
] | permissive | helloworld/dagster | 664e6636d68bafa5151418c9d4316a565717f5ee | 779e27faa3e46b7d043cb9624617e655a9ed570c | refs/heads/master | 2022-03-24T12:15:36.626783 | 2022-02-26T01:34:29 | 2022-02-26T01:34:29 | 464,019,094 | 0 | 0 | Apache-2.0 | 2022-03-05T20:23:14 | 2022-02-27T02:38:17 | null | UTF-8 | Python | false | false | 9,132 | py | from typing import Any, Dict, List, Optional, Union, cast
from dagster import check
from dagster.core.definitions import NodeDefinition, NodeHandle
from dagster.core.definitions.events import AssetMaterialization, AssetObservation, Materialization
from dagster.core.definitions.utils import DEFAULT_OUTPUT
from dagster.core.errors import DagsterError, DagsterInvariantViolationError
from dagster.core.events import (
AssetObservationData,
DagsterEvent,
DagsterEventType,
StepMaterializationData,
)
from dagster.core.execution.plan.outputs import StepOutputHandle
from dagster.core.storage.pipeline_run import DagsterRun
class ExecuteInProcessResult:
    """Result of an in-process job execution.

    Wraps the run's Dagster event stream, the DagsterRun record and the
    captured step outputs, and offers lookups by node name and output
    name (including dynamic/mapped outputs).
    """

    def __init__(
        self,
        node_def: NodeDefinition,
        all_events: List[DagsterEvent],
        dagster_run: DagsterRun,
        output_capture: Optional[Dict[StepOutputHandle, Any]],
    ):
        self._node_def = node_def

        # If top-level result, no handle will be provided
        self._handle = NodeHandle(node_def.name, parent=None)

        self._event_list = all_events
        self._dagster_run = dagster_run

        # step output handle -> captured value (may be empty when the
        # executor did not capture outputs)
        self._output_capture = check.opt_dict_param(
            output_capture, "output_capture", key_type=StepOutputHandle
        )

    @property
    def success(self) -> bool:
        """bool: Whether execution was successful."""
        return self._dagster_run.is_success

    @property
    def all_node_events(self) -> List[DagsterEvent]:
        """List[DagsterEvent]: All dagster events from the in-process execution."""

        step_events = []

        # collect the step events of every top-level node of the graph
        for node_name in self._node_def.ensure_graph_def().node_dict.keys():
            handle = NodeHandle(node_name, None)
            step_events += _filter_events_by_handle(self._event_list, handle)

        return step_events

    @property
    def all_events(self) -> List[DagsterEvent]:
        """List[DagsterEvent]: All dagster events emitted during in-process execution."""

        return self._event_list

    @property
    def run_id(self) -> str:
        """str: The run id for the executed run"""
        return self._dagster_run.run_id

    @property
    def dagster_run(self) -> DagsterRun:
        """DagsterRun: the DagsterRun object for the completed execution."""
        return self._dagster_run

    def events_for_node(self, node_name: str) -> List[DagsterEvent]:
        """Retrieves all dagster events for a specific node.

        Args:
            node_name (str): The name of the node for which outputs should be retrieved.

        Returns:
            List[DagsterEvent]: A list of all dagster events associated with provided node name.
        """
        check.str_param(node_name, "node_name")

        return _filter_events_by_handle(self._event_list, NodeHandle.from_string(node_name))

    def asset_materializations_for_node(
        self, node_name
    ) -> List[Union[Materialization, AssetMaterialization]]:
        """Return the asset materializations emitted by the named node."""
        return [
            cast(StepMaterializationData, event.event_specific_data).materialization
            for event in self.events_for_node(node_name)
            if event.event_type_value == DagsterEventType.ASSET_MATERIALIZATION.value
        ]

    def asset_observations_for_node(self, node_name) -> List[AssetObservation]:
        """Return the asset observations emitted by the named node."""
        return [
            cast(AssetObservationData, event.event_specific_data).asset_observation
            for event in self.events_for_node(node_name)
            if event.event_type_value == DagsterEventType.ASSET_OBSERVATION.value
        ]

    def output_value(self, output_name: str = DEFAULT_OUTPUT) -> Any:
        """Retrieves output of top-level job, if an output is returned.

        If the top-level job has no output, calling this method will result in a
        DagsterInvariantViolationError.

        Args:
            output_name (Optional[str]): The name of the output to retrieve. Defaults to `result`,
                the default output name in dagster.

        Returns:
            Any: The value of the retrieved output.
        """
        check.str_param(output_name, "output_name")

        graph_def = self._node_def.ensure_graph_def()
        if not graph_def.has_output(output_name) and len(graph_def.output_mappings) == 0:
            raise DagsterInvariantViolationError(
                f"Attempted to retrieve top-level outputs for '{graph_def.name}', which has no outputs."
            )
        elif not graph_def.has_output(output_name):
            raise DagsterInvariantViolationError(
                f"Could not find top-level output '{output_name}' in '{graph_def.name}'."
            )
        # Resolve the first layer of mapping
        output_mapping = graph_def.get_output_mapping(output_name)
        mapped_node = graph_def.solid_named(output_mapping.maps_from.solid_name)
        origin_output_def, origin_handle = mapped_node.definition.resolve_output_to_origin(
            output_mapping.maps_from.output_name,
            NodeHandle(mapped_node.name, None),
        )

        # Get output from origin node
        return _filter_outputs_by_handle(
            self._output_capture, origin_handle, origin_output_def.name
        )

    def output_for_node(self, node_str: str, output_name: Optional[str] = DEFAULT_OUTPUT) -> Any:
        """Retrieves output value with a particular name from the in-process run of the job.

        Args:
            node_str (str): Name of the op/graph whose output should be retrieved. If the intended
                graph/op is nested within another graph, the syntax is `outer_graph.inner_node`.
            output_name (Optional[str]): Name of the output on the op/graph to retrieve. Defaults to
                `result`, the default output name in dagster.

        Returns:
            Any: The value of the retrieved output.
        """

        # resolve handle of node that node_str is referring to
        target_handle = NodeHandle.from_string(node_str)
        target_node_def = self._node_def.ensure_graph_def().get_solid(target_handle).definition
        origin_output_def, origin_handle = target_node_def.resolve_output_to_origin(
            output_name, NodeHandle.from_string(node_str)
        )

        # retrieve output value from resolved handle
        return _filter_outputs_by_handle(
            self._output_capture, origin_handle, origin_output_def.name
        )

    def get_job_success_event(self):
        """Returns a DagsterEvent with type DagsterEventType.PIPELINE_SUCCESS if it ocurred during
        execution
        """
        events = list(
            filter(
                lambda event: event.event_type == DagsterEventType.PIPELINE_SUCCESS, self.all_events
            )
        )

        if len(events) == 0:
            raise DagsterError("No event of type DagsterEventType.PIPELINE_SUCCESS found.")

        return events[0]

    def get_job_failure_event(self):
        """Returns a DagsterEvent with type DagsterEventType.PIPELINE_FAILURE if it ocurred during
        execution
        """
        events = list(
            filter(
                lambda event: event.event_type == DagsterEventType.PIPELINE_FAILURE, self.all_events
            )
        )

        if len(events) == 0:
            raise DagsterError("No event of type DagsterEventType.PIPELINE_FAILURE found.")

        return events[0]
def _filter_events_by_handle(
    event_list: List[DagsterEvent], handle: NodeHandle
) -> List[DagsterEvent]:
    """Return the step events whose node handle is *handle* or one of its descendants."""
    matching = []
    for event in event_list:
        if not event.is_step_event:
            continue
        # step events are guaranteed to carry a node handle
        event_handle = cast(NodeHandle, event.solid_handle)
        if event_handle.is_or_descends_from(handle):
            matching.append(event)
    return matching
def _filter_outputs_by_handle(
    output_dict: Dict[StepOutputHandle, Any],
    node_handle: NodeHandle,
    output_name: str,
) -> Any:
    """Find the captured output value for (node_handle, output_name).

    Outputs of mapped (dynamic) steps are stored under step keys of the
    form ``"step_key[mapping_key]"``; these are collected into a dict
    keyed by the mapping key. A plain (unmapped) exact match is returned
    directly.

    Raises:
        DagsterInvariantViolationError: if nothing matches the handle.
    """
    mapped_outputs = {}
    step_key = str(node_handle)
    output_found = False
    for step_output_handle, value in output_dict.items():

        # For the mapped output case, where step keys are in the format
        # "step_key[upstream_mapped_output_name]" within the step output handle.
        if step_output_handle.step_key.startswith(f"{step_key}["):
            output_found = True
            key_start = step_output_handle.step_key.find("[")
            key_end = step_output_handle.step_key.find("]")
            upstream_mapped_output_name = step_output_handle.step_key[key_start + 1 : key_end]
            mapped_outputs[upstream_mapped_output_name] = value

        # For all other cases, search for exact match.
        elif (
            step_key == step_output_handle.step_key
            and step_output_handle.output_name == output_name
        ):
            output_found = True
            if not step_output_handle.mapping_key:
                # unmapped output: a single captured value, return it as-is
                return output_dict[step_output_handle]
            mapped_outputs[step_output_handle.mapping_key] = value

    if not output_found:
        raise DagsterInvariantViolationError(f"No outputs found for node '{node_handle}'.")
    return mapped_outputs
| [
"noreply@github.com"
] | helloworld.noreply@github.com |
1150b554ca225799561fdcf23ca5e95515d27372 | 61d08e23fbb62e16f7bd9d43673b1cf4e0558c37 | /other/character.py | 660ebb5286f5391e2ab709d75d3d45db874ddaae | [] | no_license | jonntd/mira | 1a4b1f17a71cfefd20c96e0384af2d1fdff813e8 | 270f55ef5d4fecca7368887f489310f5e5094a92 | refs/heads/master | 2021-08-31T12:08:14.795480 | 2017-12-21T08:02:06 | 2017-12-21T08:02:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,280 | py | import shutil
import os
import logging
import maya.cmds as mc
import xgenm as xgen
import xgenm.xgGlobal as xgg
from PySide2.QtWidgets import *
tex_template = "M:/BA/publish/assets/Character/{asset_name}/Shd/Shd/_tex"
publish_template = "M:/BA/publish/assets/Character/{asset_name}/Shd/Shd/_publish/maya/BA_{asset_name}_Shd_Shd.mb"
xgen_template = "M:/BA/publish/assets/Character/{asset_name}/Hair/Hair/_xgen/maya"
logger = logging.getLogger("Character")
class Path(object):
    """Resolve the publish/texture/xgen locations for one character asset."""

    def __init__(self, asset_name):
        self.asset_name = asset_name

    def _fill(self, template):
        # every path template is parameterized by the asset name only
        return template.format(asset_name=self.asset_name)

    @property
    def tex_dir(self):
        """Texture directory under the shading publish tree."""
        return self._fill(tex_template)

    @property
    def publish_path(self):
        """Published shading Maya scene file."""
        return self._fill(publish_template)

    @property
    def xgen_dir(self):
        """XGen maya directory under the hair publish tree."""
        return self._fill(xgen_template)
class Xgen(object):
    """Thin wrapper around the xgen API for exporting/importing palettes."""

    def export_palette(self, palette, xgen_path):
        """Export ``palette`` to ``xgen_path``, creating the directory if needed."""
        xgen_dir = os.path.dirname(xgen_path)
        if not os.path.isdir(xgen_dir):
            os.makedirs(xgen_dir)
        xgen.exportPalette(palette, xgen_path)

    def import_palette(self, xgen_path, deltas, namespace=None):
        """Import the palette file at ``xgen_path`` with the given delta files.

        ``deltas`` may be a single path or a list of paths; a single path is
        normalized to a one-element list before calling the xgen API.
        """
        if isinstance(deltas, basestring):
            deltas = [deltas]
        if not os.path.isfile(xgen_path):
            logger.warning("%s is not an exist path." % xgen_path)
            return
        xgen.importPalette(xgen_path, deltas, namespace)

    def create_delta(self, palette, delta_path):
        """Write a delta file for ``palette`` to ``delta_path``.

        xgen writes to a local scratch file first; the result is then copied
        to the (possibly network) destination and the scratch file removed.
        """
        delta_dir = os.path.dirname(delta_path)
        if not os.path.isdir(delta_dir):
            os.makedirs(delta_dir)
        temp_path = "D:/temp.xgd"
        xgen.createDelta(palette, temp_path)
        shutil.copy(temp_path, delta_path)
        # BUG FIX: the original removed "D:/temp.gxd" (extension typo), which
        # raised on Windows and leaked the real scratch file D:/temp.xgd.
        os.remove(temp_path)

    def set_abs_path(self, xgen_dir):
        """Replace the ${DESC} token in every xgen attribute value with the
        absolute description directory rooted at ``xgen_dir``."""
        if not xgg.Maya:
            return
        # palette is collection, use palettes to get collections first.
        palettes = xgen.palettes()
        for palette in palettes:
            # Use descriptions to get description of each collection
            descriptions = xgen.descriptions(palette)
            for description in descriptions:
                commaon_objs = xgen.objects(palette, description, True)
                fx_objs = xgen.fxModules(palette, description)
                objs = commaon_objs + fx_objs
                # Get active objs,e.g. SplinePrimtives
                for obj in objs:
                    attrs = xgen.allAttrs(palette, description, obj)
                    for attr in attrs:
                        value = xgen.getAttr(attr, palette, description, obj)
                        if "${DESC}" in value:
                            # Use the module logger (lazy % args) instead of a
                            # bare print statement for debug tracing.
                            logger.debug("%s %s %s %s", palette, description, obj, attr)
                            description_dir = os.path.join(xgen_dir, "collections", palette, description).replace("\\", "/")
                            new_value = value.replace("${DESC}", description_dir)
                            xgen.setAttr(attr, new_value, palette, description, obj)
        # Force the Description Editor UI to pick up the new attribute values.
        de = xgg.DescriptionEditor
        de.refresh("Full")
class Maya(object):
    """Scene-level publish operations for one character asset.

    Bundles the asset's resolved paths (``Path``) and xgen helpers (``Xgen``)
    and performs the in-scene work: retargeting file textures, relocating the
    xgen data directory, and saving the scene to the publish location.
    """
    def __init__(self, asset_name):
        self.asset_name = asset_name
        self.path = Path(self.asset_name)
        self.xg = Xgen()
    def copy_textures(self):
        # Copy every file-node texture into the asset's publish _tex folder
        # and repoint the node at the copy.
        file_nodes = mc.ls(type="file")
        if not file_nodes:
            return
        tex_dir = self.path.tex_dir
        if not os.path.isdir(tex_dir):
            os.makedirs(tex_dir)
        for file_node in file_nodes:
            texture = mc.getAttr("%s.fileTextureName" % file_node)
            if not os.path.isfile(texture):
                # Missing source texture: skip it but keep processing the rest.
                print "%s is not an exist file" % texture
                continue
            base_name = os.path.basename(texture)
            new_path = "%s/%s" % (tex_dir, base_name)
            if texture != new_path:
                shutil.copy(texture, new_path)
            # Repoint the node even when the path was already the publish copy.
            mc.setAttr("%s.fileTextureName" % file_node, new_path, type="string")
    def copy_xgen_dir(self, old_xgen_dir):
        # Mirror the working xgen directory into the publish location and
        # return the destination path.
        xgen_dir = self.path.xgen_dir
        if not os.path.isdir(xgen_dir):
            os.makedirs(xgen_dir)
        from distutils.dir_util import copy_tree
        copy_tree(old_xgen_dir, xgen_dir)
        return xgen_dir
    def set_xgen_path(self, old_xgen_dir):
        # Copy the xgen data, then rewrite ${DESC} tokens to absolute paths
        # under the copied location.
        xgen_dir = self.copy_xgen_dir(old_xgen_dir)
        self.xg.set_abs_path(xgen_dir)
    def save_to_publish(self):
        # Rename the current scene to the publish path and save it as .mb.
        publish_path = self.path.publish_path
        if not os.path.isdir(os.path.dirname(publish_path)):
            os.makedirs(os.path.dirname(publish_path))
        mc.file(rename=publish_path)
        mc.file(save=1, f=1, type="mayaBinary")
def run():
    """Interactive publish flow: copy textures, relocate xgen data, save.

    Prompts for the asset name (and, when the scene contains xgen palettes,
    for the source xgen directory), then performs the publish steps and
    reports completion with a message box.
    """
    # todo delete rig
    # ############
    asset_name, ok = QInputDialog.getText(None, "Input", "Asset Name")
    if not ok:
        return
    palettes = xgen.palettes()
    xgen_dir = None
    if palettes:
        # The scene has xgen collections: ask where the working xgen data is.
        xgen_dir = QFileDialog.getExistingDirectory()
        if not xgen_dir:
            return
    maya = Maya(asset_name)
    maya.copy_textures()
    logger.info("Copy textures done.")
    if palettes:
        maya.set_xgen_path(xgen_dir)
        logger.info("xgen done.")
    maya.save_to_publish()
    logger.info("Save to publish done.")
    # BUG FIX: the original called QMessageBox(None, title, text), which does
    # not match any QMessageBox constructor overload and never displayed the
    # dialog; use the static helper (also fixes the "Warming" title typo).
    QMessageBox.information(None, "Warning Tip", "Congratulations, All done.")
if __name__ == "__main__":
    # Run the interactive publish flow when executed directly (e.g. pasted
    # into Maya's script editor).
    run()
| [
"276575758@qq.com"
] | 276575758@qq.com |
7187d46526272377f13ea5606ed70cafad22cc94 | 5095200e9ca55cd3a37af34ed44448c02e2a1bb5 | /modules/image/object_detection/yolov3_darknet53_venus/yolo_head.py | cfe796c2edebedbff19f302b24533849ee09c2e3 | [
"Apache-2.0"
] | permissive | PaddlePaddle/PaddleHub | 8712603ef486c45e83eb0bc5725b0b3ed3ddbbde | b402610a6f0b382a978e82473b541ea1fc6cf09a | refs/heads/develop | 2023-07-24T06:03:13.172978 | 2023-03-28T11:49:55 | 2023-03-28T11:49:55 | 162,672,577 | 12,914 | 2,239 | Apache-2.0 | 2023-07-06T21:38:19 | 2018-12-21T06:00:48 | Python | UTF-8 | Python | false | false | 8,781 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import OrderedDict
from paddle import fluid
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.regularizer import L2Decay
__all__ = ['MultiClassNMS', 'YOLOv3Head']
class MultiClassNMS(object):
    # __op__ = fluid.layers.multiclass_nms
    """Parameter holder mirroring the arguments of multiclass NMS.

    Instances carry no behaviour; consumers read these attributes when the
    prediction graph is assembled.
    """

    def __init__(self, background_label, keep_top_k, nms_threshold, nms_top_k, normalized, score_threshold):
        super(MultiClassNMS, self).__init__()
        # Record every constructor argument verbatim as a same-named attribute.
        settings = dict(
            background_label=background_label,
            keep_top_k=keep_top_k,
            nms_threshold=nms_threshold,
            nms_top_k=nms_top_k,
            normalized=normalized,
            score_threshold=score_threshold,
        )
        for attr_name, attr_value in settings.items():
            setattr(self, attr_name, attr_value)
class YOLOv3Head(object):
    """Head block for YOLOv3 network
    Args:
        norm_decay (float): weight decay for normalization layer weights
        num_classes (int): number of output classes
        ignore_thresh (float): threshold to ignore confidence loss
        label_smooth (bool): whether to use label smoothing
        anchors (list): anchors
        anchor_masks (list): anchor masks
        nms (object): an instance of `MultiClassNMS`
    """
    def __init__(self,
                 norm_decay=0.,
                 num_classes=80,
                 ignore_thresh=0.7,
                 label_smooth=True,
                 anchors=[[10, 13], [16, 30], [33, 23], [30, 61], [62, 45], [59, 119], [116, 90], [156, 198],
                          [373, 326]],
                 anchor_masks=[[6, 7, 8], [3, 4, 5], [0, 1, 2]],
                 nms=MultiClassNMS(
                     background_label=-1,
                     keep_top_k=100,
                     nms_threshold=0.45,
                     nms_top_k=1000,
                     normalized=True,
                     score_threshold=0.01),
                 weight_prefix_name=''):
        # NOTE(review): the mutable default `anchors`/`anchor_masks` lists and
        # the shared default `nms` instance are only read, never mutated here,
        # so sharing them across instances appears safe — confirm if changed.
        self.norm_decay = norm_decay
        self.num_classes = num_classes
        self.ignore_thresh = ignore_thresh
        self.label_smooth = label_smooth
        self.anchor_masks = anchor_masks
        # Validates the anchor list and precomputes self.anchors (flattened)
        # and self.mask_anchors (per-mask flattened pairs).
        self._parse_anchors(anchors)
        self.nms = nms
        self.prefix_name = weight_prefix_name
    def _conv_bn(self, input, ch_out, filter_size, stride, padding, act='leaky', is_test=True, name=None):
        # Conv + batch norm + optional leaky-relu. The conv is created without
        # a bias; the batch norm's offset parameter plays that role.
        conv = fluid.layers.conv2d(
            input=input,
            num_filters=ch_out,
            filter_size=filter_size,
            stride=stride,
            padding=padding,
            act=None,
            param_attr=ParamAttr(name=name + ".conv.weights"),
            bias_attr=False)
        bn_name = name + ".bn"
        # L2Decay(self.norm_decay) applies the configured weight decay to the
        # batch-norm scale/offset parameters.
        bn_param_attr = ParamAttr(regularizer=L2Decay(self.norm_decay), name=bn_name + '.scale')
        bn_bias_attr = ParamAttr(regularizer=L2Decay(self.norm_decay), name=bn_name + '.offset')
        out = fluid.layers.batch_norm(
            input=conv,
            act=None,
            is_test=is_test,
            param_attr=bn_param_attr,
            bias_attr=bn_bias_attr,
            moving_mean_name=bn_name + '.mean',
            moving_variance_name=bn_name + '.var')
        if act == 'leaky':
            out = fluid.layers.leaky_relu(x=out, alpha=0.1)
        return out
    def _detection_block(self, input, channel, is_test=True, name=None):
        # Builds the per-scale detection tower: two 1x1/3x3 conv pairs, then a
        # 1x1 "route" (fed to the next scale) and a 3x3 "tip" (fed to the
        # output conv).
        assert channel % 2 == 0, \
            "channel {} cannot be divided by 2 in detection block {}" \
            .format(channel, name)
        conv = input
        for j in range(2):
            conv = self._conv_bn(
                conv, channel, filter_size=1, stride=1, padding=0, is_test=is_test, name='{}.{}.0'.format(name, j))
            conv = self._conv_bn(
                conv, channel * 2, filter_size=3, stride=1, padding=1, is_test=is_test, name='{}.{}.1'.format(name, j))
        route = self._conv_bn(
            conv, channel, filter_size=1, stride=1, padding=0, is_test=is_test, name='{}.2'.format(name))
        tip = self._conv_bn(
            route, channel * 2, filter_size=3, stride=1, padding=1, is_test=is_test, name='{}.tip'.format(name))
        return route, tip
    def _upsample(self, input, scale=2, name=None):
        """Nearest-neighbour upsample of ``input`` by ``scale``."""
        out = fluid.layers.resize_nearest(input=input, scale=float(scale), name=name)
        return out
    def _parse_anchors(self, anchors):
        """
        Check ANCHORS/ANCHOR_MASKS in config and parse mask_anchors
        """
        # self.anchors: all (w, h) pairs flattened to a single list.
        # self.mask_anchors: one flattened list of pairs per anchor mask.
        self.anchors = []
        self.mask_anchors = []
        assert len(anchors) > 0, "ANCHORS not set."
        assert len(self.anchor_masks) > 0, "ANCHOR_MASKS not set."
        for anchor in anchors:
            assert len(anchor) == 2, "anchor {} len should be 2".format(anchor)
            self.anchors.extend(anchor)
        anchor_num = len(anchors)
        for masks in self.anchor_masks:
            self.mask_anchors.append([])
            for mask in masks:
                assert mask < anchor_num, "anchor mask index overflow"
                self.mask_anchors[-1].extend(anchors[mask])
    def _get_outputs(self, input, is_train=True):
        """
        Get YOLOv3 head output
        Args:
            input (list): List of Variables, output of backbone stages
            is_train (bool): whether in train or test mode
        Returns:
            outputs (list): Variables of each output layer
        """
        outputs = []
        # get last out_layer_num blocks in reverse order
        out_layer_num = len(self.anchor_masks)
        if isinstance(input, OrderedDict):
            blocks = list(input.values())[-1:-out_layer_num - 1:-1]
        else:
            blocks = input[-1:-out_layer_num - 1:-1]
        route = None
        for i, block in enumerate(blocks):
            if i > 0:  # perform concat in first 2 detection_block
                block = fluid.layers.concat(input=[route, block], axis=1)
            # Channel width halves at each finer scale: 512, 256, 128, ...
            route, tip = self._detection_block(
                block, channel=512 // (2**i), is_test=(not is_train), name=self.prefix_name + "yolo_block.{}".format(i))
            # out channel number = mask_num * (5 + class_num)
            num_filters = len(self.anchor_masks[i]) * (self.num_classes + 5)
            block_out = fluid.layers.conv2d(
                input=tip,
                num_filters=num_filters,
                filter_size=1,
                stride=1,
                padding=0,
                act=None,
                param_attr=ParamAttr(name=self.prefix_name + "yolo_output.{}.conv.weights".format(i)),
                bias_attr=ParamAttr(
                    regularizer=L2Decay(0.), name=self.prefix_name + "yolo_output.{}.conv.bias".format(i)))
            outputs.append(block_out)
            if i < len(blocks) - 1:
                # do not perform upsample in the last detection_block
                route = self._conv_bn(
                    input=route,
                    ch_out=256 // (2**i),
                    filter_size=1,
                    stride=1,
                    padding=0,
                    is_test=(not is_train),
                    name=self.prefix_name + "yolo_transition.{}".format(i))
                # upsample
                route = self._upsample(route)
        return outputs, blocks
    def get_prediction(self, outputs, im_size):
        """
        Get prediction result of YOLOv3 network
        Args:
            outputs (list): list of Variables, return from _get_outputs
            im_size (Variable): Variable of size([h, w]) of each image
        Returns:
            pred (Variable): The prediction result after non-max suppress.
        """
        boxes = []
        scores = []
        downsample = 32
        for i, output in enumerate(outputs):
            box, score = fluid.layers.yolo_box(
                x=output,
                img_size=im_size,
                anchors=self.mask_anchors[i],
                class_num=self.num_classes,
                conf_thresh=self.nms.score_threshold,
                downsample_ratio=downsample,
                name=self.prefix_name + "yolo_box" + str(i))
            boxes.append(box)
            scores.append(fluid.layers.transpose(score, perm=[0, 2, 1]))
            # Each successive output operates at half the previous stride.
            downsample //= 2
        yolo_boxes = fluid.layers.concat(boxes, axis=1)
        yolo_scores = fluid.layers.concat(scores, axis=2)
        pred = fluid.layers.multiclass_nms(
            bboxes=yolo_boxes,
            scores=yolo_scores,
            score_threshold=self.nms.score_threshold,
            nms_top_k=self.nms.nms_top_k,
            keep_top_k=self.nms.keep_top_k,
            nms_threshold=self.nms.nms_threshold,
            background_label=self.nms.background_label,
            normalized=self.nms.normalized,
            name="multiclass_nms")
        return pred
| [
"wuzewu@baidu.com"
] | wuzewu@baidu.com |
ef8f97c0542c2a9778f59095320d840c7079067f | fced0a1451b458b5eb1af5851b75f1da9748a187 | /.venv/bin/chardetect | 5228ca95a09547c3d4e48d55eedb4327fe5554c2 | [] | no_license | Osama-Yousef/Data-Analysis | d4e4ef5fe743803cac24c587ec9bf3f6a7fc6465 | bc1f66f45054fa05141c8ec6132f004ca9144fb6 | refs/heads/master | 2022-12-12T08:24:43.239569 | 2020-09-05T12:33:08 | 2020-09-05T12:33:08 | 291,679,861 | 3 | 0 | null | 2020-09-05T12:33:09 | 2020-08-31T10:04:10 | Python | UTF-8 | Python | false | false | 243 | #!/home/osama/vg-stats/.venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from chardet.cli.chardetect import main
if __name__ == '__main__':
    # Strip setuptools wrapper suffixes (-script.pyw / .exe) from argv[0]
    # before delegating to chardet's CLI entry point.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"osamawalidyousef@gmail.com"
] | osamawalidyousef@gmail.com | |
f91de1717dfd7a91f697029607b32f03528f3f2f | c25fa1b8dd48b6dc4302f626f797d3411d1a991d | /bacon.py | 1241c3cbecb8e47a2a61a1329ee2a36a132c67c1 | [] | no_license | jasminh925/bacon | 8a3c542828f93814b7212cb734e043336e53dbad | b3303ca1f82a27282c8109f79d093fa474cf033f | refs/heads/master | 2020-05-30T23:23:17.420030 | 2016-08-03T04:21:07 | 2016-08-03T04:21:07 | 64,814,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 575 | py | def bacon():
pork = raw_input("Should you eat that bacon?")
angels = raw_input("Do you want to feel like angels are frolicking on your taste buds?").lower()
if angels == "yes":
return "Eat it!"
elif angels == "no":
print "You've clearly never tasted bacon."
return "Eat it"
else:
coward = raw_input("I see you are afraid bacon might kill you. Are you a coward?").lower()
if coward == "yes":
return "You are a coward. Bacon will turn you into a true warrior."
else:
return "Then eat it!"
def main():
    # Entry point: run the questionnaire and echo its verdict (Python 2
    # print statement).
    print bacon()
if __name__ == '__main__':
main() | [
"you@example.com"
] | you@example.com |
2117b97c99737ddfd126db5b1411d0278a6cc213 | c69e2eb04c5bff10dd1ec214e6dbe3917a156cef | /test-pandas.py | bb34c9838161b66dd3dba21b7fab57e056ad5d9f | [] | no_license | Jizishuo/daily_code | c865bb1df063dd2cfd9c0e8ab9f64bc9f33ef5c2 | cc9b7423bfc7e4a990c3554d91613ee9144ed9e2 | refs/heads/master | 2021-08-10T20:04:42.541355 | 2020-04-11T05:59:26 | 2020-04-11T05:59:26 | 154,241,659 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,701 | py | import pandas as pd
#VIDEO_INFO_PATH = 'F:\\test-data-ex\\0729(0-12).csv'
VIDEO_INFO_PATH = 'F:\\0729-history-alarm.csv'
# Series & DataFrame are the two most commonly used Pandas objects
# Series
if __name__ == '__main__':
    # Load the alarm-history CSV; low_memory=False reads it in one pass so
    # column dtypes are inferred consistently.
    #video_info = pd.read_csv(VIDEO_INFO_PATH, encoding='ISO-8859-1')
    video_info = pd.read_csv(VIDEO_INFO_PATH, low_memory=False)
    # shape returns (number of rows, number of columns)
    print(video_info.shape)
    print(video_info.head(n=5))
    '''
    # index保存行索引,columns保存列索引
    print(video_info.columns)
    print(video_info.columns.name)
    # 行索引是一个表示多级索引的MultiIndex对象,每级的索引名可以通过names属性存取
    print(video_info.index)
    print(video_info.index.names)
    # DataFrame对象有两个轴,第0轴为纵轴,第一轴为横轴
    # []运算符可以通过索引标签获取指定的列,当下标是单个标签时,所得到的是Series对象
    # 而当下标是列表时,则得到一个DataFrame对象
    video_id = video_info['VideoID']
    video_object = video_info[['VideoID', 'Start', 'End']]
    # 进行去重操作
    video_object = video_object.drop_duplicates()
    print(video_object)
    print(video_object.values)
    # video_test = video_info[video_info['VideoID'].unique()]
    # .loc[]可通过索引标签获取指定的行,或指定行的某个元素
    # 因为这里没有行索引,所以这里报错video_one = video_info.loc['mv89psg6zh4']
    s = pd.Series([1, 2, 3, 4, 5], index=["a", "b", "c", "d", "e"])
    print(u" index", s.index)
    print(u" values", s.values)
    print(s[1:3])
    print(s['b':'d'])
    '''
''' | [
"948369894@qq.com"
] | 948369894@qq.com |
9df72bb5dd9d18de36606776a1ca5a76d0e7c0b1 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_076/ch141_2020_04_01_20_36_34_858929.py | 9a70d3a247b9894c9efe74e20b17d5431601ec88 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,504 | py | import random
# Dice-betting exercise: the player starts with 1000 "dinheiros" and keeps
# betting on the sum of two dice until answering "não".
dinheiros = 1000
a=input("Jogo iniciado. Você quer apostar?")
while a != "não":
    dinheiros -= 30
    dado1 = random.randint(1,6)
    dado2 = random.randint(1,6)
    b= input("Os dados foram lançados. Chute o valor da soma por 30 dinheiros.")
    # NOTE(review): input() returns a str, so `b == dado1 + dado2` compares a
    # string to an int and is always False — the player can never win here.
    # Likely fix: int(b) == dado1 + dado2.
    if b == dado1 + dado2:
        dinheiros += 50
        print ("Você ganhou 20 dinheiros!")
        print('Você terminou a partida com {0} dinheiros'.format(dinheiros))
        a = input("Jogo iniciado. Você quer apostar?")
    else:
        c = input ("Você quer continuar apostando ou desistir? Se quiser apostar, tente novamente o valor da soma por 20 dinheiros.")
        if c == "continuar":
            dinheiros -= 20
            # NOTE(review): `c` holds the text "continuar" at this point, so
            # this str-vs-int comparison is always False as well.
            if c == dado1 + dado2:
                dinheiros +=50
                print('Você terminou a partida com {0} dinheiros'.format(dinheiros))
            else:
                # NOTE(review): `d` is assigned a formatted prompt string but
                # never shown via input(), so `d == "continuar"` is always
                # False and the inner retry branch is unreachable.
                d = ("Um dos dados apontou {}. Você deseja continuar tentando ou desistir?".format(dado1))
                if d=="continuar":
                    e = input("Então tente novamente. Isso lhe custara mais 10 dinheiros. ")
                    dinheiros -=10
                    # NOTE(review): same str-vs-int comparison bug as above.
                    if e== dado1+dado2:
                        dinheiros +=50
                        print('Você terminou a partida com {0} dinheiros'.format(dinheiros))
                        a = input("Jogo iniciado. Você quer apostar?")
    a = input("Jogo iniciado. Você quer apostar?")
print('Você terminou a partida com {0} dinheiros'.format(dinheiros)) | [
"you@example.com"
] | you@example.com |
42684450fa04e067ffbbf06157502666d0a88556 | ea6267b0a3508fd262acdefa51e5ad0f8f2a0563 | /src/commcare_cloud/environment/constants.py | e5126c33b55b4d8e78fb02d8aa392eaa39ae8c28 | [
"BSD-3-Clause"
] | permissive | rohit-dimagi/commcare-cloud | a606bc269f36be594d38ba6ff516411d63f37aad | 55576713f3a12acc3f2df4f24c405df9c30143b3 | refs/heads/master | 2020-06-15T16:08:46.350848 | 2019-07-17T07:34:41 | 2019-07-17T12:05:13 | 195,337,852 | 0 | 0 | BSD-3-Clause | 2019-07-05T04:17:48 | 2019-07-05T04:17:48 | null | UTF-8 | Python | false | false | 479 | py | import jsonobject
class _Constants(jsonobject.JsonObject):
    """Canonical database names shared by all CommCare Cloud environments."""
    commcarehq_main_db_name = jsonobject.StringProperty(default='commcarehq')
    formplayer_db_name = jsonobject.StringProperty(default='formplayer')
    ucr_db_name = jsonobject.StringProperty(default='commcarehq_ucr')
    synclogs_db_name = jsonobject.StringProperty(default='commcarehq_synclogs')
    form_processing_proxy_db_name = jsonobject.StringProperty(default='commcarehq_proxy')
# Shared singleton; import `constants` instead of instantiating _Constants.
constants = _Constants()
| [
"droberts@dimagi.com"
] | droberts@dimagi.com |
31c86fbea013718e4e447491328b433b47f69512 | 9bd861d47402c81f9cc608dc414c9827baa88dd5 | /_estudoPython_solid/string_lista.py | 74ef590ab5c66d20b8d082b79f770e764908f505 | [
"Apache-2.0"
] | permissive | c4rl0sFr3it4s/Python_Analise_De_Dados | 8d4cf637573a5610489a2bd44e2a34c749456e73 | 74a72772179f45684f4f12acd4ad607c99ed8107 | refs/heads/master | 2023-02-02T21:13:16.150017 | 2020-08-03T17:52:19 | 2020-08-03T17:52:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,428 | py | '''
string grupo de caracteres, trata a frase como uma LISTA
de caracteres.
para imprimir a primeira letra frase[0]
lista é assim 1 caractere 0, 2 caractere 1, 3 caractere 2 etc
[0,1,2,3,4,5,6,7,8,9,10] -> indice
O,i, ,t,u,d,o, ,b,e,m
LISTA é uma estrutura de dados coleção que pode guardar varios tipos de dados dentro
separado por virgula, para o python é mesma coisa que uma string cada letra é um nome.
['João', 'Maria', 'Carlos', 'Francine']
0, 1, 2, 3 -> indice
impresso
['João', 'Maria', 'Carlos', 'Francine', 10, 10.2]
'''
# string: Python treats a string as a sequence (list-like) of characters
frase = 'Oi, tudo bem?'
# string operations
frase_separada = frase.split(',')
# list: an ordered, mutable collection that may mix value types
lista_nomes = ['João', 'Maria', 'Carlos', 'Francine', 10, 10.2]
# LIST operations
print('Lista -> \'', lista_nomes[0:2], '\'')
lista_nomes.append('Geralda')
lista_nomes.append('Lorena')
lista_nomes.remove('Geralda')
#lista_nomes.clear()
lista_nomes.insert(5, 'Creuza')
lista_nomes[0] = 'Robervania'
contador_carlos = lista_nomes.count('Maria')
# list output
# NOTE(review): the label below says "Contando João" but the code counts
# 'Maria' — one of the two is probably a leftover from editing.
print(lista_nomes)
print('Contando João \"', contador_carlos, '\"')
print('Tamanho da Coleção \"', len(lista_nomes), '\"')
print('Função de pilha Pop\"', lista_nomes.pop(), '\"')
print(lista_nomes)
'''
lista_nomes, imprime tudo
lista_nomes[0:2], imprime do indice 0 ate 1, não incluiso o 2
lista_nome[-1], de trás para frente
append adiciona no ultimo lugar da lista
remove remove da lista
clear limpa toda a lista
insert insere no indice que escolher
lista_nome[0] = 'Robervania', adicionando no indice escolhido
count('Maria') contagem quantas vezes contém na Coleção
len(lista_nomes) traz o tamanho da coleção
pop(), funcao de pilha o primeiro que entra é o ultimo que sai, e retira da lista
LISTA é mutável, e ordenada do jeito que é inserido
'''
# string output
print('Frase -> \'', frase[0:5:1], '\'')
print('Frase em caixa baixa -> \"', frase.lower(), '\"')
print('Frase separada com split() \"', frase_separada, '\"')
print('Acessando o indice da coleção do split() \"', frase_separada[0], '\"')
'''
pegando do indice 0 ate o 5 frase[0:5], dando um step passos frase[0:5:1], 1 e de quantos em quantos vai pular
passando somente o passo ele retorna ao contrario frase[::-1]
[inicio:dividir:passo]
lower(), traz tudo para caixa baixa
split(), transforma a frase em uma lista, indicando aode você quer
'''
| [
"ti.carlosfreitas@outlook.com"
] | ti.carlosfreitas@outlook.com |
42e71d3412b993c5e8075303e52189bb54e275a6 | 26cadb387da6dc71f5536b9d74ad44b7b974d26d | /launch/test/launch/actions/test_include_launch_description.py | 469e71475c914e2d65be6fed1f360e96f554be9c | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | ros2/launch | 84971e86f6131976bdfaf872fca12f1a6a377cd6 | f2b232555900d62c3cec839a49afd4cdc01cda58 | refs/heads/rolling | 2023-08-24T14:33:18.237122 | 2023-08-23T17:12:30 | 2023-08-23T17:12:30 | 32,485,326 | 116 | 139 | Apache-2.0 | 2023-09-14T12:07:30 | 2015-03-18T21:27:58 | Python | UTF-8 | Python | false | false | 10,122 | py | # Copyright 2018 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the IncludeLaunchDescription action class."""
import os
from launch import LaunchContext
from launch import LaunchDescription
from launch import LaunchDescriptionSource
from launch import LaunchService
from launch.actions import DeclareLaunchArgument
from launch.actions import IncludeLaunchDescription
from launch.actions import ResetLaunchConfigurations
from launch.actions import SetLaunchConfiguration
from launch.launch_description_sources import PythonLaunchDescriptionSource
from launch.utilities import perform_substitutions
import pytest
def test_include_launch_description_constructors():
    """Test the constructors for IncludeLaunchDescription class."""
    # Bare source, and source plus launch arguments given as an iterable of
    # (name, value) pairs; neither form should raise.
    IncludeLaunchDescription(LaunchDescriptionSource(LaunchDescription()))
    IncludeLaunchDescription(
        LaunchDescriptionSource(LaunchDescription()),
        launch_arguments={'foo': 'FOO'}.items())
def test_include_launch_description_methods():
    """Test the methods of the IncludeLaunchDescription class."""
    ld = LaunchDescription()
    action = IncludeLaunchDescription(LaunchDescriptionSource(ld))
    assert 'IncludeLaunchDescription' in action.describe()
    assert isinstance(action.describe_sub_entities(), list)
    assert isinstance(action.describe_conditional_sub_entities(), list)
    # Result should only contain the launch description as there are no launch arguments.
    assert action.visit(LaunchContext()) == [ld]
    assert action.get_asyncio_future() is None
    assert len(action.launch_arguments) == 0
    # The same invariants hold when the include is itself wrapped in another
    # launch description.
    ld2 = LaunchDescription([action])
    action2 = IncludeLaunchDescription(LaunchDescriptionSource(ld2))
    assert 'IncludeLaunchDescription' in action2.describe()
    assert isinstance(action2.describe_sub_entities(), list)
    assert isinstance(action2.describe_conditional_sub_entities(), list)
    # Result should only contain the launch description as there are no launch arguments.
    assert action2.visit(LaunchContext()) == [ld2]
    assert action2.get_asyncio_future() is None
    assert len(action2.launch_arguments) == 0
def test_include_launch_description_launch_file_location():
    """Test the ability of the IncludeLaunchDescription class to set the launch file location."""
    # A non-path placeholder location is passed through verbatim.
    ld = LaunchDescription()
    action = IncludeLaunchDescription(LaunchDescriptionSource(ld, '<script>'))
    assert 'IncludeLaunchDescription' in action.describe()
    assert isinstance(action.describe_sub_entities(), list)
    assert isinstance(action.describe_conditional_sub_entities(), list)
    lc1 = LaunchContext()
    # Result should only contain the launch description as there are no launch arguments.
    assert action.visit(lc1) == [ld]
    assert lc1.locals.current_launch_file_directory == '<script>'
    assert action.get_asyncio_future() is None
    # A real file location is reduced to its containing directory.
    this_file = os.path.abspath(__file__)
    ld2 = LaunchDescription()
    action2 = IncludeLaunchDescription(LaunchDescriptionSource(ld2, this_file))
    assert 'IncludeLaunchDescription' in action2.describe()
    assert isinstance(action2.describe_sub_entities(), list)
    assert isinstance(action2.describe_conditional_sub_entities(), list)
    lc2 = LaunchContext()
    # Result should only contain the launch description as there are no launch arguments.
    assert action2.visit(lc2) == [ld2]
    assert lc2.locals.current_launch_file_directory == os.path.dirname(this_file)
    assert action2.get_asyncio_future() is None
def test_include_launch_description_launch_arguments():
    """Test the interactions between declared launch arguments and IncludeLaunchDescription."""
    # test that arguments are set when given, even if they are not declared
    ld1 = LaunchDescription([])
    action1 = IncludeLaunchDescription(
        LaunchDescriptionSource(ld1),
        launch_arguments={'foo': 'FOO'}.items(),
    )
    assert len(action1.launch_arguments) == 1
    lc1 = LaunchContext()
    result1 = action1.visit(lc1)
    # visit() expands to a SetLaunchConfiguration per argument followed by
    # the included launch description itself.
    assert len(result1) == 2
    assert isinstance(result1[0], SetLaunchConfiguration)
    assert perform_substitutions(lc1, result1[0].name) == 'foo'
    assert perform_substitutions(lc1, result1[0].value) == 'FOO'
    assert result1[1] == ld1
    # test that a declared argument that is not provided raises an error
    ld2 = LaunchDescription([DeclareLaunchArgument('foo')])
    action2 = IncludeLaunchDescription(
        LaunchDescriptionSource(ld2)
    )
    lc2 = LaunchContext()
    with pytest.raises(RuntimeError) as excinfo2:
        action2.visit(lc2)
    assert 'Included launch description missing required argument' in str(excinfo2.value)
    # test that a declared argument that is not provided raises an error, but with other args set
    ld2 = LaunchDescription([DeclareLaunchArgument('foo')])
    action2 = IncludeLaunchDescription(
        LaunchDescriptionSource(ld2),
        launch_arguments={'not_foo': 'NOT_FOO'}.items(),
    )
    lc2 = LaunchContext()
    with pytest.raises(RuntimeError) as excinfo2:
        action2.visit(lc2)
    assert 'Included launch description missing required argument' in str(excinfo2.value)
    assert 'not_foo' in str(excinfo2.value)
    # test that a declared argument with a default value that is not provided does not raise
    ld2 = LaunchDescription([DeclareLaunchArgument('foo', default_value='FOO')])
    action2 = IncludeLaunchDescription(
        LaunchDescriptionSource(ld2)
    )
    lc2 = LaunchContext()
    action2.visit(lc2)
    # Test that default arguments in nested IncludeLaunchDescription actions do not raise
    ld1 = LaunchDescription([DeclareLaunchArgument('foo', default_value='FOO')])
    action1 = IncludeLaunchDescription(
        LaunchDescriptionSource(ld1),
    )
    ld2 = LaunchDescription([action1, DeclareLaunchArgument('foo2')])
    action2 = IncludeLaunchDescription(
        LaunchDescriptionSource(ld2),
        launch_arguments={'foo2': 'FOO2'}.items(),
    )
    lc2 = LaunchContext()
    action2.visit(lc2)
    # Test that provided launch arguments of nested IncludeLaunchDescription actions do not raise
    ld1 = LaunchDescription([DeclareLaunchArgument('foo')])
    action1 = IncludeLaunchDescription(
        LaunchDescriptionSource(ld1), launch_arguments={'foo': 'FOO'}.items(),
    )
    ld2 = LaunchDescription([action1, DeclareLaunchArgument('foo2')])
    action2 = IncludeLaunchDescription(
        LaunchDescriptionSource(ld2),
        launch_arguments={'foo2': 'FOO2'}.items(),
    )
    lc2 = LaunchContext()
    action2.visit(lc2)
    # Test that arguments can not be passed from the parent launch description
    ld1 = LaunchDescription([DeclareLaunchArgument('foo')])
    action1 = IncludeLaunchDescription(
        LaunchDescriptionSource(ld1)
    )
    ld2 = LaunchDescription([action1, DeclareLaunchArgument('foo2')])
    action2 = IncludeLaunchDescription(
        LaunchDescriptionSource(ld2),
        launch_arguments={'foo': 'FOO', 'foo2': 'FOO2'}.items(),
    )
    ld3 = LaunchDescription([action2])
    ls = LaunchService()
    ls.include_launch_description(ld3)
    # Non-zero return code: the inner description's required 'foo' is not
    # satisfied by the argument given to the outer include.
    assert 1 == ls.run()
    # Test that arguments can be redeclared in the parent launch description
    ld1 = LaunchDescription([DeclareLaunchArgument('foo')])
    action1 = IncludeLaunchDescription(
        LaunchDescriptionSource(ld1)
    )
    ld2 = LaunchDescription([action1, DeclareLaunchArgument('foo'), DeclareLaunchArgument('foo2')])
    action2 = IncludeLaunchDescription(
        LaunchDescriptionSource(ld2),
        launch_arguments={'foo': 'FOO', 'foo2': 'FOO2'}.items(),
    )
    lc2 = LaunchContext()
    action2.visit(lc2)
    # Test that arguments after a ResetLaunchConfigurations action are not checked
    ld1 = LaunchDescription([DeclareLaunchArgument('foo')])
    action1 = IncludeLaunchDescription(
        LaunchDescriptionSource(ld1)
    )
    ld2 = LaunchDescription(
        [
            DeclareLaunchArgument('foo2'),
            ResetLaunchConfigurations(),
            SetLaunchConfiguration('foo', 'asd'),
            action1])
    action2 = IncludeLaunchDescription(
        LaunchDescriptionSource(ld2),
        launch_arguments={'foo2': 'FOO2'}.items(),
    )
    lc2 = LaunchContext()
    action2.visit(lc2)
def test_include_python():
    """Test including Python, with and without explicit PythonLaunchDescriptionSource."""
    this_dir = os.path.dirname(os.path.abspath(__file__))
    simple_launch_file_path = os.path.join(this_dir,
                                           '..',
                                           'launch_description_source',
                                           'simple.launch.py')
    # Explicitly construct with PythonLaunchDescriptionSource
    plds = PythonLaunchDescriptionSource(simple_launch_file_path)
    action0 = IncludeLaunchDescription(plds)
    # Construct action with path instead of PythonLaunchDescriptionSource object
    action1 = IncludeLaunchDescription(simple_launch_file_path)
    # The two actions should be equivalent
    for action in [action0, action1]:
        assert 'IncludeLaunchDescription' in action.describe()
        assert isinstance(action.describe_sub_entities(), list)
        assert isinstance(action.describe_conditional_sub_entities(), list)
        # Result should only contain a single launch description as there are no launch arguments.
        assert len(action.visit(LaunchContext())) == 1
        assert action.get_asyncio_future() is None
        assert len(action.launch_arguments) == 0
        assert action.launch_description_source.location == simple_launch_file_path
| [
"noreply@github.com"
] | ros2.noreply@github.com |
52141d7e15eb082e5473257ada7c24392d5df779 | e85e846960750dd498431ac8412d9967646ff98d | /cms/migrations/0024_auto_20170702_0605.py | 66f85c2b82850a5caef978b4485dd70afb174bf6 | [] | no_license | onosaburo/clublink_django | 19368b4a59b3aed3632883ceffe3326bfc7a61a6 | d2f6024b6224ea7f47595481b3382b8d0670584f | refs/heads/master | 2022-03-30T05:30:12.288354 | 2020-01-27T18:09:11 | 2020-01-27T18:09:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,620 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-07-02 10:05
from __future__ import unicode_literals
from django.db import migrations, models
def generate_full_path_recursive(page):
    """Recompute ``full_path`` for ``page`` and all descendants, depth-first.

    A page's full path is its parent's full path ('' for a root) followed by
    its own slug plus a trailing slash; a page with an empty slug keeps the
    parent path unchanged.  Every visited page is saved.
    """
    if page.parent:
        prefix = page.parent.full_path
    else:
        prefix = ''
    if page.slug:
        prefix += '{}/'.format(page.slug)
    page.full_path = prefix
    page.save()
    for child in page.children.all():
        generate_full_path_recursive(child)
def populate_full_path(apps, schema_editor):
    # Data migration forward step: fill in full_path for every existing page.
    # Use the historical (migration-state) models, not the live ones.
    ClubPage = apps.get_model('cms', 'ClubPage')
    CorpPage = apps.get_model('cms', 'CorpPage')
    # Walk each page tree from its roots so a parent's full_path is computed
    # before its children's.
    for p in ClubPage.objects.filter(parent=None):
        generate_full_path_recursive(p)
    for p in CorpPage.objects.filter(parent=None):
        generate_full_path_recursive(p)
class Migration(migrations.Migration):
dependencies = [
('cms', '0023_auto_20170702_0309'),
]
operations = [
migrations.AddField(
model_name='clubpage',
name='full_path',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='corppage',
name='full_path',
field=models.CharField(max_length=255, null=True),
),
migrations.RunPython(populate_full_path, lambda x, y: None),
migrations.AlterField(
model_name='clubpage',
name='full_path',
field=models.CharField(max_length=255),
),
migrations.AlterField(
model_name='corppage',
name='full_path',
field=models.CharField(max_length=255),
),
]
| [
"bestwork888@outlook.com"
] | bestwork888@outlook.com |
a9f88520d671011631489d87cf5c6144898c4ccd | 70fa6468c768d4ec9b4b14fc94fa785da557f1b5 | /lib/googlecloudsdk/core/console/console_attr.py | 0fbf7194add57d89609f648025dde6094ed3181d | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | kylewuolle/google-cloud-sdk | d43286ef646aec053ecd7eb58566ab2075e04e76 | 75f09ebe779e99fdc3fd13b48621fe12bfaa11aa | refs/heads/master | 2020-04-20T22:10:41.774132 | 2019-01-26T09:29:26 | 2019-01-26T09:29:26 | 169,131,028 | 0 | 0 | NOASSERTION | 2019-02-04T19:04:40 | 2019-02-04T18:58:36 | Python | UTF-8 | Python | false | false | 22,745 | py | # -*- coding: utf-8 -*- #
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""A module for console attributes, special characters and functions.
The target architectures {linux, macos, windows} support inline encoding for
all attributes except color. Windows requires win32 calls to manipulate the
console color state.
Usage:
# Get the console attribute state.
out = log.out
con = console_attr.GetConsoleAttr(out=out)
# Get the ISO 8879:1986//ENTITIES Box and Line Drawing characters.
box = con.GetBoxLineCharacters()
# Print an X inside a box.
out.write(box.dr)
out.write(box.h)
out.write(box.dl)
out.write('\n')
out.write(box.v)
out.write('X')
out.write(box.v)
out.write('\n')
out.write(box.ur)
out.write(box.h)
out.write(box.ul)
out.write('\n')
# Print the bullet characters.
for c in con.GetBullets():
out.write(c)
out.write('\n')
# Print FAIL in red.
out.write('Epic ')
con.Colorize('FAIL', 'red')
out.write(', my first.')
# Print italic and bold text.
bold = con.GetFontCode(bold=True)
italic = con.GetFontCode(italic=True)
normal = con.GetFontCode()
out.write('This is {bold}bold{normal}, this is {italic}italic{normal},'
' and this is normal.\n'.format(bold=bold, italic=italic,
normal=normal))
# Read one character from stdin with echo disabled.
c = con.GetRawKey()
if c is None:
print 'EOF\n'
# Return the display width of a string that may contain FontCode() chars.
display_width = con.DisplayWidth(string)
# Reset the memoized state.
con = console_attr.ResetConsoleAttr()
# Print the console width and height in characters.
width, height = con.GetTermSize()
print 'width={width}, height={height}'.format(width=width, height=height)
# Colorize table data cells.
fail = console_attr.Colorizer('FAIL', 'red')
pass = console_attr.Colorizer('PASS', 'green')
cells = ['label', fail, 'more text', pass, 'end']
for cell in cells;
if isinstance(cell, console_attr.Colorizer):
cell.Render()
else:
out.write(cell)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import os
import sys
import unicodedata
from googlecloudsdk.core.console import console_attr_os
from googlecloudsdk.core.util import encoding as encoding_util
import six
class BoxLineCharacters(object):
"""Box/line drawing characters.
The element names are from ISO 8879:1986//ENTITIES Box and Line Drawing//EN:
http://www.w3.org/2003/entities/iso8879doc/isobox.html
"""
class BoxLineCharactersUnicode(BoxLineCharacters):
"""unicode Box/line drawing characters (cp437 compatible unicode)."""
dl = '┐'
dr = '┌'
h = '─'
hd = '┬'
hu = '┴'
ul = '┘'
ur = '└'
v = '│'
vh = '┼'
vl = '┤'
vr = '├'
d_dl = '╗'
d_dr = '╔'
d_h = '═'
d_hd = '╦'
d_hu = '╩'
d_ul = '╝'
d_ur = '╚'
d_v = '║'
d_vh = '╬'
d_vl = '╣'
d_vr = '╠'
class BoxLineCharactersAscii(BoxLineCharacters):
"""ASCII Box/line drawing characters."""
dl = '+'
dr = '+'
h = '-'
hd = '+'
hu = '+'
ul = '+'
ur = '+'
v = '|'
vh = '+'
vl = '+'
vr = '+'
d_dl = '#'
d_dr = '#'
d_h = '='
d_hd = '#'
d_hu = '#'
d_ul = '#'
d_ur = '#'
d_v = '#'
d_vh = '#'
d_vl = '#'
d_vr = '#'
class ProgressTrackerSymbols(object):
"""Characters used by progress trackers."""
class ProgressTrackerSymbolsUnicode(ProgressTrackerSymbols):
"""Characters used by progress trackers."""
@property
def spin_marks(self):
return ['⠏', '⠛', '⠹', '⠼', '⠶', '⠧']
success = '✓'
failed = 'X'
interrupted = '-'
not_started = '.'
prefix_length = 2
class ProgressTrackerSymbolsAscii(ProgressTrackerSymbols):
"""Characters used by progress trackers."""
@property
def spin_marks(self):
return ['|', '/', '-', '\\',]
success = 'OK'
failed = 'X'
interrupted = '-'
not_started = '.'
prefix_length = 3
class ConsoleAttr(object):
"""Console attribute and special drawing characters and functions accessor.
Use GetConsoleAttr() to get a global ConsoleAttr object shared by all callers.
Use ConsoleAttr() for abstracting multiple consoles.
If _out is not associated with a console, or if the console properties cannot
be determined, the default behavior is ASCII art with no attributes.
Attributes:
_ANSI_COLOR: The ANSI color control sequence dict.
_ANSI_COLOR_RESET: The ANSI color reset control sequence string.
_csi: The ANSI Control Sequence indicator string, '' if not supported.
_encoding: The character encoding.
ascii: ASCII art. This is the default.
utf8: UTF-8 unicode.
win: Windows code page 437.
_font_bold: The ANSI bold font embellishment code string.
_font_italic: The ANSI italic font embellishment code string.
_get_raw_key: A function that reads one keypress from stdin with no echo.
_out: The console output file stream.
_term: TERM environment variable value.
_term_size: The terminal (x, y) dimensions in characters.
"""
_CONSOLE_ATTR_STATE = None
_ANSI_COLOR = {
'red': '31;1m',
'yellow': '33;1m',
'green': '32m',
'blue': '34;1m'
}
_ANSI_COLOR_RESET = '39;0m'
_BULLETS_UNICODE = ('▪', '◆', '▸', '▫', '◇', '▹')
_BULLETS_WINDOWS = ('■', '≡', '∞', 'Φ', '·') # cp437 compatible unicode
_BULLETS_ASCII = ('o', '*', '+', '-')
def __init__(self, encoding=None):
"""Constructor.
Args:
encoding: Encoding override.
ascii -- ASCII art. This is the default.
utf8 -- UTF-8 unicode.
win -- Windows code page 437.
"""
# Normalize the encoding name.
if not encoding:
encoding = self._GetConsoleEncoding()
elif encoding == 'win':
encoding = 'cp437'
self._encoding = encoding or 'ascii'
self._term = os.getenv('TERM', '').lower()
# ANSI "standard" attributes.
if self.SupportsAnsi():
# Select Graphic Rendition paramaters from
# http://en.wikipedia.org/wiki/ANSI_escape_code#graphics
# Italic '3' would be nice here but its not widely supported.
self._csi = '\x1b['
self._font_bold = '1'
self._font_italic = '4'
else:
self._csi = None
self._font_bold = ''
self._font_italic = ''
# Encoded character attributes.
if self._encoding == 'utf8':
self._box_line_characters = BoxLineCharactersUnicode()
self._bullets = self._BULLETS_UNICODE
self._progress_tracker_symbols = ProgressTrackerSymbolsUnicode()
elif self._encoding == 'cp437':
self._box_line_characters = BoxLineCharactersUnicode()
self._bullets = self._BULLETS_WINDOWS
# Windows does not suport the unicode characters used for the spinner.
self._progress_tracker_symbols = ProgressTrackerSymbolsAscii()
else:
self._box_line_characters = BoxLineCharactersAscii()
self._bullets = self._BULLETS_ASCII
self._progress_tracker_symbols = ProgressTrackerSymbolsAscii()
# OS specific attributes.
self._get_raw_key = [console_attr_os.GetRawKeyFunction()]
self._term_size = console_attr_os.GetTermSize()
self._display_width_cache = {}
def _GetConsoleEncoding(self):
"""Gets the encoding as declared by the stdout stream.
Returns:
str, The encoding name or None if it could not be determined.
"""
console_encoding = getattr(sys.stdout, 'encoding', None)
if not console_encoding:
return None
console_encoding = console_encoding.lower()
if 'utf-8' in console_encoding:
return 'utf8'
elif 'cp437' in console_encoding:
return 'cp437'
return None
def Colorize(self, string, color, justify=None):
"""Generates a colorized string, optionally justified.
Args:
string: The string to write.
color: The color name -- must be in _ANSI_COLOR.
justify: The justification function, no justification if None. For
example, justify=lambda s: s.center(10)
Returns:
str, The colorized string that can be printed to the console.
"""
if justify:
string = justify(string)
if self._csi and color in self._ANSI_COLOR:
return '{csi}{color_code}{string}{csi}{reset_code}'.format(
csi=self._csi,
color_code=self._ANSI_COLOR[color],
reset_code=self._ANSI_COLOR_RESET,
string=string)
# TODO(b/35939231): Add elif self._encoding == 'cp437': code here.
return string
def ConvertOutputToUnicode(self, buf):
"""Converts a console output string buf to unicode.
Mainly used for testing. Allows test comparisons in unicode while ensuring
that unicode => encoding => unicode works.
Args:
buf: The console output string to convert.
Returns:
The console output string buf converted to unicode.
"""
if isinstance(buf, six.text_type):
buf = buf.encode(self._encoding)
return six.text_type(buf, self._encoding, 'replace')
def GetBoxLineCharacters(self):
"""Returns the box/line drawing characters object.
The element names are from ISO 8879:1986//ENTITIES Box and Line Drawing//EN:
http://www.w3.org/2003/entities/iso8879doc/isobox.html
Returns:
A BoxLineCharacters object for the console output device.
"""
return self._box_line_characters
def GetBullets(self):
"""Returns the bullet characters list.
Use the list elements in order for best appearance in nested bullet lists,
wrapping back to the first element for deep nesting. The list size depends
on the console implementation.
Returns:
A tuple of bullet characters.
"""
return self._bullets
def GetProgressTrackerSymbols(self):
"""Returns the progress tracker characters object.
Returns:
A ProgressTrackerSymbols object for the console output device.
"""
return self._progress_tracker_symbols
def GetControlSequenceIndicator(self):
"""Returns the control sequence indicator string.
Returns:
The conrol sequence indicator string or None if control sequences are not
supported.
"""
return self._csi
def GetControlSequenceLen(self, buf):
"""Returns the control sequence length at the beginning of buf.
Used in display width computations. Control sequences have display width 0.
Args:
buf: The string to check for a control sequence.
Returns:
The conrol sequence length at the beginning of buf or 0 if buf does not
start with a control sequence.
"""
if not self._csi or not buf.startswith(self._csi):
return 0
n = 0
for c in buf:
n += 1
if c.isalpha():
break
return n
def GetEncoding(self):
"""Returns the current encoding."""
return self._encoding
def GetFontCode(self, bold=False, italic=False):
"""Returns a font code string for 0 or more embellishments.
GetFontCode() with no args returns the default font code string.
Args:
bold: True for bold embellishment.
italic: True for italic embellishment.
Returns:
The font code string for the requested embellishments. Write this string
to the console output to control the font settings.
"""
if not self._csi:
return ''
codes = []
if bold:
codes.append(self._font_bold)
if italic:
codes.append(self._font_italic)
return '{csi}{codes}m'.format(csi=self._csi, codes=';'.join(codes))
def GetRawKey(self):
"""Reads one key press from stdin with no echo.
Returns:
The key name, None for EOF, <KEY-*> for function keys, otherwise a
character.
"""
return self._get_raw_key[0]()
def GetTermSize(self):
"""Returns the terminal (x, y) dimensions in characters.
Returns:
(x, y): A tuple of the terminal x and y dimensions.
"""
return self._term_size
def DisplayWidth(self, buf):
"""Returns the display width of buf, handling unicode and ANSI controls.
Args:
buf: The string to count from.
Returns:
The display width of buf, handling unicode and ANSI controls.
"""
if not isinstance(buf, six.string_types):
# Handle non-string objects like Colorizer().
return len(buf)
cached = self._display_width_cache.get(buf, None)
if cached is not None:
return cached
width = 0
max_width = 0
i = 0
while i < len(buf):
if self._csi and buf[i:].startswith(self._csi):
i += self.GetControlSequenceLen(buf[i:])
elif buf[i] == '\n':
# A newline incidates the start of a new line.
# Newline characters have 0 width.
max_width = max(width, max_width)
width = 0
i += 1
else:
width += GetCharacterDisplayWidth(buf[i])
i += 1
max_width = max(width, max_width)
self._display_width_cache[buf] = max_width
return max_width
def SplitIntoNormalAndControl(self, buf):
"""Returns a list of (normal_string, control_sequence) tuples from buf.
Args:
buf: The input string containing one or more control sequences
interspersed with normal strings.
Returns:
A list of (normal_string, control_sequence) tuples.
"""
if not self._csi or not buf:
return [(buf, '')]
seq = []
i = 0
while i < len(buf):
c = buf.find(self._csi, i)
if c < 0:
seq.append((buf[i:], ''))
break
normal = buf[i:c]
i = c + self.GetControlSequenceLen(buf[c:])
seq.append((normal, buf[c:i]))
return seq
def SplitLine(self, line, width):
"""Splits line into width length chunks.
Args:
line: The line to split.
width: The width of each chunk except the last which could be smaller than
width.
Returns:
A list of chunks, all but the last with display width == width.
"""
lines = []
chunk = ''
w = 0
keep = False
for normal, control in self.SplitIntoNormalAndControl(line):
keep = True
while True:
n = width - w
w += len(normal)
if w <= width:
break
lines.append(chunk + normal[:n])
chunk = ''
keep = False
w = 0
normal = normal[n:]
chunk += normal + control
if chunk or keep:
lines.append(chunk)
return lines
def SupportsAnsi(self):
return (self._encoding != 'ascii' and
('screen' in self._term or 'xterm' in self._term))
class Colorizer(object):
"""Resource string colorizer.
Attributes:
_con: ConsoleAttr object.
_color: Color name.
_string: The string to colorize.
_justify: The justification function, no justification if None. For example,
justify=lambda s: s.center(10)
"""
def __init__(self, string, color, justify=None):
"""Constructor.
Args:
string: The string to colorize.
color: Color name used to index ConsoleAttr._ANSI_COLOR.
justify: The justification function, no justification if None. For
example, justify=lambda s: s.center(10)
"""
self._con = GetConsoleAttr()
self._color = color
self._string = string
self._justify = justify
def __eq__(self, other):
return self._string == six.text_type(other)
def __ne__(self, other):
return not self == other
def __gt__(self, other):
return self._string > six.text_type(other)
def __lt__(self, other):
return self._string < six.text_type(other)
def __ge__(self, other):
return not self < other
def __le__(self, other):
return not self > other
def __len__(self):
return self._con.DisplayWidth(self._string)
def __str__(self):
return self._string
def Render(self, stream, justify=None):
"""Renders the string as self._color on the console.
Args:
stream: The stream to render the string to. The stream given here *must*
have the same encoding as sys.stdout for this to work properly.
justify: The justification function, self._justify if None.
"""
stream.write(
self._con.Colorize(self._string, self._color, justify or self._justify))
def GetConsoleAttr(encoding=None, reset=False):
"""Gets the console attribute state.
If this is the first call or reset is True or encoding is not None and does
not match the current encoding or out is not None and does not match the
current out then the state is (re)initialized. Otherwise the current state
is returned.
This call associates the out file stream with the console. All console related
output should go to the same stream.
Args:
encoding: Encoding override.
ascii -- ASCII. This is the default.
utf8 -- UTF-8 unicode.
win -- Windows code page 437.
reset: Force re-initialization if True.
Returns:
The global ConsoleAttr state object.
"""
attr = ConsoleAttr._CONSOLE_ATTR_STATE # pylint: disable=protected-access
if not reset:
if not attr:
reset = True
elif encoding and encoding != attr.GetEncoding():
reset = True
if reset:
attr = ConsoleAttr(encoding=encoding)
ConsoleAttr._CONSOLE_ATTR_STATE = attr # pylint: disable=protected-access
return attr
def ResetConsoleAttr(encoding=None):
"""Resets the console attribute state to the console default.
Args:
encoding: Reset to this encoding instead of the default.
ascii -- ASCII. This is the default.
utf8 -- UTF-8 unicode.
win -- Windows code page 437.
Returns:
The global ConsoleAttr state object.
"""
return GetConsoleAttr(encoding=encoding, reset=True)
def GetCharacterDisplayWidth(char):
"""Returns the monospaced terminal display width of char.
Assumptions:
- monospaced display
- ambiguous or unknown chars default to width 1
- ASCII control char width is 1 => don't use this for control chars
Args:
char: The character to determine the display width of.
Returns:
The monospaced terminal display width of char: either 0, 1, or 2.
"""
if not isinstance(char, six.text_type):
# Non-unicode chars have width 1. Don't use this function on control chars.
return 1
# Normalize to avoid special cases.
char = unicodedata.normalize('NFC', char)
if unicodedata.combining(char) != 0:
# Modifies the previous character and does not move the cursor.
return 0
elif unicodedata.category(char) == 'Cf':
# Unprintable formatting char.
return 0
elif unicodedata.east_asian_width(char) in 'FW':
# Fullwidth or Wide chars take 2 character positions.
return 2
else:
# Don't use this function on control chars.
return 1
def SafeText(data, encoding=None, escape=True):
br"""Converts the data to a text string compatible with the given encoding.
This works the same way as Decode() below except it guarantees that any
characters in the resulting text string can be re-encoded using the given
encoding (or GetConsoleAttr().GetEncoding() if None is given). This means
that the string will be safe to print to sys.stdout (for example) without
getting codec exceptions if the user's terminal doesn't support the encoding
used by the source of the text.
Args:
data: Any bytes, string, or object that has str() or unicode() methods.
encoding: The encoding name to ensure compatibility with. Defaults to
GetConsoleAttr().GetEncoding().
escape: Replace unencodable characters with a \uXXXX or \xXX equivalent if
True. Otherwise replace unencodable characters with an appropriate unknown
character, '?' for ASCII, and the unicode unknown replacement character
\uFFFE for unicode.
Returns:
A text string representation of the data, but modified to remove any
characters that would result in an encoding exception with the target
encoding. In the worst case, with escape=False, it will contain only ?
characters.
"""
if data is None:
return 'None'
encoding = encoding or GetConsoleAttr().GetEncoding()
string = encoding_util.Decode(data, encoding=encoding)
try:
# No change needed if the string encodes to the output encoding.
string.encode(encoding)
return string
except UnicodeError:
# The string does not encode to the output encoding. Encode it with error
# handling then convert it back into a text string (which will be
# guaranteed to only contain characters that can be encoded later.
return (string
.encode(encoding, 'backslashreplace' if escape else 'replace')
.decode(encoding))
def EncodeToBytes(data):
r"""Encode data to bytes.
The primary use case is for base64/mime style 7-bit ascii encoding where the
encoder input must be bytes. "safe" means that the conversion always returns
bytes and will not raise codec exceptions.
If data is text then an 8-bit ascii encoding is attempted, then the console
encoding, and finally utf-8.
Args:
data: Any bytes, string, or object that has str() or unicode() methods.
Returns:
A bytes string representation of the data.
"""
if data is None:
return b''
if isinstance(data, bytes):
# Already bytes - our work is done.
return data
# Coerce to text that will be converted to bytes.
s = six.text_type(data)
try:
# Assume the text can be directly converted to bytes (8-bit ascii).
return s.encode('iso-8859-1')
except UnicodeEncodeError:
pass
try:
# Try the output encoding.
return s.encode(GetConsoleAttr().GetEncoding())
except UnicodeEncodeError:
pass
# Punt to utf-8.
return s.encode('utf-8')
def Decode(data, encoding=None):
"""Converts the given string, bytes, or object to a text string.
Args:
data: Any bytes, string, or object that has str() or unicode() methods.
encoding: A suggesting encoding used to decode. If this encoding doesn't
work, other defaults are tried. Defaults to
GetConsoleAttr().GetEncoding().
Returns:
A text string representation of the data.
"""
encoding = encoding or GetConsoleAttr().GetEncoding()
return encoding_util.Decode(data, encoding=encoding)
| [
"cloudsdk.mirror@gmail.com"
] | cloudsdk.mirror@gmail.com |
11726609336d3f6cd333ea5b5aa5ab7fa4187742 | 66d05b6b42245e4df51a80b593770f761812eb92 | /PYTHON/python_code/object-oriented/turkeys/turkeys.py | 602ea90fa89e3bf24fa78291613fcf9f05ce608d | [] | no_license | techemstudios/intro_cs | 8f32b8b40974c49c65255df8f8e3a835df218df3 | dd2ee57394ab04e86b6d78b70a038ba0f04f661f | refs/heads/master | 2021-10-19T13:04:28.354963 | 2019-02-21T05:28:10 | 2019-02-21T05:28:10 | 119,568,375 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,421 | py | """
Object Oriented Programming (Thanksgiving Giving style)
We start defining classes with Animals.
We can define that parent of the class, Animals, as well as the parent of that class.
However, to save time we start at Animals. Starting here; instead of
directly at the turkey class, should help keep in mind we can define everything
under the sun using OOP is we wanted to. Again, we will save time!
"""
class Animals():
pass
class Turkey(Animals):
"""Attempt to model a turkey."""
# Create an instance based on the class Turkey.
# This instance will have three parameters: self, name, age
def __init__(self, name, age):
"""Initialize name age and age attributes."""
# Make accessible attributes or variables:
self.name = name
self.age = age
# Tell Python what a turkey can do by
# defining the methods of the class
# Gobble Method
def gobble(self):
"""Simulate a turkey gobbling in response to something."""
print(self.name.title() + " is now gobbling!")
# More methods can follow here
# Make an instance representing a specific turkey
my_turkey = Turkey('scrappy', 3)
print("My turkey's name is " + my_turkey.name.title() + ".")
print("My turkey is " +str(my_turkey.age) + " years old.")
# Calling methods
my_turkey.gobble()
# Make multiple instances
your_turkey = Turkey('lassy', 1)
| [
"joe@techemstudios.com"
] | joe@techemstudios.com |
46867e9e4e92fddc64d6f93273ad2155bae22037 | efe58c533fb211d457c598fb1fdabbaf1f284a09 | /asdf.py | 2e2d3210c9c44d407d87329799d571a09a56d8ce | [] | no_license | uniphil/beep-beep-edge | 91d59fb04649a72717ca4afa4cc50d5d6bb4c3e8 | 94c11bae2527edb037cca6a1cec912e53b7a242a | refs/heads/main | 2023-01-13T16:25:23.939980 | 2020-11-15T07:33:56 | 2020-11-15T07:33:56 | 312,756,324 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,519 | py | #!/usr/bin/env python3
import hashlib
import json
import re
import struct
import urllib.request
def unwrap_syslog(line):
_, rest = line.split(']: ', 1)
return rest
def parse_httpd_log(line):
m = re.match(r'^\w+ (\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}) \-.*\- \[.*\] "GET (.*) HTTP/1\.[01]" 200 0 "(.*?)" "(.*)"\n$', line)
return m.groups()
def compress(ip, ua):
ip_bytes = bytes(map(int, ip.split('.')))
bits = hashlib.blake2b(ua.encode(), digest_size=4, key=ip_bytes)
as_uint, = struct.unpack('I', bits.digest())
bucket = as_uint & 0b111111111111 # 12
clz = 20 - (as_uint >> 12).bit_length() + 1 # never zero
return bucket, clz
def run_lines(lines):
for line in lines:
if line.strip() == '':
continue
desysed = unwrap_syslog(line)
ip, path, referrer, ua = parse_httpd_log(desysed)
bucket, zeros = compress(ip, ua)
yield json.dumps(['v1', 'ok', path, referrer, bucket, zeros])
def postit(url):
r = urllib.request.Request(url, b'', {'Content-Type': 'application/json'})
while True:
r.data = yield
with urllib.request.urlopen(r, timeout=2) as resp:
if resp.status != 202:
raise Exception('ohno', resp.status)
if __name__ == '__main__':
import fileinput
import os
post_office = postit(os.environ['DESTINATION'])
next(post_office) # unfortunate init
for compressed in run_lines(fileinput.input()):
post_office.send(compressed.encode())
| [
"uniphil@gmail.com"
] | uniphil@gmail.com |
752f484dc8427e86fe36a0e5d5f6301b62be3e66 | 209c876b1e248fd67bd156a137d961a6610f93c7 | /python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_mp_random.py | f779b3298b8dcadbbf7d846adafe260e6bb99497 | [
"Apache-2.0"
] | permissive | Qengineering/Paddle | 36e0dba37d29146ebef4fba869490ecedbf4294e | 591456c69b76ee96d04b7d15dca6bb8080301f21 | refs/heads/develop | 2023-01-24T12:40:04.551345 | 2022-10-06T10:30:56 | 2022-10-06T10:30:56 | 544,837,444 | 0 | 0 | Apache-2.0 | 2022-10-03T10:12:54 | 2022-10-03T10:12:54 | null | UTF-8 | Python | false | false | 2,235 | py | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import paddle
import numpy as np
import paddle.distributed.fleet as fleet
class TestDistTraning(unittest.TestCase):
def setUp(self):
strategy = fleet.DistributedStrategy()
self.model_parallel_size = 2
strategy.hybrid_configs = {
"dp_degree": 1,
"mp_degree": self.model_parallel_size,
"pp_degree": 1
}
fleet.init(is_collective=True, strategy=strategy)
def test_cuda_rng_tracker(self):
seed_1 = 2021
seed_2 = 1024
size = [20, 15]
paddle.seed(seed_1)
target_11 = paddle.randn(size, "float32")
target_12 = paddle.randn(size, "float32")
paddle.seed(seed_2)
target_21 = paddle.randn(size, "float32")
target_22 = paddle.randn(size, "float32")
paddle.seed(seed_1)
fleet.meta_parallel.get_rng_state_tracker().add("test", seed_2)
result_11 = paddle.randn(size, "float32")
with fleet.meta_parallel.get_rng_state_tracker().rng_state("test"):
result_21 = paddle.randn(size, "float32")
result_12 = paddle.randn(size, "float32")
with fleet.meta_parallel.get_rng_state_tracker().rng_state("test"):
result_22 = paddle.randn(size, "float32")
np.testing.assert_allclose(result_11.numpy(), target_11.numpy())
np.testing.assert_allclose(result_12.numpy(), target_12.numpy())
np.testing.assert_allclose(result_21.numpy(), target_21.numpy())
np.testing.assert_allclose(result_22.numpy(), target_22.numpy())
if __name__ == '__main__':
unittest.main()
| [
"noreply@github.com"
] | Qengineering.noreply@github.com |
8835836e75417a9af60bebd2cad24160cdc3265b | df2d967d02f004e46d44bfcd3cc8cdbf1ae54c9d | /tests/test_core/test_factory.py | f6fcce00563512aeb5481e7805c0669307edf8ad | [
"MIT",
"CC-BY-4.0"
] | permissive | skasberger/owat_api | f3df29cb1466753390f72c2b4603a7c48d1a4e8f | d40860eeef0de151d51200161baaf10c55810fb1 | refs/heads/master | 2023-01-24T11:01:05.223602 | 2020-11-14T11:26:57 | 2020-11-14T11:26:57 | 310,701,528 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,387 | py | # !/usr/bin/env python
# -*- coding: utf-8 -*-
"""Test factory."""
from fastapi import FastAPI
import os
import pytest
from app.config import get_config
from app.database import get_engine, get_SessionLocal
from app.main import create_app
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
def test_config_development():
for config_name in ["default", "development"]:
app = create_app(config_name)
config = get_config(config_name)
if os.getenv("TRAVIS") or False:
assert config.TRAVIS == True
assert app.__dict__["extra"]["TRAVIS"] == True
else:
assert config.TRAVIS == False
assert app.__dict__["extra"]["TRAVIS"] == False
assert config.SQLALCHEMY_DATABASE_URI == "postgresql://localhost/owat_dev"
assert (
app.__dict__["extra"]["SQLALCHEMY_DATABASE_URI"]
== "postgresql://localhost/owat_dev"
)
assert config.DEBUG == True
assert app.__dict__["extra"]["DEBUG"] == True
assert app._debug == True
assert config.SECRET_KEY == "my-secret-key"
assert app.__dict__["extra"]["SECRET_KEY"] == "my-secret-key"
assert config.ADMIN_EMAIL is None
assert app.__dict__["extra"]["ADMIN_EMAIL"] is None
assert config.APP_EMAIL is None
assert app.__dict__["extra"]["APP_EMAIL"] is None
assert config.MIN_CONNECTIONS_COUNT is None
assert app.__dict__["extra"]["MIN_CONNECTIONS_COUNT"] is None
assert config.MAX_CONNECTIONS_COUNT is None
assert app.__dict__["extra"]["MAX_CONNECTIONS_COUNT"] is None
assert config.TITLE == "owat_api"
assert app.title == "owat_api"
assert app.__dict__["extra"]["TITLE"] == "owat_api"
assert app.version == "0.1.0"
assert app.description == "RESTful API for Austrian open elections data"
assert isinstance(app, FastAPI)
def test_config_testing():
config_name = "testing"
app = create_app(config_name)
config = get_config(config_name)
if os.getenv("TRAVIS") or False:
assert config.TRAVIS == True
assert app.__dict__["extra"]["TRAVIS"] == True
else:
assert config.TRAVIS == False
assert app.__dict__["extra"]["TRAVIS"] == False
assert config.SQLALCHEMY_DATABASE_URI == "postgresql://localhost/owat_test"
assert (
app.__dict__["extra"]["SQLALCHEMY_DATABASE_URI"]
== "postgresql://localhost/owat_test"
)
assert config.DEBUG == False
assert app.debug == False
assert app.__dict__["extra"]["DEBUG"] == False
assert config.SECRET_KEY == "secret-env-key"
assert app.__dict__["extra"]["SECRET_KEY"] == "secret-env-key"
assert config.ADMIN_EMAIL == "testing_admin@offenewahlen.at"
assert app.__dict__["extra"]["ADMIN_EMAIL"] == "testing_admin@offenewahlen.at"
assert config.APP_EMAIL == "testing_app@offenewahlen.at"
assert app.__dict__["extra"]["APP_EMAIL"] == "testing_app@offenewahlen.at"
assert config.MIN_CONNECTIONS_COUNT == 10
assert app.__dict__["extra"]["MIN_CONNECTIONS_COUNT"] == 10
assert config.MAX_CONNECTIONS_COUNT == 10
assert app.__dict__["extra"]["MAX_CONNECTIONS_COUNT"] == 10
assert config.TITLE == "owat_api"
assert app.title == "owat_api"
assert app.__dict__["extra"]["TITLE"] == "owat_api"
assert app.version == "0.1.0"
assert app.description == "RESTful API for Austrian open elections data"
assert isinstance(app, FastAPI)
def test_config_travis():
config_name = "travis"
app = create_app(config_name)
config = get_config(config_name)
assert config.TRAVIS == True
assert app.__dict__["extra"]["TRAVIS"] == True
assert (
config.SQLALCHEMY_DATABASE_URI
== "postgresql+psycopg2://postgres@localhost:5432/travis_ci_test"
)
assert (
app.__dict__["extra"]["SQLALCHEMY_DATABASE_URI"]
== "postgresql+psycopg2://postgres@localhost:5432/travis_ci_test"
)
assert config.DEBUG == False
assert app.__dict__["extra"]["DEBUG"] == False
assert app._debug == False
assert config.SECRET_KEY == "my-secret-key"
assert app.__dict__["extra"]["SECRET_KEY"] == "my-secret-key"
assert config.ADMIN_EMAIL is None
assert app.__dict__["extra"]["ADMIN_EMAIL"] is None
assert config.APP_EMAIL is None
assert app.__dict__["extra"]["APP_EMAIL"] is None
assert config.MIN_CONNECTIONS_COUNT is None
assert app.__dict__["extra"]["MIN_CONNECTIONS_COUNT"] is None
assert config.MAX_CONNECTIONS_COUNT is None
assert app.__dict__["extra"]["MAX_CONNECTIONS_COUNT"] is None
assert config.TITLE == "owat_api"
assert app.title == "owat_api"
assert app.__dict__["extra"]["TITLE"] == "owat_api"
assert app.version == "0.1.0"
assert app.description == "RESTful API for Austrian open elections data"
assert isinstance(app, FastAPI)
def test_config_production():
    """The 'production' configuration must be mirrored on both the config
    object and the app's ``extra`` dict.

    The TRAVIS flag follows the environment so this test passes both
    locally and on CI.
    """
    config_name = "production"
    app = create_app(config_name)
    config = get_config(config_name)
    # os.getenv returns None when the variable is unset, which is already
    # falsy -- the original "or False" was redundant and has been removed.
    if os.getenv("TRAVIS"):
        assert config.TRAVIS == True
        assert app.__dict__["extra"]["TRAVIS"] == True
    else:
        assert config.TRAVIS == False
        assert app.__dict__["extra"]["TRAVIS"] == False
    assert config.SQLALCHEMY_DATABASE_URI == "postgresql://localhost/owat"
    assert (
        app.__dict__["extra"]["SQLALCHEMY_DATABASE_URI"]
        == "postgresql://localhost/owat"
    )
    assert config.DEBUG == False
    # NOTE(review): the travis test checks app._debug while this one checks
    # app.debug -- presumably both exist on the app object; confirm.
    assert app.debug == False
    assert app.__dict__["extra"]["DEBUG"] == False
    assert config.SECRET_KEY == "my-secret-key"
    assert app.__dict__["extra"]["SECRET_KEY"] == "my-secret-key"
    assert config.ADMIN_EMAIL is None
    assert app.__dict__["extra"]["ADMIN_EMAIL"] is None
    assert config.APP_EMAIL is None
    assert app.__dict__["extra"]["APP_EMAIL"] is None
    assert config.MIN_CONNECTIONS_COUNT is None
    assert app.__dict__["extra"]["MIN_CONNECTIONS_COUNT"] is None
    assert config.MAX_CONNECTIONS_COUNT is None
    assert app.__dict__["extra"]["MAX_CONNECTIONS_COUNT"] is None
    assert app.title == "owat_api"
    assert app.__dict__["extra"]["TITLE"] == "owat_api"
    assert app.version == "0.1.0"
    assert app.description == "RESTful API for Austrian open elections data"
    assert isinstance(app, FastAPI)
| [
"mail@stefankasberger.at"
] | mail@stefankasberger.at |
0156df3dfcfc720906440e213664f5d8e437dfc7 | 81bad22641705683c68ff89f19362ba202891652 | /napari/utils/progress.py | 2f4dfaa2388ece8d1656f3c87ede44c8642dce93 | [
"BSD-3-Clause"
] | permissive | sofroniewn/napari | ee2a39a1a1132910db6f2a47994671e8138edb51 | beaa98efe5cf04ba659086e7a514b2ade05277af | refs/heads/main | 2023-07-12T02:46:41.185932 | 2022-09-14T21:57:15 | 2022-09-14T21:57:15 | 154,751,137 | 2 | 3 | BSD-3-Clause | 2023-07-01T10:26:45 | 2018-10-25T23:43:01 | Python | UTF-8 | Python | false | false | 4,659 | py | from typing import Iterable, Optional
from tqdm import tqdm
from napari.utils.events.event import EmitterGroup, Event
from ..utils.events.containers import EventedSet
from ..utils.translations import trans
class progress(tqdm):
    """This class inherits from tqdm and provides an interface for
    progress bars in the napari viewer. Progress bars can be created
    directly by wrapping an iterable or by providing a total number
    of expected updates.

    While this interface is primarily designed to be displayed in
    the viewer, it can also be used without a viewer open, in which
    case it behaves identically to tqdm and produces the progress
    bar in the terminal.

    See tqdm.tqdm API for valid args and kwargs:
    https://tqdm.github.io/docs/tqdm/

    Examples
    --------
    >>> def long_running(steps=10, delay=0.1):
    ...     for i in progress(range(steps)):
    ...         sleep(delay)

    it can also be used as a context manager:

    >>> def long_running(steps=10, repeats=4, delay=0.1):
    ...     with progress(range(steps)) as pbr:
    ...         for i in pbr:
    ...             sleep(delay)

    or equivalently, using the `progrange` shorthand

    .. code-block:: python

        with progrange(steps) as pbr:
            for i in pbr:
                sleep(delay)

    For manual updates:

    >>> def manual_updates(total):
    ...     pbr = progress(total=total)
    ...     sleep(10)
    ...     pbr.set_description("Step 1 Complete")
    ...     pbr.update(1)
    ...     # must call pbr.close() when using outside for loop
    ...     # or context manager
    ...     pbr.close()
    """

    monitor_interval = 0  # set to 0 to disable the tqdm monitor thread

    # to give us a way to hook into the creation and update of progress objects
    # without progress knowing anything about a Viewer, we track all instances in
    # this evented *class* attribute, accessed through `progress._all_instances`
    # this allows the ActivityDialog to find out about new progress objects and
    # hook up GUI progress bars to its update events
    _all_instances: EventedSet['progress'] = EventedSet()

    def __init__(
        self,
        iterable: Optional[Iterable] = None,
        desc: Optional[str] = None,
        total: Optional[int] = None,
        nest_under: Optional['progress'] = None,
        *args,
        **kwargs,
    ) -> None:
        # Events observed by GUI listeners; one emitter per piece of
        # progress state that can change.
        self.events = EmitterGroup(
            value=Event,
            description=Event,
            overflow=Event,
            eta=Event,
            total=Event,
        )
        # Optional parent progress object for nested progress bars.
        self.nest_under = nest_under
        # Guard flag: tqdm.__init__ calls display() during construction,
        # before the GUI hookup is complete, so display() checks is_init.
        self.is_init = True
        super().__init__(iterable, desc, total, *args, **kwargs)
        if not self.desc:
            self.set_description(trans._("progress"))
        # Register in the evented class-level set so the GUI learns about
        # this new instance.
        progress._all_instances.add(self)
        self.is_init = False

    def __repr__(self) -> str:
        # Represent the bar by its description text.
        return self.desc

    @property
    def total(self):
        return self._total

    @total.setter
    def total(self, total):
        # Mirror tqdm's attribute and notify listeners of the new total.
        self._total = total
        self.events.total(value=self.total)

    def display(self, msg: Optional[str] = None, pos: Optional[int] = None) -> None:
        """Update the display and emit eta event."""
        # just plain tqdm if we don't have gui
        # NOTE(review): assumes tqdm has set `self.gui` before the first
        # display() call -- confirm against the pinned tqdm version.
        if not self.gui and not self.is_init:
            super().display(msg, pos)
            return
        # TODO: This could break if user is formatting their own terminal tqdm
        etas = str(self).split('|')[-1] if self.total != 0 else ""
        self.events.eta(value=etas)

    def update(self, n=1):
        """Update progress value by n and emit value event"""
        super().update(n)
        self.events.value(value=self.n)

    def increment_with_overflow(self):
        """Update if not exceeding total, else set indeterminate range."""
        if self.n == self.total:
            # Switch to indeterminate mode (total=0) and tell listeners.
            self.total = 0
            self.events.overflow()
        else:
            self.update(1)

    def set_description(self, desc):
        """Update progress description and emit description event."""
        super().set_description(desc, refresh=True)
        self.events.description(value=desc)

    def close(self):
        """Close progress object and emit event."""
        if self.disable:
            return
        # Deregister so GUI listeners drop their progress-bar widget.
        progress._all_instances.remove(self)
        super().close()
def progrange(*args, **kwargs):
    """Build a ``progress`` wrapped around ``range(*args)``.

    Equivalent to ``progress(range(*args), **kwargs)``: adds a tqdm-based
    progress bar to the napari viewer, if one exists, and returns the
    wrapped range object.

    Returns
    -------
    progress
        wrapped range object
    """
    steps = range(*args)
    return progress(steps, **kwargs)
| [
"noreply@github.com"
] | sofroniewn.noreply@github.com |
637ec3627eafc1ba47fb3086852feafdb3e907d7 | 4d9a9546a5dc0b550aede272c4ba85af88dbb673 | /env/lib/python3.8/site-packages/retro/contrib/localfiles.py | caded7f9b85b400d368e60a8b7cd1a406b9e0d1f | [] | no_license | LuisGonzalezLopez/Luis-Gonzalez | f4064dc08ccbada80cc7b45f8fbaaf70f54f420c | 109f50e2e26a1c4abed5ba502deda9e212955c69 | refs/heads/master | 2022-08-04T14:24:42.992548 | 2022-07-23T09:03:08 | 2022-07-23T09:03:08 | 103,600,719 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,225 | py | #!/usr/bin/env python
# -----------------------------------------------------------------------------
# Project : Retro - HTTP Toolkit
# -----------------------------------------------------------------------------
# Author : Sebastien Pierre <sebastien@ffctn.com>
# License : Revised BSD License
# -----------------------------------------------------------------------------
# Creation : 2006-04-12
# Last mod : 2017-07-13
# -----------------------------------------------------------------------------
# SEE:http://www.mnot.net/cache_docs/
__doc__ = """
The 'localfiles' module defines `LocalFiles` and `Library` component that can
be added to any application to serve local files and assets"""
import os, sys, stat, mimetypes, subprocess, base64
from retro import *
from retro.wsgi import SERVER_ERROR_CSS
from retro.contrib.cache import SignatureCache
FAVICON = base64.b64decode("""\
AAABAAEAEBAAAAEAIABoBAAAFgAAACgAAAAQAAAAIAAAAAEAIAAAAAAAAAQAAAAAAAAAAAAAAAAA
AAAAAAAAAAD/AAAA/wAAAP8AAAD/AAAA/wAAAP8AAAD/AAAA/wAAAP8AAAD/AAAA/wAAAP8AAAD/
AAAA/wAAAP8AAAD/AAAA/wAAAP8AAAD/AAAA/wAAAP8AAAD/AAAA/wAAAP8AAAD/AAAA/wAAAP8A
AAD/AAAA/wAAAP8AAAD/AAAA/wAAAP8AAAD/AAAA/z8/P/9/f3//b29v/w8PD/8AAAD/AAAA/w8P
D/9vb2//f39//z8/P/8AAAD/AAAA/wAAAP8AAAD/AAAA/x8fH///////j4+P/8/Pz/+vr6//AAAA
/wAAAP+vr6//z8/P/4+Pj///////Hx8f/wAAAP8AAAD/AAAA/wAAAP8/Pz//7+/v/wAAAP9PT0//
7+/v/wAAAP8AAAD/v7+//09PT/8AAAD/7+/v/z8/P/8AAAD/AAAA/wAAAP8AAAD/Hx8f/+/v7/+/
v7//n5+f//////9/f3//f39//9/f3/+fn5//v7+//+/v7/8fHx//AAAA/wAAAP8AAAD/AAAA/wAA
AP8fHx//b29v/7+/v///////f39//39/f///////v7+//39/f/8fHx//AAAA/wAAAP8AAAD/AAAA
/wAAAP8AAAD/AAAA/wAAAP9/f3///////wAAAP8AAAD//////39/f/8AAAD/AAAA/wAAAP8AAAD/
AAAA/wAAAP8AAAD/AAAA/x8fH/8/Pz//n5+f//////8/Pz//Pz8///////+fn5//Pz8//x8fH/8A
AAD/AAAA/wAAAP8AAAD/AAAA/wAAAP9/f3//////////////////////////////////////////
//9/f3//AAAA/wAAAP8AAAD/AAAA/wAAAP8AAAD/AAAA/wAAAP8/Pz///////wAAAP8AAAD/v7+/
/z8/P/8AAAD/AAAA/wAAAP8AAAD/AAAA/wAAAP8AAAD/AAAA/wAAAP8AAAD/Pz8///////8PDw//
AAAA/7+/v/9/f3//AAAA/wAAAP8AAAD/AAAA/wAAAP8AAAD/AAAA/wAAAP8AAAD/AAAA/w8PD//v
7+//39/f/29vb/9/f3///////6+vr/+fn5//AAAA/wAAAP8AAAD/AAAA/wAAAP8AAAD/AAAA/wAA
AP8AAAD/Hx8f/39/f/9fX1//AAAA/19fX/9/f3//b29v/wAAAP8AAAD/AAAA/wAAAP8AAAD/AAAA
/wAAAP8AAAD/AAAA/wAAAP8AAAD/AAAA/wAAAP8AAAD/AAAA/wAAAP8AAAD/AAAA/wAAAP8AAAD/
AAAA/wAAAP8AAAD/AAAA/wAAAP8AAAD/AAAA/wAAAP8AAAD/AAAA/wAAAP8AAAD/AAAA/wAAAP8A
AAD/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAA==""")
try:
import jsmin
except:
jsmin = None
try:
import cssmin
except:
cssmin = None
try:
import clevercss
except:
clevercss = None
try:
import paml
except:
paml = None
try:
import pythoniccss
except:
pythoniccss = None
LIST_DIR_CSS = SERVER_ERROR_CSS + """
.retro-directory-listing {
list-style-type: none;
}
.retro-directory-listing li:hover{
background: #FFFFE0;
}
.retro-directory-listing li {
padding: 0.5em;
padding-top: 0.25em;
padding-bottom: 0.25em;
position: relative;
display: flex;
width: 100%;
}
.retro-directory-listing li .bullet {
color: #AAAAAA;
display: inline;
position: absolute;
left: 0.5em;
}
.retro-directory-listing li .name {
position: relative;
padding-left: 2.5em;
display: block;
padding-top: 0.10em;
padding-bottom: 0.10em;
flex-grow: 1;
}
.retro-directory-listing li .gz {
opacity: 0.25;
}
.retro-directory-listing li .size {
opacity: 0.5;
}
.retro-directory-listing .directory {
background: #EEEEEE;
}
.retro-directory-listing .hidden, .retro-directory-listing .hidden a {
color: #FFAAAA;
font-style: italic;
}
.retro-directory-listing .parent {
color: #AAAAAA;
padding-top: 0.5em;
padding-bottom: 0.5em;
}
"""
LIST_DIR_HTML = """
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width,initial-scale=1.0,maximum-scale=1.0,user-scalable=no" />
<title>Content of %s</title>
<style><!--
%s
--></style>
</head>
<body>
<h1>Content of <span class="dirname">%s</span></h1>
<ul class="retro-directory-listing">
%s
</ul>
</body>
</html>
"""
# ------------------------------------------------------------------------------
#
# LOCAL FILE COMPONENT
#
# ------------------------------------------------------------------------------
class LocalFiles(Component):
    """The 'LocalFiles' component serves local files from the file system,
    providing a directory-listing interface. This component is designed
    to be used in development environments, where you need direct access
    to local files and live file translation (using `processors`)."""

    LIST_DIR = True
    USE_LAST_MODIFIED = True

    def __init__( self, root="", name=None, processors=None, resolver=None, optsuffix=(), lastModified=None, writable=False, prefix=None):
        """Creates a new LocalFiles, with the optional root, name and
        processors. Processors are functions that modify the content
        of the file and returned the processed data."""
        Component.__init__(self, name="LocalFiles", prefix=prefix)
        self._lastModified = self.USE_LAST_MODIFIED if lastModified is None else lastModified
        self._localRoot = None
        self._processors = {}
        self._resolver = resolver
        self._optSuffixes = optsuffix
        self.isWritable = writable
        self.setRoot(root or ".")
        # `processors` defaults to None (not {}) to avoid the shared
        # mutable-default pitfall; passing a dict behaves as before.
        for key, value in list((processors or {}).items()):
            self._processors[key] = value

    def start( self, root=None ):
        """Initializes the component's root directory (called on startup)."""
        if root is not None:
            root = os.path.abspath(root)
            self.setRoot(root or ".")
        elif self._localRoot is None:
            # NOTE(review): this reads the app's "root" config but never
            # applies it (no setRoot call) -- presumably a latent bug in the
            # original; behavior preserved here, confirm intent upstream.
            root = self.app.config("root")

    def setRoot( self, root ):
        """Sets the root used by this component. This is where the
        local files will be resolved."""
        assert os.path.exists(root), "Given root does not exist: %s" % (root)
        self._localRoot = root

    def getRoot( self, root ):
        """Returns the root for this component.

        The `root` parameter is unused but kept for interface
        compatibility with existing callers."""
        return self._localRoot

    def resolvePath( self, request, path ):
        """Forwards the call to the resolver if present or defaults to
        `_resolvePath`."""
        return self._resolver(self, request, path) if self._resolver else self._resolvePath(path)

    def _resolvePath( self, path ):
        """Resolves the given path and returns an absolute file system
        location for the given path (which is supposed to be relative)."""
        real_path = self.app.localPath(os.path.join(self._localRoot, path))
        if not os.path.exists(real_path):
            # Try the optional suffixes (e.g. ".html") before giving up.
            for s in self._optSuffixes:
                if os.path.exists(real_path + s):
                    return real_path + s
        return real_path

    def getContentType( self, path ):
        """A function that returns the mime type from the given file
        path."""
        if path.endswith(".json"):
            return "application/json"
        elif path.endswith(".mf"):
            return "text/cache-manifest"
        else:
            return mimetypes.guess_type(path)[0] or "text/plain"

    def getContent( self, path ):
        """Gets the content for this file (text mode, platform default
        encoding)."""
        with open(path, "rt") as f:
            return f.read()

    def processorFor( self, path ):
        """Returns the processor registered for the given path's longest
        matching suffix, or None."""
        if isinstance(path, list) or isinstance(path, tuple): path = path[0]
        matches = sorted([_ for _ in self._processors if path.endswith(_)])
        return self._processors[matches[-1]] if matches else None

    @on(GET_POST_HEAD="/favicon.ico",priority=10)
    def favicon( self, request ):
        """Serves a favicon from the filesystem if present, otherwise the
        embedded default icon."""
        for p in ["favicon.ico", "lib/images/favicon.ico"]:
            rp = self.resolvePath(request, p)
            if os.path.exists(rp):
                return self.local(request, p)
        return request.respond(FAVICON, "image/x-icon")

    @on(GET_POST_HEAD="/")
    def catchall( self, request ):
        """A catchall that will display the content of the current
        directory."""
        return self.local(request, ".")

    @on(GET_POST_HEAD="/{path:any}")
    def local( self, request, path ):
        """Serves the files located in the `Library` grand parent directory. This will
        look for a .gz version if the file is not already there.

        If `path` is a list or tuple, it will aggregate all the responses
        together and use the first content type.
        """
        resolved_path = self.resolvePath(request, path)
        multi_paths = None
        processor = self.processorFor(resolved_path)
        if isinstance(resolved_path, list) or isinstance(resolved_path, tuple):
            multi_paths = resolved_path
            resolved_path = resolved_path[0]
        elif not os.path.exists(resolved_path):
            # If the file is not found we're still going to look for a .gz
            if path.endswith(".gz"):
                return request.respond("File not found: %s" % (resolved_path), status=404)
            else:
                res = self.local(request, path + ".gz")
                if res.status >= 200 and res.status < 300:
                    # Serve the .gz content transparently with the original
                    # file's content type.
                    res.setHeader("Content-Type", request.guessContentType(path)).setHeader("Content-Encoding", "gzip")
                return res
        elif os.path.isdir(resolved_path):
            if self.LIST_DIR:
                if request.param("format") == "json":
                    return request.returns(self.directoryAsList(path, resolved_path))
                else:
                    return request.respond(self.directoryAsHtml(path, resolved_path))
            else:
                # BUGFIX: the original string had no %s placeholder, so the
                # "%" formatting raised a TypeError instead of returning 403.
                return request.respond("Component does not allow directory listing: %s" % (resolved_path,), status=403)
        if processor and not request.has("raw"):
            return self._respondWithProcessor( request, processor, resolved_path, multi_paths )
        elif request.has("raw"):
            return request.respondFile(resolved_path, contentType="text/plain", lastModified=self._lastModified)
        else:
            return request.respondFile(resolved_path, lastModified=self._lastModified)

    def _respondWithProcessor( self, request, processor, resolvedPath=None, multiPaths=None):
        """Runs the processor over the resolved path(s) and responds with
        its (content, contentType) result; failures become a 500."""
        try:
            if multiPaths:
                # Aggregated request: the processor receives the path list
                # and loads the content itself.
                content, content_type = processor(None, multiPaths, request)
            else:
                content, content_type = processor(self.getContent(resolvedPath), resolvedPath, request)
            return request.respond(content=content, contentType=content_type)
        except Exception as e:
            return request.fail(status=500, content=str(e))

    @on(PUT_PATCH="/{path:any}")
    def write( self, request, path ):
        """Writes the request body to the local file at `path` when the
        component is writable; otherwise falls back to serving it."""
        if self.isWritable:
            # NOTE: We don't use self.resolvePath, as we want to bypass resolvers
            local_path = self._resolvePath(path)
            dirname = os.path.dirname(local_path)
            if not os.path.exists(dirname): os.makedirs(dirname)
            request.load()
            data = request.data()
            self.app.save(local_path, ensureBytes(data))
            return request.returns(True)
        else:
            return self.local(request, path)

    @on(DELETE="/{path:any}")
    def delete( self, request, path ):
        """Deletes the local file at `path` when writable; returns whether
        a file was actually removed."""
        if self.isWritable:
            # NOTE: We don't use self.resolvePath, as we want to bypass resolvers
            local_path = self._resolvePath(path)
            if os.path.exists(local_path):
                os.unlink(local_path)
                return request.returns(True)
            else:
                return request.returns(False)
        else:
            return self.local(request, path)

    def directoryAsHtml( self, path, localPath ):
        """Returns a directory as HTML"""
        dirs = []
        files = []
        dot_files = []
        parent = os.path.dirname(path)
        if path and path not in ("/", "."):
            dirs.append("<li class='previous dir'><span class='bullet'>…</span><a class='parent' href='%s/%s'>(parent)</a></li>" % (self.PREFIX, parent))
        local_files = os.listdir(localPath)
        local_files.sort()
        for file_name in local_files:
            file_path = localPath + "/" + file_name
            # CSS class derived from the extension, e.g. ".py" -> "_py".
            ext = os.path.splitext(file_path)[1].replace(".", "_")
            if file_name.startswith("."): ext +=" hidden"
            file_url = self.PREFIX + ("/" + path + "/" +file_name).replace("//","/")
            if os.path.isdir(file_path):
                dirs.append(
                    "<li class='directory %s'>"
                    "<span class='bullet'>ƒ</span>"
                    "<a class='name' href='%s'>%s</a>"
                    "</li>" % (
                    ext, file_url, file_name
                ))
            else:
                try:
                    size = os.stat(file_path)[stat.ST_SIZE]
                except Exception:
                    size = 0
                # Human-readable size with b/kb/mb units.
                if size < 1000:
                    unit = "b"
                elif size < 1000000:
                    unit = "kb"
                    size = size / 1000.0
                else:
                    unit = "mb"
                    size = size / 1000000.0
                if size == int(size):
                    size = "{0:d}{1}".format(int(size),unit)
                else:
                    size = "{0:0.2f}{1}".format(size,unit)
                # Dot-files are listed after regular files.
                group = dot_files if file_name.startswith(".") else files
                if file_name.endswith(".gz"):
                    # Link to the uncompressed name; `local` transparently
                    # serves the .gz version.
                    group.append(
                        "<li class='file compressed %s'>"
                        "<span class='bullet'>—</span>"
                        "<a class='name' href='%s'>%s<span class=gz>.gz</span></a>"
                        "<span class='size'>%s</span>"
                        "</li>" % (
                        ext,
                        file_url[:-3], file_name[:-3],
                        size,
                    ))
                else:
                    group.append(
                        "<li class='file %s'>"
                        "<span class='bullet'>—</span>"
                        "<a class='name' href='%s'>%s</a>"
                        "<span class='size'>%s</span>"
                        "</li>" % (
                        ext, file_url, file_name, size,
                    ))
        return LIST_DIR_HTML % (path, LIST_DIR_CSS, path, "".join(dirs) + "".join(files + dot_files))

    def directoryAsList( self, path, localPath ):
        """Returns a directory as JSON"""
        parent = os.path.dirname(path)
        # NOTE(review): entries are joined onto the *URL* parent, not
        # `localPath`, so _describePath stats relative paths -- looks like a
        # latent bug in the original; behavior preserved, confirm upstream.
        local_files = list(os.path.join(parent, p) for p in os.listdir(localPath))
        local_files.sort()
        return [self._describePath(_) for _ in local_files]

    def _describePath( self, path ):
        """Returns a dict of stat-derived metadata for the given path."""
        s = os.stat(path)
        return {
            "name" : os.path.basename(path),
            "path" : path,
            "isDirectory" : os.path.isdir(path),
            "isFile" : os.path.isfile(path),
            "isLink" : os.path.islink(path),
            "mtime" : s[stat.ST_MTIME],
            "atime" : s[stat.ST_ATIME],
            "mode" : s[stat.ST_MODE],
            "size" : s[stat.ST_SIZE],
            "uid" : s[stat.ST_UID],
            "gid" : s[stat.ST_GID],
        }
# ------------------------------------------------------------------------------
#
# LIBRARY SERVER COMPONENT
#
# ------------------------------------------------------------------------------
class LibraryServer(Component):
    """Servers files from a library directory and expose them as 'lib/'. The
    library server supports the following file types:

    - CSS ('lib/css')
    - CleverCSS ('lib/ccss')
    - PythonicCSS('lib/pcss')
    - JavaScript ('lib/js')
    - Sugar ('lib/sjs')
    - Images ('lib/images', of type 'png', 'gif', 'jpg', 'ico' and 'svg')
    - PDF ('lib/pdf')
    - Fonts ('lib/fonts')
    - XSL ('lib/xsl')

    The implementation is not that flexible, but it's a very good start
    for most web applications. You can specialize this class later if you
    want to change the behaviour."""

    # Explicit content types for image extensions served by getImage.
    CONTENT_TYPES = dict(
        svg = "image/svg+xml",
        ico = "image/vnd.microsoft.icon",
        png = "image/png",
        gif = "image/gif",
        jpg = "image/jpeg",
        jpeg = "image/jpeg",
    )

    def __init__( self, library="", name="LibraryServer", cache=None,
        commands=dict(), minify=False, compress=False, cacheAggregates=True,
        cacheDuration=24*60*60, prefix=None ):
        """Creates the library server rooted at `library`, with an optional
        cache, external command overrides, and minify/compress flags.
        `cacheDuration` is the client-side cache lifetime in seconds."""
        Component.__init__(self, name=name, prefix=prefix)
        self.library = library
        self.cache = cache
        self.minify = minify
        self.compress = compress
        # Default external commands; the `commands` dict is copied into a
        # fresh dict, so the mutable default argument is never mutated.
        self.commands = dict(sugar="sugar")
        self.commands.update(commands)
        self.cacheAggregates = cacheAggregates
        self.cacheDuration = cacheDuration

    def start( self ):
        # Fall back to the app-level "library.path" config when no library
        # directory was given at construction time.
        self.library = self.library or self.app.config("library.path")

    def setCache( self, cache ):
        """Sets the cache backend and returns self (fluent style)."""
        self.cache = cache
        return self

    def _inCache( self, path ):
        """Tells whether `path` has a (fresh) entry in the cache.
        SignatureCache entries are keyed on the file's mtime."""
        if self.cache:
            if isinstance(self.cache, SignatureCache):
                return self.cache.has(path, SignatureCache.mtime(path))
            else:
                return self.cache.has(path)
        else:
            return False

    def _fromCache( self, path ):
        """Returns the cached data for `path` (cache must be set)."""
        assert self.cache
        if isinstance(self.cache, SignatureCache):
            return self.cache.get(path, SignatureCache.mtime(path))[1]
        else:
            return self.cache.get(path)

    def _toCache( self, path, data ):
        """Stores `data` under `path` in the cache (if any) and returns it."""
        if self.cache:
            if isinstance(self.cache, SignatureCache):
                return self.cache.set(path, SignatureCache.mtime(path), data)
            else:
                return self.cache.set(path, data)
        return data

    @on(GET_HEAD="lib/fonts/{path:rest}")
    def getFonts( self, request, path ):
        """Serves font files; font CSS goes through the library pipeline."""
        if path.endswith(".css"):
            return self._getFromLibrary(request, "fonts", path, "text/css; charset=utf-8")
        else:
            return request.respondFile(os.path.join(self.library, "fonts", path)).cache(seconds=self.cacheDuration)

    @on(GET_HEAD="lib/images/{image:([\w\-_]+/)*[\w\-_]+(\.png|\.gif|\.jpg|\.ico|\.svg)*}")
    def getImage( self, request, image ):
        """Serves an image, guessing its extension when omitted."""
        content_type = self.CONTENT_TYPES.get(image.rsplit(".",1)[-1])
        # NOTE: I had to add the content type as not adding it blocks the system in production in some circumstances...
        return request.respondFile(self._guessPath("images", image, extensions=(".png", ".gif", ".jpg", ".ico", ".svg")), content_type).cache(seconds=self.cacheDuration)

    @on(GET_HEAD="lib/pdf/{script:[^/]+\.pdf}")
    def getPDF( self, request, script ):
        """Serves a PDF file from 'lib/pdf'."""
        return request.respondFile(os.path.join(self.library, "pdf", script)).cache(seconds=self.cacheDuration)

    @on(GET_HEAD="lib/{script:[^/]+\.mf}")
    def getManifest( self, request, script ):
        """Serves an HTML5 cache-manifest file."""
        return request.respondFile(os.path.join(self.library,script), "text/cache-manifest").cache(seconds=self.cacheDuration)

    @on(GET_HEAD="lib/css/{paths:rest}")
    def getCSS( self, request, paths ):
        """Serves one or more (`+`-separated) CSS files."""
        return self._getFromLibrary(request, "css", paths, "text/css; charset=utf-8")

    @on(GET_HEAD="lib/ccss/{paths:rest}")
    def getCCSS( self, request, paths ):
        """Serves CleverCSS files, converted to CSS."""
        return self._getFromLibrary(request, "ccss", paths, "text/css; charset=utf-8")

    @on(GET_HEAD="lib/pcss/{paths:rest}")
    def getPCSS( self, request, paths ):
        """Serves PythonicCSS files, converted to CSS."""
        return self._getFromLibrary(request, "pcss", paths, "text/css; charset=utf-8")

    @on(GET_HEAD="lib/xsl/{paths:rest}")
    def getXSL( self, request, paths ):
        """Serves XSL stylesheets."""
        return self._getFromLibrary(request, "xsl", paths, "text/xsl; charset=utf-8")

    @on(GET_HEAD="lib/{prefix:(js|sjs)}/{paths:rest}")
    def getJavaScript( self, request, prefix, paths ):
        """Serves JavaScript ('js') or Sugar-compiled ('sjs') scripts."""
        return self._getFromLibrary(request, prefix, paths, "text/javascript; charset=utf-8")

    def _getFromLibrary( self, request, prefix, paths, contentType ):
        """Gets the `+` separated list of files given in `paths`, relative to
        this library's `root` and `prefix`, returning a concatenated result of
        the given contentType."""
        cache_path = prefix + paths
        # Aggregates (paths containing "+") are cached under the combined
        # key; single files bypass the aggregate cache.
        if not self.cacheAggregates or not self._inCache(cache_path) or cache_path.find("+") == -1:
            result = []
            for path in paths.split("+"):
                root = self.library
                path = os.path.join(root, prefix, path)
                if not self._inCache(path):
                    if not os.path.exists(path):
                        return request.notFound()
                    data = self._processPath(path)
                    if data is None:
                        raise Exception("Processing path {0} returned None".format(path))
                    self._toCache(path, data)
                else:
                    data = self._fromCache(path)
                # FIXME: Maybe we should do UTF8?
                result.append(ensureBytes(data))
            response_data = b"\n".join(result)
            self._toCache(cache_path, response_data)
        else:
            response_data = self._fromCache(cache_path)
        return request.respond(response_data, contentType=contentType).compress(self.compress).cache(seconds=self.cacheDuration)

    def _processPath( self, path ):
        """Processes the file at the given path using on of the dedicated
        file processor."""
        if path.endswith(".sjs"): return self._processSJS(path)
        elif path.endswith(".js"): return self._processJS(path)
        elif path.endswith(".ccss"): return self._processCCSS(path)
        elif path.endswith(".pcss"): return self._processPCSS(path)
        elif path.endswith(".css"): return self._processCSS(path)
        elif path.endswith(".paml"): return self._processPAML(path)
        elif path.endswith(".xsl"): return self._processXSL(path)
        else: raise Exception("Format not supported: " + path)

    def _processCSS( self, path ):
        """Processes a CSS file, minifyiing it if `cssmin` is installed."""
        data = self.app.load(path)
        if self.minify and cssmin: data = cssmin.cssmin(data)
        return data

    def _processCCSS( self, path ):
        """Processes a CCSS file, minifying it if `cssmin` is installed.
        Requires `clevercss`"""
        data = self.app.load(path)
        data = clevercss.convert(data)
        if self.minify and cssmin: data = cssmin.cssmin(data)
        return data

    def _processPCSS( self, path ):
        """Processes a PCSS file, minifying it if `cssmin` is installed.
        Requires `clevercss`"""
        data = None
        tries = 0
        # TODO: This does not work yet, but it is the best for an application
        # Right now, we default to piping
        # data = self.app.load(path)
        # data = pythoniccss.convert(data)
        # Retry up to 3 times: the external converter occasionally produces
        # empty output.
        # NOTE(review): cmd.stdout.read() returns bytes; cssmin presumably
        # expects str -- confirm on Python 3.
        while (not data) and tries < 3:
            command = "%s %s" % (self.commands.get("pythoniccss", "pythoniccss"), path)
            cmd = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
            data = cmd.stdout.read()
            tries += 1
            cmd.wait()
        if data:
            if self.minify and cssmin: data = cssmin.cssmin(data)
        return data

    def _processSJS( self, path ):
        """Processes a Sugar JS file, minifying it if `jsmin` is installed.
        Requires `sugar`"""
        data = ""
        tries = 0
        # NOTE: For some reason, sugar sometimes fails, so we add a
        # number of retries so that we increase the "chances" of the
        # file to be properly loaded
        while (not data) and tries < 3:
            command = "%s -cljs %s %s" % (self.commands["sugar"], "-L%s" % (self.library + "/sjs"), path)
            cmd = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
            data = cmd.stdout.read()
            tries += 1
            cmd.wait()
        if data:
            if self.minify and jsmin: data = jsmin.jsmin(data)
        return data

    def _processPAML( self, path ):
        """Processes a PAML template into HTML. Requires `paml`."""
        data = self.app.load(path)
        return paml.process(data)

    def _processXSL( self, path ):
        """Loads an XSL stylesheet as-is (no processing)."""
        return self.app.load(path)

    def _processJS( self, path ):
        """Processes a JS file, minifying it if `jsmin` is installed."""
        data = self.app.load(path)
        if self.minify and jsmin: data = jsmin.jsmin(data)
        return data

    def _guessPath( self, parent, filename, extensions ):
        """Tries to locate the file with the given `filename` in the `parent` directory of this
        library, appending the given `extensions` if the file is not found."""
        path = os.path.join(self.library, parent, filename)
        if os.path.exists(path):
            return path
        for ext in extensions:
            p = path + ext
            if os.path.exists(p):
                return p
        return None
# EOF - vim: tw=80 ts=4 sw=4 noet
| [
"luxo237@gmail.com"
] | luxo237@gmail.com |
67d8b4ed1dfaee1604a007b001cd7abea8c31b5c | 1d61bf0b287533c9eb89bf71e217ead8cffb7811 | /echo.py | 050b6333a8aa630e713d3421b7fb828098943c04 | [] | no_license | chukotka12/PP4E-GitHub | 2f6bf5e431a211beb9e1b6aa56b495770f07e6e4 | c9347ffa20f598b8c469082788a964549cd5df2b | refs/heads/master | 2020-05-30T04:43:30.570872 | 2019-09-10T14:33:14 | 2019-09-10T14:33:14 | 189,545,845 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 61 | py | import sys
print(sys.version)
input('Нажмите Enter')
| [
"chukreev.g@gmail.com"
] | chukreev.g@gmail.com |
fa9e74de9960aafc87cf37002938ddea36129615 | f7b3c8f6c10a742c742ca21184f797f0ad737408 | /svn2git.py | b172a8a5d083d46fd3735a02aa1a7d7abca7f490 | [] | no_license | mcdonc/svn2git | 88488a415c8e0a514a0cc42d7dd4bf5d9ea9504b | f37b624e3b25746ae61fab07045cdb728f84828c | refs/heads/master | 2020-04-20T15:52:18.217285 | 2011-08-18T18:21:10 | 2011-08-18T18:21:10 | 1,415,728 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,108 | py | # usage:
#
# python2.7 svn2git.py \
# http://svn.repoze.org/repoze.catalog \
# git@github.com:repoze/repoze.catalog.git \
# branch1 \
# branch2
#
# (branch specs are optional, otherwise only trunk and tags are imported)
#
# users.txt should be in /tmp/users.txt
#
# requires python 2.7
import tempfile
import shutil
import os
import subprocess
import re
import sys
# Matches remote svn tag branches such as "tags/1.2"; group(1) captures the
# version string (must start with a digit).
tag_re = re.compile(r'tags/(\d.*)')
def do(svn, git, *branches):
    """Mirror a Subversion repository to a git remote.

    Python 2 only (uses print statements). Clones `svn` with git-svn into a
    temp dir, converts svn tag branches to real git tags, checks out any
    requested `branches`, then pushes master, branches and tags to `git`.
    The author map is read from /tmp/users.txt. The temp dir is always
    removed, even on failure.

    NOTE(review): shell commands are built by string interpolation and run
    via os.system -- fine for a trusted local tool, but not safe for
    untrusted repository URLs or branch names.
    """
    cmd = "git svn clone --stdlayout --no-metadata -A /tmp/users.txt %s tmp"
    cmd = cmd % svn
    wd = tempfile.mkdtemp()
    try:
        os.chdir(wd)
        result = os.system(cmd)
        if result:
            # Non-zero exit status from git-svn clone: abort.
            raise ValueError(result)
        os.chdir('tmp')
        r = subprocess.check_output(['git', 'branch', '-r'])
        tag_branches = [ x.strip() for x in filter(None, r.split('\n'))]
        for tag_branch in tag_branches:
            matched = tag_re.match(tag_branch)
            if matched:
                # Skip peg-revision branches like "tags/1.0@1234".
                if not '@' in tag_branch:
                    tag = matched.group(1)
                    print 'making tag %s' % tag
                    # Materialize the svn tag branch as a temporary local
                    # branch, tag it from master, then delete the branch.
                    os.system('git checkout -b tag_x remotes/%s' % tag_branch)
                    os.system('git checkout master')
                    os.system('git tag %s tag_x' % tag)
                    os.system('git branch -D tag_x')
        for branch in branches:
            print 'creating branch %s' % branch
            os.system('git checkout -b %s remotes/%s' % (branch, branch))
            os.system('git checkout master')
        os.chdir('..')
        # Re-clone into "dest" to get a clean repo, then push everything to
        # the target remote (named "xx").
        os.system('git clone tmp dest')
        os.chdir('dest')
        os.system('git remote add xx %s' % git)
        os.system('git push xx master')
        for branch in branches:
            print 'pushing branch %s' % branch
            os.system('git checkout -b %s remotes/origin/%s' % (branch, branch))
            os.system('git push xx %s' % branch)
        os.system('git push xx --tags')
    finally:
        shutil.rmtree(wd)
# CLI entry point: argv[1] = svn URL, argv[2] = git remote URL, remaining
# arguments are branch names to convert (optional).
if __name__ == '__main__':
    do(sys.argv[1], sys.argv[2], *sys.argv[3:])
| [
"chrism@plope.com"
] | chrism@plope.com |
cdd207946758af304736b74d5fb083e7c096090c | 5c099927aedc6fdbc515f40ff543c65b3bf4ec67 | /algorithms/combination-sum-iii/src/Solution.py | 0a4d6205ea8f236eb6dd457b917b0509fb19bddc | [] | no_license | bingzhong-project/leetcode | 7a99cb6af1adfbd9bb1996a7f66a65679053c478 | ba82e7d94840b3fec272e4c5f82e3a2cfe4b0505 | refs/heads/master | 2020-04-15T09:27:33.979519 | 2020-03-10T03:43:07 | 2020-03-10T03:43:07 | 164,550,933 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 540 | py | class Solution:
def combinationSum3(self, k, n):
"""
:type k: int
:type n: int
:rtype: List[List[int]]
"""
def dfs(target, k, start, paths, res):
if k == 0:
if target == 0:
res.append(paths)
return
for i in range(start, 10):
if target - i >= 0 and k - 1 >= 0:
dfs(target - i, k - 1, i + 1, paths + [i], res)
res = []
dfs(n, k, 1, [], res)
return res
| [
"zhongyongbin@foxmail.com"
] | zhongyongbin@foxmail.com |
5ee023b391e258a9e5fa78d3a05981119497a72a | 33e4aaa1a31363336db230eba9a4a65cebc49491 | /train-d1.py | 47f04edee3bcf07f7ad16143c5b6d0a23967c4b5 | [
"BSD-3-Clause"
] | permissive | chonlei/neural-ode-ion-channels | 06ecb6d72b90704a8d6171573d7b067a28ec8595 | 10235c162a5e569ab0b3e48c933bf46359319afb | refs/heads/main | 2023-04-12T01:27:50.049286 | 2021-08-04T06:21:44 | 2021-08-04T06:21:44 | 360,855,175 | 4 | 2 | BSD-3-Clause | 2021-07-07T10:09:43 | 2021-04-23T11:02:55 | Python | UTF-8 | Python | false | false | 36,892 | py | import sys
import os
import argparse
import time
import numpy as np
from scipy.interpolate import interp1d
from scipy.interpolate import UnivariateSpline
import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
from smoothing import smooth
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR
# Command-line options: ODE solver method, GPU index, and flags selecting
# adjoint backprop, debug output, prediction-only mode, and cached results.
parser = argparse.ArgumentParser('IKr discrepancy fit with NN-f.')
parser.add_argument('--method', type=str, choices=['dopri5', 'adams'], default='dopri5')
parser.add_argument('--gpu', type=int, default=0)
parser.add_argument('--adjoint', action='store_true')
parser.add_argument('--debug', action='store_true')
parser.add_argument('--pred', action='store_true')
parser.add_argument('--cached', action='store_true')
args = parser.parse_args()
# Choose the ODE integrator: the adjoint variant backpropagates with O(1)
# memory by re-solving backwards; the plain variant differentiates through
# the solver's internals.
if args.adjoint:
    from torchdiffeq import odeint_adjoint as odeint
else:
    from torchdiffeq import odeint

# Run on the requested GPU when CUDA is available, otherwise on CPU.
device = torch.device('cuda:' + str(args.gpu) if torch.cuda.is_available() else 'cpu')
# Set random seed
np.random.seed(0)
torch.manual_seed(0)
noise_sigma = 0.1
use_pt3 = False
prediction_protocol = np.loadtxt('test-protocols/ap2hz.csv', skiprows=1, delimiter=',')
prediction_protocol[:, 0] *= 1e3 # s -> ms
#
t1 = torch.linspace(0., 8000, 80001).to(device)
t2 = torch.linspace(0., 10000, 100001).to(device)
if use_pt3:
t3 = torch.linspace(0., 5000, 50001).to(device)
mask3s = [np.ones(len(t3), dtype=bool) for i in range(6)]
mask1 = np.ones(len(t1), dtype=bool)
# Filter out +/- 5 time points for each step (numerical issue for estimating derivatives)
mask1[list(range(9995, 10050))
+ list(range(59995, 60050))
+ list(range(69995, 70050))
+ list(range(74995, 75050))] \
= False
mask2 = np.ones(len(t2), dtype=bool)
mask2[list(range(9995, 10050))
+ list(range(29995, 30050))
+ list(range(89995, 90050))
+ list(range(94995, 95050))] \
= False
prediction_t = torch.linspace(0., 3000, 1501).to(device)
#
# Activation
#
protocol_batches1 = []
pt1 = np.linspace(0., 8000., 80001) # 8 seconds, 0.1 ms interval
template_v = np.zeros(pt1.shape)
template_v[:10000] = -80
# template_v[10000:60000] to be set
template_v[60000:70000] = -40
template_v[70000:75000] = -120
template_v[75000:] = -80
for v_i in [-60, -40, -20, 0, 20, 40, 60]:
v = np.copy(template_v)
v[10000:60000] = v_i
protocol_batches1.append(np.array([pt1, v]).T)
#
# Deactivation
#
protocol_batches2 = []
pt2 = np.linspace(0., 10000., 100001) # 10 seconds, 0.1 ms interval
template_v = np.zeros(pt2.shape)
template_v[:10000] = -80
template_v[10000:30000] = 50
# template_v[30000:90000] to be set
template_v[90000:95000] = -120
template_v[95000:] = -80
for v_i in [-120, -110, -100, -90, -80, -70, -60, -50, -40]:
v = np.copy(template_v)
v[30000:90000] = v_i
protocol_batches2.append(np.array([pt2, v]).T)
if use_pt3:
#
# Activation time constant at 40mV
#
protocol_batches3 = []
pt3 = np.linspace(0., 5000., 50001) # 5 seconds, 0.1 ms interval
for i, t_i in enumerate([30, 100, 300, 1000, 3000, 10000]): # 0.1ms
v = np.zeros(pt3.shape)
v[:10000] = -80
v[10000:10000+t_i] = 40
v[10000+t_i:35000+t_i] = -120
v[35000+t_i:] = -80
protocol_batches3.append(np.array([pt3, v]).T)
# NOTE data time has the same index as the protocol time
mask3s[i][list(range(9995, 10005))
+ list(range(9995+t_i, 10005+t_i))
+ list(range(34995+t_i, 35005+t_i))] \
= False
true_y0s = [torch.tensor([[1., 0.]]).to(device), # what you get after holding at +40mV
torch.tensor([[0., 1.]]).to(device)] # (roughly) what you get after holding at -80mV
gt_true_y0s = [torch.tensor([[0., 0., 1., 0., 0., 0.]]).to(device), # what you get after holding at +40mV
torch.tensor([[0., 1., 0., 0., 0., 0.]]).to(device)] # (roughly) what you get after holding at -80mV
#
#
#
def makedirs(dirname):
if not os.path.exists(dirname):
os.makedirs(dirname)
makedirs('d1')
#
#
#
class Lambda(nn.Module):
    """Ground-truth IKr model used to generate the synthetic data.

    Six-state Markov kinetic scheme with states (c1, c2, i, ic1, ic2, o);
    `o` is the open probability (callers compute the observed current as
    o * (V + 86)).  All transition rates are exponential functions of the
    membrane voltage V(t), which is supplied as a fixed-form protocol.
    """

    def __init__(self):
        super(Lambda, self).__init__()
        # Best of 10 fits for data herg25oc1 cell B06 (seed 542811797)
        # The 1e-3 factor rescales the fitted rate parameters
        # (time unit conversion to match the ms-based protocol time).
        self.p1 = 5.94625498751561316e-02 * 1e-3
        self.p2 = 1.21417701632850410e+02 * 1e-3
        self.p3 = 4.76436985414236425e+00 * 1e-3
        self.p4 = 3.49383233960778904e-03 * 1e-3
        self.p5 = 9.62243079990877703e+01 * 1e-3
        self.p6 = 2.26404683824047979e+01 * 1e-3
        self.p7 = 8.00924780462999131e+00 * 1e-3
        self.p8 = 2.43749808069009823e+01 * 1e-3
        self.p9 = 2.06822607368134157e+02 * 1e-3
        self.p10 = 3.30791433507312362e+01 * 1e-3
        self.p11 = 1.26069071928587784e+00 * 1e-3
        self.p12 = 2.24844970727316245e+01 * 1e-3

    def set_fixed_form_voltage_protocol(self, t, v):
        # Regular time point voltage protocol time series.
        # Builds a linear interpolant V(t) for use inside forward().
        self._t_regular = t
        self._v_regular = v
        self.__v = interp1d(t, v)

    def _v(self, t):
        # Voltage at (scalar tensor) time t, returned as a torch tensor.
        return torch.from_numpy(self.__v([t.cpu().numpy()])).to(device)

    def voltage(self, t):
        # Return voltage as a NumPy array (convenience accessor).
        return self._v(t).numpy()

    def forward(self, t, y):
        """Right-hand side dy/dt of the 6-state Markov model."""
        c1, c2, i, ic1, ic2, o = torch.unbind(y[0])
        try:
            v = self._v(t).to(device)
        except ValueError:
            # Solver may probe times outside the protocol range;
            # fall back to the -80 mV holding potential.
            v = torch.tensor([-80]).to(device)
        # Voltage-dependent transition rates (exponential in V)
        a1 = self.p1 * torch.exp(self.p2 * v)
        b1 = self.p3 * torch.exp(-self.p4 * v)
        bh = self.p5 * torch.exp(self.p6 * v)
        ah = self.p7 * torch.exp(-self.p8 * v)
        a2 = self.p9 * torch.exp(self.p10 * v)
        b2 = self.p11 * torch.exp(-self.p12 * v)
        # Master equations for the state occupancies
        dc1dt = a1 * c2 + ah * ic1 + b2 * o - (b1 + bh + a2) * c1
        dc2dt = b1 * c1 + ah * ic2 - (a1 + bh) * c2
        didt = a2 * ic1 + bh * o - (b2 + ah) * i
        dic1dt = a1 * ic2 + bh * c1 + b2 * i - (b1 + ah + a2) * ic1
        dic2dt = b1 * ic1 + bh * c2 - (ah + a1) * ic2
        dodt = a2 * c1 + ah * i - (b2 + bh) * o
        return torch.stack([dc1dt[0], dc2dt[0], didt[0], dic1dt[0], dic2dt[0], dodt[0]])
class ODEFunc(nn.Module):
    """Neural-ODE candidate model with two gates (a, r).

    The derivative of the activation gate `a` is learned by an MLP taking
    (V / vrange, a) as input; the inactivation gate `r` keeps a fixed
    two-rate analytic form using the known parameters p5-p8 (same values
    as the ground-truth Lambda model).
    """

    def __init__(self):
        super(ODEFunc, self).__init__()
        # MLP mapping (normalised V, a) -> da/dt * netscale
        self.net = nn.Sequential(
            nn.Linear(2, 200),
            nn.LeakyReLU(),
            nn.Linear(200, 200),
            nn.LeakyReLU(),
            nn.Linear(200, 200),
            nn.LeakyReLU(),
            nn.Linear(200, 200),
            nn.LeakyReLU(),
            nn.Linear(200, 200),
            nn.LeakyReLU(),
            nn.Linear(200, 200),
            nn.LeakyReLU(),
            nn.Linear(200, 1),
        )

        # Gaussian weight init, zero biases.
        for m in self.net.modules():
            if isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, mean=0, std=0.1)
                nn.init.constant_(m.bias, val=0)

        # Input/output scaling for the network.
        self.vrange = torch.tensor([100.]).to(device)
        self.netscale = torch.tensor([1000.]).to(device)

        # Fixed r-gate rate parameters (assumed known; match Lambda).
        self.p5 = 9.62243079990877703e+01 * 1e-3
        self.p6 = 2.26404683824047979e+01 * 1e-3
        self.p7 = 8.00924780462999131e+00 * 1e-3
        self.p8 = 2.43749808069009823e+01 * 1e-3

        self.unity = torch.tensor([1]).to(device)

    def set_fixed_form_voltage_protocol(self, t, v):
        # Regular time point voltage protocol time series.
        self._t_regular = t
        self._v_regular = v
        self.__v = interp1d(t, v)

    def _v(self, t):
        # Voltage at time t (detach so interp1d sees a plain NumPy value).
        return torch.from_numpy(self.__v([t.cpu().detach().numpy()]))

    def voltage(self, t):
        # Return voltage as a NumPy array (convenience accessor).
        return self._v(t).numpy()

    def forward(self, t, y):
        """Right-hand side d(a, r)/dt; y has shape (1, 2)."""
        a, r = torch.unbind(y, dim=1)
        try:
            v = self._v(t).to(device)
        except ValueError:
            # Solver probed outside the protocol range; use holding potential.
            v = torch.tensor([-80]).to(device)
        nv = v / self.vrange  # normalised voltage for the network input
        # r gate: fixed analytic rates
        k3 = self.p5 * torch.exp(self.p6 * v)
        k4 = self.p7 * torch.exp(-self.p8 * v)
        drdt = -k3 * r + k4 * (self.unity - r)
        # a gate: learned derivative (rescaled network output)
        dadt = self.net(torch.stack([nv[0], a[0]]).float()) / self.netscale
        return torch.stack([dadt[0], drdt[0]]).reshape(1, -1)
#
#
#
#
#
#
if args.pred:
true_model = Lambda()
func = ODEFunc().to(device)
func.load_state_dict(torch.load('d1/model-state-dict.pt'))
func.eval()
prediction_protocol2 = np.loadtxt('test-protocols/staircase.csv', skiprows=1, delimiter=',')
prediction_protocol2[:, 0] *= 1e3 # s -> ms
prediction_t2 = torch.linspace(0., 15000, 7501).to(device)
prediction_protocol3 = np.loadtxt('test-protocols/sinewave.csv', skiprows=1, delimiter=',')
prediction_protocol3[:, 0] *= 1e3 # s -> ms
prediction_t3 = torch.linspace(0., 8000, 4001).to(device)
prediction_protocol4 = np.loadtxt('test-protocols/aps.csv', skiprows=1, delimiter=',')
prediction_protocol4[:, 0] *= 1e3 # s -> ms
prediction_t4 = torch.linspace(0., 8000, 4001).to(device)
ii = 999
with torch.no_grad():
###
### Predict unseen protocols
###
#
# AP 2Hz
#
# Ground truth
true_y0 = gt_true_y0s[1] # (roughly holding at -80mV)
true_model.set_fixed_form_voltage_protocol(prediction_protocol[:, 0], prediction_protocol[:, 1])
prediction_y = odeint(true_model, true_y0, prediction_t, method='dopri5')
prediction_yo = prediction_y[:, 0, -1] * (true_model._v(prediction_t) + 86)
# Trained Neural ODE
true_y0 = true_y0s[1] # (roughly holding at -80mV)
func.set_fixed_form_voltage_protocol(prediction_protocol[:, 0], prediction_protocol[:, 1])
pred_y = odeint(func, true_y0, prediction_t).to(device)
pred_yo = pred_y[:, 0, 0] * pred_y[:, 0, 1] * (func._v(prediction_t).to(device) + 86)
loss = torch.mean(torch.abs(pred_yo - prediction_yo))
print('AP 2Hz prediction | Total Loss {:.6f}'.format(loss.item()))
prediction_yo += torch.from_numpy(np.random.normal(0, noise_sigma, prediction_t.cpu().numpy().shape)).to(device)
fig1, ax1 = plt.subplots(1, 1, figsize=(6, 4))
ax1.set_xlabel('t')
ax1.set_ylabel('i')
ax1.plot(prediction_t.cpu().numpy(), prediction_yo.reshape(-1).cpu().numpy(), 'g-')
ax1.plot(prediction_t.cpu().numpy(), pred_yo.reshape(-1).cpu().numpy(), 'b--')
ax1.set_xlim(prediction_t.cpu().min(), prediction_t.cpu().max())
fig1.tight_layout()
fig1.savefig('d1/{:03d}-ap2hz'.format(ii), dpi=200)
plt.close(fig1)
#
# Kylie's APs
#
# Ground truth
true_y0 = gt_true_y0s[1] # (roughly holding at -80mV)
true_model.set_fixed_form_voltage_protocol(prediction_protocol4[:, 0], prediction_protocol4[:, 1])
prediction_y4 = odeint(true_model, true_y0, prediction_t4, method='dopri5')
prediction_yo4 = prediction_y4[:, 0, -1] * (true_model._v(prediction_t4) + 86)
# Trained Neural ODE
true_y0 = true_y0s[1] # (roughly holding at -80mV)
func.set_fixed_form_voltage_protocol(prediction_protocol4[:, 0], prediction_protocol4[:, 1])
pred_y4 = odeint(func, true_y0, prediction_t4).to(device)
pred_yo4 = pred_y4[:, 0, 0] * pred_y4[:, 0, 1] * (func._v(prediction_t4).to(device) + 86)
loss = torch.mean(torch.abs(pred_yo4 - prediction_yo4))
print('APs prediction | Total Loss {:.6f}'.format(loss.item()))
prediction_yo4 += torch.from_numpy(np.random.normal(0, noise_sigma, prediction_t4.cpu().numpy().shape)).to(device)
fig1, ax1 = plt.subplots(1, 1, figsize=(6, 4))
ax1.set_xlabel('t')
ax1.set_ylabel('i')
ax1.plot(prediction_t4.cpu().numpy(), prediction_yo4.reshape(-1).cpu().numpy(), 'g-')
ax1.plot(prediction_t4.cpu().numpy(), pred_yo4.reshape(-1).cpu().numpy(), 'b--')
ax1.set_xlim(prediction_t4.cpu().min(), prediction_t4.cpu().max())
fig1.tight_layout()
fig1.savefig('d1/{:03d}-aps'.format(ii), dpi=200)
plt.close(fig1)
#
# Sinewave
#
# Ground truth
true_y0 = gt_true_y0s[1] # (roughly holding at -80mV)
true_model.set_fixed_form_voltage_protocol(prediction_protocol3[:, 0], prediction_protocol3[:, 1])
prediction_y3 = odeint(true_model, true_y0, prediction_t3, method='dopri5')
prediction_yo3 = prediction_y3[:, 0, -1] * (true_model._v(prediction_t3) + 86)
# Trained Neural ODE
true_y0 = true_y0s[1] # (roughly holding at -80mV)
func.set_fixed_form_voltage_protocol(prediction_protocol3[:, 0], prediction_protocol3[:, 1])
pred_y3 = odeint(func, true_y0, prediction_t3).to(device)
pred_yo3 = pred_y3[:, 0, 0] * pred_y3[:, 0, 1] * (func._v(prediction_t3).to(device) + 86)
loss = torch.mean(torch.abs(pred_yo3 - prediction_yo3))
print('Sinewave prediction | Total Loss {:.6f}'.format(loss.item()))
prediction_yo4 += torch.from_numpy(np.random.normal(0, noise_sigma, prediction_t3.cpu().numpy().shape)).to(device)
fig1, ax1 = plt.subplots(1, 1, figsize=(6, 4))
ax1.set_xlabel('t')
ax1.set_ylabel('i')
ax1.plot(prediction_t3.cpu().numpy(), prediction_yo3.reshape(-1).cpu().numpy(), 'g-')
ax1.plot(prediction_t3.cpu().numpy(), pred_yo3.reshape(-1).cpu().numpy(), 'b--')
ax1.set_xlim(prediction_t3.cpu().min(), prediction_t3.cpu().max())
fig1.tight_layout()
fig1.savefig('d1/{:03d}-sinewave'.format(ii), dpi=200)
plt.close(fig1)
#
# Staircase
#
# Ground truth
true_y0 = gt_true_y0s[1] # (roughly holding at -80mV)
true_model.set_fixed_form_voltage_protocol(prediction_protocol2[:, 0], prediction_protocol2[:, 1])
prediction_y2 = odeint(true_model, true_y0, prediction_t2, method='dopri5')
prediction_yo2 = prediction_y2[:, 0, -1] * (true_model._v(prediction_t2) + 86)
# Trained Neural ODE
true_y0 = true_y0s[1] # (roughly holding at -80mV)
func.set_fixed_form_voltage_protocol(prediction_protocol2[:, 0], prediction_protocol2[:, 1])
pred_y2 = odeint(func, true_y0, prediction_t2).to(device)
pred_yo2 = pred_y2[:, 0, 0] * pred_y2[:, 0, 1] * (func._v(prediction_t2).to(device) + 86)
loss = torch.mean(torch.abs(pred_yo2 - prediction_yo2))
print('Staircase prediction | Total Loss {:.6f}'.format(loss.item()))
prediction_yo2 += torch.from_numpy(np.random.normal(0, noise_sigma, prediction_t2.cpu().numpy().shape)).to(device)
fig1, ax1 = plt.subplots(1, 1, figsize=(6, 4))
ax1.set_xlabel('t')
ax1.set_ylabel('i')
ax1.plot(prediction_t2.cpu().numpy(), prediction_yo2.reshape(-1).cpu().numpy(), 'g-')
ax1.plot(prediction_t2.cpu().numpy(), pred_yo2.reshape(-1).cpu().numpy(), 'b--')
ax1.set_xlim(prediction_t2.cpu().min(), prediction_t2.cpu().max())
fig1.tight_layout()
fig1.savefig('d1/{:03d}-staircase'.format(ii), dpi=200)
plt.close(fig1)
#
# Pr 3 Activation (fit two time constants?!)
#
t = torch.linspace(0., 8000., 8001).to(device) # 8 seconds, 1 ms interval
template_v = np.zeros(t.cpu().numpy().shape)
template_v[:1000] = -80
# template_v[1000:6000] to be set
template_v[6000:7000] = -40
template_v[7000:7500] = -120
template_v[7500:] = -80
fig1, ax1 = plt.subplots(1, 1, figsize=(6, 4))
ax1.set_xlabel('t')
ax1.set_ylabel('i')
print('Activation prediction:')
for v_i in [-60, -40, -20, 0, 20, 40, 60]:
v = np.copy(template_v)
v[1000:6000] = v_i
# Ground truth
true_y0 = gt_true_y0s[1] # (roughly holding at -80mV)
true_model.set_fixed_form_voltage_protocol(t.cpu().numpy(), v)
yt = odeint(true_model, true_y0, t, method='dopri5')
ot = yt[:, 0, -1] * (true_model._v(t) + 86)
# Trained Neural ODE
true_y0 = true_y0s[1] # (roughly holding at -80mV)
func.set_fixed_form_voltage_protocol(t.cpu().numpy(), v)
yp = odeint(func, true_y0, t).to(device)
op = yp[:, 0, 0] * yp[:, 0, 1] * (func._v(t).to(device) + 86)
loss = torch.mean(torch.abs(op - ot))
print(' {:.1f}mV | Total Loss {:.6f}'.format(v_i, loss.item()))
ot += torch.from_numpy(np.random.normal(0, noise_sigma, t.cpu().numpy().shape)).to(device)
ax1.plot(t.cpu().numpy(), ot.reshape(-1).cpu().numpy(), c='#7f7f7f')
ax1.plot(t.cpu().numpy(), op.reshape(-1).cpu().numpy(), c='C0', ls='--')
ax1.set_xlim(t.cpu().min(), t.cpu().max())
fig1.tight_layout()
fig1.savefig('d1/{:03d}-act'.format(ii), dpi=200)
plt.close(fig1)
#
# Pr 5 Deactivation
#
t = torch.linspace(0., 10000., 10001).to(device) # 8 seconds, 1 ms interval
template_v = np.zeros(t.cpu().numpy().shape)
template_v[:1000] = -80
template_v[1000:3000] = 50
# template_v[3000:9000] to be set
template_v[9000:9500] = -120
template_v[9500:] = -80
fig1, ax1 = plt.subplots(1, 1, figsize=(6, 4))
ax1.set_xlabel('t')
ax1.set_ylabel('i')
print('Deactivation prediction:')
for v_i in [-120, -110, -100, -90, -80, -70, -60, -50, -40]:
v = np.copy(template_v)
v[3000:9000] = v_i
# Ground truth
true_y0 = gt_true_y0s[1] # (roughly holding at -80mV)
true_model.set_fixed_form_voltage_protocol(t.cpu().numpy(), v)
yt = odeint(true_model, true_y0, t, method='dopri5')
ot = yt[:, 0, -1] * (true_model._v(t) + 86)
# Trained Neural ODE
true_y0 = true_y0s[1] # (roughly holding at -80mV)
func.set_fixed_form_voltage_protocol(t.cpu().numpy(), v)
yp = odeint(func, true_y0, t).to(device)
op = yp[:, 0, 0] * yp[:, 0, 1] * (func._v(t).to(device) + 86)
loss = torch.mean(torch.abs(op - ot))
print(' {:.1f}mV | Total Loss {:.6f}'.format(v_i, loss.item()))
ot += torch.from_numpy(np.random.normal(0, noise_sigma, t.cpu().numpy().shape)).to(device)
ax1.plot(t.cpu().numpy(), ot.reshape(-1).cpu().numpy(), c='#7f7f7f')
ax1.plot(t.cpu().numpy(), op.reshape(-1).cpu().numpy(), c='C0', ls='--')
ax1.set_xlim(t.cpu().min(), t.cpu().max())
fig1.tight_layout()
fig1.savefig('d1/{:03d}-deact'.format(ii), dpi=200)
plt.close(fig1)
#
# Pr 2 Activation time constant at 40mV
#
t = torch.linspace(0., 5000., 5001).to(device) # 8 seconds, 1 ms interval
fig1, ax1 = plt.subplots(1, 1, figsize=(6, 4))
ax1.set_xlabel('t')
ax1.set_ylabel('i')
print('Activation time constant at 40mV prediction:')
for t_i in [3, 10, 30, 100, 300, 1000]:
v = np.zeros(t.cpu().numpy().shape)
v[:1000] = -80
v[1000:1000+t_i] = 40
v[1000+t_i:3500+t_i] = -120
v[3500+t_i:] = -80
# Ground truth
true_y0 = gt_true_y0s[1] # (roughly holding at -80mV)
true_model.set_fixed_form_voltage_protocol(t.cpu().numpy(), v)
yt = odeint(true_model, true_y0, t, method='dopri5')
ot = yt[:, 0, -1] * (true_model._v(t) + 86)
# Trained Neural ODE
true_y0 = true_y0s[1] # (roughly holding at -80mV)
func.set_fixed_form_voltage_protocol(t.cpu().numpy(), v)
yp = odeint(func, true_y0, t).to(device)
op = yp[:, 0, 0] * yp[:, 0, 1] * (func._v(t).to(device) + 86)
loss = torch.mean(torch.abs(op - ot))
print(' {:.1f}ms | Total Loss {:.6f}'.format(t_i, loss.item()))
ot += torch.from_numpy(np.random.normal(0, noise_sigma, t.cpu().numpy().shape)).to(device)
ax1.plot(t.cpu().numpy(), ot.reshape(-1).cpu().numpy(), c='#7f7f7f')
ax1.plot(t.cpu().numpy(), op.reshape(-1).cpu().numpy(), c='C0', ls='--')
ax1.set_xlim(t.cpu().min(), t.cpu().max())
fig1.tight_layout()
fig1.savefig('d1/{:03d}-atau'.format(ii), dpi=200)
plt.close(fig1)
sys.exit()
#
#
#
#
# Generate syn data from the ground truth model
#
true_model = Lambda()
true_y_batches1 = []
true_yo_batches1 = []
true_y_batches2 = []
true_yo_batches2 = []
true_y_batches3 = []
true_yo_batches3 = []
with torch.no_grad():
if not args.cached:
for protocol in protocol_batches1:
true_y0 = gt_true_y0s[1]
true_model.set_fixed_form_voltage_protocol(protocol[:, 0], protocol[:, 1])
true_y = odeint(true_model, true_y0, t1, method='dopri5')
true_y_batches1.append(true_y)
true_yo_batches1.append(true_y[:, 0, -1] * (true_model._v(t1) + 86) +
torch.from_numpy(np.random.normal(0, noise_sigma, t1.cpu().numpy().shape)).to(device))
for protocol in protocol_batches2:
true_y0 = gt_true_y0s[1]
true_model.set_fixed_form_voltage_protocol(protocol[:, 0], protocol[:, 1])
true_y = odeint(true_model, true_y0, t2, method='dopri5')
true_y_batches2.append(true_y)
true_yo_batches2.append(true_y[:, 0, -1] * (true_model._v(t2) + 86) +
torch.from_numpy(np.random.normal(0, noise_sigma, t2.cpu().numpy().shape)).to(device))
if use_pt3:
for protocol in protocol_batches3:
true_y0 = gt_true_y0s[1]
true_model.set_fixed_form_voltage_protocol(protocol[:, 0], protocol[:, 1])
true_y = odeint(true_model, true_y0, t3, method='dopri5')
true_y_batches3.append(true_y)
true_yo_batches3.append(true_y[:, 0, -1] * (true_model._v(t3) + 86) +
torch.from_numpy(np.random.normal(0, noise_sigma, t3.cpu().numpy().shape)).to(device))
# ap 2hz for prediction
true_y0 = gt_true_y0s[1]
true_model.set_fixed_form_voltage_protocol(prediction_protocol[:, 0], prediction_protocol[:, 1])
prediction_y = odeint(true_model, true_y0, prediction_t, method='dopri5')
prediction_yo = prediction_y[:, 0, -1] * (true_model._v(prediction_t) + 86)
if args.cached:
v_batches = torch.load('d1/v.pt')
a_batches = torch.load('d1/a.pt')
dadt_batches = torch.load('d1/dadt.pt')
else:
###
### 'post-processing': estimating dadt and a
###
skip = 5 # not accurate for the first few time points for estimating derivatives
sparse = 11 # use less data points
dvdt_constant = torch.tensor([0]).to(device) # for now yes
e = torch.tensor([-86.]).to(device) # assume we know
g = torch.tensor([1.]).to(device) # assume we know
r_batches1 = [] # assume we know to the extent of which we can ignore its discrepancy
r_batches2 = []
if use_pt3:
r_batches3 = []
with torch.no_grad():
m = ODEFunc().to(device)
for protocol in protocol_batches1:
true_y0 = true_y0s[1]
m.set_fixed_form_voltage_protocol(protocol[:, 0], protocol[:, 1])
true_y = odeint(m, true_y0, t1, method='dopri5')
r_batches1.append(true_y[:, 0, 1])
for protocol in protocol_batches2:
true_y0 = true_y0s[1]
m.set_fixed_form_voltage_protocol(protocol[:, 0], protocol[:, 1])
true_y = odeint(m, true_y0, t2, method='dopri5')
r_batches2.append(true_y[:, 0, 1])
if use_pt3:
for protocol in protocol_batches3:
true_y0 = true_y0s[1]
m.set_fixed_form_voltage_protocol(protocol[:, 0], protocol[:, 1])
true_y = odeint(m, true_y0, t3, method='dopri5')
r_batches3.append(true_y[:, 0, 1])
v_batches1 = []
for protocol in protocol_batches1:
true_model.set_fixed_form_voltage_protocol(protocol[:, 0], protocol[:, 1])
v_batches1.append(true_model._v(t1)[0])
v_batches2 = []
for protocol in protocol_batches2:
true_model.set_fixed_form_voltage_protocol(protocol[:, 0], protocol[:, 1])
v_batches2.append(true_model._v(t2)[0])
if use_pt3:
v_batches3 = []
for protocol in protocol_batches3:
true_model.set_fixed_form_voltage_protocol(protocol[:, 0], protocol[:, 1])
v_batches3.append(true_model._v(t3)[0])
drdt_batches1 = []
drdt_batches2 = []
for r, v in zip(r_batches1, v_batches1):
k3 = m.p5 * torch.exp(m.p6 * v)
k4 = m.p7 * torch.exp(-m.p8 * v)
drdt = -k3 * r + k4 * (1. - r)
drdt_batches1.append(drdt)
for r, v in zip(r_batches2, v_batches2):
k3 = m.p5 * torch.exp(m.p6 * v)
k4 = m.p7 * torch.exp(-m.p8 * v)
drdt = -k3 * r + k4 * (1. - r)
drdt_batches2.append(drdt)
if use_pt3:
drdt_batches3 = []
for r, v in zip(r_batches3, v_batches3):
k3 = m.p5 * torch.exp(m.p6 * v)
k4 = m.p7 * torch.exp(-m.p8 * v)
drdt = -k3 * r + k4 * (1. - r)
drdt_batches3.append(drdt)
i_batches1 = true_yo_batches1
i_batches2 = true_yo_batches2
didt_batches1 = []
didt_batches2 = []
for j, (i, protocol) in enumerate(zip(i_batches1, protocol_batches1)):
ii = i.cpu().numpy().reshape(-1)
pt = protocol[:, 0]
pv = protocol[:, 1]
t_split = pt[np.append([False], pv[:-1] != pv[1:])]
t_split = np.append(t_split, pt[-1] + 1)
t_i = 0
io = []
didto = []
for t_f in t_split:
idx = np.where((t1 >= t_i) & (t1 < t_f))[0]
tfit = t1.cpu().numpy()[idx]
ifit = smooth(ii[idx], 61)[30:-30] # smoothing
spl = UnivariateSpline(tfit, ifit, k=3)
spl.set_smoothing_factor(0)
io = np.append(io, spl(tfit))
didto = np.append(didto, spl.derivative()(tfit))
t_i = t_f
i_batches1[j] = torch.from_numpy(io).to(device)
didt_batches1.append(torch.from_numpy(didto).to(device))
for j, (i, protocol) in enumerate(zip(i_batches2, protocol_batches2)):
ii = i.cpu().numpy().reshape(-1)
pt = protocol[:, 0]
pv = protocol[:, 1]
t_split = pt[np.append([False], pv[:-1] != pv[1:])]
t_split = np.append(t_split, pt[-1] + 1)
t_i = 0
io = []
didto = []
for t_f in t_split:
idx = np.where((t2 >= t_i) & (t2 < t_f))[0]
tfit = t2.cpu().numpy()[idx]
ifit = smooth(ii[idx], 61)[30:-30] # smoothing
spl = UnivariateSpline(tfit, ifit, k=3)
spl.set_smoothing_factor(0)
io = np.append(io, spl(tfit))
didto = np.append(didto, spl.derivative()(tfit))
t_i = t_f
i_batches2[j] = torch.from_numpy(io).to(device)
didt_batches2.append(torch.from_numpy(didto).to(device))
if use_pt3:
didt_batches3 = []
i_batches3 = true_yo_batches3
for j, (i, protocol) in enumerate(zip(i_batches3, protocol_batches3)):
ii = i.cpu().numpy().reshape(-1)
pt = protocol[:, 0]
pv = protocol[:, 1]
t_split = pt[np.append([False], pv[:-1] != pv[1:])]
t_split = np.append(t_split, pt[-1] + 1)
t_i = 0
io = []
didto = []
for t_f in t_split:
idx = np.where((t3 >= t_i) & (t3 < t_f))[0]
tfit = t3.cpu().numpy()[idx]
ifit = smooth(ii[idx], 61)[30:-30] # smoothing
spl = UnivariateSpline(tfit, ifit, k=3)
spl.set_smoothing_factor(0)
io = np.append(io, spl(tfit))
didto = np.append(didto, spl.derivative()(tfit))
t_i = t_f
i_batches3[j] = torch.from_numpy(io).to(device)
didt_batches3.append(torch.from_numpy(didto).to(device))
# Calculate a and dadt
a_batches1 = []
dadt_batches1 = []
for j, (i, r, v, drdt, didt) in enumerate(zip(i_batches1, r_batches1, v_batches1, drdt_batches1, didt_batches1)):
ii = i.reshape(-1)
a = ii / (g * r * (v - e))
if np.all(v.cpu().numpy() == v.cpu().numpy()[0]) or True: # all steps even different values
dvdt = dvdt_constant
else:
spl = UnivariateSpline(range(len(v.cpu().numpy())), v.cpu().numpy(), k=3, s=0)
dvdt = torch.from_numpy(spl.derivative()(range(len(v.cpu().numpy()))))
dadt = r ** (-1) * (
(didt / g - a * r * dvdt) / (v - e)
- a * drdt
)
a_batches1.append(a)
dadt_batches1.append(dadt)
a_batches2 = []
dadt_batches2 = []
for j, (i, r, v, drdt, didt) in enumerate(zip(i_batches2, r_batches2, v_batches2, drdt_batches2, didt_batches2)):
ii = i.reshape(-1)
a = ii / (g * r * (v - e))
if np.all(v.cpu().numpy() == v.cpu().numpy()[0]) or True: # all steps even different values
dvdt = dvdt_constant
else:
spl = UnivariateSpline(range(len(v.cpu().numpy())), v.cpu().numpy(), k=3, s=0)
dvdt = torch.from_numpy(spl.derivative()(range(len(v.cpu().numpy()))))
dadt = r ** (-1) * (
(didt / g - a * r * dvdt) / (v - e)
- a * drdt
)
a_batches2.append(a)
dadt_batches2.append(dadt)
if use_pt3:
a_batches3 = []
dadt_batches3 = []
for j, (i, r, v, drdt, didt) in enumerate(zip(i_batches3, r_batches3, v_batches3, drdt_batches3, didt_batches3)):
ii = i.reshape(-1)
a = ii / (g * r * (v - e))
if np.all(v.cpu().numpy() == v.cpu().numpy()[0]) or True: # all steps even different values
dvdt = dvdt_constant
else:
spl = UnivariateSpline(range(len(v.cpu().numpy())), v.cpu().numpy(), k=3, s=0)
dvdt = torch.from_numpy(spl.derivative()(range(len(v.cpu().numpy()))))
dadt = r ** (-1) * (
(didt / g - a * r * dvdt) / (v - e)
- a * drdt
)
a_batches3.append(a)
dadt_batches3.append(dadt)
# To tensors
for i, (v, a, dadt) in enumerate(zip(v_batches1, a_batches1, dadt_batches1)):
v_batches1[i] = v[mask1,...][skip::sparse]
a_batches1[i] = a[mask1,...][skip::sparse]
dadt_batches1[i] = dadt[mask1,...][skip::sparse]
for i, (v, a, dadt) in enumerate(zip(v_batches2, a_batches2, dadt_batches2)):
v_batches2[i] = v[mask2,...][skip::sparse]
a_batches2[i] = a[mask2,...][skip::sparse]
dadt_batches2[i] = dadt[mask2,...][skip::sparse]
if use_pt3:
for i, (v, a, dadt) in enumerate(zip(v_batches3, a_batches3, dadt_batches3)):
v_batches3[i] = v[mask3s[i],...][skip::sparse]
a_batches3[i] = a[mask3s[i],...][skip::sparse]
dadt_batches3[i] = dadt[mask3s[i],...][skip::sparse]
if use_pt3:
v_batches = torch.cat(v_batches1 + v_batches2 + v_batches3).to(device)
a_batches = torch.cat(a_batches1 + a_batches2 + a_batches3).to(device)
dadt_batches = torch.cat(dadt_batches1 + dadt_batches2 + dadt_batches3).to(device)
else:
v_batches = torch.cat(v_batches1 + v_batches2).to(device)
a_batches = torch.cat(a_batches1 + a_batches2).to(device)
dadt_batches = torch.cat(dadt_batches1 + dadt_batches2).to(device)
# Cache it
torch.save(v_batches, 'd1/v.pt')
torch.save(a_batches, 'd1/a.pt')
torch.save(dadt_batches, 'd1/dadt.pt')
if args.debug:
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
fig = plt.figure()
ax = plt.axes(projection='3d')
ax.scatter(v_batches.reshape(-1).detach().cpu().numpy(), a_batches.reshape(-1).detach().cpu().numpy(),
dadt_batches.reshape(-1).detach().cpu().numpy())
ax.set_xlabel('V')
ax.set_ylabel('a')
ax.set_zlabel('da/dt')
plt.show()
#sys.exit()
###
###
###
if __name__ == '__main__':
ii = 0
func = ODEFunc().to(device)
#"""
###
### Pretrain
###
loss_fn = torch.nn.MSELoss(reduction='sum')
x1 = torch.reshape(torch.linspace(-140, 80, 50).to(device), (-1, 1))
xx1 = x1 / func.vrange
x2 = torch.reshape(torch.linspace(0, 1, 50).to(device), (-1, 1))
X1, X2 = torch.meshgrid(x1.reshape(-1), x2.reshape(-1))
XX1, X2 = torch.meshgrid(xx1.reshape(-1), x2.reshape(-1))
k1 = true_model.p1 * torch.exp(true_model.p2 * X1) * 0.9 # NOTE: shift the pretrain
k2 = true_model.p3 * torch.exp(-true_model.p4 * X1) * 1.1
Y = k1 * (torch.tensor([1]).to(device) - X2) - k2 * X2
XX = torch.stack([XX1.reshape(-1), X2.reshape(-1)]).T
YY = Y.reshape(-1)
opt = optim.Adam(func.net.parameters(), lr=0.001)#, weight_decay=1e-5)
for _ in range(1000):
p = func.net(XX).to(device) / func.netscale
loss = loss_fn(p.reshape(-1), YY)
opt.zero_grad()
loss.backward()
opt.step()
if args.debug:
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
fig = plt.figure()
ax = plt.axes(projection='3d')
ax.plot_surface(X1.cpu().numpy(), X2.cpu().numpy(), Y.cpu().numpy())
pred = func.net(XX) / func.netscale
ax.plot_surface(X1.cpu().numpy(), X2.cpu().numpy(), pred.reshape(50, 50).detach().cpu().numpy())
plt.show()
#sys.exit()
###"""
###
### To predict
###
x1 = torch.reshape(torch.linspace(-130, 70, 50).to(device), (-1, 1))
xx1 = x1 / func.vrange
x2 = torch.reshape(torch.linspace(0, 1, 50).to(device), (-1, 1))
X1, X2 = torch.meshgrid(x1.reshape(-1), x2.reshape(-1))
XX1, X2 = torch.meshgrid(xx1.reshape(-1), x2.reshape(-1))
XX = torch.stack([XX1.reshape(-1), X2.reshape(-1)]).T
# pretrained
with torch.no_grad():
pretrain_pred = func.net(XX) / func.netscale
###
###
### Training
###
#'''
x_av = torch.stack([v_batches.reshape(-1) / func.vrange, a_batches.reshape(-1)]).T
y_dadt = dadt_batches.reshape(-1)
# Keep only 0 < a < 1
to_keep = (x_av[:, 1] > 0) & (x_av[:, 1] < 1)
x_av = x_av[to_keep, :]
y_dadt = y_dadt[to_keep]
opt = optim.Adam(func.net.parameters(), lr=0.001)
# gamma = decaying factor
scheduler = StepLR(opt, step_size=100, gamma=0.9) # 0.9**(4000steps/100) ~ 0.016
for itr in range(4000):
p = func.net(x_av.float()).to(device) / func.netscale
loss = loss_fn(p.reshape(-1), y_dadt.float())
opt.zero_grad()
loss.backward()
opt.step()
# Decay Learning Rate
scheduler.step()
if (itr % 400) == 0:
print('Iter', itr)
#'''
with torch.no_grad():
true_y0 = true_y0s[1]
func.set_fixed_form_voltage_protocol(prediction_protocol[:, 0], prediction_protocol[:, 1])
pred_y = odeint(func, true_y0, prediction_t).to(device)
pred_yo = pred_y[:, 0, 0] * pred_y[:, 0, 1] * (func._v(prediction_t).to(device) + 86)
loss = torch.mean(torch.abs(pred_yo - prediction_yo))
print('Pretraining | Total Loss {:.6f}'.format(loss.item()))
fig1, ax1 = plt.subplots(1, 1, figsize=(6, 4))
ax1.set_xlabel('t')
ax1.set_ylabel('i')
ax1.plot(prediction_t.cpu().numpy(), prediction_yo.reshape(-1).cpu().numpy(), 'g-')
ax1.plot(prediction_t.cpu().numpy(), pred_yo.reshape(-1).cpu().numpy(), 'b--')
ax1.set_xlim(prediction_t.cpu().min(), prediction_t.cpu().max())
fig1.tight_layout()
fig1.savefig('d1/{:03d}'.format(ii), dpi=200)
plt.close(fig1)
ax = plt.axes(projection='3d')
ax.scatter(v_batches.reshape(-1).detach().cpu().numpy(), a_batches.reshape(-1).detach().cpu().numpy(),
dadt_batches.reshape(-1).detach().cpu().numpy())
pred = func.net(XX) / func.netscale
ax.plot_surface(X1.cpu().numpy(), X2.cpu().numpy(), pred.reshape(50, 50).detach().cpu().numpy(), color='C1')
ax.set_xlabel('V')
ax.set_ylabel('a')
ax.set_zlabel('da/dt')
plt.savefig('d1/rates3d-{:03d}'.format(ii), dpi=200)
#plt.show()
plt.close()
ii += 1
###
###
# Save model
torch.save(func.state_dict(), 'd1/model-state-dict.pt')
torch.save(func, 'd1/model-entire.pt')
# To load model:
# func = TheModelClass(*args, **kwargs)
# func.set_fixed_form_voltage_protocol(protocol[:, 0], protocol[:, 1])
# func.load_state_dict(torch.load('d1/model-state-dict.pt'))
# func.eval()
#
# Or:
# func = torch.load('d1/model-entire.pt')
# func.eval()
| [
"chonloklei@gmail.com"
] | chonloklei@gmail.com |
8765c2225d5837e8d7fd36b6dc4044e605ab5686 | 039a274d8a8bfbfb90b3c884024edf8c18507150 | /examples/logisticRegression.py | 3fecc9725f6e1545a2e1b879c0744400da271f67 | [
"MIT"
] | permissive | JayceeLee/TheanoProject | 1e33ae2a58a188cfce6c5bcbd8a2f6f9fbd36a0d | be1f5f09aa84d64ad3df7b798cf6ff74a08bf3b7 | refs/heads/master | 2021-05-11T09:12:50.278105 | 2017-04-09T08:55:03 | 2017-04-09T08:55:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,248 | py | #! /usr/bin/python3
# -*- encoding: utf-8 -*-
from __future__ import print_function, unicode_literals
import numpy as np
import theano
import theano.tensor as T
__author__ = 'fyabc'
RNG = np.random
def main():
N = 400 # training sample size
feats = 784 # number of input variables
# generate a dataset: D = (input_values, target_class)
# [NOTE]: randint is [low, high)
D = (RNG.randn(N, feats), RNG.randint(size=N, low=0, high=2))
trainingSteps = 10000
# Declare Theano symbolic variables
x = T.dmatrix('x')
y = T.dvector('y')
# initialize the weight vector w randomly
#
# this and the following bias variable b
# are shared so they keep their values
# between training iterations (updates)
w = theano.shared(RNG.randn(feats), name='w')
# initialize the bias term
b = theano.shared(0., name='b')
print('Initial model:')
print(w.get_value())
print(b.get_value())
# Construct Theano expression graph
p_1 = 1 / (1 + T.exp(-T.dot(x, w) - b)) # Probability that target = 1
prediction = p_1 > 0.5 # The prediction thresholded
xCE = -y * T.log(p_1) - (1 - y) * T.log(1 - p_1) # Cross-entropy loss function
cost = xCE.mean() + 0.01 * (w ** 2).sum() # The cost to minimize
gw, gb = T.grad(cost, [w, b]) # Compute the gradient of the cost
# w.r.t weight vector w and
# bias term b
# (we shall return to this in a
# following section of this tutorial)
# Compile
train = theano.function(
inputs=[x, y],
outputs=[prediction, xCE],
updates=((w, w - 0.1 * gw), (b, b - 0.1 * gb)))
predict = theano.function(inputs=[x], outputs=prediction)
# Train
for i in range(trainingSteps):
pred, err = train(D[0], D[1])
# print('Step %d: pred = %s, err = %s' % (i, str(pred), str(err)))
print('Step %d' % (i,))
print("Final model:")
print(w.get_value())
print(b.get_value())
print("target values for D:")
print(D[1])
print("prediction on D:")
print(predict(D[0]))
print("Error:")
print(D[1] - predict(D[0]))
if __name__ == '__main__':
main()
| [
"fyabc@mail.ustc.edu.cn"
] | fyabc@mail.ustc.edu.cn |
f3edbdc52fa052152bcf4d6e714ac29123c16186 | 2eb8e3606a8df45d432fdf56ee9aa24942304526 | /rocketgram/api/inline_query.py | cc10fabfbdd6a9c87cdb80a8c3c80a24aeaecdd4 | [
"MIT"
] | permissive | KulZlaK/rocketgram | 22848293980ba44dd9fb63db28f34be36c437c84 | 09587deecffcd7ccc9529f4d9e51221888870f23 | refs/heads/master | 2022-07-27T23:25:51.254444 | 2020-05-15T21:36:57 | 2020-05-15T21:36:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 871 | py | # Copyright (C) 2015-2020 by Vd.
# This file is part of Rocketgram, the modern Telegram bot framework.
# Rocketgram is released under the MIT License (see LICENSE).
from dataclasses import dataclass
from typing import Optional
from .location import Location
from .user import User
@dataclass(frozen=True)
class InlineQuery:
    """\
    Represents InlineQuery object:
    https://core.telegram.org/bots/api#inlinequery

    Field names that differ from the Bot API payload:
    id -> query_id
    from -> user
    """

    query_id: str
    user: User
    location: Optional[Location]
    query: str
    offset: str

    @classmethod
    def parse(cls, data: dict) -> Optional['InlineQuery']:
        """Build an InlineQuery from a Bot API dict; None in, None out."""
        if data is None:
            return None
        sender = User.parse(data['from'])
        where = Location.parse(data.get('location'))
        return cls(query_id=data['id'], user=sender, location=where,
                   query=data['query'], offset=data['offset'])
| [
"vd@"
] | vd@ |
17abd873c71413e3e69a0166b662cadde12a971d | fbbe424559f64e9a94116a07eaaa555a01b0a7bb | /Tensorflow_OpenCV_Nightly/source/tensorflow/contrib/crf/__init__.py | 80a31cc3341a2c7cf1867eef2ef3e463a325667f | [
"MIT"
] | permissive | ryfeus/lambda-packs | 6544adb4dec19b8e71d75c24d8ed789b785b0369 | cabf6e4f1970dc14302f87414f170de19944bac2 | refs/heads/master | 2022-12-07T16:18:52.475504 | 2022-11-29T13:35:35 | 2022-11-29T13:35:35 | 71,386,735 | 1,283 | 263 | MIT | 2022-11-26T05:02:14 | 2016-10-19T18:22:39 | Python | UTF-8 | Python | false | false | 1,633 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Linear-chain CRF layer.
See the @{$python/contrib.crf} guide.
@@crf_sequence_score
@@crf_log_norm
@@crf_log_likelihood
@@crf_unary_score
@@crf_binary_score
@@CrfForwardRnnCell
@@viterbi_decode
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.crf.python.ops.crf import _lengths_to_masks
from tensorflow.contrib.crf.python.ops.crf import crf_binary_score
from tensorflow.contrib.crf.python.ops.crf import crf_log_likelihood
from tensorflow.contrib.crf.python.ops.crf import crf_log_norm
from tensorflow.contrib.crf.python.ops.crf import crf_sequence_score
from tensorflow.contrib.crf.python.ops.crf import crf_unary_score
from tensorflow.contrib.crf.python.ops.crf import CrfForwardRnnCell
from tensorflow.contrib.crf.python.ops.crf import viterbi_decode
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
| [
"ryfeus@gmail.com"
] | ryfeus@gmail.com |
b16e2983b29295ca79df7a58ce01944eb1bc078d | 3879d1ca43c573c209f962182cd1e7f7fe978fbf | /leetcode/207. Course Schedule/207.py | 38345c4258f98161245c91f343273e5b8b1ee969 | [] | no_license | DoctorLai/ACM | 34a5600a5adf22660c5d81b2d8b7a358be537ecf | aefa170f74c55c1230eb6f352770512b1e3f469e | refs/heads/master | 2023-09-01T02:13:01.604508 | 2023-08-31T15:42:07 | 2023-08-31T15:42:07 | 146,173,024 | 62 | 18 | null | 2020-10-11T13:19:57 | 2018-08-26T11:00:36 | C++ | UTF-8 | Python | false | false | 979 | py | # https://helloacm.com/teaching-kids-programming-topological-sort-algorithm-on-directed-graphs-course-schedule-bfs/
# https://leetcode.com/problems/course-schedule/
# MEDIUM, TOPOLOGICAL SORT, GRAPH
class Node(object):
    """Bookkeeping record for one course: in-degree plus outgoing edges."""
    def __init__(self):
        self.inDegrees = 0   # number of prerequisites not yet satisfied
        self.outNodes = []   # courses that list this course as a prerequisite

class Solution(object):
    def canFinish(self, numCourses, prerequisites):
        """Return True iff every course can be taken, i.e. the prerequisite
        graph has no cycle (Kahn's topological-sort algorithm)."""
        graph = defaultdict(Node)
        edge_count = 0
        for course, prereq in prerequisites:
            graph[course].inDegrees += 1
            graph[prereq].outNodes.append(course)
            edge_count += 1
        # Seed the queue with every course that has no prerequisites.
        ready = deque(cid for cid, node in graph.items() if node.inDegrees == 0)
        relaxed = 0
        while ready:
            current = ready.popleft()
            for dependant in graph[current].outNodes:
                node = graph[dependant]
                node.inDegrees -= 1
                relaxed += 1
                if node.inDegrees == 0:
                    ready.append(dependant)
        # The graph is acyclic exactly when every edge was relaxed once.
        return relaxed == edge_count
| [
"noreply@github.com"
] | DoctorLai.noreply@github.com |
963ad59463139f848c367bf564d5b5d04be6a85a | d802cb112c080f99fc800effe23b2a7ca55a694b | /tests/test_updater.py | d3e372ada57fb325e58edc6929cffb630c5c5bbd | [
"Apache-2.0"
] | permissive | zjj2wry/dvc | 211ce55c00856abc7d08408c79b93b47cff538a8 | c9df567938eefd7b1f5b094c15f04e5ce704aa36 | refs/heads/master | 2020-04-17T20:34:00.368060 | 2019-01-21T22:19:15 | 2019-01-21T22:19:15 | 166,911,251 | 0 | 0 | Apache-2.0 | 2019-01-22T02:13:23 | 2019-01-22T02:13:23 | null | UTF-8 | Python | false | false | 1,148 | py | import os
from tests.basic_env import TestDvc
class TestUpdater(TestDvc):
    """Tests for the dvc updater (new-release check / notification)."""
    def test(self):
        # NOTE: only test on travis CRON to avoid generating too much logs
        travis = os.getenv('TRAVIS') == 'true'
        if not travis:
            return
        cron = os.getenv('TRAVIS_EVENT_TYPE') == 'cron'
        if not cron:
            return
        env = os.environ.copy()
        if os.getenv('CI'):
            # Drop the CI flag for the duration of this test —
            # presumably the updater short-circuits under CI; confirm.
            del os.environ['CI']
        # Call check() several times; NOTE(review): likely exercises the
        # updater's repeat/throttle behaviour — confirm against Updater.
        self.dvc.updater.check()
        self.dvc.updater.check()
        self.dvc.updater.check()
        # Restore the environment captured above.
        os.environ = env.copy()
    def test_check_version_newer(self):
        # Current version ahead of the latest release -> not outdated.
        self.dvc.updater.latest = "0.20.8"
        self.dvc.updater.current = "0.21.0"
        self.assertFalse(self.dvc.updater._is_outdated())
    def test_check_version_equal(self):
        # Same version on both sides -> not outdated.
        self.dvc.updater.latest = "0.20.8"
        self.dvc.updater.current = "0.20.8"
        self.assertFalse(self.dvc.updater._is_outdated())
    def test_check_version_outdated(self):
        # Latest release ahead of the current version -> outdated.
        self.dvc.updater.latest = "0.21.0"
        self.dvc.updater.current = "0.20.8"
        self.assertTrue(self.dvc.updater._is_outdated())
| [
"kupruser@gmail.com"
] | kupruser@gmail.com |
c6a662e4deb03a754003a41447bcac49368a9cfa | a9243f735f6bb113b18aa939898a97725c358a6d | /0.11/_downloads/rt_feedback_server.py | 292a32b002948df77bf8eebf9e06156a38d1071c | [] | permissive | massich/mne-tools.github.io | 9eaf5edccb4c35831400b03278bb8c2321774ef2 | 95650593ba0eca4ff8257ebcbdf05731038d8d4e | refs/heads/master | 2020-04-07T08:55:46.850530 | 2019-09-24T12:26:02 | 2019-09-24T12:26:02 | 158,233,630 | 0 | 0 | BSD-3-Clause | 2018-11-19T14:06:16 | 2018-11-19T14:06:16 | null | UTF-8 | Python | false | false | 4,945 | py | """
==============================================
Real-time feedback for decoding :: Server Side
==============================================
This example demonstrates how to setup a real-time feedback
mechanism using StimServer and StimClient.
The idea here is to display future stimuli for the class which
is predicted less accurately. This allows on-demand adaptation
of the stimuli depending on the needs of the classifier.
To run this example, open ipython in two separate terminals.
In the first, run rt_feedback_server.py and then wait for the
message
RtServer: Start
Once that appears, run rt_feedback_client.py in the other terminal
and the feedback script should start.
All brain responses are simulated from a fiff file to make it easy
to test. However, it should be possible to adapt this script
for a real experiment.
"""
# Author: Mainak Jas <mainak@neuro.hut.fi>
#
# License: BSD (3-clause)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import preprocessing
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.cross_validation import train_test_split
from sklearn.metrics import confusion_matrix
import mne
from mne.datasets import sample
from mne.realtime import StimServer
from mne.realtime import MockRtClient
from mne.decoding import EpochsVectorizer, FilterEstimator
print(__doc__)
# Load fiff file to simulate data
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
raw = mne.io.Raw(raw_fname, preload=True)
# Instantiating stimulation server
# The with statement is necessary to ensure a clean exit
with StimServer('localhost', port=4218) as stim_server:
# The channels to be used while decoding
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
stim=True, exclude=raw.info['bads'])
rt_client = MockRtClient(raw)
# Constructing the pipeline for classification
filt = FilterEstimator(raw.info, 1, 40)
scaler = preprocessing.StandardScaler()
vectorizer = EpochsVectorizer()
clf = SVC(C=1, kernel='linear')
concat_classifier = Pipeline([('filter', filt), ('vector', vectorizer),
('scaler', scaler), ('svm', clf)])
stim_server.start(verbose=True)
# Just some initially decided events to be simulated
# Rest will decided on the fly
ev_list = [4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4]
score_c1, score_c2, score_x = [], [], []
for ii in range(50):
# Tell the stim_client about the next stimuli
stim_server.add_trigger(ev_list[ii])
# Collecting data
if ii == 0:
X = rt_client.get_event_data(event_id=ev_list[ii], tmin=-0.2,
tmax=0.5, picks=picks,
stim_channel='STI 014')[None, ...]
y = ev_list[ii]
else:
X_temp = rt_client.get_event_data(event_id=ev_list[ii], tmin=-0.2,
tmax=0.5, picks=picks,
stim_channel='STI 014')
X_temp = X_temp[np.newaxis, ...]
X = np.concatenate((X, X_temp), axis=0)
time.sleep(1) # simulating the isi
y = np.append(y, ev_list[ii])
# Start decoding after collecting sufficient data
if ii >= 10:
# Now start doing rtfeedback
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size=0.2,
random_state=7)
y_pred = concat_classifier.fit(X_train, y_train).predict(X_test)
cm = confusion_matrix(y_test, y_pred)
score_c1.append(float(cm[0, 0]) / sum(cm, 1)[0] * 100)
score_c2.append(float(cm[1, 1]) / sum(cm, 1)[1] * 100)
# do something if one class is decoded better than the other
if score_c1[-1] < score_c2[-1]:
print("We decoded class RV better than class LV")
ev_list.append(3) # adding more LV to future simulated data
else:
print("We decoded class LV better than class RV")
ev_list.append(4) # adding more RV to future simulated data
# Clear the figure
plt.clf()
# The x-axis for the plot
score_x.append(ii)
# Now plot the accuracy
plt.plot(score_x[-5:], score_c1[-5:])
plt.hold(True)
plt.plot(score_x[-5:], score_c2[-5:])
plt.xlabel('Trials')
plt.ylabel('Classification score (% correct)')
plt.title('Real-time feedback')
plt.ylim([0, 100])
plt.xticks(score_x[-5:])
plt.legend(('LV', 'RV'), loc='upper left')
plt.show()
| [
"larson.eric.d@gmail.com"
] | larson.eric.d@gmail.com |
666ee0df3d1d96fec23aa854fea18f2dd8e1f81e | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/unearth.py | 5d8150eeae2ec61b3ceceec28bbd2a0fcf23b71c | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 64 | py | ii = [('CoolWHM2.py', 1), ('CarlTFR.py', 1), ('AinsWRR3.py', 2)] | [
"prabhjyotsingh95@gmail.com"
] | prabhjyotsingh95@gmail.com |
2ce249532f1a3348bba6d0f7f78e80def67cb3a9 | a39adde99c75c2bf9b25b59fb0d6769196e74a63 | /datasets/hscic/load.py | ae0c205010a46e9d2e59aa6ad24fd23ad88fdd39 | [
"MIT"
] | permissive | uk-gov-mirror/nhsengland.publish-o-matic | 51624d52df562089f7acf4ac91aabcb37ac6d63b | dc8f16cb83a2360989afa44d887e63b5cde6af29 | refs/heads/master | 2021-06-09T06:17:50.473307 | 2016-08-18T10:29:50 | 2016-08-18T10:29:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,016 | py | """
Publish HSCIC Indicators to CKAN !
"""
import logging
import sys
import ffs
import slugify
import dc
from publish.lib.metadata import get_resource_path
from publish.lib.helpers import download_file, to_markdown, filename_for_resource
from publish.lib.upload import Uploader
from datasets.hscic.curate import Curator
logging.basicConfig(filename='publish.log',
format='%(asctime)s %(levelname)s: %(message)s',
level=logging.DEBUG)
DATA_DIR = None
# TODO: Change this to just strip punctuation and check length
def clean_tag(t):
    """Normalise one raw keyword into a list of usable tag strings.

    Strips brackets and colons, expands a few known abbreviations, splits
    multi-valued keywords on ';' or '/', and drops any candidate of three
    characters or fewer.  Always returns a (possibly empty) list.
    """
    def valid_only(tags):
        return [tag for tag in tags if len(tag) > 2]

    # Order matters: 'A+E' and 'ST&' must be handled before the bare 'A&'.
    for old, new in (('(', ''), (')', ''), (':', ''),
                     ('A+E', 'A and E'), ('ST&', ''), ('A&', 'A and')):
        t = t.replace(old, new)
    if ';' in t:
        return valid_only([piece.strip() for piece in t.split(';')])
    if '/' in t:
        return valid_only([piece.strip() for piece in t.split('/')])
    return valid_only([t.replace('&', '-and-')])
def publish_indicators(start_from=0):
global DATA_DIR
u = Uploader("hscic-indicators")
indicatorfile = ffs.Path(get_resource_path('indicators.json'))
logging.info('Loading {}'.format(indicatorfile))
indicators = indicatorfile.json_load()
logging.info('Processing {} indicators'.format(len(indicators)))
logging.info('Starting from record {}'.format(start_from))
for indicator in indicators[start_from:]:
try:
resources = []
for s in indicator['sources']:
resource = {
"description": s['description'],
"name": s['url'].split('/')[-1],
"format": s['filetype'].upper(),
"url": s["url"]
}
"""
filename = filename_for_resource(resource)
path = DATA_DIR / filename
download_file(resource['url'], path)
print "Uploading to S3"
url = u.upload(path)
resource['url'] = url
"""
resources.append(resource)
if not 'indicators' in indicator['keyword(s)']:
indicator['keyword(s)'].append('indicators')
title = indicator['title']
c = Curator(indicator)
groups = c.get_groups()
if not groups:
print "Not in a group"
continue
prefix = c.get_title_prefix()
if prefix:
title = u"{} - {}".format(prefix, title)
tags = []
if 'keyword(s)' in dataset:
dataset['keyword(s)'] = sum([clean_tag(k) for k in indicator.get('keyword(s)',[]) if len(k) > 2], [])
tags = dc.tags(*dataset['keywords'])
print '+ Create/Update dataset {}'.format(indicator['title'])
dc.Dataset.create_or_update(
name=slugify.slugify(title).lower()[:99],
title=title,
state='active',
licence_id='ogl',
notes=to_markdown(indicator['definition'].encode('utf8')),
url='https://indicators.ic.nhs.uk/webview/',
tags=dc.tags(tags),
resources=resources,
owner_org='hscic'
)
if groups:
try:
dataset = dc.ckan.action.package_show(id=slugify.slugify(title)[:99].lower())
except:
continue
for group in groups:
group = group.lower()
if [g for g in dataset.get('groups', []) if g['name'] == group]:
print 'Already in group', g['name']
else:
dc.ckan.action.member_create(
id=group,
object=dataset_name,
object_type='package',
capacity='member'
)
except Exception as ex:
import traceback
traceback.print_exc()
import sys; sys.exit(1)
u.close()
return
def publish_datasets(start_from=0):
global DATA_DIR
u = Uploader("hscic-datasets")
datasetfile = ffs.Path(get_resource_path('datasets.json'))
logging.info('Loading {}'.format(datasetfile))
datasets = datasetfile.json_load()
logging.info('Processing {} indicators'.format(len(datasets)))
logging.info('Starting from record {}'.format(start_from))
import random
total = len(datasets) - start_from
current = 1
for dataset in datasets[start_from:]:
print "STATUS: {}/{}".format(current, total)
current += 1
#print u'Processing {}'.format(dataset['title'])
#print ' ID: {}'.format(dataset['id'])
try:
resources = []
for s in dataset['sources']:
resource = {
"description": s['description'],
"name": s['url'].split('/')[-1],
"format": s['filetype'],
"url": s["url"]
}
"""
filename = filename_for_resource(resource)
path = DATA_DIR / filename
download_file(resource['url'], path)
resource['url'] = u.upload(path)
"""
resources.append(resource)
if not resources:
print "Dataset {} does not have any resources".format(dataset['id'])
continue
title = dataset['title']
c = Curator(dataset)
groups = c.get_groups()
if not groups:
print "Not in a group"
continue
prefix = c.get_title_prefix()
if prefix:
title = u"{} - {}".format(prefix, title)
name = slugify.slugify(title).lower()[0:99]
# Call cleantags on each work and expect back a list, which is then flattened
tags = []
if 'keywords' in dataset:
dataset['keywords'] = sum([clean_tag(k) for k in dataset.get('keywords',[]) if len(k) > 2], [])
tags = dc.tags(*dataset['keywords'])
notes = dataset['summary']
if 'key_facts' in dataset:
notes += '\n\n<h2>KEY FACTS:</h2>\n' + ''.join(dataset['key_facts'])
notes = to_markdown(notes)
name = 'hscic_dataset_{}'.format(dataset['id'])
dc.Dataset.create_or_update(
name=name,
title=title,
state='active',
licence_id='ogl',
notes=notes,
url=dataset['source'],
tags=tags,
resources=resources,
owner_org='hscic'
)
if groups:
try:
dataset = dc.ckan.action.package_show(id=name)
except:
continue
for group in groups:
group = group.lower()
if [g for g in dataset.get('groups', []) if g['name'] == group]:
print 'Already in group', g['name']
else:
dc.ensure_group(group)
dc.ckan.action.member_create(
id=group,
object=dataset['id'],
object_type='package',
capacity='member'
)
except Exception as ex:
import traceback
traceback.print_exc()
u.close()
return
def load(workspace):
    """Entry point: publish all HSCIC indicators and datasets to CKAN.

    workspace -- scratch directory; a ``data`` subdirectory is created in
    it and exposed to the publish functions via the module-global DATA_DIR.

    Returns 0 so callers can use the result as a process exit status.
    """
    global DATA_DIR
    DATA_DIR = ffs.Path(workspace) / "data"
    DATA_DIR.mkdir()
    # The 'hscic' publisher organisation must exist before datasets are
    # attached to it.
    dc.ensure_publisher('hscic')
    publish_indicators(0)
    publish_datasets(0)
    return 0
| [
"ross@servercode.co.uk"
] | ross@servercode.co.uk |
d25bc04e3fb0cd7456802691f219db394d38c4f8 | 8600ea155f279e5a8dfe5a1926038511f6b6a7ea | /l10n_chart_uk_minimal/__terp__.py | b4881867bb86055b6eb6e5cb179dee319e413791 | [] | no_license | MarkNorgate/addons-EAD | c2fff89ab16fce3ba19fbe433ee5863705a6f4e5 | 840f28642b5d328e4b86839c413e5164622295a5 | refs/heads/master | 2020-04-23T22:11:00.164438 | 2015-07-22T12:24:53 | 2015-07-22T12:24:53 | 39,501,011 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,690 | py | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#
# This module provides a minimal UK chart of accounts for building upon further
# Open ERP's default currency and accounts are remapped to this chart
#
# This module works for Open ERP 4.1.0 (and, assumed, onwards).
# This module does not work for Open ERP 4.0.2 and before.
#
# VAT is structured thus:
# - the user company is assumed to be non-VAT exempt (easy to modify, however)
# - categories OVATS (Standard), OVATR (Reduced), OVATZ (Zero) should be
# assigned to the customer taxes section of products (depending on the product)
# - categories IVATS (Standard), IVATR (Reduced), IVATZ (Zero) should be
# assigned to the supplier taxes section of products (depending on the product)
# - categories OVATX (eXempt), OVATO (Out of scope), or nothing at all should be
# assigned to default tax field of customers (depending on the customer)
# - customer categorization trumps product categorization (unchanged Tiny functionality)
# - on purchases, upon invoicing
# - the base amount (ex-VAT) appears in the appropriate input base category (S, R, Z)
# - the VAT amount appears in the appropriate input VAT category (S, R)
# - invoice lines can be traced in these VAT categories
# - refunds of invoices are deducted from the input category
# - on sales, upon invoicing
# - the base amount (ex-VAT) appears in the appropriate output base category (S, R, Z, X, O)
# - the VAT amount appears in the appropriate output VAT category (S, R)
# - invoice lines can be traced in these VAT categories
# - refunds of invoices are deducted from the output category
#
# This forms a basis for accrual tax accounting
# Cash tax accounting can be accommodated with further processing in Open ERP
#
# Status beta 0.92 - tested on Open ERP 4.1.0
# Status beta 0.93 - tested on Open ERP 4.1.0
# - trivial change to depend only on 'account'
# (seemed to be important for importing with no demo data)
# Status 1.0 - tested on Open ERP 4.1.0, 4.0.3
# - COGS account type fixed
#
{
'name': 'United Kingdom - minimal',
'version': '1.1',
'category': 'Localisation/Account Charts',
'description': """This is the base module to manage the accounting chart for United Kingdom in Open ERP.""",
'author': 'Seath Solutions Ltd',
'website': 'http://www.seathsolutions.com',
'depends': ['base', 'account', 'base_iban', 'base_vat', 'account_chart'],
'init_xml': [],
'update_xml': [
'account_types.xml',
'account_chart.xml',
'account_tax.xml',
'l10n_uk_wizard.xml'
],
'demo_xml': [],
'installable': True,
'certificate': '0064392144797',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"mark.norgate@affinity-digital.com"
] | mark.norgate@affinity-digital.com |
b77b296b642288c01694999600780b625adcc2da | 7ab22a9b23502f6cf3dafa02ac68067f7aacca56 | /test/test1.py | 2b7029a6553abd58dff1dee5efe0ddcef5609e88 | [] | no_license | to2bage/flask_first | 62ba6e40f0df7b46cd3f9bff91a8a0a1d41df9be | 098f1d81f516c7d369375cf9c48ec8bb6efa3a80 | refs/heads/master | 2022-11-22T01:29:53.526222 | 2020-07-24T06:51:50 | 2020-07-24T06:51:50 | 281,608,004 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | # webserver的概念 java php nginx apache tomcat iis
# app.run()使用的是单进程, 单线程处理请求
#Local: 使用字典的方式实现的线程隔离
#LocalStack: 封装了Local, 实现了线程隔离的栈
"""
from werkzeug.local import Local
from threading import Thread
import time
my_obj = Local()
my_obj.b = 1
def worker():
my_obj.b = 2
print("in new thread b is ", my_obj.b)
new_t = Thread(target=worker)
new_t.start()
time.sleep(1)
print("in main thread b is ", my_obj.b)
"""
from werkzeug.local import LocalStack
s = LocalStack()
s.push(1)
print(s.top) | [
"to2bage@hotmail.com"
] | to2bage@hotmail.com |
130cb2f1bd3d8c3aa0903d0a6039d875dae49c00 | 6f1e1c378997bf76942ce6e203e720035169ce27 | /sort/8-counting-sort.py | 731b1a6a22acd113ba2c815055e3f06458f47a35 | [
"MIT"
] | permissive | yuenliou/leetcode | a489b0986b70b55f29d06c2fd7545294ba6e7ee5 | e8a1c6cae6547cbcb6e8494be6df685f3e7c837c | refs/heads/main | 2021-06-16T07:47:39.103445 | 2021-05-11T09:16:15 | 2021-05-11T09:16:15 | 306,536,421 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,374 | py | #!/usr/local/bin/python3.7
# -*- coding: utf-8 -*-
def countingSort(arr):
    """Sort a list of non-negative integers in place using counting sort.

    How it works: tally how many times each value occurs in a ``counts``
    table indexed by the value itself, then rewrite ``arr`` by emitting
    each value ``counts[value]`` times in increasing order.

    Counting sort requires the input to be integers with a known, bounded
    range; the table is sized max(arr) + 1, so it is only efficient when
    the values are reasonably compact (values must be >= 0).

    Returns ``arr`` (the same list object, mutated in place).
    """
    # Guard the empty list: max() raises ValueError on an empty sequence.
    if not arr:
        return arr
    counts = [0] * (max(arr) + 1)
    for val in arr:
        counts[val] += 1
    # Rewrite arr from the tallies, smallest value first.
    sorted_index = 0
    for value, count in enumerate(counts):
        while count > 0:
            arr[sorted_index] = value
            sorted_index += 1
            count -= 1
    return arr
def countingSort_v2(arr):
    """Stable counting sort; returns a NEW sorted list (arr is untouched).

    Classic three-pass formulation for non-negative integers:
      1. tally occurrences of each value into ``counts``;
      2. prefix-sum ``counts`` so counts[v] = number of elements <= v,
         i.e. one past the last output slot for value v;
      3. walk the input backwards, placing each element at
         counts[value] - 1 and decrementing — backwards keeps equal
         elements in their original relative order (stability).

    BUG FIX: the previous version indexed ``bucket[arr[i] - 1]`` (value
    minus one) instead of ``bucket[arr[i]] - 1`` (position minus one).
    That only works by accident when all values are distinct and silently
    overwrites output slots when the input contains duplicates.
    """
    if not arr:
        return []
    counts = [0] * (max(arr) + 1)
    for val in arr:
        counts[val] += 1
    # Prefix sums: counts[v] becomes the count of elements <= v.
    for v in range(1, len(counts)):
        counts[v] += counts[v - 1]
    output = [0] * len(arr)
    # Reverse traversal for stability.
    for val in reversed(arr):
        counts[val] -= 1
        output[counts[val]] = val
    return output
def main():
    """Demo driver: sort a sample list with countingSort_v2 and print it."""
    sample = [3, 44, 38, 5, 47, 15, 36, 26, 27, 2, 46, 4, 19, 50, 48]
    print(countingSort_v2(sample))
if __name__ == '__main__':
main()
| [
"liuyuan@aplum.com.cn"
] | liuyuan@aplum.com.cn |
77cff2f761a99cf575ecfad3c9e538e645e7170c | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/AtCoder/abc015/A/4914540.py | 7002dd19a3489ebd9a6ae397cf9213d12fc3e0da | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 74 | py | a = input()
b = input()
if len(a)>len(b):
print(a)
else:
print(b) | [
"kwnafi@yahoo.com"
] | kwnafi@yahoo.com |
390a467424adb2bcd0c7434b36bd29a5310d3333 | a3855396e382ec8e5be2bd54d9864feda584546b | /20 Flask REST API with Threadding Run task in background.py | 95633da11872e90ea1ba2763b883145d836b907f | [] | no_license | raj713335/REST_API_VIA_FLASK | 6930862c7c993222f876de475f08257acb2dbfec | 0b6fca7bbfbb7571e468a3b292d9bbe0d79cc6fa | refs/heads/master | 2022-12-31T08:15:58.769835 | 2020-10-26T00:05:10 | 2020-10-26T00:05:10 | 306,668,254 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 470 | py | from flask import Flask
from flask_restful import Resource,Api
import time
import threading
app=Flask(__name__)
api=Api(app)
def task():
    """Simulated long-running job: logs start/end and sleeps for 6 seconds.

    Intended to run on a worker thread so the HTTP response is not blocked
    while it executes.
    """
    print("Started Task...")
    # Show which thread is executing us, demonstrating it is not the
    # request-handling thread.
    print(threading.current_thread().name)
    time.sleep(6)
    print("completed ...")
class HelloWorld(Resource):
    """REST resource that kicks off a background task and returns at once."""
    def get(self):
        # Fire-and-forget: run task() on a new thread so this request
        # returns immediately instead of waiting out the 6-second sleep.
        threading.Thread(target=task).start()
        return {'hello':'world'}
api.add_resource(HelloWorld,'/')
if __name__=="__main__":
app.run(debug=True)
| [
"raj713335@gmail.com"
] | raj713335@gmail.com |
6ab5f1183af1a2d2c4981ad8344956be938f629a | 58654f5f9f01813da08ecb4f151e7dae32f18cad | /quant_mech/src/test/hierarchy_solver_steady_state_test.py | 45604b0638fe003db261f3defb9eb367f1e40e6b | [] | no_license | rstones/quant_mech | 57c2f106adfe6fcd1880ab3c50d6f68012963beb | 4dc8f59d66e131cca0cc896638f548d9fcae66e4 | refs/heads/master | 2021-01-19T02:58:24.027297 | 2018-11-26T11:15:04 | 2018-11-26T11:15:04 | 11,790,263 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,251 | py | '''
Created on 15 Mar 2017
@author: richard
'''
import numpy as np
import quant_mech.utils as utils
from quant_mech.OBOscillator import OBOscillator
from quant_mech.UBOscillator import UBOscillator
from quant_mech.hierarchy_solver import HierarchySolver
import scipy as sp
print sp.__version__
'''Ishizaki and Fleming params'''
electronic_coupling = 0.1
system_hamiltonian = np.array([[0, 0, 0],
[0, 0.2, electronic_coupling],
[0, electronic_coupling, 0]])
reorg_energy = 100.
cutoff_freq = 5.
temperature = 2.7 # Kelvin
beta = 4.297 # 0.4 #1. / (utils.KELVIN_TO_WAVENUMS * temperature)
mode_params = [] #[(200., 0.25, 10.)]
jump_ops = np.array([np.array([[0, 0, 0],
[1., 0, 0],
[0, 0, 0]]), np.array([[0, 0, 1.],
[0, 0, 0],
[0, 0, 0]])])
jump_rates = np.array([0.1, 0.0025])
K = 4
environment = []
if mode_params: # assuming that there is a single identical mode on each site
environment = [(OBOscillator(reorg_energy, cutoff_freq, beta, K=K), UBOscillator(mode_params[0][0], mode_params[0][1], mode_params[0][2], beta, K=K)), \
(OBOscillator(reorg_energy, cutoff_freq, beta, K=K), UBOscillator(mode_params[0][0], mode_params[0][1], mode_params[0][2], beta, K=K))]
else:
environment = [(),
(OBOscillator(reorg_energy, cutoff_freq, beta, K=K),),
(OBOscillator(reorg_energy, cutoff_freq, beta, K=K),)]
hs = HierarchySolver(system_hamiltonian, environment, beta, jump_ops, jump_rates, num_matsubara_freqs=K, temperature_correction=True)
hs.truncation_level = 7
hm = hs.construct_hierarchy_matrix_super_fast()
print 'hierarchy matrix shape: ' + str(hm.shape)
print hs.dm_per_tier()
np.savez('DQD_heom_matrix_N7_K4.npz', hm=hm)
import scipy.sparse.linalg as spla
np.set_printoptions(precision=6, linewidth=150, suppress=True)
v0 = np.zeros(hm.shape[0])
v0[0] = 1./3
v0[4] = 1./3
v0[8] = 1./3
evals,evec = spla.eigs(hm.tocsc(), k=1, sigma=0, which='LM', v0=v0)#, ncv=100)
print evals
evec = evec[:9]
evec.shape = 3,3
evec /= np.trace(evec)
print evec
| [
"r.stones@ucl.ac.uk"
] | r.stones@ucl.ac.uk |
41f450bc4cbe94baee0ef10c1f5414f0977a3a3f | 13e93cd07fb45f9fd3bc2a1de78d6d7d4a8f8d25 | /backend/theloungeconsole_28185/settings.py | 8a505b7264581a8cc9636fc340d22846b95ad478 | [] | no_license | crowdbotics-apps/theloungeconsole-28185 | fb8a246c44d781101fa924e77a83dc5cf6f541f0 | 57c9122f1865abdcb00fbe91a02598c599575c61 | refs/heads/master | 2023-05-30T04:29:22.662494 | 2021-06-23T11:01:55 | 2021-06-23T11:01:55 | 379,573,015 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,132 | py | """
Django settings for theloungeconsole_28185 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
# django-environ: all deployment-specific values are read from OS
# environment variables with safe defaults for local development.
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# No default on purpose: the process must fail fast if SECRET_KEY is unset.
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
# Trust the proxy's X-Forwarded-Proto header when deciding if a request is
# secure (required behind Heroku-style load balancers).
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites'
]
LOCAL_APPS = [
    'home',
    'modules',
    'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
    'rest_framework',
    'rest_framework.authtoken',
    'rest_auth',
    'rest_auth.registration',
    'bootstrap4',
    'allauth',
    'allauth.account',
    'allauth.socialaccount',
    'allauth.socialaccount.providers.google',
    'django_extensions',
    'drf_yasg',
    'storages',
    # start fcm_django push notifications
    'fcm_django',
    # end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'theloungeconsole_28185.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # 'web_build' holds the compiled front-end served by Django.
        'DIRS': [os.path.join(BASE_DIR, 'web_build')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'theloungeconsole_28185.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# Default: local SQLite; overridden below when DATABASE_URL is provided.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
if env.str("DATABASE_URL", default=None):
    DATABASES = {
        'default': env.db()
    }
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
# WhiteNoise serves static assets directly from the app process.
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',
    'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), os.path.join(BASE_DIR, 'web_build/static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users: e-mail is the login identifier, usernames are disabled.
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
    # Replace password reset serializer to fix 500 error
    "PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
    # Use custom serializer that has no username and matches web signup
    "REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
# Outgoing e-mail via SendGrid SMTP; falls back to console backend below
# when credentials are missing.
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
# S3 media storage is enabled only when all four AWS settings are present;
# otherwise media files stay on local disk (MEDIA_ROOT below).
USE_S3 = (
    AWS_ACCESS_KEY_ID and
    AWS_SECRET_ACCESS_KEY and
    AWS_STORAGE_BUCKET_NAME and
    AWS_STORAGE_REGION
)
if USE_S3:
    AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
    AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
    AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
    AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
    AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
    DEFAULT_FILE_STORAGE = env.str(
        "DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
    )
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {
    "FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")
}
# end fcm_django push notifications
# Swagger settings for api docs
SWAGGER_SETTINGS = {
    "DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
    # output email to console instead of sending
    if not DEBUG:
        logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
    EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
78959be17fec88148aeb921aa8831dc39485443f | bc01e1d158e7d8f28451a7e108afb8ec4cb7d5d4 | /sage/src/sage/coding/relative_finite_field_extension.py | 6f41b576424e37a01cdfaae14a67728872cd0543 | [] | no_license | bopopescu/geosci | 28792bda1ec1f06e23ba8dcb313769b98f793dad | 0d9eacbf74e2acffefde93e39f8bcbec745cdaba | refs/heads/master | 2021-09-22T17:47:20.194233 | 2018-09-12T22:19:36 | 2018-09-12T22:19:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,150 | py | r"""
Management of relative finite field extensions
Considering a *absolute field* `F_{q^m}` and a *relative_field* `F_q`, with
`q = p^s`, `p` being a prime and `s, m` being integers, this file
contains a class to take care of the representation of `F_{q^m}`-elements
as `F_q`-elements.
.. WARNING::
As this code is experimental, a warning is thrown when a
relative finite field extension is created for the first time
in a session (see :class:`sage.misc.superseded.experimental`).
TESTS::
sage: from sage.coding.relative_finite_field_extension import *
sage: Fqm.<aa> = GF(16)
sage: Fq.<a> = GF(4)
sage: RelativeFiniteFieldExtension(Fqm, Fq)
doctest:...: FutureWarning: This class/method/function is marked as experimental. It, its functionality or its interface might change without a formal deprecation.
See http://trac.sagemath.org/20284 for details.
Relative field extension between Finite Field in aa of size 2^4 and Finite Field in a of size 2^2
"""
#*****************************************************************************
# Copyright (C) 2016 David Lucas <david.lucas@inria.fr>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.misc.cachefunc import cached_method
from sage.rings.integer import Integer
from sage.rings.finite_rings.finite_field_constructor import GF
from sage.structure.sage_object import SageObject
from sage.categories.homset import Hom
from sage.matrix.constructor import column_matrix
from sage.modules.free_module_element import vector
from sage.misc.superseded import experimental
class RelativeFiniteFieldExtension(SageObject):
    r"""
    Considering `p` a prime number, n an integer and three finite fields
    `F_p`, `F_q` and `F_{q^m}`, this class contains a set of methods
    to manage the representation of elements of the relative extension
    `F_{q^m}` over `F_q`.
    INPUT:
    - ``absolute_field``, ``relative_field`` -- two finite fields, ``relative_field``
      being a subfield of ``absolute_field``
    - ``embedding`` -- (default: ``None``) a homomorphism from ``relative_field`` to
      ``absolute_field``. If ``None`` is provided, it will default to the first
      homomorphism of the list of homomorphisms Sage can build.
    EXAMPLES::
        sage: from sage.coding.relative_finite_field_extension import *
        sage: Fqm.<aa> = GF(16)
        sage: Fq.<a> = GF(4)
        sage: RelativeFiniteFieldExtension(Fqm, Fq)
        Relative field extension between Finite Field in aa of size 2^4 and Finite Field in a of size 2^2
    It is possible to specify the embedding to use
    from ``relative_field`` to ``absolute_field``::
        sage: Fqm.<aa> = GF(16)
        sage: Fq.<a> = GF(4)
        sage: FE = RelativeFiniteFieldExtension(Fqm, Fq, embedding=Hom(Fq, Fqm)[1])
        sage: FE.embedding() == Hom(Fq, Fqm)[1]
        True
    """
    @experimental(trac_number=20284)
    def __init__(self, absolute_field, relative_field, embedding=None):
        r"""
        TESTS:
        If ``absolute_field`` is not a finite field, an error is raised::
            sage: from sage.coding.relative_finite_field_extension import *
            sage: Fqm = RR
            sage: Fq.<a> = GF(4)
            sage: RelativeFiniteFieldExtension(Fqm, Fq)
            Traceback (most recent call last):
            ...
            ValueError: absolute_field has to be a finite field
        Same for ``relative_field``::
            sage: from sage.coding.relative_finite_field_extension import *
            sage: Fqm.<aa> = GF(16)
            sage: Fq = RR
            sage: RelativeFiniteFieldExtension(Fqm, Fq)
            Traceback (most recent call last):
            ...
            ValueError: relative_field has to be a finite field
        If ``relative_field`` is not a subfield of ``absolute_field``, an exception
        is raised::
            sage: from sage.coding.relative_finite_field_extension import *
            sage: Fqm.<aa> = GF(16)
            sage: Fq.<a> = GF(8)
            sage: RelativeFiniteFieldExtension(Fqm, Fq)
            Traceback (most recent call last):
            ...
            ValueError: relative_field has to be a subfield of absolute_field
        """
        if not absolute_field.is_finite():
            raise ValueError("absolute_field has to be a finite field")
        if not relative_field.is_finite():
            raise ValueError("relative_field has to be a finite field")
        s = relative_field.degree()
        sm = absolute_field.degree()
        # F_q is a subfield of F_{q^m} exactly when s divides sm.
        if not s.divides(sm):
            raise ValueError("relative_field has to be a subfield of absolute_field")
        H = Hom(relative_field, absolute_field)
        if embedding is not None and not embedding in H:
            raise ValueError("embedding has to be an embedding from relative_field to absolute_field")
        elif embedding is not None:
            self._phi = embedding
        else:
            self._phi = H[0]  # default: first homomorphism Sage can build
        self._prime_field = relative_field.base_ring()
        self._relative_field = relative_field
        self._absolute_field = absolute_field
        alpha = relative_field.gen()
        beta = absolute_field.gen()
        # Power bases of the relative/absolute fields over the prime field.
        self._alphas = [alpha ** i for i in range(s)]
        self._betas = [beta ** i for i in range(sm)]
        self._relative_field_degree = s
        self._absolute_field_degree = sm
    def _repr_(self):
        r"""
        Returns a string representation of ``self``.
        EXAMPLES::
            sage: from sage.coding.relative_finite_field_extension import *
            sage: Fqm.<aa> = GF(16)
            sage: Fq.<a> = GF(4)
            sage: RelativeFiniteFieldExtension(Fqm, Fq)
            Relative field extension between Finite Field in aa of size 2^4 and Finite Field in a of size 2^2
        """
        return "Relative field extension between %s and %s" % (self.absolute_field(), self.relative_field())
    def _latex_(self):
        r"""
        Returns a latex representation of ``self``.
        EXAMPLES::
            sage: from sage.coding.relative_finite_field_extension import *
            sage: Fqm.<aa> = GF(16)
            sage: Fq.<a> = GF(4)
            sage: latex(RelativeFiniteFieldExtension(Fqm, Fq))
            \textnormal{Relative field extension between \Bold{F}_{2^{4}} and \Bold{F}_{2^{2}}}
        """
        return "\\textnormal{Relative field extension between %s and %s}" % (self.absolute_field()._latex_(),
                self.relative_field()._latex_())
    def __eq__(self, other):
        r"""
        Tests equality between embeddings.
        EXAMPLES::
            sage: from sage.coding.relative_finite_field_extension import *
            sage: Fq = GF(4)
            sage: FQ = GF(4**3)
            sage: H = Hom(Fq, FQ)
            sage: E1 = RelativeFiniteFieldExtension(FQ, Fq)
            sage: E2 = RelativeFiniteFieldExtension(FQ, Fq, H[0])
            sage: E3 = RelativeFiniteFieldExtension(FQ, Fq, H[1])
            sage: E1 == E2
            True
            sage: E1 == E3
            False
        """
        # NOTE(review): ``__eq__`` is defined without ``__hash__``, which
        # makes instances unhashable under Python 3 — confirm intended.
        return isinstance(other, RelativeFiniteFieldExtension) \
                and self.embedding() == other.embedding()
    @cached_method
    def _representation_matrix(self):
        r"""
        Returns the matrix used to represent elements of the absolute field
        as vectors in the basis of the relative field over the prime field.
        EXAMPLES::
            sage: from sage.coding.relative_finite_field_extension import *
            sage: Fqm.<aa> = GF(16)
            sage: Fq.<a> = GF(4)
            sage: FE = RelativeFiniteFieldExtension(Fqm, Fq)
            sage: FE._representation_matrix()
            [1 0 0 0]
            [0 0 1 1]
            [0 1 1 1]
            [0 0 0 1]
        """
        s = self.relative_field_degree()
        m = self.extension_degree()
        betas = self.absolute_field_basis()
        phi_alphas = [ self._phi(self._alphas[i]) for i in range(s) ]
        # Columns are the prime-field coordinates of beta_i * phi(alpha_j);
        # inverting gives the change of basis into the mixed basis.
        A = column_matrix([vector(betas[i] * phi_alphas[j])
            for i in range(m) for j in range(s)])
        return A.inverse()
    def _flattened_relative_field_representation(self, b):
        r"""
        Returns a vector representation of ``b`` in the basis of
        the relative field over the prime field.
        INPUT:
        - ``b`` -- an element of the absolute field
        EXAMPLES::
            sage: from sage.coding.relative_finite_field_extension import *
            sage: Fqm.<aa> = GF(16)
            sage: Fq.<a> = GF(4)
            sage: FE = RelativeFiniteFieldExtension(Fqm, Fq)
            sage: b = aa^3 + aa^2 + aa + 1
            sage: FE._flattened_relative_field_representation(b)
            (1, 0, 1, 1)
        """
        if not b in self.absolute_field():
            raise ValueError("The input has to be an element of the absolute field")
        return self._representation_matrix() * vector(b)
    def relative_field_representation(self, b):
        r"""
        Returns a vector representation of the field element ``b`` in the basis
        of the absolute field over the relative field.
        INPUT:
        - ``b`` -- an element of the absolute field
        EXAMPLES::
            sage: from sage.coding.relative_finite_field_extension import *
            sage: Fqm.<aa> = GF(16)
            sage: Fq.<a> = GF(4)
            sage: FE = RelativeFiniteFieldExtension(Fqm, Fq)
            sage: b = aa^3 + aa^2 + aa + 1
            sage: FE.relative_field_representation(b)
            (1, a + 1)
        """
        if not b in self.absolute_field():
            raise ValueError("The input has to be an element of the absolute field")
        s = self.relative_field_degree()
        if s == 1:
            # Trivial case: the relative field is the prime field itself.
            return vector(b)
        else:
            Fq = self.relative_field()
            vect = self._flattened_relative_field_representation(b)
            sm = self.absolute_field_degree()
            # Regroup the flattened prime-field coordinates s at a time,
            # each group being one relative-field coordinate.
            list_elts = []
            for i in range(0, sm, s):
                list_elts.append(Fq(vect[i:i+s]))
            return vector(Fq, list_elts)
    def absolute_field_representation(self, a):
        r"""
        Returns an absolute field representation of the relative field
        vector ``a``.
        This is the inverse of :meth:`relative_field_representation`.
        INPUT:
        - ``a`` -- a vector in the relative extension field
        EXAMPLES::
            sage: from sage.coding.relative_finite_field_extension import *
            sage: Fqm.<aa> = GF(16)
            sage: Fq.<a> = GF(4)
            sage: FE = RelativeFiniteFieldExtension(Fqm, Fq)
            sage: b = aa^3 + aa^2 + aa + 1
            sage: rel = FE.relative_field_representation(b)
            sage: FE.absolute_field_representation(rel) == b
            True
        """
        s = self.relative_field_degree()
        m = self.extension_degree()
        if len(a) != m:
            raise ValueError("The input has to be a vector with length equal to the order of the absolute field")
        # NOTE(review): the message below says "prime field" but the check
        # compares against the relative field — confirm intended wording.
        if not a.base_ring() == self.relative_field():
            raise ValueError("The input has to be over the prime field")
        alphas = self.relative_field_basis()
        betas = self.absolute_field_basis()
        phi = self.embedding()
        b = self.absolute_field().zero()
        F = self.prime_field()
        # Flatten each relative-field coordinate into its prime-field
        # coordinates, then recombine through the embedding.
        flattened_relative_field_rep_list = []
        for i in a:
            tmp = vector(i).list()
            for j in tmp:
                flattened_relative_field_rep_list.append(j)
        flattened_relative_field_rep = vector(flattened_relative_field_rep_list)
        for i in range(m):
            b += betas[i] * phi(sum([flattened_relative_field_rep[j] * alphas[j%s] for j in range(i*s, i*s + s)]))
        return b
    def is_in_relative_field(self, b):
        r"""
        Returns ``True`` if ``b`` is in the relative field.
        INPUT:
        - ``b`` -- an element of the absolute field.
        EXAMPLES::
            sage: from sage.coding.relative_finite_field_extension import *
            sage: Fqm.<aa> = GF(16)
            sage: Fq.<a> = GF(4)
            sage: FE = RelativeFiniteFieldExtension(Fqm, Fq)
            sage: FE.is_in_relative_field(aa^2 + aa)
            True
            sage: FE.is_in_relative_field(aa^3)
            False
        """
        # b lies in F_q iff all coordinates past the first one vanish.
        vect = self.relative_field_representation(b)
        return vect[1:vect.length()].is_zero()
    def cast_into_relative_field(self, b, check=True):
        r"""
        Casts an absolute field element into the relative field (if possible).
        This is the inverse function of the field embedding.
        INPUT:
        - ``b`` -- an element of the absolute field which also lies in the
          relative field.
        EXAMPLES::
            sage: from sage.coding.relative_finite_field_extension import *
            sage: Fqm.<aa> = GF(16)
            sage: Fq.<a> = GF(4)
            sage: FE = RelativeFiniteFieldExtension(Fqm, Fq)
            sage: phi = FE.embedding()
            sage: b = aa^2 + aa
            sage: FE.is_in_relative_field(b)
            True
            sage: FE.cast_into_relative_field(b)
            a
            sage: phi(FE.cast_into_relative_field(b)) == b
            True
        """
        if check:
            if not self.is_in_relative_field(b):
                raise ValueError("%s does not belong to the relative field" % b)
        return self.relative_field_representation(b)[0]
    def embedding(self):
        r"""
        Returns the embedding which is used to go from the
        relative field to the absolute field.
        EXAMPLES::
            sage: from sage.coding.relative_finite_field_extension import *
            sage: Fqm.<aa> = GF(16)
            sage: Fq.<a> = GF(4)
            sage: FE = RelativeFiniteFieldExtension(Fqm, Fq)
            sage: FE.embedding()
            Ring morphism:
            From: Finite Field in a of size 2^2
            To:   Finite Field in aa of size 2^4
            Defn: a |--> aa^2 + aa
        """
        return self._phi
    def relative_field_basis(self):
        r"""
        Returns a basis of the relative field over the prime field.
        EXAMPLES::
            sage: from sage.coding.relative_finite_field_extension import *
            sage: Fqm.<aa> = GF(16)
            sage: Fq.<a> = GF(4)
            sage: FE = RelativeFiniteFieldExtension(Fqm, Fq)
            sage: FE.relative_field_basis()
            [1, a]
        """
        return self._alphas
    def absolute_field_basis(self):
        r"""
        Returns a basis of the absolute field over the prime field.
        EXAMPLES::
            sage: from sage.coding.relative_finite_field_extension import *
            sage: Fqm.<aa> = GF(16)
            sage: Fq.<a> = GF(4)
            sage: FE = RelativeFiniteFieldExtension(Fqm, Fq)
            sage: FE.absolute_field_basis()
            [1, aa, aa^2, aa^3]
        """
        return self._betas
    def relative_field_degree(self):
        r"""
        Let `F_p` be the base field of our relative field `F_q`.
        Returns `s` where `p^s = q`
        EXAMPLES::
            sage: from sage.coding.relative_finite_field_extension import *
            sage: Fqm.<aa> = GF(16)
            sage: Fq.<a> = GF(4)
            sage: FE = RelativeFiniteFieldExtension(Fqm, Fq)
            sage: FE.relative_field_degree()
            2
        """
        return self._relative_field_degree
    def absolute_field_degree(self):
        r"""
        Let `F_p` be the base field of our absolute field `F_{q^m}`.
        Returns `sm` where `p^{sm} = q^{m}`
        EXAMPLES::
            sage: from sage.coding.relative_finite_field_extension import *
            sage: Fqm.<aa> = GF(16)
            sage: Fq.<a> = GF(4)
            sage: FE = RelativeFiniteFieldExtension(Fqm, Fq)
            sage: FE.absolute_field_degree()
            4
        """
        return self._absolute_field_degree
    def extension_degree(self):
        r"""
        Returns `m`, the extension degree of the absolute field over
        the relative field.
        EXAMPLES::
            sage: from sage.coding.relative_finite_field_extension import *
            sage: Fqm.<aa> = GF(64)
            sage: Fq.<a> = GF(4)
            sage: FE = RelativeFiniteFieldExtension(Fqm, Fq)
            sage: FE.extension_degree()
            3
        """
        return self.absolute_field_degree() // self.relative_field_degree()
    def prime_field(self):
        r"""
        Returns the base field of our absolute and relative fields.
        EXAMPLES::
            sage: from sage.coding.relative_finite_field_extension import *
            sage: Fqm.<aa> = GF(16)
            sage: Fq.<a> = GF(4)
            sage: FE = RelativeFiniteFieldExtension(Fqm, Fq)
            sage: FE.prime_field()
            Finite Field of size 2
        """
        return self._prime_field
    def relative_field(self):
        r"""
        Returns the relative field of ``self``.
        EXAMPLES::
            sage: from sage.coding.relative_finite_field_extension import *
            sage: Fqm.<aa> = GF(16)
            sage: Fq.<a> = GF(4)
            sage: FE = RelativeFiniteFieldExtension(Fqm, Fq)
            sage: FE.relative_field()
            Finite Field in a of size 2^2
        """
        return self._relative_field
    def absolute_field(self):
        r"""
        Returns the absolute field of ``self``.
        EXAMPLES::
            sage: from sage.coding.relative_finite_field_extension import *
            sage: Fqm.<aa> = GF(16)
            sage: Fq.<a> = GF(4)
            sage: FE = RelativeFiniteFieldExtension(Fqm, Fq)
            sage: FE.absolute_field()
            Finite Field in aa of size 2^4
        """
        return self._absolute_field
| [
"valber@HPC"
] | valber@HPC |
1b52a8c073aa7b1299327b714f40bfab360ae620 | ca850269e513b74fce76847310bed143f95b1d10 | /build/navigation/map_server/catkin_generated/pkg.develspace.context.pc.py | d70657140fd79ea2f8707d898a32909e8b49fded | [] | no_license | dvij542/RISS-2level-pathplanning-control | f98f2c83f70c2894d3c248630159ea86df8b08eb | 18390c5ab967e8649b9dc83681e9090a37f3d018 | refs/heads/main | 2023-06-15T03:58:25.293401 | 2021-06-20T20:20:30 | 2021-06-20T20:20:30 | 368,553,169 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 560 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# Catkin develspace package context for map_server.
# Values are instantiated from pkg.context.pc.in at configure time.
CATKIN_PACKAGE_PREFIX = ""

# Semicolon-separated lists from CMake; empty strings become empty lists.
_INCLUDE_DIRS = "/home/dvij5420/catkin_ws/src/navigation/map_server/include"
PROJECT_PKG_CONFIG_INCLUDE_DIRS = _INCLUDE_DIRS.split(';') if _INCLUDE_DIRS else []

# Run-time dependencies, space-separated for pkg-config consumption.
PROJECT_CATKIN_DEPENDS = ' '.join("roscpp;nav_msgs;tf2".split(';'))

_LIBRARIES = "-lmap_server_image_loader"
PKG_CONFIG_LIBRARIES_WITH_PREFIX = _LIBRARIES.split(';') if _LIBRARIES else []

PROJECT_NAME = "map_server"
PROJECT_SPACE_DIR = "/home/dvij5420/catkin_ws/devel"
PROJECT_VERSION = "1.14.9"
"dvij.kalaria@gmail.com"
] | dvij.kalaria@gmail.com |
b97f9e2e6819731ed2e72f322fe82fc21b408239 | 7bededcada9271d92f34da6dae7088f3faf61c02 | /pypureclient/flasharray/FA_2_20/models/support_response.py | 6615baa58a213837986445ba7e3bc77ac6e1937e | [
"BSD-2-Clause"
] | permissive | PureStorage-OpenConnect/py-pure-client | a5348c6a153f8c809d6e3cf734d95d6946c5f659 | 7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e | refs/heads/master | 2023-09-04T10:59:03.009972 | 2023-08-25T07:40:41 | 2023-08-25T07:40:41 | 160,391,444 | 18 | 29 | BSD-2-Clause | 2023-09-08T09:08:30 | 2018-12-04T17:02:51 | Python | UTF-8 | Python | false | false | 3,781 | py | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.20
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_20 import models
class SupportResponse(object):
    """
    Auto-generated swagger-codegen model wrapping a list of `Support` items.

    NOTE: generated code — regenerate from the OpenAPI spec rather than
    editing by hand.

    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'items': 'list[Support]'
    }
    attribute_map = {
        'items': 'items'
    }
    required_args = {
    }
    def __init__(
        self,
        items=None,  # type: List[models.Support]
    ):
        """
        Keyword args:
            items (list[Support])
        """
        # Only assign when provided so unset attributes stay absent and
        # trigger the Property/AttributeError path in __getattribute__.
        if items is not None:
            self.items = items
    def __setattr__(self, key, value):
        # Reject any attribute not declared in the swagger model.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `SupportResponse`".format(key))
        self.__dict__[key] = value
    def __getattribute__(self, item):
        value = object.__getattribute__(self, item)
        # Class-level Property placeholders signal "unset"; surface that as
        # AttributeError so hasattr()/getattr() behave as if absent.
        if isinstance(value, Property):
            raise AttributeError
        else:
            return value
    def __getitem__(self, key):
        # Dict-style read access restricted to declared attributes.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `SupportResponse`".format(key))
        return object.__getattribute__(self, key)
    def __setitem__(self, key, value):
        # Dict-style write access restricted to declared attributes.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `SupportResponse`".format(key))
        object.__setattr__(self, key, value)
    def __delitem__(self, key):
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `SupportResponse`".format(key))
        object.__delattr__(self, key)
    def keys(self):
        # Declared attribute names; together with __getitem__ this makes
        # the model usable with dict(model).
        return self.attribute_map.keys()
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            if hasattr(self, attr):
                value = getattr(self, attr)
                if isinstance(value, list):
                    # Recursively serialise nested models inside lists.
                    result[attr] = list(map(
                        lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                        value
                    ))
                elif hasattr(value, "to_dict"):
                    result[attr] = value.to_dict()
                elif isinstance(value, dict):
                    # Recursively serialise nested models inside dict values.
                    result[attr] = dict(map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict") else item,
                        value.items()
                    ))
                else:
                    result[attr] = value
        if issubclass(SupportResponse, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, SupportResponse):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"noreply@github.com"
] | PureStorage-OpenConnect.noreply@github.com |
64c34be0d4ccbc654dd237df3cfbb2fa81f69dee | b366e1e6e5c121465c569d2ea630eea0bd61818b | /applications/agro/languages/pt-br.py | 874dc38cd74da2da6b7cdd3729dacfea18dc833e | [
"LicenseRef-scancode-public-domain"
] | permissive | rif/agro | a5cc00c3363b0d45d90cff47dd16fac02b46b9d7 | 5dc3af91a041d0d396313ae0538cf29f11e67511 | refs/heads/master | 2021-01-19T16:33:13.093880 | 2013-05-26T21:58:23 | 2013-05-26T21:58:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,762 | py | # coding: utf8
{
'!langcode!': 'pt-br',
'!langname!': 'Português (do Brasil)',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" é uma expressão opcional como "campo1=\'novovalor\'". Você não pode atualizar ou apagar os resultados de um JOIN',
'%s %%{row} deleted': '%s linhas apagadas',
'%s %%{row} updated': '%s linhas atualizadas',
'%s selected': '%s selecionado',
'%Y-%m-%d': '%d-%m-%Y',
'%Y-%m-%d %H:%M:%S': '%d-%m-%Y %H:%M:%S',
'About': 'About',
'Access Control': 'Access Control',
'Address': 'Address',
'Admin': 'Admin',
'Admin Home': 'Admin Home',
'Administrative Interface': 'Administrative Interface',
'Administrative interface': 'Interface administrativa',
'Ajax Recipes': 'Ajax Recipes',
'An error occured, please %s the page': 'An error occured, please %s the page',
'appadmin is disabled because insecure channel': 'Administração desativada devido ao canal inseguro',
'Approved': 'Approved',
'Are you sure you want to delete this object?': 'Are you sure you want to delete this object?',
'Available Databases and Tables': 'Bancos de dados e tabelas disponíveis',
'Available positions': 'Available positions',
'Buy this book': 'Buy this book',
'cache': 'cache',
'Cache': 'Cache',
'Cache Keys': 'Cache Keys',
'Cannot be empty': 'Não pode ser vazio',
'change password': 'modificar senha',
'Check to delete': 'Marque para apagar',
'city': 'city',
'City': 'City',
'Clear CACHE?': 'Clear CACHE?',
'Clear DISK': 'Clear DISK',
'Clear RAM': 'Clear RAM',
'Client IP': 'Client IP',
'Close': 'Close',
'Cometence areas': 'Cometence areas',
'Community': 'Community',
'Company': 'Company',
'Company name': 'Company name',
'Competence areas': 'Competence areas',
'Competences': 'Competences',
'Completed studies': 'Completed studies',
'Components and Plugins': 'Components and Plugins',
'Contact': 'Contact',
'Controller': 'Controlador',
'Copyright': 'Copyright',
'Created on': 'Created on',
'Current request': 'Requisição atual',
'Current response': 'Resposta atual',
'Current session': 'Sessão atual',
'customize me!': 'Personalize-me!',
'data uploaded': 'dados enviados',
'Database': 'banco de dados',
'Database %s select': 'Selecionar banco de dados %s',
'db': 'bd',
'DB Model': 'Modelo BD',
'Delete:': 'Apagar:',
'Demo': 'Demo',
'Deployment Recipes': 'Deployment Recipes',
'Description': 'Description',
'design': 'design',
'DISK': 'DISK',
'Disk Cache Keys': 'Disk Cache Keys',
'Disk Cleared': 'Disk Cleared',
'Documentation': 'Documentation',
"Don't know what to do?": "Don't know what to do?",
'done!': 'concluído!',
'Download': 'Download',
'E-mail': 'E-mail',
'E-mail sending failed. Please contact the site administration!': 'E-mail sending failed. Please contact the site administration!',
'Edit': 'Editar',
'Edit current record': 'Editar o registro atual',
'Edit items': 'Edit items',
'edit profile': 'editar perfil',
'Edit This App': 'Edit This App',
'Email': 'Email',
'Email and SMS': 'Email and SMS',
'Enabled': 'Enabled',
'Enter Job Offer': 'Enter Job Offer',
'Enter Post Graduate Status': 'Enter Post Graduate Status',
'Enter Resume': 'Enter Resume',
'Errors': 'Errors',
'export as csv file': 'exportar como um arquivo csv',
'FAQ': 'FAQ',
'Field of activity': 'Field of activity',
'First name': 'First name',
'First Name': 'First Name',
'Foreign languages': 'Foreign languages',
'form has errors': 'form has errors',
'Forms and Validators': 'Forms and Validators',
'Free Applications': 'Free Applications',
'Graph Model': 'Graph Model',
'Group ID': 'Group ID',
'Groups': 'Groups',
'Hello World': 'Olá Mundo',
'Home': 'Home',
'How did you get here?': 'How did you get here?',
'if you later decide to disable the entry': 'if you later decide to disable the entry',
'If you want to edit your previously entered informations, please enter the e-mail below and the links will be sent to your e-mail address.': 'If you want to edit your previously entered informations, please enter the e-mail below and the links will be sent to your e-mail address.',
'import': 'import',
'Import/Export': 'Importar/Exportar',
'Index': 'Início',
'Information entered here is confidential. It will NOT be displayed publicly.': 'Information entered here is confidential. It will NOT be displayed publicly.',
'Informations entered here is confidential. It will NOT be displayed publicly.': 'Informations entered here is confidential. It will NOT be displayed publicly.',
'insert new': 'inserir novo',
'insert new %s': 'inserir novo %s',
'Internal State': 'Estado Interno',
'Introduction': 'Introduction',
'Invalid email': 'Invalid email',
'Invalid Query': 'Consulta Inválida',
'invalid request': 'requisição inválida',
'Job contacts': 'Job contacts',
'Job offer approved!': 'Job offer approved!',
'Job offer deleted!': 'Job offer deleted!',
'Job offers': 'Job offers',
'Job Offers': 'Job Offers',
'Key': 'Key',
'Languages': 'Languages',
'Last name': 'Last name',
'Last Name': 'Last Name',
'Layout': 'Layout',
'Layout Plugins': 'Layout Plugins',
'Layouts': 'Layouts',
'Live chat': 'Live chat',
'Live Chat': 'Live Chat',
'login': 'Entrar',
'Login': 'Autentique-se',
'logout': 'Sair',
'Lost Password': 'Esqueceu sua senha?',
'lost password?': 'lost password?',
'Main Menu': 'Menu Principal',
'Manage Cache': 'Manage Cache',
'Menu Model': 'Modelo de Menu',
'Message': 'Message',
'My Sites': 'My Sites',
'Name': 'Name',
'New Record': 'Novo Registro',
'new record inserted': 'novo registro inserido',
'next 100 rows': 'próximas 100 linhas',
'No databases in this application': 'Sem bancos de dados nesta aplicação',
'No job offers to approve.': 'No job offers to approve.',
'No resumes to approve.': 'No resumes to approve.',
'Offer expire date': 'Offer expire date',
'Online examples': 'Alguns exemplos',
'or import from csv file': 'ou importar de um arquivo csv',
'Origin': 'Origin',
'Other info': 'Other info',
'Other Plugins': 'Other Plugins',
'Other Recipes': 'Other Recipes',
'Overview': 'Overview',
'Part of the information entered here will be made publicly available.': 'Part of the information entered here will be made publicly available.',
'Part of the information entered here will be made publicly available. We will not display your contact information.': 'Part of the information entered here will be made publicly available. We will not display your contact information.',
'Part of the informations entered here will be made publicly available.': 'Part of the informations entered here will be made publicly available.',
'Part of the informations entered here will be made publicly available. We will not display your contact information.': 'Part of the informations entered here will be made publicly available. We will not display your contact information.',
'Password': 'Password',
'phone': 'phone',
'Phone': 'Phone',
'Photo': 'Photo',
'Photo file': 'Photo file',
'Plugins': 'Plugins',
'Pos count': 'Pos count',
'Position': 'Position',
'Position description': 'Position description',
'Powered by': 'Powered by',
'Preface': 'Preface',
'previous 100 rows': '100 linhas anteriores',
'pygraphviz library not found': 'pygraphviz library not found',
'Python': 'Python',
'Query:': 'Consulta:',
'Quick Examples': 'Quick Examples',
'RAM': 'RAM',
'RAM Cache Keys': 'RAM Cache Keys',
'Ram Cleared': 'Ram Cleared',
'Recipes': 'Recipes',
'Record': 'registro',
'record does not exist': 'registro não existe',
'Record ID': 'Record ID',
'Record id': 'id do registro',
'Recover edit links': 'Recover edit links',
'Register': 'Registre-se',
'register': 'Registre-se',
'Registration key': 'Registration key',
'reload': 'reload',
'Reset Password key': 'Reset Password key',
'Resources': 'Resources',
'Resume approved!': 'Resume approved!',
'Resume contacts': 'Resume contacts',
'Resume deleted!': 'Resume deleted!',
'Resumes': 'Resumes',
'Role': 'Role',
'Rows in Table': 'Linhas na tabela',
'Rows selected': 'Linhas selecionadas',
'Semantic': 'Semantic',
'Send message': 'Send message',
'Send message to': 'Send message to',
'Services': 'Services',
'Size of cache:': 'Size of cache:',
'Sorry, no message to send!': 'Sorry, no message to send!',
'state': 'estado',
'Statistics': 'Statistics',
'Statuses': 'Statuses',
'Studies': 'Studies',
'Stylesheet': 'Stylesheet',
'Subject': 'Subject',
'submit': 'submit',
'Support': 'Support',
'Sure you want to delete this object?': 'Está certo(a) que deseja apagar esse objeto ?',
'Table': 'tabela',
'Table name': 'Table name',
'Thank you! Your job offer was recorded and is pendng approval.': 'Thank you! Your job offer was recorded and is pendng approval.',
'Thank you! Your resume was recorded and is pendng approval.': 'Thank you! Your resume was recorded and is pendng approval.',
'Thank you! Your status was recorded.': 'Thank you! Your status was recorded.',
'Thank you, e-mail sent!': 'Thank you, e-mail sent!',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'Uma "consulta" é uma condição como "db.tabela1.campo1==\'valor\'". Expressões como "db.tabela1.campo1==db.tabela2.campo2" resultam em um JOIN SQL.',
'The Core': 'The Core',
'The output of the file is a dictionary that was rendered by the view %s': 'The output of the file is a dictionary that was rendered by the view %s',
'The Views': 'The Views',
'This App': 'This App',
'This is a copy of the scaffolding application': 'This is a copy of the scaffolding application',
'Time in Cache (h:m:s)': 'Time in Cache (h:m:s)',
'Timestamp': 'Timestamp',
'To be approved': 'To be approved',
'Traceback': 'Traceback',
'Twitter': 'Twitter',
'unable to parse csv file': 'não foi possível analisar arquivo csv',
'Update:': 'Atualizar:',
'Updated on': 'Updated on',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Use (...)&(...) para AND, (...)|(...) para OR, e ~(...) para NOT para construir consultas mais complexas.',
'User ID': 'User ID',
'User Voice': 'User Voice',
'UUID': 'UUID',
'Videos': 'Videos',
'View': 'Visualização',
'Web2py': 'Web2py',
'Welcome': 'Welcome',
'Welcome %s': 'Vem vindo %s',
'Welcome to web2py': 'Bem vindo ao web2py',
'Welcome to web2py!': 'Welcome to web2py!',
'Which called the function %s located in the file %s': 'Which called the function %s located in the file %s',
'Work address': 'Work address',
'Work place': 'Work place',
'Work position': 'Work position',
'You are successfully running web2py': 'You are successfully running web2py',
'You are successfully running web2py.': 'You are successfully running web2py.',
'You can modify this application and adapt it to your needs': 'You can modify this application and adapt it to your needs',
'You visited the url %s': 'You visited the url %s',
}
| [
"radu@fericean.ro"
] | radu@fericean.ro |
c25b1e852ebebddd1fb1b02d1b006f1d861e91b7 | 4c7fc810eb442b386969bf345b4dc6ef3152c783 | /templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_{{cookiecutter.lowercase_modelname}}.py | cbe8153c0ec70ada3f955905e43df9945a751ecf | [
"Apache-2.0"
] | permissive | newcodevelop/transformers | fbcef5d703b12febf6e76e84e3f0493769fb9d37 | e8d1bd7427021d2114ec159b2c90c6b1fcddeae7 | refs/heads/main | 2023-03-15T11:45:09.906184 | 2022-08-30T07:26:17 | 2022-08-30T07:26:17 | 254,360,734 | 0 | 1 | Apache-2.0 | 2020-04-09T12:07:09 | 2020-04-09T12:07:08 | null | UTF-8 | Python | false | false | 155,171 | py | # coding=utf-8
# Copyright 2022 {{cookiecutter.authors}} The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch {{cookiecutter.modelname}} model. """
{% if cookiecutter.is_encoder_decoder_model == "False" %}
import math
import os
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from typing import Optional, Tuple, Union
from ...activations import ACT2FN
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
MaskedLMOutput,
MultipleChoiceModelOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from ...modeling_utils import PreTrainedModel, SequenceSummary
from ...pytorch_utils import (
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
is_torch_greater_than_1_6,
)
from ...utils import logging
from .configuration_{{cookiecutter.lowercase_modelname}} import {{cookiecutter.camelcase_modelname}}Config
# Module-level logger, namespaced to this file.
logger = logging.get_logger(__name__)
# Identifiers interpolated into the auto-generated docstring examples below.
_CHECKPOINT_FOR_DOC = "{{cookiecutter.checkpoint_identifier}}"
_CONFIG_FOR_DOC = "{{cookiecutter.camelcase_modelname}}Config"
_TOKENIZER_FOR_DOC = "{{cookiecutter.camelcase_modelname}}Tokenizer"
# Canonical pretrained checkpoints published on the Hub for this architecture.
{{cookiecutter.uppercase_modelname}}_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "{{cookiecutter.checkpoint_identifier}}",
    # See all {{cookiecutter.modelname}} models at https://huggingface.co/models?filter={{cookiecutter.lowercase_modelname}}
]
def load_tf_weights_in_{{cookiecutter.lowercase_modelname}}(model, config, tf_checkpoint_path):
"""Load tf checkpoints in a pytorch model."""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
logger.info(f"Loading TF weight {name} with shape {shape}")
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name.split("/")
# adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
# which are not required for using pretrained model
if any(
n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
for n in name
):
logger.info(f"Skipping {'/'.join(name)}")
continue
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
scope_names = re.split(r"_(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "kernel" or scope_names[0] == "gamma":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "output_weights":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "squad":
pointer = getattr(pointer, "classifier")
else:
try:
pointer = getattr(pointer, scope_names[0])
except AttributeError:
logger.info(f"Skipping {'/'.join(name)}")
continue
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if m_name[-11:] == "_embeddings":
pointer = getattr(pointer, "weight")
elif m_name == "kernel":
array = np.transpose(array)
try:
assert (
pointer.shape == array.shape
), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info(f"Initialize PyTorch weight {name}")
pointer.data = torch.from_numpy(array)
return model
# Copied from transformers.models.bert.modeling_bert.BertEmbeddings with Bert->{{cookiecutter.camelcase_modelname}}
class {{cookiecutter.camelcase_modelname}}Embeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings."""
    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
        if is_torch_greater_than_1_6:
            # Non-persistent buffer: visible while tracing but never written to the state dict.
            self.register_buffer(
                "token_type_ids",
                torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device),
                persistent=False,
            )
    def forward(
        self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
    ):
        """Sum word + token-type (+ absolute position) embeddings, then LayerNorm and dropout.

        Exactly one of `input_ids` / `inputs_embeds` is expected; `past_key_values_length`
        offsets the auto-generated position ids during incremental decoding.
        """
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]
        seq_length = input_shape[1]
        if position_ids is None:
            # Slice starting at past_key_values_length so cached decoding keeps absolute positions correct.
            position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
        # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs
        # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves
        # issue #5664
        if token_type_ids is None:
            if hasattr(self, "token_type_ids"):
                buffered_token_type_ids = self.token_type_ids[:, :seq_length]
                buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
                token_type_ids = buffered_token_type_ids_expanded
            else:
                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)
        embeddings = inputs_embeds + token_type_embeddings
        if self.position_embedding_type == "absolute":
            # Relative position variants add positional information inside self-attention instead.
            position_embeddings = self.position_embeddings(position_ids)
            embeddings += position_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->{{cookiecutter.camelcase_modelname}}
class {{cookiecutter.camelcase_modelname}}SelfAttention(nn.Module):
    """Multi-head self-attention (or cross-attention) core: Q/K/V projections,
    scaled dot-product scores, optional relative-position terms, and the
    weighted value aggregation. The output projection lives in SelfOutput."""
    def __init__(self, config, position_embedding_type=None):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.position_embedding_type = position_embedding_type or getattr(config, "position_embedding_type", "absolute")
        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            # Learned embeddings, one per possible signed (query - key) distance.
            self.max_position_embeddings = config.max_position_embeddings
            self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
        self.is_decoder = config.is_decoder
    def transpose_for_scores(self, x):
        # (batch, seq, all_head_size) -> (batch, num_heads, seq, head_size)
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        """Return `(context, [attention_probs,] [past_key_value])` depending on flags."""
        mixed_query_layer = self.query(hidden_states)
        # If this is instantiated as a cross-attention module, the keys
        # and values come from an encoder; the attention mask needs to be
        # such that the encoder's padding tokens are not attended to.
        is_cross_attention = encoder_hidden_states is not None
        if is_cross_attention and past_key_value is not None:
            # reuse k,v, cross_attentions
            key_layer = past_key_value[0]
            value_layer = past_key_value[1]
            attention_mask = encoder_attention_mask
        elif is_cross_attention:
            key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
            value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
            attention_mask = encoder_attention_mask
        elif past_key_value is not None:
            # Incremental decoding: append the new keys/values to the cache along the seq axis.
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
            key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
            value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
        else:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
        query_layer = self.transpose_for_scores(mixed_query_layer)
        if self.is_decoder:
            # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
            # Further calls to cross_attention layer can then reuse all cross-attention
            # key/value_states (first "if" case)
            # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
            # all previous decoder key/value_states. Further calls to uni-directional self-attention
            # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
            # if encoder bi-directional self-attention `past_key_value` is always `None`
            past_key_value = (key_layer, value_layer)
        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            seq_length = hidden_states.size()[1]
            position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
            position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
            distance = position_ids_l - position_ids_r
            # Shift distances into [0, 2*max_position_embeddings - 2] for the embedding lookup.
            positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
            positional_embedding = positional_embedding.to(dtype=query_layer.dtype)  # fp16 compatibility
            if self.position_embedding_type == "relative_key":
                relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores
            elif self.position_embedding_type == "relative_key_query":
                relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask is (precomputed for all layers in {{cookiecutter.camelcase_modelname}}Model forward() function)
            attention_scores = attention_scores + attention_mask
        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)
        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask
        context_layer = torch.matmul(attention_probs, value_layer)
        # (batch, num_heads, seq, head_size) -> (batch, seq, all_head_size)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)
        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
        if self.is_decoder:
            outputs = outputs + (past_key_value,)
        return outputs
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->{{cookiecutter.camelcase_modelname}}
class {{cookiecutter.camelcase_modelname}}SelfOutput(nn.Module):
    """Output projection of the attention block: dense + dropout, then residual add + LayerNorm."""
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        # `input_tensor` is the attention block's input (residual connection).
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->{{cookiecutter.camelcase_modelname}}
class {{cookiecutter.camelcase_modelname}}Attention(nn.Module):
    """Full attention block: SelfAttention core + SelfOutput projection, with head pruning support."""
    def __init__(self, config, position_embedding_type=None):
        super().__init__()
        self.self = {{cookiecutter.camelcase_modelname}}SelfAttention(config, position_embedding_type=position_embedding_type)
        self.output = {{cookiecutter.camelcase_modelname}}SelfOutput(config)
        # Indices of heads already pruned, so repeated pruning calls stay consistent.
        self.pruned_heads = set()
    def prune_heads(self, heads):
        """Remove the given attention heads in place by slimming the Q/K/V and output projections."""
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )
        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
        # Update hyper params and store pruned heads
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        """Run self/cross attention and project; passes through any extra outputs (probs, cache)."""
        self_outputs = self.self(
            hidden_states,
            attention_mask,
            head_mask,
            encoder_hidden_states,
            encoder_attention_mask,
            past_key_value,
            output_attentions,
        )
        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs
# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->{{cookiecutter.camelcase_modelname}}
class {{cookiecutter.camelcase_modelname}}Intermediate(nn.Module):
    """Feed-forward expansion: hidden_size -> intermediate_size followed by the activation."""
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # config.hidden_act may be a string key into ACT2FN or a callable.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act
    def forward(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->{{cookiecutter.camelcase_modelname}}
class {{cookiecutter.camelcase_modelname}}Output(nn.Module):
    """Feed-forward contraction: intermediate_size -> hidden_size with dropout, residual add + LayerNorm."""
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        # `input_tensor` is the feed-forward block's input (residual connection).
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->{{cookiecutter.camelcase_modelname}}
class {{cookiecutter.camelcase_modelname}}Layer(nn.Module):
    """One Transformer layer: self-attention, optional cross-attention (decoder only),
    and a chunked feed-forward block."""
    def __init__(self, config):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        # Chunking of the feed-forward is applied along the sequence dimension.
        self.seq_len_dim = 1
        self.attention = {{cookiecutter.camelcase_modelname}}Attention(config)
        self.is_decoder = config.is_decoder
        self.add_cross_attention = config.add_cross_attention
        if self.add_cross_attention:
            # NOTE(review): `assert` is stripped under `python -O`; an explicit raise would be safer.
            assert self.is_decoder, f"{self} should be used as a decoder model if cross attention is added"
            self.crossattention = {{cookiecutter.camelcase_modelname}}Attention(config, position_embedding_type="absolute")
        self.intermediate = {{cookiecutter.camelcase_modelname}}Intermediate(config)
        self.output = {{cookiecutter.camelcase_modelname}}Output(config)
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        """Return `(layer_output, [self_attn_probs, cross_attn_probs,] [present_key_value])`."""
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        self_attention_outputs = self.attention(
            hidden_states,
            attention_mask,
            head_mask,
            output_attentions=output_attentions,
            past_key_value=self_attn_past_key_value,
        )
        attention_output = self_attention_outputs[0]
        # if decoder, the last output is tuple of self-attn cache
        if self.is_decoder:
            outputs = self_attention_outputs[1:-1]
            present_key_value = self_attention_outputs[-1]
        else:
            outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights
        cross_attn_present_key_value = None
        if self.is_decoder and encoder_hidden_states is not None:
            assert hasattr(
                self, "crossattention"
            ), f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
            # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
            cross_attention_outputs = self.crossattention(
                attention_output,
                attention_mask,
                head_mask,
                encoder_hidden_states,
                encoder_attention_mask,
                cross_attn_past_key_value,
                output_attentions,
            )
            attention_output = cross_attention_outputs[0]
            outputs = outputs + cross_attention_outputs[1:-1]  # add cross attentions if we output attention weights
            # add cross-attn cache to positions 3,4 of present_key_value tuple
            cross_attn_present_key_value = cross_attention_outputs[-1]
            present_key_value = present_key_value + cross_attn_present_key_value
        # apply_chunking_to_forward trades compute for memory by splitting the seq dimension.
        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        )
        outputs = (layer_output,) + outputs
        # if decoder, return the attn key/values as the last output
        if self.is_decoder:
            outputs = outputs + (present_key_value,)
        return outputs
    def feed_forward_chunk(self, attention_output):
        # Expansion + activation, then contraction with residual/LayerNorm.
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output
# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->{{cookiecutter.camelcase_modelname}}
class {{cookiecutter.camelcase_modelname}}Encoder(nn.Module):
    """Stack of `config.num_hidden_layers` Transformer layers with optional
    gradient checkpointing, key/value caching, and per-layer output collection."""
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([{{cookiecutter.camelcase_modelname}}Layer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        """Run all layers; return a BaseModelOutputWithPastAndCrossAttentions or an
        equivalent tuple (with None entries dropped) when `return_dict` is False."""
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
        next_decoder_cache = () if use_cache else None
        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                # Record the hidden states *entering* this layer (final state appended after loop).
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_head_mask = head_mask[i] if head_mask is not None else None
            past_key_value = past_key_values[i] if past_key_values is not None else None
            if self.gradient_checkpointing and self.training:
                if use_cache:
                    # Caching stores activations that checkpointing would need to recompute.
                    logger.warning(
                        "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                    )
                    use_cache = False
                def create_custom_forward(module):
                    # Close over `module` so each layer gets its own checkpointed callable;
                    # non-tensor args are bound here because checkpoint() only forwards tensors.
                    def custom_forward(*inputs):
                        return module(*inputs, past_key_value, output_attentions)
                    return custom_forward
                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(layer_module),
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    past_key_value,
                    output_attentions,
                )
            hidden_states = layer_outputs[0]
            if use_cache:
                # The layer returns its present key/value cache as the last element.
                next_decoder_cache += (layer_outputs[-1],)
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
                if self.config.add_cross_attention:
                    all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple(
                v
                for v in [
                    hidden_states,
                    next_decoder_cache,
                    all_hidden_states,
                    all_self_attentions,
                    all_cross_attentions,
                ]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_decoder_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )
# Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->{{cookiecutter.camelcase_modelname}}
class {{cookiecutter.camelcase_modelname}}PredictionHeadTransform(nn.Module):
    """Dense + activation + LayerNorm applied before the LM vocabulary projection."""
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        # config.hidden_act may be a string key into ACT2FN or a callable.
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
    def forward(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.transform_act_fn(hidden_states)
        hidden_states = self.LayerNorm(hidden_states)
        return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{{cookiecutter.camelcase_modelname}}
class {{cookiecutter.camelcase_modelname}}LMPredictionHead(nn.Module):
    """Language-modeling head: PredictionHeadTransform followed by a hidden_size -> vocab_size projection."""
    def __init__(self, config):
        super().__init__()
        self.transform = {{cookiecutter.camelcase_modelname}}PredictionHeadTransform(config)
        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
        # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
        self.decoder.bias = self.bias
    def forward(self, hidden_states):
        hidden_states = self.transform(hidden_states)
        hidden_states = self.decoder(hidden_states)
        return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->{{cookiecutter.camelcase_modelname}}
class {{cookiecutter.camelcase_modelname}}OnlyMLMHead(nn.Module):
    """Thin wrapper exposing the LM prediction head under the `predictions` attribute
    (the name BERT-style checkpoints expect)."""
    def __init__(self, config):
        super().__init__()
        self.predictions = {{cookiecutter.camelcase_modelname}}LMPredictionHead(config)
    def forward(self, sequence_output):
        prediction_scores = self.predictions(sequence_output)
        return prediction_scores
class {{cookiecutter.camelcase_modelname}}PreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and
    a simple interface for downloading and loading pretrained models.
    """
    config_class = {{cookiecutter.camelcase_modelname}}Config
    load_tf_weights = load_tf_weights_in_{{cookiecutter.lowercase_modelname}}
    base_model_prefix = "{{cookiecutter.lowercase_modelname}}"
    supports_gradient_checkpointing = True
    _keys_to_ignore_on_load_missing = [r"position_ids"]
    def _init_weights(self, module):
        """ Initialize the weights """
        if isinstance(module, nn.Linear):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                # Keep the padding-token embedding at zero so it contributes nothing.
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            # Identity initialization for LayerNorm: zero shift, unit scale.
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
    def _set_gradient_checkpointing(self, module, value=False):
        # Gradient checkpointing is only implemented on the encoder stack.
        if isinstance(module, {{cookiecutter.camelcase_modelname}}Encoder):
            module.gradient_checkpointing = value
# Shared docstring fragment prepended to every model class by `add_start_docstrings`.
{{cookiecutter.uppercase_modelname}}_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general
    usage and behavior.
    Parameters:
        config ([`~{{cookiecutter.camelcase_modelname}}Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the configuration.
            Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
# Docstring template for `forward`; the `{0}` placeholder is filled with the input shape via `.format(...)`.
{{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary.
            Indices can be obtained using [`{{cookiecutter.camelcase_modelname}}Tokenizer`].
            See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.
            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
            [What are attention masks?](../glossary#attention-mask)
        token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`:
            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.
            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings.
            Selected in the range `[0, config.max_position_embeddings - 1]`.
            [What are position IDs?](../glossary#position-ids)
        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert *input_ids* indices into associated vectors
            than the model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare {{cookiecutter.modelname}} Model transformer outputting raw hidden-states without any specific head on top.",
    {{cookiecutter.uppercase_modelname}}_START_DOCSTRING,
)
class {{cookiecutter.camelcase_modelname}}Model({{cookiecutter.camelcase_modelname}}PreTrainedModel):
    """
    The model can behave as an encoder (with only self-attention) as well
    as a decoder, in which case a layer of cross-attention is added between
    the self-attention layers, following the architecture described in [Attention is
    all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani,
    Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
    To behave as an decoder the model needs to be initialized with the
    `is_decoder` argument of the configuration set to `True`.
    To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder`
    argument and `add_cross_attention` set to `True`; an
    `encoder_hidden_states` is then expected as an input to the forward pass.
    """
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embeddings = {{cookiecutter.camelcase_modelname}}Embeddings(config)
        self.encoder = {{cookiecutter.camelcase_modelname}}Encoder(config)
        # Initialize weights and apply final processing
        self.post_init()
    def get_input_embeddings(self):
        # The token (word) embedding table, as required by `resize_token_embeddings` & co.
        return self.embeddings.word_embeddings
    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value
    def _prune_heads(self, heads_to_prune):
        """Prunes heads of the model.
        heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
        See base class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
    @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPastAndCrossAttentions,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        encoder_hidden_states  (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
            if the model is configured as a decoder.
        encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask
            is used in the cross-attention if the model is configured as a decoder.
            Mask values selected in `[0, 1]`:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids`
            (those that don't have their past key value states given to this model) of shape `(batch_size, 1)`
            instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up
            decoding (see `past_key_values`).
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # Caching only makes sense in decoder mode; force it off for pure encoders.
        if self.config.is_decoder:
            use_cache = use_cache if use_cache is not None else self.config.use_cache
        else:
            use_cache = False
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        batch_size, seq_length = input_shape
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        # past_key_values_length
        past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
        if attention_mask is None:
            # Default: attend to everything, including cached positions.
            attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
        if token_type_ids is None:
            # Prefer the registered buffer (correct device/dtype) over building a fresh zeros tensor.
            if hasattr(self.embeddings, "token_type_ids"):
                buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
                buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
                token_type_ids = buffered_token_type_ids_expanded
            else:
                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
        embedding_output = self.embeddings(
            input_ids=input_ids,
            position_ids=position_ids,
            token_type_ids=token_type_ids,
            inputs_embeds=inputs_embeds,
            past_key_values_length=past_key_values_length,
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        if not return_dict:
            return (sequence_output,) + encoder_outputs[1:]
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=sequence_output,
            past_key_values=encoder_outputs.past_key_values,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            cross_attentions=encoder_outputs.cross_attentions,
        )
@add_start_docstrings("""{{cookiecutter.modelname}} Model with a `language modeling` head on top. """, {{cookiecutter.uppercase_modelname}}_START_DOCSTRING)
class {{cookiecutter.camelcase_modelname}}ForMaskedLM({{cookiecutter.camelcase_modelname}}PreTrainedModel):
def __init__(self, config):
super().__init__(config)
if config.is_decoder:
logger.warning(
"If you want to use `{{cookiecutter.camelcase_modelname}}ForMaskedLM` make sure `config.is_decoder=False` for "
"bi-directional self-attention."
)
self.{{cookiecutter.lowercase_modelname}} = {{cookiecutter.camelcase_modelname}}Model(config)
self.cls = {{cookiecutter.camelcase_modelname}}OnlyMLMHead(config)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
@add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=MaskedLMOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss.
Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring)
Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels
in `[0, ..., config.vocab_size]`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.{{cookiecutter.lowercase_modelname}}(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss() # -100 index = padding token
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[1:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return MaskedLMOutput(
loss=masked_lm_loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
input_shape = input_ids.shape
effective_batch_size = input_shape[0]
# add a dummy token
assert self.config.pad_token_id is not None, "The PAD token should be defined for generation"
attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)
dummy_token = torch.full(
(effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device
)
input_ids = torch.cat([input_ids, dummy_token], dim=1)
return {"input_ids": input_ids, "attention_mask": attention_mask}
@add_start_docstrings(
    """{{cookiecutter.modelname}} Model with a `language modeling` head on top for CLM fine-tuning. """, {{cookiecutter.uppercase_modelname}}_START_DOCSTRING
)
class {{cookiecutter.camelcase_modelname}}ForCausalLM({{cookiecutter.camelcase_modelname}}PreTrainedModel):
    """Decoder-style model with a language-modeling head for causal (left-to-right) generation."""
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    def __init__(self, config):
        super().__init__(config)
        if not config.is_decoder:
            logger.warning("If you want to use `{{cookiecutter.camelcase_modelname}}ForCausalLM` as a standalone, add `is_decoder=True.`")
        self.{{cookiecutter.lowercase_modelname}} = {{cookiecutter.camelcase_modelname}}Model(config)
        self.cls = {{cookiecutter.camelcase_modelname}}OnlyMLMHead(config)
        # Initialize weights and apply final processing
        self.post_init()
    def get_output_embeddings(self):
        # Output projection of the LM head (tied/resized together with the input embeddings).
        return self.cls.predictions.decoder
    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings
    @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        head_mask=None,
        cross_attn_head_mask=None,
        past_key_values=None,
        labels=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2
            tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional
            tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two
            additional tensors are only required when the model is used as a decoder in a Sequence to Sequence
            model.
            Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
            cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential
            decoding.
            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids`
            (those that don't have their past key value states given to this model) of shape `(batch_size, 1)`
            instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
            `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
            ignored (masked), the loss is only computed for the tokens with labels n `[0, ..., config.vocab_size]`.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up
            decoding (see `past_key_values`).
        Returns:
        Example:
        ```python
        >>> from transformers import {{cookiecutter.camelcase_modelname}}Tokenizer, {{cookiecutter.camelcase_modelname}}ForCausalLM, {{cookiecutter.camelcase_modelname}}Config
        >>> import torch
        >>> tokenizer = {{cookiecutter.camelcase_modelname}}Tokenizer.from_pretrained('{{cookiecutter.checkpoint_identifier}}')
        >>> config = {{cookiecutter.camelcase_modelname}}Config.from_pretrained("{{cookiecutter.checkpoint_identifier}}")
        >>> config.is_decoder = True
        >>> model = {{cookiecutter.camelcase_modelname}}ForCausalLM.from_pretrained('{{cookiecutter.checkpoint_identifier}}', config=config)
        >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
        >>> outputs = model(**inputs)
        >>> prediction_logits = outputs.logits
        ```
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.{{cookiecutter.lowercase_modelname}}(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)
        lm_loss = None
        if labels is not None:
            # we are doing next-token prediction; shift prediction scores and input ids by one
            shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
            labels = labels[:, 1:].contiguous()
            loss_fct = CrossEntropyLoss()
            lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
        if not return_dict:
            output = (prediction_scores,) + outputs[1:]
            return ((lm_loss,) + output) if lm_loss is not None else output
        return CausalLMOutputWithCrossAttentions(
            loss=lm_loss,
            logits=prediction_scores,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            cross_attentions=outputs.cross_attentions,
        )
    def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):
        """Build the model inputs for one generation step from `input_ids` and the cache."""
        input_shape = input_ids.shape
        # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
        if attention_mask is None:
            attention_mask = input_ids.new_ones(input_shape)
        # cut decoder_input_ids if past is used
        if past is not None:
            input_ids = input_ids[:, -1:]
        return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past}
    def _reorder_cache(self, past, beam_idx):
        """Reorder the `past_key_values` cache along the batch dimension to follow beam search."""
        reordered_past = ()
        for layer_past in past:
            # Only the first two entries (self-attention key/value) are beam-indexed;
            # any remaining entries are passed through unchanged.
            reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:],)
        return reordered_past
class {{cookiecutter.camelcase_modelname}}ClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
self.config = config
def forward(self, features, **kwargs):
x = features[:, 0, :] # take <s> token (equiv. to [CLS])
x = self.dropout(x)
x = self.dense(x)
x = ACT2FN[self.config.hidden_act](x)
x = self.dropout(x)
x = self.out_proj(x)
return x
@add_start_docstrings(
    """{{cookiecutter.modelname}} Model transformer with a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    {{cookiecutter.uppercase_modelname}}_START_DOCSTRING,
)
class {{cookiecutter.camelcase_modelname}}ForSequenceClassification({{cookiecutter.camelcase_modelname}}PreTrainedModel):
    """Encoder model with a sequence classification/regression head."""
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.{{cookiecutter.lowercase_modelname}} = {{cookiecutter.camelcase_modelname}}Model(config)
        self.classifier = {{cookiecutter.camelcase_modelname}}ClassificationHead(config)
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss.
            Indices should be in `[0, ..., config.num_labels - 1]`.
            If `config.num_labels == 1` a regression loss is computed (Mean-Square loss),
            If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.{{cookiecutter.lowercase_modelname}}(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        logits = self.classifier(sequence_output)
        loss = None
        if labels is not None:
            # Infer the problem type from label count/dtype on first use, and cache it on the config.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output
        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """{{cookiecutter.modelname}} Model with a multiple choice classification head on top (a linear layer on top of
    the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """,
    {{cookiecutter.uppercase_modelname}}_START_DOCSTRING,
)
class {{cookiecutter.camelcase_modelname}}ForMultipleChoice({{cookiecutter.camelcase_modelname}}PreTrainedModel):
    """Encoder model with a multiple-choice classification head (one logit per choice)."""
    def __init__(self, config):
        super().__init__(config)
        self.{{cookiecutter.lowercase_modelname}} = {{cookiecutter.camelcase_modelname}}Model(config)
        self.sequence_summary = SequenceSummary(config)
        self.classifier = nn.Linear(config.hidden_size, 1)
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
    @add_code_sample_docstrings(
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MultipleChoiceModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the multiple choice classification loss.
            Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension
            of the input tensors. (See `input_ids` above)
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
        # Flatten (batch, num_choices, ...) -> (batch * num_choices, ...) so each choice runs
        # through the encoder as an independent sequence.
        input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
        inputs_embeds = (
            inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
            if inputs_embeds is not None
            else None
        )
        outputs = self.{{cookiecutter.lowercase_modelname}}(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        pooled_output = self.sequence_summary(sequence_output)
        logits = self.classifier(pooled_output)
        # Un-flatten back to (batch, num_choices) for the cross-entropy over choices.
        reshaped_logits = logits.view(-1, num_choices)
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)
        if not return_dict:
            output = (reshaped_logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output
        return MultipleChoiceModelOutput(
            loss=loss,
            logits=reshaped_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """{{cookiecutter.modelname}} Model with a token classification head on top (a linear layer on top of
    the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """,
    {{cookiecutter.uppercase_modelname}}_START_DOCSTRING,
)
class {{cookiecutter.camelcase_modelname}}ForTokenClassification({{cookiecutter.camelcase_modelname}}PreTrainedModel):
    """Encoder model with a per-token classification head (e.g. for NER)."""
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.{{cookiecutter.lowercase_modelname}} = {{cookiecutter.camelcase_modelname}}Model(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss.
            Indices should be in `[0, ..., config.num_labels - 1]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.{{cookiecutter.lowercase_modelname}}(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        sequence_output = self.dropout(sequence_output)
        # One logit vector per token: (batch, seq_len, num_labels).
        logits = self.classifier(sequence_output)
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
        if not return_dict:
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output
        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """{{cookiecutter.modelname}} Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
    layers on top of the hidden-states output to compute `span start logits` and `span end logits`). """,
    {{cookiecutter.uppercase_modelname}}_START_DOCSTRING,
)
class {{cookiecutter.camelcase_modelname}}ForQuestionAnswering({{cookiecutter.camelcase_modelname}}PreTrainedModel):
    """Encoder model with a span-prediction head for extractive question answering."""
    def __init__(self, config):
        super().__init__(config)
        # Span prediction always has exactly two outputs: start and end logits.
        config.num_labels = 2
        self.num_labels = config.num_labels
        self.{{cookiecutter.lowercase_modelname}} = {{cookiecutter.camelcase_modelname}}Model(config)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=QuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        start_positions=None,
        end_positions=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`).
            Position outside of the sequence are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`).
            Position outside of the sequence are not taken into account for computing the loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.{{cookiecutter.lowercase_modelname}}(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        # Split the 2-channel projection into separate start/end logits of shape (batch, seq_len).
        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)
        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)
            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2
        if not return_dict:
            output = (start_logits, end_logits) + outputs[1:]
            return ((total_loss,) + output) if total_loss is not None else output
        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
{% else %}
import math
import copy
import random
from typing import Optional, Tuple, List, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...utils import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPastAndCrossAttentions,
Seq2SeqLMOutput,
Seq2SeqModelOutput,
Seq2SeqQuestionAnsweringModelOutput,
Seq2SeqSequenceClassifierOutput,
CausalLMOutputWithCrossAttentions
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_{{cookiecutter.lowercase_modelname}} import {{cookiecutter.camelcase_modelname}}Config
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "{{cookiecutter.checkpoint_identifier}}"
_CONFIG_FOR_DOC = "{{cookiecutter.camelcase_modelname}}Config"
_TOKENIZER_FOR_DOC = "{{cookiecutter.camelcase_modelname}}Tokenizer"
{{cookiecutter.uppercase_modelname}}_PRETRAINED_MODEL_ARCHIVE_LIST = [
"{{cookiecutter.checkpoint_identifier}}",
# See all {{cookiecutter.modelname}} models at https://huggingface.co/models?filter={{cookiecutter.lowercase_modelname}}
]
def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
    """
    Shift input ids one token to the right, prepending `decoder_start_token_id`.

    Used to build `decoder_input_ids` from `labels` for teacher forcing. Any `-100`
    (the loss ignore index) remaining in the shifted sequence is replaced by
    `pad_token_id` so the decoder never embeds an invalid index.

    Args:
        input_ids (`torch.Tensor` of shape `(batch_size, seq_len)`): token ids to shift.
        pad_token_id (`int`): padding id; must not be `None`.
        decoder_start_token_id (`int`): id written at position 0 of every sequence.

    Returns:
        `torch.Tensor` of the same shape as `input_ids`, shifted right by one position.

    Raises:
        ValueError: if `pad_token_id` is `None`.
    """
    # Explicit check instead of `assert`: asserts are stripped under `python -O`,
    # which would let a None pad id crash later inside `masked_fill_`.
    if pad_token_id is None:
        raise ValueError("self.model.config.pad_token_id has to be defined.")
    shifted_input_ids = input_ids.new_zeros(input_ids.shape)
    shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
    shifted_input_ids[:, 0] = decoder_start_token_id
    # replace possible -100 values in labels by `pad_token_id`
    shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
    return shifted_input_ids
def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, past_key_values_length: int = 0):
"""
Make causal mask used for bi-directional self-attention.
"""
bsz, tgt_len = input_ids_shape
mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min))
mask_cond = torch.arange(mask.size(-1))
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
mask = mask.to(dtype)
if past_key_values_length > 0:
mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype), mask], dim=-1)
return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
def _expand_mask(
mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None
):
"""
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
"""
bsz, src_len = mask.size()
tgt_len = tgt_len if tgt_len is not None else src_len
expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
inverted_mask = 1.0 - expanded_mask
return inverted_mask.masked_fill(inverted_mask.bool(), torch.finfo(dtype).min)
class {{cookiecutter.camelcase_modelname}}LearnedPositionalEmbedding(nn.Embedding):
    """
    This module learns positional embeddings up to a fixed maximum size.
    """

    def __init__(self, num_embeddings: int, embedding_dim: int):
        super().__init__(num_embeddings, embedding_dim)

    def forward(self, input_ids_shape: torch.Size, past_key_values_length: int = 0):
        """`input_ids_shape` is expected to be [bsz x seqlen]."""
        bsz, seq_len = input_ids_shape[:2]
        # Positions continue from the cached length so that incremental decoding
        # (one new token per step) still receives the correct absolute position.
        positions = torch.arange(
            past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device
        )
        return super().forward(positions)
class {{cookiecutter.camelcase_modelname}}Attention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        dropout: float = 0.0,
        is_decoder: bool = False,
        bias: bool = True,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads

        assert (
            self.head_dim * num_heads == self.embed_dim
        ), f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads})."
        # 1/sqrt(head_dim); applied to the query projection once instead of to the scores.
        self.scaling = self.head_dim ** -0.5
        self.is_decoder = is_decoder

        self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        # (bsz, seq_len, embed_dim) -> (bsz, num_heads, seq_len, head_dim)
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(
        self,
        hidden_states: torch.Tensor,
        key_value_states: Optional[torch.Tensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Input shape: Batch x Time x Channel"""

        # if key_value_states are provided this layer is used as a cross-attention layer
        # for the decoder
        is_cross_attention = key_value_states is not None
        bsz, tgt_len, embed_dim = hidden_states.size()

        # get query proj
        query_states = self.q_proj(hidden_states) * self.scaling
        # get key, value proj
        if is_cross_attention and past_key_value is not None:
            # reuse k,v, cross_attentions
            key_states = past_key_value[0]
            value_states = past_key_value[1]
        elif is_cross_attention:
            # cross_attentions
            key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
            value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
        elif past_key_value is not None:
            # reuse k, v, self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)
        else:
            # self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)

        if self.is_decoder:
            # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
            # Further calls to cross_attention layer can then reuse all cross-attention
            # key/value_states (first "if" case)
            # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
            # all previous decoder key/value_states. Further calls to uni-directional self-attention
            # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
            # if encoder bi-directional self-attention `past_key_value` is always `None`
            past_key_value = (key_states, value_states)

        # Fold the head dimension into the batch dimension so a single bmm covers all heads.
        proj_shape = (bsz * self.num_heads, -1, self.head_dim)
        query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
        key_states = key_states.view(*proj_shape)
        value_states = value_states.view(*proj_shape)

        src_len = key_states.size(1)
        attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))

        if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}"
            )

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
                )
            # Additive mask: padded / future positions carry large negative values.
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        attn_weights = nn.functional.softmax(attn_weights, dim=-1)

        if layer_head_mask is not None:
            if layer_head_mask.size() != (self.num_heads,):
                raise ValueError(
                    f"Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}"
                )
            attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        if output_attentions:
            # this operation is a bit akward, but it's required to
            # make sure that attn_weights keeps its gradient.
            # In order to do so, attn_weights have to reshaped
            # twice and have to be reused in the following
            attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
        else:
            attn_weights_reshaped = None

        attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)

        attn_output = torch.bmm(attn_probs, value_states)

        if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}"
            )

        # Undo the head folding: back to (bsz, tgt_len, embed_dim) before the output projection.
        attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
        attn_output = attn_output.transpose(1, 2)
        attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)

        attn_output = self.out_proj(attn_output)

        return attn_output, attn_weights_reshaped, past_key_value
class {{cookiecutter.camelcase_modelname}}EncoderLayer(nn.Module):
    # One Transformer encoder block: self-attention then a two-layer feed-forward
    # network, each followed by a residual connection and post-LayerNorm.
    def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config):
        super().__init__()
        self.embed_dim = config.d_model
        self.self_attn = {{cookiecutter.camelcase_modelname}}Attention(
            embed_dim=self.embed_dim,
            num_heads=config.encoder_attention_heads,
            dropout=config.attention_dropout,
        )
        self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.dropout = config.dropout
        self.activation_fn = ACT2FN[config.activation_function]
        self.activation_dropout = config.activation_dropout
        self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
        self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
        self.final_layer_norm = nn.LayerNorm(self.embed_dim)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        layer_head_mask: torch.Tensor,
        output_attentions: bool = False,
    ):
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape *(seq_len, batch, embed_dim)*
            attention_mask (`torch.FloatTensor`): attention mask of size
                *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
            layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
                *(config.encoder_attention_heads,)*.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        # --- self-attention sub-block (residual + post-LayerNorm) ---
        residual = hidden_states
        hidden_states, attn_weights, _ = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            layer_head_mask=layer_head_mask,
            output_attentions=output_attentions,
        )
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states
        hidden_states = self.self_attn_layer_norm(hidden_states)

        # --- feed-forward sub-block (residual + post-LayerNorm) ---
        residual = hidden_states
        hidden_states = self.activation_fn(self.fc1(hidden_states))
        hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
        hidden_states = self.fc2(hidden_states)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states
        hidden_states = self.final_layer_norm(hidden_states)

        # Guard against fp16 overflow: replace inf/NaN produced by the residual sums
        # with the largest representable finite values.
        if hidden_states.dtype == torch.float16 and (torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()):
            clamp_value = torch.finfo(hidden_states.dtype).max - 1000
            hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (attn_weights,)

        return outputs
class {{cookiecutter.camelcase_modelname}}DecoderLayer(nn.Module):
    # One Transformer decoder block: masked self-attention, cross-attention over the
    # encoder output, then a feed-forward network — each with residual + post-LayerNorm.
    def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config):
        super().__init__()
        self.embed_dim = config.d_model

        self.self_attn = {{cookiecutter.camelcase_modelname}}Attention(
            embed_dim=self.embed_dim,
            num_heads=config.decoder_attention_heads,
            dropout=config.attention_dropout,
            is_decoder=True,
        )
        self.dropout = config.dropout
        self.activation_fn = ACT2FN[config.activation_function]
        self.activation_dropout = config.activation_dropout

        self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.encoder_attn = {{cookiecutter.camelcase_modelname}}Attention(
            self.embed_dim,
            config.decoder_attention_heads,
            dropout=config.attention_dropout,
            is_decoder=True,
        )
        self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
        self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
        self.final_layer_norm = nn.LayerNorm(self.embed_dim)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        cross_layer_head_mask: Optional[torch.Tensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = True,
    ):
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape *(seq_len, batch, embed_dim)*
            attention_mask (`torch.FloatTensor`): attention mask of size
                *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
            encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape *(seq_len, batch, embed_dim)*
            encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
                *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
            layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
                *(encoder_attention_heads,)*.
            cross_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
                size *(decoder_attention_heads,)*.
            past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        residual = hidden_states

        # Self Attention
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        # add present self-attn cache to positions 1,2 of present_key_value tuple
        hidden_states, self_attn_weights, present_key_value = self.self_attn(
            hidden_states=hidden_states,
            past_key_value=self_attn_past_key_value,
            attention_mask=attention_mask,
            layer_head_mask=layer_head_mask,
            output_attentions=output_attentions,
        )
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states
        hidden_states = self.self_attn_layer_norm(hidden_states)

        # Cross-Attention Block
        cross_attn_present_key_value = None
        cross_attn_weights = None
        if encoder_hidden_states is not None:
            residual = hidden_states

            # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
            hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
                hidden_states=hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                layer_head_mask=cross_layer_head_mask,
                past_key_value=cross_attn_past_key_value,
                output_attentions=output_attentions,
            )
            hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
            hidden_states = residual + hidden_states
            hidden_states = self.encoder_attn_layer_norm(hidden_states)

            # add cross-attn to positions 3,4 of present_key_value tuple
            present_key_value = present_key_value + cross_attn_present_key_value

        # Fully Connected
        residual = hidden_states
        hidden_states = self.activation_fn(self.fc1(hidden_states))
        hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
        hidden_states = self.fc2(hidden_states)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states
        hidden_states = self.final_layer_norm(hidden_states)

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights, cross_attn_weights)

        if use_cache:
            outputs += (present_key_value,)

        return outputs
# Copied from transformers.models.bart.modeling_bart.BartClassificationHead with Bart->{{cookiecutter.camelcase_modelname}}
class {{cookiecutter.camelcase_modelname}}ClassificationHead(nn.Module):
    """Head for sentence-level classification tasks."""

    def __init__(
        self,
        input_dim: int,
        inner_dim: int,
        num_classes: int,
        pooler_dropout: float,
    ):
        super().__init__()
        self.dense = nn.Linear(input_dim, inner_dim)
        self.dropout = nn.Dropout(p=pooler_dropout)
        self.out_proj = nn.Linear(inner_dim, num_classes)

    def forward(self, hidden_states: torch.Tensor):
        # dropout -> dense -> tanh -> dropout -> projection to `num_classes` logits
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.dense(hidden_states)
        hidden_states = torch.tanh(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.out_proj(hidden_states)
        return hidden_states
class {{cookiecutter.camelcase_modelname}}PreTrainedModel(PreTrainedModel):
config_class = {{cookiecutter.camelcase_modelname}}Config
base_model_prefix = "model"
supports_gradient_checkpointing = True
def _init_weights(self, module):
std = self.config.init_std
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, ({{cookiecutter.camelcase_modelname}}Decoder, {{cookiecutter.camelcase_modelname}}Encoder)):
module.gradient_checkpointing = value
{{cookiecutter.uppercase_modelname}}_START_DOCSTRING = r"""
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic
methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module)
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config ([`~{{cookiecutter.camelcase_modelname}}Config`]):
Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model
weights.
"""
{{cookiecutter.uppercase_modelname}}_GENERATION_EXAMPLE = r"""
Summarization example:
```python
>>> from transformers import {{cookiecutter.camelcase_modelname}}Tokenizer, {{cookiecutter.camelcase_modelname}}ForConditionalGeneration
>>> model = {{cookiecutter.camelcase_modelname}}ForConditionalGeneration.from_pretrained('{{cookiecutter.checkpoint_identifier}}')
>>> tokenizer = {{cookiecutter.camelcase_modelname}}Tokenizer.from_pretrained('{{cookiecutter.checkpoint_identifier}}')
>>> ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
>>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors='pt')
>>> # Generate Summary
>>> summary_ids = model.generate(inputs['input_ids'], num_beams=4, max_length=5)
>>> print(tokenizer.decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False))
```
"""
{{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using [`~{{cookiecutter.camelcase_modelname}}Tokenizer`]. See
[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for
details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Provide for translation and summarization training. By default, the model will create this tensor by
shifting the `input_ids` to the right, following the paper.
decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will
also be used by default.
If you want to change padding behavior, you should read [`modeling_{{cookiecutter.lowercase_modelname}}._prepare_decoder_attention_mask`] and
modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
information on the default strategy.
head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*:
`attentions`) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`,
*optional*) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the
cross-attention of the decoder.
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors
of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids`
(those that don't have their past key value states given to this model) of shape `(batch_size, 1)`
            instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert `input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds`
have to be input (see `past_key_values`). This is useful if you want more control over how to convert
`decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds`
takes the value of `inputs_embeds`.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up
decoding (see `past_key_values`).
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
{{cookiecutter.uppercase_modelname}}_STANDALONE_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using [`ProphetNetTokenizer`]. See
[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for
details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
class {{cookiecutter.camelcase_modelname}}Encoder({{cookiecutter.camelcase_modelname}}PreTrainedModel):
    """
    Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
    [`{{cookiecutter.camelcase_modelname}}EncoderLayer`].

    Args:
        config: {{cookiecutter.camelcase_modelname}}Config
        embed_tokens (nn.Embedding): output embedding
    """

    def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, embed_tokens: Optional[nn.Embedding] = None):
        super().__init__(config)

        self.dropout = config.dropout
        self.layerdrop = config.encoder_layerdrop

        embed_dim = config.d_model
        self.padding_idx = config.pad_token_id
        self.max_source_positions = config.max_position_embeddings
        # Scale token embeddings by sqrt(d_model) when the config requests it.
        self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0

        if embed_tokens is not None:
            # Share the caller-provided embedding matrix (e.g. tied with the decoder).
            self.embed_tokens = embed_tokens
        else:
            self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx)

        self.embed_positions = {{cookiecutter.camelcase_modelname}}LearnedPositionalEmbedding(
            config.max_position_embeddings,
            embed_dim,
        )
        self.layers = nn.ModuleList([{{cookiecutter.camelcase_modelname}}EncoderLayer(config) for _ in range(config.encoder_layers)])
        self.layernorm_embedding = nn.LayerNorm(embed_dim)

        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        head_mask=None,
        inputs_embeds=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.

                Indices can be obtained using [`~{{cookiecutter.camelcase_modelname}}Tokenizer`]. See
                [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`]
                for details.

                [What are input IDs?](../glossary#input-ids)
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.

            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded
                representation. This is useful if you want more control over how to convert `input_ids` indices
                into associated vectors than the model's internal embedding lookup matrix.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale

        embed_pos = self.embed_positions(input_shape)

        hidden_states = inputs_embeds + embed_pos
        hidden_states = self.layernorm_embedding(hidden_states)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)

        # expand attention_mask
        if attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype)

        # Per-layer hidden states / attentions, accumulated only when requested.
        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        # check if head_mask has a correct number of layers specified if desired
        if head_mask is not None:
            assert head_mask.size()[0] == (
                len(self.layers)
            ), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}."
        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            dropout_probability = random.uniform(0, 1)
            if self.training and (dropout_probability < self.layerdrop):  # skip the layer
                layer_outputs = (None, None)
            else:
                if self.gradient_checkpointing and self.training:
                    # Re-compute activations in the backward pass to save memory.
                    # The closure is needed because checkpoint() only forwards
                    # positional tensor arguments.
                    def create_custom_forward(module):
                        def custom_forward(*inputs):
                            return module(*inputs, output_attentions)

                        return custom_forward

                    layer_outputs = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(encoder_layer),
                        hidden_states,
                        attention_mask,
                        (head_mask[idx] if head_mask is not None else None),
                    )
                else:
                    layer_outputs = encoder_layer(
                        hidden_states,
                        attention_mask,
                        layer_head_mask=(head_mask[idx] if head_mask is not None else None),
                        output_attentions=output_attentions,
                    )

                hidden_states = layer_outputs[0]

            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
        )
class {{cookiecutter.camelcase_modelname}}Decoder({{cookiecutter.camelcase_modelname}}PreTrainedModel):
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`{{cookiecutter.camelcase_modelname}}DecoderLayer`]
Args:
config: {{cookiecutter.camelcase_modelname}}Config
embed_tokens (nn.Embedding): output embedding
"""
def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, embed_tokens: Optional[nn.Embedding] = None):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.decoder_layerdrop
self.padding_idx = config.pad_token_id
self.max_target_positions = config.max_position_embeddings
self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
if embed_tokens is not None:
self.embed_tokens = embed_tokens
else:
self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)
self.embed_positions = {{cookiecutter.camelcase_modelname}}LearnedPositionalEmbedding(
config.max_position_embeddings,
config.d_model,
)
self.layers = nn.ModuleList([{{cookiecutter.camelcase_modelname}}DecoderLayer(config) for _ in range(config.decoder_layers)])
self.layernorm_embedding = nn.LayerNorm(config.d_model)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embed_tokens
def set_input_embeddings(self, value):
self.embed_tokens = value
# Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask
def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
# create causal mask
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
combined_attention_mask = None
if input_shape[-1] > 1:
combined_attention_mask = _make_causal_mask(
input_shape, inputs_embeds.dtype, past_key_values_length=past_key_values_length
).to(self.device)
if attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
combined_attention_mask = (
expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
)
return combined_attention_mask
def forward(
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
head_mask=None,
cross_attn_head_mask=None,
past_key_values=None,
inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`~{{cookiecutter.camelcase_modelname}}Tokenizer`]. See
[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`]
for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2
tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional
tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential
decoding.
If `past_key_values` are used, the user can optionally input only the last
`decoder_input_ids` (those that don't have their past key value states given to this model) of
shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size,
sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices
into associated vectors than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# retrieve input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
# past_key_values_length
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
attention_mask = self._prepare_decoder_attention_mask(attention_mask, input_shape, inputs_embeds, past_key_values_length)
# expand encoder attention mask
if encoder_hidden_states is not None and encoder_attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
encoder_attention_mask = _expand_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
# embed positions
positions = self.embed_positions(input_shape, past_key_values_length)
hidden_states = inputs_embeds + positions
hidden_states = self.layernorm_embedding(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
next_decoder_cache = () if use_cache else None
# check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
if attn_mask is not None:
assert attn_mask.size()[0] == (
len(self.layers)
), f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}."
for idx, decoder_layer in enumerate(self.layers):
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
if output_hidden_states:
all_hidden_states += (hidden_states,)
dropout_probability = random.uniform(0, 1)
if self.training and (dropout_probability < self.layerdrop):
continue
past_key_value = past_key_values[idx] if past_key_values is not None else None
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning("`use_cache = True` is incompatible with gradient checkpointing`. Setting `use_cache = False`...")
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
# None for past_key_value
return module(*inputs, output_attentions, use_cache)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(decoder_layer),
hidden_states,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
head_mask[idx] if head_mask is not None else None,
cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
None,
)
else:
layer_outputs = decoder_layer(
hidden_states,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
layer_head_mask=(head_mask[idx] if head_mask is not None else None),
cross_layer_head_mask=(cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None),
past_key_value=past_key_value,
output_attentions=output_attentions,
use_cache=use_cache,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)
if output_attentions:
all_self_attns += (layer_outputs[1],)
if encoder_hidden_states is not None:
all_cross_attentions += (layer_outputs[2],)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
next_cache = next_decoder_cache if use_cache else None
if not return_dict:
return tuple(
v
for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=next_cache,
hidden_states=all_hidden_states,
attentions=all_self_attns,
cross_attentions=all_cross_attentions,
)
@add_start_docstrings(
    "The bare {{cookiecutter.modelname}} Model outputting raw hidden-states without any specific head on top.",
    {{cookiecutter.uppercase_modelname}}_START_DOCSTRING,
)
class {{cookiecutter.camelcase_modelname}}Model({{cookiecutter.camelcase_modelname}}PreTrainedModel):
    def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config):
        super().__init__(config)
        padding_idx, vocab_size = config.pad_token_id, config.vocab_size
        # Token embedding table shared by encoder and decoder (weight tying).
        self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)
        self.encoder = {{cookiecutter.camelcase_modelname}}Encoder(config, self.shared)
        self.decoder = {{cookiecutter.camelcase_modelname}}Decoder(config, self.shared)
        # Initialize weights and apply final processing
        self.post_init()
    def get_input_embeddings(self):
        # Encoder and decoder both reference this shared table.
        return self.shared
    def set_input_embeddings(self, value):
        # Keep encoder/decoder embedding references in sync with the new shared table.
        self.shared = value
        self.encoder.embed_tokens = self.shared
        self.decoder.embed_tokens = self.shared
    def get_encoder(self):
        return self.encoder
    def get_decoder(self):
        return self.decoder
    @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=Seq2SeqModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        decoder_input_ids=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
        encoder_outputs=None,
        past_key_values=None,
        inputs_embeds=None,
        decoder_inputs_embeds=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        """Run the full encoder-decoder stack and return a `Seq2SeqModelOutput` (or tuple)."""
        # Resolve per-call flags against the config defaults.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # Run the encoder only when the caller did not supply precomputed encoder outputs
        # (e.g. during incremental generation the encoder runs once and is reused).
        if encoder_outputs is None:
            encoder_outputs = self.encoder(
                input_ids=input_ids,
                attention_mask=attention_mask,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )
        # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
        elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
            encoder_outputs = BaseModelOutput(
                last_hidden_state=encoder_outputs[0],
                hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
                attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
            )
        # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
        decoder_outputs = self.decoder(
            input_ids=decoder_input_ids,
            attention_mask=decoder_attention_mask,
            encoder_hidden_states=encoder_outputs[0],
            encoder_attention_mask=attention_mask,
            head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            past_key_values=past_key_values,
            inputs_embeds=decoder_inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        if not return_dict:
            # Legacy tuple output: decoder fields first, encoder fields appended.
            return decoder_outputs + encoder_outputs
        return Seq2SeqModelOutput(
            last_hidden_state=decoder_outputs.last_hidden_state,
            past_key_values=decoder_outputs.past_key_values,
            decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
            encoder_last_hidden_state=encoder_outputs.last_hidden_state,
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
        )
@add_start_docstrings(
    "The {{cookiecutter.modelname}} Model with a language modeling head. Can be used for summarization.", {{cookiecutter.uppercase_modelname}}_START_DOCSTRING
)
class {{cookiecutter.camelcase_modelname}}ForConditionalGeneration({{cookiecutter.camelcase_modelname}}PreTrainedModel):
    base_model_prefix = "model"
    _keys_to_ignore_on_load_missing = [
        r"final_logits_bias",
        r"encoder\.version",
        r"decoder\.version",
        r"lm_head\.weight",
    ]
    def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config):
        super().__init__(config)
        self.model = {{cookiecutter.camelcase_modelname}}Model(config)
        # Non-trainable per-token bias added to the LM logits; registered as a buffer so
        # it moves with the model but is not a parameter.
        self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings)))
        self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False)
        # Initialize weights and apply final processing
        self.post_init()
    def get_encoder(self):
        return self.model.get_encoder()
    def get_decoder(self):
        return self.model.get_decoder()
    def resize_token_embeddings(self, new_num_tokens: int) -> nn.Embedding:
        # Resize embeddings and keep the logits bias buffer the same width as the vocab.
        new_embeddings = super().resize_token_embeddings(new_num_tokens)
        self._resize_final_logits_bias(new_num_tokens)
        return new_embeddings
    def _resize_final_logits_bias(self, new_num_tokens: int) -> None:
        # Truncate or zero-pad `final_logits_bias` to match the new vocabulary size.
        old_num_tokens = self.final_logits_bias.shape[-1]
        if new_num_tokens <= old_num_tokens:
            new_bias = self.final_logits_bias[:, :new_num_tokens]
        else:
            extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device)
            new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)
        self.register_buffer("final_logits_bias", new_bias)
    def get_output_embeddings(self):
        return self.lm_head
    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings
    @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
    @add_end_docstrings({{cookiecutter.uppercase_modelname}}_GENERATION_EXAMPLE)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        decoder_input_ids=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
        encoder_outputs=None,
        past_key_values=None,
        inputs_embeds=None,
        decoder_inputs_embeds=None,
        labels=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        Returns:
        Conditional generation example:
        ```python
        >>> from transformers import {{cookiecutter.camelcase_modelname}}Tokenizer, {{cookiecutter.camelcase_modelname}}ForConditionalGeneration
        >>> tokenizer = {{cookiecutter.camelcase_modelname}}Tokenizer.from_pretrained('{{cookiecutter.checkpoint_identifier}}')
        >>> TXT = "My friends are <mask> but they eat too many carbs."
        >>> model = {{cookiecutter.camelcase_modelname}}ForConditionalGeneration.from_pretrained('{{cookiecutter.checkpoint_identifier}}')
        >>> input_ids = tokenizer([TXT], return_tensors='pt')['input_ids']
        >>> logits = model(input_ids).logits
        >>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item()
        >>> probs = logits[0, masked_index].softmax(dim=0)
        >>> values, predictions = probs.topk(5)
        >>> tokenizer.decode(predictions).split()
        ```
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if labels is not None:
            # Training with labels: caching is pointless (full sequence is processed at once).
            if use_cache:
                logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.")
            use_cache = False
            if decoder_input_ids is None:
                # Teacher forcing: derive decoder inputs by shifting the labels right.
                decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
        outputs = self.model(
            input_ids,
            attention_mask=attention_mask,
            decoder_input_ids=decoder_input_ids,
            encoder_outputs=encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            head_mask=head_mask,
            decoder_head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            decoder_inputs_embeds=decoder_inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # Project decoder hidden states to vocabulary logits and add the fixed bias buffer.
        lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias
        masked_lm_loss = None
        if labels is not None:
            # CrossEntropyLoss ignores label index -100 by default.
            loss_fct = CrossEntropyLoss()
            masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
        if not return_dict:
            output = (lm_logits,) + outputs[1:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
        return Seq2SeqLMOutput(
            loss=masked_lm_loss,
            logits=lm_logits,
            past_key_values=outputs.past_key_values,
            decoder_hidden_states=outputs.decoder_hidden_states,
            decoder_attentions=outputs.decoder_attentions,
            cross_attentions=outputs.cross_attentions,
            encoder_last_hidden_state=outputs.encoder_last_hidden_state,
            encoder_hidden_states=outputs.encoder_hidden_states,
            encoder_attentions=outputs.encoder_attentions,
        )
    def prepare_inputs_for_generation(
        self,
        decoder_input_ids,
        past=None,
        attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
        use_cache=None,
        encoder_outputs=None,
        **kwargs
    ):
        """Assemble the model inputs for one generation step (used by `generate()`)."""
        # cut decoder_input_ids if past is used
        if past is not None:
            decoder_input_ids = decoder_input_ids[:, -1:]
        return {
            "input_ids": None,  # encoder_outputs is defined. input_ids not needed
            "encoder_outputs": encoder_outputs,
            "past_key_values": past,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
            "use_cache": use_cache,  # change this to avoid caching (presumably for debugging)
        }
    @staticmethod
    def _reorder_cache(past, beam_idx):
        # Reorder each layer's cached key/value tensors along the batch dimension so the
        # cache follows the surviving beams during beam search.
        reordered_past = ()
        for layer_past in past:
            reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
        return reordered_past
@add_start_docstrings(
    """
    {{cookiecutter.camelcase_modelname}} model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. for GLUE
    tasks.
    """,
    {{cookiecutter.uppercase_modelname}}_START_DOCSTRING,
)
class {{cookiecutter.camelcase_modelname}}ForSequenceClassification({{cookiecutter.camelcase_modelname}}PreTrainedModel):
    def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, **kwargs):
        super().__init__(config, **kwargs)
        self.model = {{cookiecutter.camelcase_modelname}}Model(config)
        self.classification_head = {{cookiecutter.camelcase_modelname}}ClassificationHead(
            config.d_model,
            config.d_model,
            config.num_labels,
            config.classifier_dropout,
        )
        # The head is created after post_init of the base model, so initialize it explicitly.
        self.model._init_weights(self.classification_head.dense)
        self.model._init_weights(self.classification_head.out_proj)
    @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=Seq2SeqSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        decoder_input_ids=None,
        decoder_attention_mask=None,
        encoder_outputs=None,
        inputs_embeds=None,
        decoder_inputs_embeds=None,
        labels=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if labels is not None:
            use_cache = False
        # The eos-token pooling below needs token ids, so pure-embedding input is unsupported.
        if input_ids is None and inputs_embeds is not None:
            raise NotImplementedError(
                f"Passing input embeddings is currently not supported for {self.__class__.__name__}"
            )
        outputs = self.model(
            input_ids,
            attention_mask=attention_mask,
            decoder_input_ids=decoder_input_ids,
            decoder_attention_mask=decoder_attention_mask,
            encoder_outputs=encoder_outputs,
            inputs_embeds=inputs_embeds,
            decoder_inputs_embeds=decoder_inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = outputs[0]  # last hidden state
        # Pool the decoder hidden state at the final <eos> token of each sequence.
        eos_mask = input_ids.eq(self.config.eos_token_id)
        if len(torch.unique_consecutive(eos_mask.sum(1))) > 1:
            raise ValueError("All examples must have the same number of <eos> tokens.")
        sentence_representation = hidden_states[eos_mask, :].view(hidden_states.size(0), -1, hidden_states.size(-1))[
            :, -1, :
        ]
        logits = self.classification_head(sentence_representation)
        loss = None
        if labels is not None:
            # Infer the problem type once from num_labels/label dtype, then dispatch on it.
            if self.config.problem_type is None:
                if self.config.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.config.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.config.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output
        return Seq2SeqSequenceClassifierOutput(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            decoder_hidden_states=outputs.decoder_hidden_states,
            decoder_attentions=outputs.decoder_attentions,
            cross_attentions=outputs.cross_attentions,
            encoder_last_hidden_state=outputs.encoder_last_hidden_state,
            encoder_hidden_states=outputs.encoder_hidden_states,
            encoder_attentions=outputs.encoder_attentions,
        )
@add_start_docstrings(
    """
    {{cookiecutter.modelname}} Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
    layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    {{cookiecutter.uppercase_modelname}}_START_DOCSTRING,
)
class {{cookiecutter.camelcase_modelname}}ForQuestionAnswering({{cookiecutter.camelcase_modelname}}PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        # Span extraction always predicts exactly two labels: start and end positions.
        config.num_labels = 2
        self.num_labels = config.num_labels
        self.model = {{cookiecutter.camelcase_modelname}}Model(config)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
        # The QA head is created after the base model's init, so initialize it explicitly.
        self.model._init_weights(self.qa_outputs)
    @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=Seq2SeqQuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        decoder_input_ids=None,
        decoder_attention_mask=None,
        encoder_outputs=None,
        start_positions=None,
        end_positions=None,
        inputs_embeds=None,
        decoder_inputs_embeds=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (*sequence_length*). Position outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (*sequence_length*). Position outside of the sequence
            are not taken into account for computing the loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if start_positions is not None and end_positions is not None:
            # Training: caching is unnecessary when the full sequence is given.
            use_cache = False
        outputs = self.model(
            input_ids,
            attention_mask=attention_mask,
            decoder_input_ids=decoder_input_ids,
            decoder_attention_mask=decoder_attention_mask,
            encoder_outputs=encoder_outputs,
            inputs_embeds=inputs_embeds,
            decoder_inputs_embeds=decoder_inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        # Per-token logits; last dim holds (start, end) scores which are split apart below.
        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)
        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)
            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            # Average the two span losses.
            total_loss = (start_loss + end_loss) / 2
        if not return_dict:
            output = (
                start_logits,
                end_logits,
            ) + outputs[1:]
            return ((total_loss,) + output) if total_loss is not None else output
        return Seq2SeqQuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            past_key_values=outputs.past_key_values,
            decoder_hidden_states=outputs.decoder_hidden_states,
            decoder_attentions=outputs.decoder_attentions,
            cross_attentions=outputs.cross_attentions,
            encoder_last_hidden_state=outputs.encoder_last_hidden_state,
            encoder_hidden_states=outputs.encoder_hidden_states,
            encoder_attentions=outputs.encoder_attentions,
        )
# Copied from transformers.models.bart.modeling_bart.BartDecoderWrapper with Bart->{{cookiecutter.camelcase_modelname}}
class {{cookiecutter.camelcase_modelname}}DecoderWrapper({{cookiecutter.camelcase_modelname}}PreTrainedModel):
    """
    This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is
    used in combination with the [`EncoderDecoderModel`] framework.
    """
    def __init__(self, config):
        super().__init__(config)
        # Nesting the decoder under a `decoder` attribute keeps checkpoint parameter
        # names compatible with the seq2seq model's `model.decoder.*` layout.
        self.decoder = {{cookiecutter.camelcase_modelname}}Decoder(config)
    def forward(self, *args, **kwargs):
        # Pure pass-through to the wrapped decoder.
        return self.decoder(*args, **kwargs)
# Copied from transformers.models.bart.modeling_bart.BartForCausalLM with Bart->{{cookiecutter.camelcase_modelname}}
class {{cookiecutter.camelcase_modelname}}ForCausalLM({{cookiecutter.camelcase_modelname}}PreTrainedModel):
def __init__(self, config):
config = copy.deepcopy(config)
config.is_decoder = True
config.is_encoder_decoder = False
super().__init__(config)
self.model = {{cookiecutter.camelcase_modelname}}DecoderWrapper(config)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.model.decoder.embed_tokens
def set_input_embeddings(self, value):
self.model.decoder.embed_tokens = value
def get_output_embeddings(self):
return self.lm_head
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
def set_decoder(self, decoder):
self.model.decoder = decoder
def get_decoder(self):
return self.model.decoder
@replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
head_mask=None,
cross_attn_head_mask=None,
past_key_values=None,
inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`~{{cookiecutter.camelcase_modelname}}Tokenizer`]. See
[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`]
for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
if the model is configured as a decoder.
encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used
in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up
decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids`
(those that don't have their past key value states given to this model) of shape `(batch_size, 1)`
instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are
ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up
decoding (see `past_key_values`).
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
Returns:
Example:
```python
>>> from transformers import {{cookiecutter.camelcase_modelname}}Tokenizer, {{cookiecutter.camelcase_modelname}}ForCausalLM
>>> tokenizer = {{cookiecutter.camelcase_modelname}}Tokenizer.from_pretrained('facebook/bart-large')
>>> model = {{cookiecutter.camelcase_modelname}}ForCausalLM.from_pretrained('facebook/bart-large', add_cross_attention=False)
>>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder."
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> logits = outputs.logits
```
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs = self.model.decoder(
input_ids=input_ids,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
head_mask=head_mask,
cross_attn_head_mask=cross_attn_head_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
logits = self.lm_head(outputs[0])
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return CausalLMOutputWithCrossAttentions(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, use_cache=None, **kwargs):
# if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
if attention_mask is None:
attention_mask = input_ids.new_ones(input_ids.shape)
if past:
input_ids = input_ids[:, -1:]
# first step, decoder_cached_states are empty
return {
"input_ids": input_ids, # encoder_outputs is defined. input_ids not needed
"attention_mask": attention_mask,
"past_key_values": past,
"use_cache": use_cache,
}
@staticmethod
def _reorder_cache(past, beam_idx):
reordered_past = ()
for layer_past in past:
reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
return reordered_past
{% endif -%}
| [
"noreply@github.com"
] | newcodevelop.noreply@github.com |
d3cdc7a0077856a2b97391ea48f1c5b7b2add971 | af2fca9931bd9d9531d7e3a7fba3df02fea1bfd3 | /bin/dl | 99e68cdd0bae029ebaf686b93fc8873ba55c2de9 | [] | no_license | hukkelas/Dotfiles | 21c5fa5500bf0a0ee3e4dfa1bd069d45c66738df | b0439f612261f1aa0c4f2bb89f06095a08dad990 | refs/heads/master | 2021-06-28T12:26:04.478573 | 2020-09-21T07:18:17 | 2020-09-21T07:18:17 | 136,961,645 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 616 | #!/usr/bin/env python3
import subprocess
import argparse
from termcolor import colored
parser = argparse.ArgumentParser()
parser.add_argument("docker_name")
args = parser.parse_args()
keys_to_print = ["Image", "Status","RunningFor", "Names"]
query = "docker ps -a --format '{{.Names}}'"
wanted_idx = [1, 3, 4, 5, 6]
values = {
"Train": "",
"GPU": ""
}
output = subprocess.check_output(query, shell=True).decode().split("\n")
output = [x for x in output if args.docker_name in x]
assert len(output) == 1, output
out = subprocess.check_output(f"docker logs {output[0]}", shell=True).decode()
print(out)
| [
"hakon.hukkelas@ntnu.no"
] | hakon.hukkelas@ntnu.no | |
eee551a4414a19d3f9c93aa595e307f5d736289e | ac4dbe322dcb666abdbd5450ecf1194dd7fa21aa | /learn_pipe/model/opt_params.py | 7edcc46f16441c7e31765e0bf7154e2e93798abc | [
"MIT"
] | permissive | tpimentelms/meaning2form | 432a1cc1b83e54f6b3e3eed54d8d336ae00079a8 | 624b3947b3ac2a7a521cf35c762fb56508236f74 | refs/heads/master | 2022-12-13T21:50:10.196711 | 2020-03-12T10:29:09 | 2020-03-12T10:29:09 | 188,860,052 | 3 | 1 | MIT | 2022-12-08T05:15:50 | 2019-05-27T14:36:40 | Python | UTF-8 | Python | false | false | 804 | py | import pandas as pd
def _get_opt_params(fname, lang, delimiter='\t'):
results = pd.read_csv(fname, delimiter=delimiter)
instance = results[results['lang'] == lang]
embedding_size = int(instance['embedding_size'].item())
hidden_size = int(instance['hidden_size'].item())
word2vec_size = int(instance['word2vec_size'].item())
nlayers = int(instance['nlayers'].item())
dropout = instance['dropout'].item()
return embedding_size, hidden_size, word2vec_size, nlayers, dropout
def get_opt_params(lang, args):
context = args.context if 'shuffle' not in args.context else args.context[:-8]
fname = '%s/bayes-opt%s/orig/%s__%s__opt-results.csv' \
% (args.rfolder_base, args.fsuffix, args.model, context)
return _get_opt_params(fname, lang, delimiter=',')
| [
"tiagopms@gmail.com"
] | tiagopms@gmail.com |
4aa8df812330ebe7963dd961c6485a55d2669e95 | 433cf60d4a3bb69f126ab6b55c43eb34a79aaa8f | /state.py | 1c7863b94d133fba8d527416442c94ae094eeba5 | [] | no_license | pockerman/odisseus_raspberry_pi | 2d84460db859d4b9d52af10a433899945cd44f4c | b6ca2ebf9178411dcb71246880bfb97adcffbad0 | refs/heads/master | 2021-07-13T09:50:33.113891 | 2020-05-23T15:55:43 | 2020-05-23T15:55:43 | 199,181,066 | 2 | 1 | null | 2020-05-23T14:42:37 | 2019-07-27T15:19:03 | Python | UTF-8 | Python | false | false | 587 | py | """
State describe the state of Odisseus
"""
import numpy as np
class State(object):
@staticmethod
def names():
return ["X", "Y", "Vx", "Vy", "Theta"]
def __init__(self, init_cond):
if init_cond:
self._state = init_cond
else:
self._state = np.array([0., 0., 0., 0., 0.])
def get_value(self):
return self._state
def set_value(self, value):
self._state = value
def __iadd__(self, other):
self._state += other
return self
def __len__(self):
return len(self._state) | [
"a.giavaras@gmail.com"
] | a.giavaras@gmail.com |
e6e31132160cc9877eb1dd9ceee62b6a99180cc9 | 7b36801dd87a1df93b2836db74f68b5e00682638 | /scripts/02_MP2RAGE/04_apply_reg.py | 652c4181b66a141c87daf9611f9e9d2bd2b4e656 | [
"BSD-3-Clause"
] | permissive | ofgulban/meso-MRI | c873bf227ae1048a84ffa7999c7ece72f3a8c3f8 | 2afd70a3bb7576f401dd98eeb07df38368f42baf | refs/heads/main | 2023-04-11T19:49:50.837267 | 2022-10-24T13:31:49 | 2022-10-24T13:31:49 | 327,944,487 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,505 | py | """Apply registration."""
import os
import subprocess
import numpy as np
import nibabel as nb
# =============================================================================
NII_NAMES = [
"/home/faruk/data2/DATA_MRI_NIFTI/derived/sub-05/T1/02_upsample/sub-05_ses-T1_run-02_dir-RL_MP2RAGE_T1_crop_ups2X.nii.gz",
"/home/faruk/data2/DATA_MRI_NIFTI/derived/sub-05/T1/02_upsample/sub-05_ses-T1_run-03_dir-PA_MP2RAGE_T1_crop_ups2X.nii.gz",
"/home/faruk/data2/DATA_MRI_NIFTI/derived/sub-05/T1/02_upsample/sub-05_ses-T1_run-04_dir-LR_MP2RAGE_T1_crop_ups2X.nii.gz",
"/home/faruk/data2/DATA_MRI_NIFTI/derived/sub-05/T1/02_upsample/sub-05_ses-T1_run-05_dir-AP_MP2RAGE_T1_crop_ups2X.nii.gz",
"/home/faruk/data2/DATA_MRI_NIFTI/derived/sub-05/T1/02_upsample/sub-05_ses-T1_run-06_dir-RL_MP2RAGE_T1_crop_ups2X.nii.gz",
"/home/faruk/data2/DATA_MRI_NIFTI/derived/sub-05/T1/02_upsample/sub-05_ses-T1_run-07_dir-PA_MP2RAGE_T1_crop_ups2X.nii.gz",
"/home/faruk/data2/DATA_MRI_NIFTI/derived/sub-05/T1/02_upsample/sub-05_ses-T1_run-08_dir-LR_MP2RAGE_T1_crop_ups2X.nii.gz",
]
AFFINES = [
"/home/faruk/data2/DATA_MRI_NIFTI/derived/sub-05/T1/03_motion_correct/sub-05_ses-T1_run-02_dir-RL_part-mag_MP2RAGE_inv2_crop_ups2X_affine.mat",
"/home/faruk/data2/DATA_MRI_NIFTI/derived/sub-05/T1/03_motion_correct/sub-05_ses-T1_run-03_dir-PA_part-mag_MP2RAGE_inv2_crop_ups2X_affine.mat",
"/home/faruk/data2/DATA_MRI_NIFTI/derived/sub-05/T1/03_motion_correct/sub-05_ses-T1_run-04_dir-LR_part-mag_MP2RAGE_inv2_crop_ups2X_affine.mat",
"/home/faruk/data2/DATA_MRI_NIFTI/derived/sub-05/T1/03_motion_correct/sub-05_ses-T1_run-05_dir-AP_part-mag_MP2RAGE_inv2_crop_ups2X_affine.mat",
"/home/faruk/data2/DATA_MRI_NIFTI/derived/sub-05/T1/03_motion_correct/sub-05_ses-T1_run-06_dir-RL_part-mag_MP2RAGE_inv2_crop_ups2X_affine.mat",
"/home/faruk/data2/DATA_MRI_NIFTI/derived/sub-05/T1/03_motion_correct/sub-05_ses-T1_run-07_dir-PA_part-mag_MP2RAGE_inv2_crop_ups2X_affine.mat",
"/home/faruk/data2/DATA_MRI_NIFTI/derived/sub-05/T1/03_motion_correct/sub-05_ses-T1_run-08_dir-LR_part-mag_MP2RAGE_inv2_crop_ups2X_affine.mat",
]
REFERENCE = "/home/faruk/data2/DATA_MRI_NIFTI/derived/sub-05/T1/02_upsample/sub-05_ses-T1_run-01_dir-AP_MP2RAGE_uni_crop_ups2X.nii.gz"
OUTDIR = "/home/faruk/data2/DATA_MRI_NIFTI/derived/sub-05/T1/04_apply_reg"
# =============================================================================
print("MP2RAGE Step 04: Apply registration to UNI images.")
# Output directory
if not os.path.exists(OUTDIR):
os.makedirs(OUTDIR)
print(" Output directory: {}\n".format(OUTDIR))
for i in range(0, len(NII_NAMES)):
# -------------------------------------------------------------------------
# Apply affine transformation matrix
# -------------------------------------------------------------------------
# Prepare inputs
in_moving = NII_NAMES[i]
affine = AFFINES[i]
# Prepare output
basename, ext = in_moving.split(os.extsep, 1)
basename = os.path.basename(basename)
print(basename)
out_moving = os.path.join(OUTDIR, "{}_reg.nii.gz".format(basename))
command = "greedy "
command += "-d 3 "
command += "-rf {} ".format(REFERENCE) # reference
command += "-ri LINEAR " # No other better options than linear
command += "-rm {} {} ".format(in_moving, out_moving) # moving resliced
command += "-r {} ".format(affine)
# Execute command
subprocess.run(command, shell=True)
print('\n\nFinished.')
| [
"farukgulban@gmail.com"
] | farukgulban@gmail.com |
a7520b92dd939d6051721286e33af5bdb7690a8d | eb82e06402be351e1d41dfc0a2646426d26eace6 | /mkt/files/migrations/0001_initial.py | 6b85d73be2712eebb1cf43058d6865cbabce8250 | [] | permissive | sarvex/zamboni | 34c28697f007b40131444af10fa943b19244fa24 | 5fa5400a447f2e905372d4c8eba6d959d22d4f3e | refs/heads/main | 2023-08-19T04:53:22.857291 | 2023-08-14T10:04:05 | 2023-08-14T10:04:05 | 32,572,674 | 0 | 0 | BSD-3-Clause | 2023-09-10T15:16:10 | 2015-03-20T08:41:50 | Python | UTF-8 | Python | false | false | 3,661 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import mkt.site.models
from django.conf import settings
import uuidfield.fields
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='File',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('filename', models.CharField(default=b'', max_length=255)),
('size', models.PositiveIntegerField(default=0)),
('hash', models.CharField(default=b'', max_length=255)),
('status', models.PositiveSmallIntegerField(default=2, db_index=True, choices=[(0, 'Incomplete'), (16, 'Unlisted'), (2, 'Pending approval'), (4, 'Published'), (5, 'Banned from Marketplace'), (11, 'Deleted'), (12, 'Rejected'), (13, 'Approved but private'), (15, 'Blocked')])),
('datestatuschanged', models.DateTimeField(auto_now_add=True, null=True)),
('reviewed', models.DateTimeField(null=True)),
('uses_flash', models.BooleanField(default=False, db_index=True)),
],
options={
'abstract': False,
'db_table': 'files',
'get_latest_by': 'created',
},
bases=(mkt.site.models.OnChangeMixin, models.Model),
),
migrations.CreateModel(
name='FileUpload',
fields=[
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('uuid', uuidfield.fields.UUIDField(primary_key=True, serialize=False, editable=False, max_length=32, blank=True, unique=True)),
('path', models.CharField(default=b'', max_length=255)),
('name', models.CharField(default=b'', help_text=b"The user's original filename", max_length=255)),
('hash', models.CharField(default=b'', max_length=255)),
('valid', models.BooleanField(default=False)),
('validation', models.TextField(null=True)),
('task_error', models.TextField(null=True)),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'abstract': False,
'db_table': 'file_uploads',
'get_latest_by': 'created',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='FileValidation',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('valid', models.BooleanField(default=False)),
('errors', models.IntegerField(default=0)),
('warnings', models.IntegerField(default=0)),
('notices', models.IntegerField(default=0)),
('validation', models.TextField()),
('file', models.OneToOneField(related_name='validation', to='files.File')),
],
options={
'db_table': 'file_validation',
},
bases=(models.Model,),
),
]
| [
"ashort@mozilla.com"
] | ashort@mozilla.com |
7d67f4616c7e5934ab9c234e24b997b4b13f07a0 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02577/s033060966.py | f0b04fbe76acd807d3b8c178cea182bec7767807 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 126 | py | digits = [int(i) for i in list(input())]
Sum = 0
for i in digits:
Sum += i
if Sum%9 == 0:
print("Yes")
else:
print("No") | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
95d7af9ddf3ad428c306a3dda706de52e90220f3 | baad8c0884d2a0ff57dfb70f766a2f2c05f3f5cc | /douyu/douyu/spiders/douyuapp.py | 6b13e7cb5737b91e470ca568702e56f087646506 | [] | no_license | ShaoLay/douyu | ee96f9bd09fd7136de8078284baa2b4f2ede274f | 6b2fe2c3f05a58d5e7a323c6e72b7ba447fc74dd | refs/heads/master | 2020-04-14T18:59:58.695098 | 2019-01-04T02:36:24 | 2019-01-04T02:36:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 707 | py | import scrapy
import json
from douyu.items import DouyuspiderItem
class DouyuSpider(scrapy.Spider):
name = "douyu"
allowd_domains = ["http://capi.douyucdn.cn"]
offset = 0
url = "http://capi.douyucdn.cn/api/v1/getVerticalRoom?limit=20&offset="
start_urls = [url + str(offset)]
def parse(self, response):
# 返回从json里获取 data段数据集合
data = json.loads(response.text)["data"]
for each in data:
item = DouyuspiderItem()
item["name"] = each["nickname"]
item["imagesUrls"] = each["vertical_src"]
yield item
self.offset += 20
yield scrapy.Request(self.url + str(self.offset), callback = self.parse) | [
"javs_shao@163.com"
] | javs_shao@163.com |
183946b6075590e313dcdd943ff083cf360204a4 | b0fdcd6038f8f51ac6fb88abd3698656d9df6ef5 | /HCI_LAB/line_detection/main.py | 14ba7d421926ef79a0388b5fc6345187f040f210 | [] | no_license | ShineySun/HCI_Deep_Lane_Detection | a5b40c3b9df5eaf6afc7cb9983574009199e1af9 | 0b7b7a97f03258942d1d2cd9d86e42348bebab15 | refs/heads/master | 2021-01-02T06:55:59.600259 | 2020-05-15T09:59:33 | 2020-05-15T09:59:33 | 239,537,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,033 | py | import sys
# print(sys.path)
# sys.path.append('/lib/python3.7/site-packages')
import opts
import math
import importlib
from preprocess import *
import _init_paths
import torch
from PIL import Image
import torchvision.transforms as transforms
from torch.autograd import Variable
import cv2
import numpy as np
# python3 main.py --netType stackedHGB --GPUs 0 --LR 0.001 --batchSize 1 --nStack 7 --optim Adam
def main():
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
#cudnn.benchmark = True
opt = opts.parse()
print(("device id: {}".format(torch.cuda.current_device())))
print("torch.version",torch.__version__)
print("cuda_version",torch.version.cuda)
models = importlib.import_module('models.init')
# print(models)
criterions = importlib.import_module('criterions.init')
checkpoints = importlib.import_module('checkpoints')
Trainer = importlib.import_module('models.' + opt.netType + '-train')
# if opt.genLine:
# if opt.testOnly:
# processData('test')
# else:
# print('Prepare train data')
# processData('train')
try:
DataLoader = importlib.import_module('models.' + opt.netType + '-dataloader')
#print('DataLoader1 : ', DataLoader)
except ImportError:
DataLoader = importlib.import_module('datasets.dataloader')
#print('DataLoader2 : ', DataLoader)
# Data loading
print('=> Setting up data loader')
trainLoader, valLoader = DataLoader.create(opt)
#print('opt',opt)
# Load previous checkpoint, if it exists
print('=> Checking checkpoints')
checkpoint = checkpoints.load(opt)
# Create model
model, optimState = models.setup(opt, checkpoint)
model.cuda()
criterion = criterions.setup(opt, checkpoint, model)
# The trainer handles the training loop and evaluation on validation set
trainer = Trainer.createTrainer(model, criterion, opt, optimState)
if opt.testOnly:
loss = trainer.test(valLoader, 0)
sys.exit()
bestLoss = math.inf
startEpoch = max([1, opt.epochNum])
#print("opt.epochNum : ", opt.epochNum)
if checkpoint != None:
startEpoch = checkpoint['epoch'] + 1
bestLoss = checkpoint['loss']
print('Previous loss: \033[1;36m%1.4f\033[0m' % bestLoss)
# optimizer.step()
trainer.LRDecay(startEpoch)
# opt.nEpochs + 1
for epoch in range(startEpoch, opt.nEpochs + 1):
trainer.scheduler.step()
#trainLoss = trainer.train(trainLoader, epoch)
testLoss = trainer.test(valLoader, epoch)
break
# bestModel = False
# if testLoss < bestLoss:
# bestModel = True
# bestLoss = testLoss
# print(' * Best model: \033[1;36m%1.4f\033[0m * ' % testLoss)
#
# checkpoints.save(epoch, trainer.model, criterion, trainer.optimizer, bestModel, testLoss ,opt)
#
# print(' * Finished Err: \033[1;36m%1.4f\033[0m * ' % bestLoss)
if __name__ == '__main__':
main()
| [
"ksp2246@naver.com"
] | ksp2246@naver.com |
371c31e5fc3bcc1a0141df7f8659376578c4ebf1 | 786de89be635eb21295070a6a3452f3a7fe6712c | /numpy/tags/V00-01-05/SConscript | 6f45a7e075210962a1c9f52bd87171f239eeedf9 | [] | no_license | connectthefuture/psdmrepo | 85267cfe8d54564f99e17035efe931077c8f7a37 | f32870a987a7493e7bf0f0a5c1712a5a030ef199 | refs/heads/master | 2021-01-13T03:26:35.494026 | 2015-09-03T22:22:11 | 2015-09-03T22:22:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 947 | #--------------------------------------------------------------------------
# File and Version Information:
# $Id$
#
# Description:
# SConscript file for package numpy
#------------------------------------------------------------------------
# Do not delete following line, it must be present in
# SConscript file for any SIT project
Import('*')
from os.path import join as pjoin
from SConsTools.standardExternalPackage import standardExternalPackage
#
# For the standard external packages which contain includes, libraries,
# and applications it is usually sufficient to call standardExternalPackage()
# giving some or all parameters.
#
pkg = "numpy"
pkg_ver = "1.3.0"
PREFIX = pjoin('$SIT_EXTERNAL_SW', pkg, pkg_ver)
PYDIR = pjoin('$LIB_ABI', '$PYTHON', "site-packages", pkg)
PYDIRSEP = True
INCDIR = pjoin(PYDIR, "core", "include", pkg)
PKGINFO = (pkg, pkg_ver, '$PYTHON', '$SIT_ARCH.found')
standardExternalPackage(pkg, **locals())
| [
"salnikov@SLAC.STANFORD.EDU@b967ad99-d558-0410-b138-e0f6c56caec7"
] | salnikov@SLAC.STANFORD.EDU@b967ad99-d558-0410-b138-e0f6c56caec7 | |
feb42e71803ec62191f6593b4b79a9c6a75d36a1 | b53e3d57d31a47a98d87141e44a5f8940ee15bca | /test/programytest/parser/pattern/matching/test_set.py | 002785e6fa43cb2e1fb506957d39ed763fe19deb | [
"MIT"
] | permissive | Chrissimple/program-y | 52177fcc17e75fb97ab3993a4652bcbe7906bd58 | 80d80f0783120c2341e6fc57e7716bbbf28a8b3f | refs/heads/master | 2020-03-29T13:20:08.162177 | 2018-09-26T19:09:20 | 2018-09-26T19:09:20 | 149,952,995 | 1 | 0 | null | 2018-09-23T06:11:04 | 2018-09-23T06:11:04 | null | UTF-8 | Python | false | false | 4,013 | py |
from programytest.parser.pattern.matching.base import PatternMatcherBaseClass
from programy.mappings.sets import SetLoader
class PatternMatcherSetTests(PatternMatcherBaseClass):
def test_basic_set_match_as_text(self):
loader = SetLoader()
if self._bot.brain.sets.contains("SEX") is False:
self._bot.brain.sets.add_set("SEX", loader.load_from_text("""
Man
Woman
"""))
self.add_pattern_to_graph(pattern="I AM A <set>sex</set>", topic="X", that="Y", template="1")
context = self.match_sentence("I AM A MAN", topic="X", that="Y")
self.assertIsNotNone(context)
self.assertIsNotNone(context.template_node())
self.assertEqual("1", context.template_node().template.word)
self.assertEqual("Man", context.star(1))
context = self.match_sentence("I AM A WOMAN", topic="X", that="Y")
self.assertIsNotNone(context)
self.assertIsNotNone(context.template_node())
self.assertEqual("1", context.template_node().template.word)
self.assertEqual("Woman", context.star(1))
def test_basic_set_match_as_name(self):
loader = SetLoader()
if self._bot.brain.sets.contains("SEX") is False:
self._bot.brain.sets.add_set("SEX", loader.load_from_text("""
Man
Woman
"""))
self.add_pattern_to_graph(pattern='I AM A <set name="sex" />', topic="X", that="Y", template="1")
context = self.match_sentence("I AM A MAN", topic="X", that="Y")
self.assertIsNotNone(context)
self.assertIsNotNone(context.template_node())
self.assertEqual("1", context.template_node().template.word)
self.assertEqual("Man", context.star(1))
context = self.match_sentence("I AM A WOMAN", topic="X", that="Y")
self.assertIsNotNone(context)
self.assertIsNotNone(context.template_node())
self.assertEqual("1", context.template_node().template.word)
self.assertEqual("Woman", context.star(1))
def test_multi_word_set_match(self):
loader = SetLoader()
self._bot.brain.sets.add_set("COLOR", loader.load_from_text("""
RED
RED AMBER
RED BURNT OAK
RED ORANGE
"""))
self.add_pattern_to_graph(pattern="I LIKE <set>color</set> *", topic="*", that="*", template="1")
context = self.match_sentence("I LIKE RED PAINT", topic="*", that="*")
self.assertIsNotNone(context)
self.assertIsNotNone(context.template_node())
self.assertEqual("1", context.template_node().template.word)
self.assertEqual("RED", context.star(1))
self.assertEqual("PAINT", context.star(2))
context = self.match_sentence("I LIKE RED AMBER CARS", topic="*", that="*")
self.assertIsNotNone(context)
self.assertIsNotNone(context.template_node())
self.assertEqual("1", context.template_node().template.word)
self.assertEqual("RED AMBER", context.star(1))
self.assertEqual("CARS", context.star(2))
context = self.match_sentence("I LIKE RED BURNT OAK MOTOR BIKES", topic="*", that="*")
self.assertIsNotNone(context)
self.assertIsNotNone(context.template_node())
self.assertEqual("1", context.template_node().template.word)
self.assertEqual("RED BURNT OAK", context.star(1))
self.assertEqual("MOTOR BIKES", context.star(2))
def test_basic_set_number_match(self):
self._bot.brain.dynamics.add_dynamic_set('number', "programy.dynamic.sets.numeric.IsNumeric", None)
self.add_pattern_to_graph(pattern="I AM <set>number</set> YEARS OLD", topic="X", that="Y", template="1")
context = self.match_sentence("I AM 49 YEARS OLD", topic="X", that="Y")
self.assertIsNotNone(context)
self.assertIsNotNone(context.template_node())
self.assertEqual("1", context.template_node().template.word)
self.assertEqual("49", context.star(1))
| [
"keith@keithsterling.com"
] | keith@keithsterling.com |
3a72b003e889d606d91992540708d221eb06875b | ca47fe64364188b9317cf27c08c31c4af0ddf65f | /app/auth/forms.py | bb504b77ba7cf9b1984ba3ac07559dd5942d140d | [] | no_license | Kennedy128/kennedy-project3 | 79d1cfe2d90bec1c34cfc9135106dab2e1008e3b | a5523fcbf86acaae29759895002712cc046fafc4 | refs/heads/master | 2022-07-05T21:52:03.865036 | 2020-05-13T20:57:48 | 2020-05-13T20:57:48 | 263,722,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,245 | py |
from flask_wtf import FlaskForm
from wtforms import StringField,PasswordField,BooleanField,SubmitField,ValidationError
from wtforms.validators import Required,Email,EqualTo
from ..models import User
class RegistrationForm(FlaskForm):
email = StringField('Your Email Address',validators=[Required(),Email()])
username = StringField('Enter your username',validators = [Required()])
password = PasswordField('Password',validators = [Required(), EqualTo('password_confirm',message = 'Passwords must match')])
password_confirm = PasswordField('Confirm Passwords',validators = [Required()])
submit = SubmitField('Sign Up')
def validate_email(self, data_field):
if User.query.filter_by(email=data_field.data).first():
raise ValidationError('There is an account with that email')
def validate_username(self, data_field):
if User.query.filter_by(username=data_field.data).first():
raise ValidationError('That username is taken')
class LoginForm(FlaskForm):
email = StringField('Your Email Address',validators=[Required(),Email()])
password = PasswordField('Password',validators =[Required()])
remember = BooleanField('Remember me')
submit = SubmitField('Sign In') | [
"santa@northpole.com"
] | santa@northpole.com |
c7e52de8525de6e0c856f43ecb896309eb09a319 | 0db4f248a6f8c036ff47a90e5f85e1e0b8e9b8a4 | /TEMP/201610_MKT/audit_content/image_label_gcloud_vision_api/mapping_product.py | 3256e109655aaed47679b1d21b20f07c5acfca79 | [] | no_license | vunguyen1408/no-more-weekend | 9c6afec60d92c7788d0c8b03472b200840d89d11 | 2b5025798e232ffc23cb1b498d2adb89078f602a | refs/heads/master | 2021-08-08T11:54:38.244834 | 2017-11-10T07:22:00 | 2017-11-10T07:22:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,277 | py | """
Project : Online marketing tool - Audit content - Audit video
Company : VNG Corporation
Description: Mapping product id to Ads
Examples of Usage:
python mapping_product.py 2016-10-01 2017-06-29
"""
import os, os.path
#from os.path import splitext, basename, join
import io
import json
import time
import csv
from datetime import datetime , timedelta, date
def get_date(date_):
date_ = date_[:10]
date = date_[6] + date_[7] + date_[8] + date_[9] + '-' + date_[0] + date_[1] + '-' + date_[3] + date_[4]
return date
def parse_csv_to_json_file_EMC(path_file):
list_json = []
list_ = []
list_unique = []
with open(path_file, 'r') as f:
reader=csv.reader(f)
for row in reader:
list_.append(row)
list_ = list(list_[1:])
for row in list_:
if row[5] not in list_unique:
list_unique.append(row[5])
flag = False
for json_ in list_json:
if row[0] == json_['event_id']:
flag = True
list_campaign = list(json_['list_campaign'])
campaign = row[6]
list_campaign. append(campaign)
json_['list_campaign'] = list(list_campaign)
break
# Chua ton tai trong list_json
if flag == False:
list_campaign = []
list_campaign.append(row[6])
content = {
'event_id' : row[0],
'start_date' : get_date(row[3]),
'end_date' : get_date(row[4]),
'product' : row[5],
'list_campaign' : list(list_campaign)
}
list_json.append(content)
return list_json
def parse_json_insight(path_insight, folder):
# Lay tat ca noi dung cua cac file insight trong mot ngay, chuyen thanh 1 list Json
folder_insight = os.path.join(path_insight, folder)
list_folder_a_insight = next(os.walk(folder_insight))[1]
data_insight = []
if len(list_folder_a_insight) > 0:
data_insight = '{ "my_json" :['
for i in list_folder_a_insight:
# Delete infor time
temp = i[4:]
insight = temp + "_insight.txt"
folder_file_insight = os.path.join(folder_insight, i)
file_insight = os.path.join(folder_file_insight, insight)
if os.path.exists(file_insight):
with open(file_insight) as f:
content = f.readlines()
for row in content:
data_insight = data_insight + row + ','
data_insight = data_insight[:len(data_insight) - 1] + ']}'
data_insight = json.loads(data_insight)
else:
data_insight = '{ "my_json" :[]}'
return data_insight
def add_content(list_json, path_audit_content, path_insight):
    """Tag audit-content ad records with the product of every event whose
    campaign list matches the ad's campaign and whose date range covers the
    audit day; files are rewritten in place.

    NOTE(review): assumes every ad record already has a 'list_product' key
    (see add_list, which backfills it) -- confirm the pre-pass always runs.
    """
    print("\n================ Maping event and campaign ====================\n")
    day_folders = next(os.walk(path_audit_content))[1]
    for event in list_json:
        start_date = datetime.strptime(event['start_date'], '%Y-%m-%d').date()
        end_date = datetime.strptime(event['end_date'], '%Y-%m-%d').date()
        print(start_date)
        print(end_date)
        print(event['product'])
        for folder in day_folders:
            day = datetime.strptime(folder, '%Y-%m-%d').date()
            print(folder)
            # Only days inside the event's [start, end] window are touched.
            if start_date <= day <= end_date:
                audit_dir = os.path.join(path_audit_content, folder)
                audit_name = "ads_creatives_audit_content_" + folder + ".json"
                audit_path = os.path.join(audit_dir, audit_name)
                if os.path.exists(audit_path):
                    with open(audit_path, 'r') as f_json:
                        data_json = json.load(f_json)
                    # All insight records for this day, keyed access below.
                    data_insight = parse_json_insight(path_insight, folder)
                    for ad in data_json['my_json']:
                        for insight in data_insight['my_json']:
                            if str(ad['ad_id']) == str(insight['ad_id']):
                                for campaign in event['list_campaign']:
                                    if campaign == insight['campaign_id']:
                                        products = list(ad['list_product'])
                                        if event['product'] not in products:
                                            products.append(str(event['product']))
                                        ad['list_product'] = list(products)
                    with open(audit_path, 'w') as f_out:
                        json.dump(data_json, f_out)
    print("==================================================================")
def group_by_product(path_audit_content):
    """Print and return the unique product ids found across every daily
    audit-content file under `path_audit_content`.

    Changes vs. the original: records without a 'list_product' key are
    skipped via dict.get instead of a try/except KeyError that printed "-",
    and the collected list is now returned (backward compatible: previous
    callers ignored the implicit None return).
    """
    day_folders = next(os.walk(path_audit_content))[1]
    unique_products = []
    for folder in day_folders:
        audit_path = os.path.join(path_audit_content, folder,
                                  "ads_creatives_audit_content_" + folder + ".json")
        if os.path.exists(audit_path):
            with open(audit_path, 'r') as f_json:
                data_json = json.load(f_json)
            print(folder)
            for ad in data_json['my_json']:
                for product in ad.get('list_product', []):
                    if product not in unique_products:
                        unique_products.append(product)
    print(unique_products)
    print("==========================================")
    for product in unique_products:
        print(product)
    return unique_products
def compare(path_audit_content, path_insight,
            path_out='C:/Users/CPU10145-local/Desktop/audit_contet_insight.csv'):
    """Report, per day, how many audit-content ads have a matching insight
    record, writing a CSV summary to `path_out`.

    `path_out` was previously a hard-coded Windows desktop path; it is now a
    parameter whose default preserves the old behavior.
    """
    day_folders = next(os.walk(path_audit_content))[1]
    stats = []  # rows: [date, total records, matched, missing]
    for folder in day_folders:
        matched = 0
        audit_path = os.path.join(path_audit_content, folder,
                                  "ads_creatives_audit_content_" + folder + ".json")
        if os.path.exists(audit_path):
            with open(audit_path, 'r') as f_json:
                data_json = json.load(f_json)
            data_insight = parse_json_insight(path_insight, folder)
            for ad in data_json['my_json']:
                for insight in data_insight['my_json']:
                    if str(ad['ad_id']) == str(insight['ad_id']):
                        matched += 1
                        break
            stats.append([folder, len(data_json['my_json']), matched,
                          len(data_json['my_json']) - matched])
    with open(path_out, 'w+', newline="") as f:
        wr = csv.writer(f, quoting=csv.QUOTE_ALL)
        wr.writerow(['date', 'number json', 'number json finded', 'miss'])
        wr.writerows(stats)
def add_list(path_audit_content, date_, to_date_):
    """Backfill an empty 'list_product' list into every audit-content record
    whose day folder lies in the inclusive range [date_, to_date_]
    (both 'YYYY-MM-DD' strings).

    Changes vs. the original: the bare `except:` that could hide JSON/IO
    errors is gone; non-date folder names (the old `folder[:4].isdigit()`
    guard let names like '2017_backup' crash strptime) are now skipped by
    catching ValueError from the date parse only.
    """
    print("\n================ Add list_product ====================\n")
    day_folders = next(os.walk(path_audit_content))[1]
    start = datetime.strptime(date_, '%Y-%m-%d').date()
    end = datetime.strptime(to_date_, '%Y-%m-%d').date()
    for folder in day_folders:
        print(folder)
        try:
            day = datetime.strptime(folder, '%Y-%m-%d').date()
        except ValueError:
            # Folder name is not a date (e.g. EXPORT_DATA) -- skip it.
            print("Date error: %s" % folder)
            continue
        if start <= day <= end:
            audit_path = os.path.join(path_audit_content, folder,
                                      "ads_creatives_audit_content_" + folder + ".json")
            if os.path.exists(audit_path):
                with open(audit_path, 'r') as f_json:
                    data_json = json.load(f_json)
                for ad in data_json['my_json']:
                    if 'list_product' not in ad:
                        ad['list_product'] = []
                with open(audit_path, 'w') as f_out:
                    json.dump(data_json, f_out)
def FindNewFileEventMapCamp(path_file_event_map_campaign):
    """Return the path of the newest EVENT_MAP_CAMPAIGN_YYYY_MM_DD.csv file
    in the given directory.

    The baseline date is 2017-01-01: if no newer file exists, the returned
    path points at EVENT_MAP_CAMPAIGN_2017_01_01.csv (which may not exist).
    """
    file_names = next(os.walk(path_file_event_map_campaign))[2]
    latest = datetime.strptime('2017-01-01', '%Y-%m-%d').date()
    for file_name in file_names:
        if file_name.find('EVENT_MAP_CAMPAIGN_20') >= 0:
            # The last 14 characters are 'YYYY_MM_DD.csv'; keep the stamp.
            stamp = file_name[-14:][:10]
            iso = stamp[:4] + '-' + stamp[5:7] + '-' + stamp[8:10]
            candidate = datetime.strptime(iso, '%Y-%m-%d').date()
            if candidate > latest:
                latest = candidate
    stamp = str(latest).replace('-', '_')
    return path_file_event_map_campaign + '/EVENT_MAP_CAMPAIGN_' + stamp + '.csv'
# path_audit_content = 'C:/Users/CPU10145-local/Desktop/Python Envirement/DATA NEW/DATA/DWHVNG/APEX/MARKETING_TOOL_02_JSON'
# path_insight = 'D:/DATA_CHECK/MARKETING_TOOL_02'
# path_file_event_map_campaign = 'D:/DATA_CHECK/EVENT_MAP_CAMPAIGN.txt'
# path_audit_content = 'E:/VNG/DATA/DATA/DWHVNG/APEX/MARKETING_TOOL_02_JSON'
# path_insight = 'E:/VNG/DATA/DATA/DWHVNG/APEX/MARKETING_TOOL_02'
# path_file_event_map_campaign = 'E:/VNG/DATA/DATA/DWHVNG/APEX/MARKETING_TOOL_02/EXPORT_DATA/EVENT_MAP_CAMPAIGN.txt'
if __name__ == '__main__':
    from sys import argv

    # Server-side data locations.
    path_audit_content = '/u01/oracle/oradata/APEX/MARKETING_TOOL_02_JSON'
    path_insight = '/u01/oracle/oradata/APEX/MARKETING_TOOL_02'
    path_event_map_campaign = '/u01/oracle/oradata/APEX/MARKETING_TOOL_02/EXPORT_DATA'

    print("\n================ Maping event and campaign ====================\n")
    print("\n================ ========================= ====================\n")

    # Usage: python mapping_product.py <start_date> <end_date>
    script, start_date, end_date = argv
    add_list(path_audit_content, start_date, end_date)
    path_file_event_map_campaign = FindNewFileEventMapCamp(path_event_map_campaign)
    print(path_file_event_map_campaign)
    list_json = parse_csv_to_json_file_EMC(path_file_event_map_campaign)
    add_content(list_json, path_audit_content, path_insight)
| [
"ltduong1994@gmail.com"
] | ltduong1994@gmail.com |
7f21588272c702b2fbfae216a5ae2764f36efb80 | 5891051796778cfb44a255248ce38789bfef9e70 | /P_base/faith_class/loading.py | 22388fec3e0227782de7fce6f4d8ec6eb0d8f417 | [] | no_license | Faithlmy/Python_base | cc546a5d86b123e102a69df1227cde9b6e567493 | 5a43557e6375dc9dbe5f6701d7c10e549873a5ab | refs/heads/master | 2021-01-01T17:07:04.097978 | 2018-03-31T16:44:01 | 2018-03-31T16:44:01 | 98,000,621 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,096 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
python的重载
"""
class human:
    """A simple person with age, sex, height and weight, plus a settable name.

    Bug fix: the original __init__ stored the values on `self.age`,
    `self.sex`, `self.heigth`, `self.weigth`, while show() reads the
    name-mangled private attributes (`self.__age`, ...), so show() always
    printed the untouched class defaults (0, ''). __init__ now assigns to
    the same private attributes that show() reads.
    """
    __age = 0
    __sex = ''
    __heigth = 0
    __weigth = 0
    name = ''

    def __init__(self, age, sex, height, weight):
        self.__age = age
        self.__sex = sex
        self.__heigth = height
        self.__weigth = weight

    def setname(self, name):
        """Set the display name printed first by show()."""
        self.name = name

    def show(self):
        """Print name, age, sex, height and weight, one per line."""
        print(self.name)
        print(self.__age)
        print(self.__sex)
        print(self.__heigth)
        print(self.__weigth)
class student(human):
    """A human enrolled in school, adding class, grade and student number."""
    __classes = 0
    __grade = 0
    __num = 0

    def __init__(self, classes, grade, num, age, sex, height, weight):
        # Override (overload-style) of human.__init__ with extra school
        # fields; the shared person fields are delegated to the base class.
        self.__classes = classes
        self.__grade = grade
        self.__num = num
        human.__init__(self, age, sex, height, weight)

    def show(self):
        """Print the base-class fields, then class, grade and number."""
        human.show(self)
        print(self.__classes)
        print(self.__grade)
        print(self.__num)
if __name__ == '__main__':
    # Small demo: build a student, name it, and dump all fields.
    pupil = student(12, 3, 20170305, 18, 'male', 175, 65)
    pupil.setname('faith')
    pupil.show()
"lmengyy@126.com"
] | lmengyy@126.com |
9873106f27cd3a0141597de96df0b53e68ca1d87 | e65a428ca7ee11d2f62d702842d4afbd493f08a4 | /Data Types and Variables/elevator.py | 8863fad20ce9dfa6fc628d6a3a681caa5f3725d6 | [] | no_license | NikiDimov/SoftUni-Python-Fundamentals | d8ba24a06c4366e76bdc69f1c5225dca29fe955e | 5bb1bf5928e40f2bac867d33566c8b9dac13f566 | refs/heads/main | 2023-07-15T05:57:57.085880 | 2021-08-19T10:27:45 | 2021-08-19T10:27:45 | 323,631,864 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 326 | py | number_of_people = int(input())
capacity_of_elevator = int(input())

# One course carries at most `capacity_of_elevator` people. Ceiling
# division -(-a // b) == ceil(a / b) for positive ints, and the elevator
# always makes at least one course (matching the original else-branch).
total_courses = max(1, -(-number_of_people // capacity_of_elevator))
print(total_courses)
| [
"niki.dimov86@gmail.com"
] | niki.dimov86@gmail.com |
ad19d9d129e4da03fb6bf7e6eab0b113ab9f3769 | b84f0d7cf248452d7becfdfb672bc91dba4ea46c | /benchmark.py | f50f9d2cbc7fc03c03e6b82c560e6569d2992d0c | [
"MIT"
] | permissive | saurabhkulkarni77/bert-as-service | 35a467c1140333ef1319c8b40987f70dcd86d492 | d5d5670b7aa79746163ff8061fe76398e7146d5b | refs/heads/master | 2020-04-17T17:04:19.314059 | 2019-01-19T06:06:29 | 2019-01-19T06:06:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,470 | py | import random
import string
import sys
import threading
import time
from collections import namedtuple
from bert_serving.client import BertClient
from bert_serving.server import BertServer, get_args_parser
from numpy import mean
# Ports the benchmark BertServer/BertClient pairs communicate over.
PORT = 7779
PORT_OUT = 7780
# Local path of the pretrained BERT checkpoint being served.
MODEL_DIR = '/data/cips/save/chinese_L-12_H-768_A-12'
# Baseline server arguments; each experiment below overrides one field at a time.
common = vars(get_args_parser().parse_args(['-model_dir', MODEL_DIR, '-port', str(PORT), '-port_out', str(PORT_OUT)]))
common['num_worker'] = 2  # set num workers
common['num_repeat'] = 5  # set num repeats per experiment
common['num_client'] = 1  # set number of concurrent clients, will be override later
# `args` is a namedtuple *class* used as a mutable attribute bag via setattr;
# it is re-exported under its own generated name for module-level access.
args = namedtuple('args_nt', ','.join(common.keys()))
globals()[args.__name__] = args
def tprint(msg):
    """Thread-friendly print: emit the message plus newline in a single
    write and flush immediately, so concurrent threads don't interleave
    partial lines."""
    out = sys.stdout
    out.write(msg + '\n')
    out.flush()
class BenchmarkClient(threading.Thread):
    """One client thread that repeatedly encodes a fixed random batch and
    records the mean request latency in `avg_time`."""

    def __init__(self):
        super().__init__()
        # Random fixed-length strings; content is irrelevant for timing.
        self.batch = [''.join(random.choices(string.ascii_uppercase + string.digits,
                                             k=args.max_seq_len))
                      for _ in range(args.client_batch_size)]
        self.num_repeat = args.num_repeat
        self.avg_time = 0

    def run(self):
        timings = []
        bc = BertClient(port=PORT, port_out=PORT_OUT, show_server_config=False,
                        check_version=False, check_length=False)
        for _ in range(self.num_repeat):
            started = time.perf_counter()
            bc.encode(self.batch)
            timings.append(time.perf_counter() - started)
        print(timings)
        self.avg_time = mean(timings)
if __name__ == '__main__':
    # Each entry varies ONE dimension while the others stay at `common`.
    experiments = {
        'client_batch_size': [1, 4, 8, 16, 64, 256, 512, 1024, 2048, 4096],
        'max_batch_size': [32, 64, 128, 256, 512],
        'max_seq_len': [20, 40, 80, 160, 320],
        'num_client': [2, 4, 8, 16, 32],
        'pooling_layer': [[-j] for j in range(1, 13)]
    }
    with open('benchmark-%d.result' % common['num_worker'], 'w') as fp:
        for var_name, var_lst in experiments.items():
            # Reset every shared argument before this experiment series.
            for key, value in common.items():
                setattr(args, key, value)
            avg_speed = []
            for var in var_lst:
                # Override the single variable under test.
                setattr(args, var_name, var)
                server = BertServer(args)
                server.start()
                # Give the server time to load the model and come up.
                time.sleep(15)
                all_clients = [BenchmarkClient() for _ in range(args.num_client)]
                tprint('num_client: %d' % len(all_clients))
                for bc in all_clients:
                    bc.start()
                all_thread_speed = []
                for bc in all_clients:
                    bc.join()
                    all_thread_speed.append(args.client_batch_size / bc.avg_time)
                max_speed = int(max(all_thread_speed))
                min_speed = int(min(all_thread_speed))
                t_avg_speed = int(mean(all_thread_speed))
                tprint('%s: %s\t%.3f\t%d/s' % (var_name, var, bc.avg_time, t_avg_speed))
                tprint('max speed: %d\t min speed: %d' % (max_speed, min_speed))
                avg_speed.append(t_avg_speed)
                server.close()
            # Emit a markdown table for this experiment dimension.
            fp.write('#### Speed wrt. `%s`\n\n' % var_name)
            fp.write('|`%s`|seqs/s|\n' % var_name)
            fp.write('|---|---|\n')
            for i, j in zip(var_lst, avg_speed):
                fp.write('|%s|%d|\n' % (i, j))
            fp.flush()
| [
"hanhxiao@tencent.com"
] | hanhxiao@tencent.com |
59fd38d364beae081dd066ea60debcfc76d65e69 | 1fb2da0e6f73652f0b0126c82a84562f6a8d3535 | /935. Knight Dialer.py | 0c6faf5974ebdf4a997d8c9f4c72c190f73e9101 | [] | no_license | JaylenZhang19/Leetcode | be3456fcb45270c8aad797f965f4c7a1781c0e61 | 178546686aa3ae8f5da1ae845417f86fab9a644d | refs/heads/master | 2023-02-27T06:08:58.818435 | 2021-01-31T20:28:10 | 2021-01-31T20:28:10 | 287,661,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 700 | py | class Solution:
def knightDialer(self, n: int) -> int:
moves = [
[4, 6],
[6, 8],
[7, 9],
[4, 8],
[3, 9, 0],
[],
[0, 1, 7],
[2, 6],
[1, 3],
[2, 4],
]
dp = [1] * 10
for _ in range(n-1):
current_dp = [0] * 10
for node, count in enumerate(dp):
for nei in moves[node]:
current_dp[nei] = (current_dp[nei] + count) % (10 ** 9 + 7)
dp = current_dp
return sum(dp) % (10 ** 9 + 7)
| [
"noreply@github.com"
] | JaylenZhang19.noreply@github.com |
4727e9204aeccf1fa3855c1e6fdd478abea9f146 | 0734fe314483192e630272bb212aa7817d627628 | /parsl/tests/test_aalst_patterns/test_python_AND_SPLIT_P2.py | e9d383b559002bd1e8e680c61361fecb9d98ea66 | [
"Apache-2.0"
] | permissive | djf604/parsl | 9798f1043a2196d3b538c8683de6d34d57d8f279 | 118af3a52be1811a3355c79a7adadda5ea66afde | refs/heads/master | 2020-12-02T16:27:10.252111 | 2017-06-29T01:47:09 | 2017-06-29T01:47:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,003 | py | ''' Testing bash apps
'''
import parsl
from parsl import *
import os
import time
import shutil
import argparse
#parsl.set_stream_logger()
# Local thread-pool execution provider for the test apps (parsl's executor,
# imported via `from parsl import *`).
workers = ThreadPoolExecutor(max_workers=4)
# Data-flow kernel that schedules the @App-decorated functions below.
dfk = DataFlowKernel(workers)
@App('python', dfk)
def increment(x):
    """Parsl python app: return x + 1."""
    return x + 1
@App('python', dfk)
def slow_increment(x, dur):
    """Parsl python app: sleep `dur` seconds, then return x + 1."""
    import time
    time.sleep(dur)
    return x + 1
def test_and_split(depth=5):
    """AND-split pattern: launch `depth` independent increment apps in
    parallel and print the list of their results."""
    futs = {i: increment(i) for i in range(depth)}
    print([fut.result() for fut in futs.values()])
if __name__ == '__main__' :
parser = argparse.ArgumentParser()
parser.add_argument("-w", "--width", default="10", help="width of the pipeline")
parser.add_argument("-d", "--debug", action='store_true', help="Count of apps to launch")
args = parser.parse_args()
if args.debug:
pass
parsl.set_stream_logger()
#test_increment(depth=int(args.width))
test_and_split(depth=int(args.width))
| [
"yadudoc1729@gmail.com"
] | yadudoc1729@gmail.com |
0269fdda7a60854c67452fee04e6583d65eb2c04 | 6ff8b7b7ed534d36da6456feeda6ded80464a7de | /chains/tasks.py | d982b814bba7ac7e5b34a9a7950e17f285d6187f | [
"Apache-2.0"
] | permissive | denismakogon/aiorchestra-chain-plugin | c14c4fc1e8417edfbd0a60cc5e28542006928040 | 8607f2a547234952eeb4008aba48eb168b20d217 | refs/heads/master | 2021-01-17T08:46:54.599774 | 2016-07-05T11:02:45 | 2016-07-05T11:02:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,040 | py | # Author: Denys Makogon
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import yaml
from aiorchestra.core import context
from aiorchestra.core import utils
# TOSCA artifact type names looked up on chain nodes via
# node.get_artifact_from_type():
DT = 'tosca.artifacts.chain.deployment_template'
DTI = 'tosca.artifacts.chain.deployment_inputs'
PC = 'tosca.artifacts.chain.persisted_context'
@utils.operation
async def create(node, inputs):
    """Assemble the deployment context for a chain-function node and store
    it under the node's 'deployment_context' runtime property.

    Raises if the deployment-template artifact is missing (or if a
    persisted context is supplied without a template).

    Bug fix: the original only *warned* when the deployment-inputs artifact
    was absent and then unconditionally called `.pop()` on it, crashing.
    The inputs file is now optional. Also removes the shadowing of the
    `inputs` parameter and of `dti` by the open file handle.
    """
    node.context.logger.info('[{0}] - Building chain function '
                             'deployment context.'.format(node.name))
    template = node.get_artifact_from_type(DT)
    persisted_context = node.get_artifact_from_type(PC)
    if persisted_context and not template:
        raise Exception('[{0}] - Persisted context requires '
                        'template.'.format(node.name))
    if not template:
        raise Exception('[{0}] - Deployment template artifact '
                        'required.'.format(node.name))
    input_artifacts = node.get_artifact_from_type(DTI)
    deployment_inputs_file = None
    if not input_artifacts:
        node.context.logger.warn('[{0}] - Inputs artifact was '
                                 'not specified.'.format(node.name))
    else:
        deployment_inputs_file = input_artifacts.pop().get('file')
    deployment_template_file = template.pop().get('file')
    dti = {}
    if deployment_inputs_file:
        with open(deployment_inputs_file, 'r') as inputs_stream:
            # NOTE(review): yaml.load without an explicit Loader is unsafe
            # on untrusted input -- confirm inputs files are trusted.
            dti = yaml.load(inputs_stream)
    deployment_context = context.OrchestraContext(
        node.name, path=deployment_template_file,
        template_inputs=dti, logger=node.context.logger,
        enable_rollback=node.context.rollback_enabled,
        event_loop=node.context.event_loop,
    )
    node.update_runtime_properties('deployment_context',
                                   deployment_context)
    node.context.logger.info('[{0}] - Deployment context assembled.'
                             .format(node.name))
@utils.operation
async def start(node, inputs):
    """Deploy the chain function and persist its context, outputs and
    serialized form as runtime properties."""
    node.context.logger.info('[{0}] - Starting chain function '
                             'deployment.'.format(node.name))
    deployment_context = node.runtime_properties.get(
        'deployment_context')
    await deployment_context.deploy()
    node.batch_update_runtime_properties(**{
        'deployment_context': deployment_context,
        'deployment_context_outputs': deployment_context.outputs,
        'persisted_context': deployment_context.serialize(),
    })
    node.context.logger.info('[{0}] - Deployment finished with '
                             'status "{1}".'
                             .format(node.name,
                                     deployment_context.status.upper()))
@utils.operation
async def stop(node, inputs):
    """Undeploy the chain function held in the node's deployment context."""
    node.context.logger.info('[{0}] - Stopping chain function '
                             'deployment.'.format(node.name))
    deployment_context = node.runtime_properties.get(
        'deployment_context')
    await deployment_context.undeploy()
    node.context.logger.info('[{0}] - Deployment finished with '
                             'status "{1}".'
                             .format(node.name,
                                     deployment_context.status.upper()))
@utils.operation
async def delete(node, inputs):
    """Drop the assembled deployment context from the node's runtime
    properties (no-op if it was never stored)."""
    node.context.logger.info('[{0}] - Deleting chain function '
                             'deployment context.'.format(node.name))
    node.runtime_properties.pop('deployment_context', None)
"lildee1991@gmail.com"
] | lildee1991@gmail.com |
4e618d4077b4c800d20aaf59ebaad94e9cddd0cb | 9defbebd427f77ac549548ea83280f253e335ea3 | /ltp/transformer_rel_linear.py | 820743317af12009087f3fe14b0d74f356c67ef9 | [] | no_license | okokyou/ltp | d992378ccff0c955852f9f2f948541ce63808a11 | f3d4a25ee2fbb71613f76c99a47e70a5445b8c03 | refs/heads/master | 2023-07-31T15:44:36.220184 | 2021-09-10T01:50:48 | 2021-09-10T01:50:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,774 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*_
# Author: Yunlong Feng <ylfeng@ir.hit.edu.cn>
from argparse import ArgumentParser
from typing import Optional
import torch
from torch import nn
from torch.nn import functional as F
from transformers import AutoModel
from ltp.nn import BaseModule, RelativeTransformer, CRF
from ltp.transformer_linear import TokenClassifierResult
class RelativeTransformerLinearClassifier(nn.Module):
    """Token-classification head: optional relative-transformer encoder over
    word-level features, a linear projection to label logits, and an
    optional CRF layer for loss/decoding.
    """
    crf: Optional[CRF]

    def __init__(self, input_size, hidden_size, num_layers, num_heads, num_labels, max_length, dropout,
                 disable_relative_transformer=False, use_cls=False, use_sep=False, use_crf=False,
                 crf_reduction='sum'):
        super().__init__()
        self.use_cls = use_cls
        self.use_sep = use_sep
        if disable_relative_transformer:
            self.relative_transformer = None
        else:
            self.relative_transformer = RelativeTransformer(
                input_size=input_size,
                hidden_size=hidden_size,
                num_layers=num_layers,
                num_heads=num_heads,
                dropout=dropout,
                max_length=max_length * 2
            )
        self.classifier = nn.Linear(input_size, num_labels)
        if use_crf:
            self.crf = CRF(num_labels, batch_first=True)
            self.crf_reduction = crf_reduction
        else:
            self.crf = None

    def forward(self, input, attention_mask=None, word_index=None, word_attention_mask=None, labels=None,
                is_processed=False):
        """Classify each word position; returns TokenClassifierResult.

        When `is_processed` is False the [CLS]/[SEP] positions are trimmed
        from `input` unless use_cls/use_sep keep them.
        """
        if not is_processed:
            if not self.use_cls:
                input = input[:, 1:, :]
            # BUG FIX: the original tested `use_cls` again here; trimming
            # the trailing [SEP] position must key off `use_sep`, matching
            # the `bias` computation and the decode-time trimming below.
            if not self.use_sep:
                input = input[:, :-1, :]
        if word_attention_mask is None:
            assert word_index is None
            bias = int(not self.use_cls) + int(not self.use_sep)
            word_attention_mask = attention_mask[:, bias:] == 1
        if word_index is not None:
            # Gather the representation of each word's anchor sub-token.
            input = torch.gather(input, dim=1, index=word_index.unsqueeze(-1).expand(-1, -1, input.size(-1)))
        if self.relative_transformer is not None:
            sequence_output = self.relative_transformer(input, word_attention_mask)
        else:
            sequence_output = input
        logits = self.classifier(sequence_output)
        loss = None
        if labels is not None:
            loss_fct = nn.CrossEntropyLoss()
            # Only keep active parts of the loss
            if word_attention_mask is not None and self.crf is not None:
                logits = F.log_softmax(logits, dim=-1)
                loss = - self.crf.forward(logits, labels, word_attention_mask, reduction=self.crf_reduction)
            elif word_attention_mask is not None:
                active_loss = word_attention_mask.view(-1)
                active_logits = logits.view(-1, self.classifier.out_features)[active_loss]
                active_labels = labels.view(-1)[active_loss]
                loss = loss_fct(active_logits, active_labels)
            else:
                loss = loss_fct(logits.view(-1, self.classifier.out_features), labels.view(-1))
        decoded = None
        if not self.training and self.crf is not None:
            decoded = self.crf.decode(emissions=logits, mask=word_attention_mask)
            # When CLS/SEP were kept in the sequence, strip them from the
            # decoded paths and the labels before returning.
            if self.use_cls:
                decoded = [sent[1:] for sent in decoded]
                labels = labels[:, 1:]
            if self.use_sep:
                decoded = [sent[:-1] for sent in decoded]
                labels = labels[:, :-1]
        return TokenClassifierResult(loss=loss, logits=logits, decoded=decoded, labels=labels)
class TransformerRelLinear(BaseModule):
    """Token classification model: a pretrained transformer encoder followed
    by a RelativeTransformerLinearClassifier head."""

    def __init__(self, hparams, config=None):
        super().__init__()
        self.save_hyperparameters(hparams)
        # Load pretrained weights unless an explicit config is supplied,
        # in which case the encoder is built (untrained) from that config.
        if config is None:
            self.transformer = AutoModel.from_pretrained(self.hparams.transformer)
        else:
            self.transformer = AutoModel.from_config(config)
        self.dropout = nn.Dropout(self.hparams.dropout)
        encoder_width = self.transformer.config.hidden_size
        encoder_max_len = self.transformer.config.max_position_embeddings
        self.classifier = RelativeTransformerLinearClassifier(
            input_size=encoder_width,
            hidden_size=self.hparams.hidden_size,
            num_layers=self.hparams.num_layers,
            num_heads=self.hparams.num_heads,
            dropout=self.hparams.dropout,
            max_length=encoder_max_len,
            num_labels=self.hparams.num_labels,
            use_crf=self.hparams.use_crf,
            use_cls=self.hparams.use_cls,
            use_sep=self.hparams.use_sep,
            crf_reduction=self.hparams.crf_reduction,
            disable_relative_transformer=self.hparams.disable_relative_transformer
        )

    @staticmethod
    def add_model_specific_args(parent_parser):
        """Append this model's CLI options to `parent_parser`."""
        parser = ArgumentParser(parents=[parent_parser], add_help=False, conflict_handler='resolve')
        parser.add_argument('--transformer', type=str, default="hfl/chinese-electra-base-discriminator")
        parser.add_argument('--num_layers', type=int, default=2)
        parser.add_argument('--hidden_size', type=int, default=256)
        parser.add_argument('--num_heads', type=int, default=4)
        parser.add_argument('--dropout', type=float, default=0.1)
        parser.add_argument('--use_crf', action='store_true')
        parser.add_argument('--use_cls', action='store_true')
        parser.add_argument('--use_sep', action='store_true')
        parser.add_argument('--disable_relative_transformer', action='store_true')
        parser.add_argument('--crf_reduction', type=str, default='sum')
        parser.add_argument('--num_labels', type=int)
        return parser

    def forward(
            self,
            input_ids=None,
            attention_mask=None,
            word_index=None,
            word_attention_mask=None,
            token_type_ids=None,
            position_ids=None,
            head_mask=None,
            inputs_embeds=None,
            labels=None
    ) -> TokenClassifierResult:
        """Encode the token sequence and classify each word position."""
        encoder_out = self.transformer(
            input_ids,
            attention_mask,
            token_type_ids,
            position_ids,
            head_mask,
            inputs_embeds,
            output_attentions=False,
            output_hidden_states=False,
            return_dict=False,
        )
        # encoder_out[0] is the last-layer hidden-state tensor.
        hidden = self.dropout(encoder_out[0])
        return self.classifier(
            hidden,
            word_index=word_index,
            attention_mask=attention_mask,
            word_attention_mask=word_attention_mask,
            labels=labels
        )
| [
"ylfeng@ir.hit.edu.cn"
] | ylfeng@ir.hit.edu.cn |
f9fe0aeefed117def69a33a5d9e979c5599efd77 | 64bf39b96a014b5d3f69b3311430185c64a7ff0e | /intro-ansible/venv3/lib/python3.8/site-packages/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_broadcast_domain.py | 86a0b8d2cf0afa5b4e4f34e76130943090797174 | [
"MIT"
] | permissive | SimonFangCisco/dne-dna-code | 7072eba7da0389e37507b7a2aa5f7d0c0735a220 | 2ea7d4f00212f502bc684ac257371ada73da1ca9 | refs/heads/master | 2023-03-10T23:10:31.392558 | 2021-02-25T15:04:36 | 2021-02-25T15:04:36 | 342,274,373 | 0 | 0 | MIT | 2021-02-25T14:39:22 | 2021-02-25T14:39:22 | null | UTF-8 | Python | false | false | 13,384 | py | # (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
''' unit test template for ONTAP Ansible module '''
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import pytest
from ansible_collections.netapp.ontap.tests.unit.compat import unittest
from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
from ansible.module_utils import basic
from ansible.module_utils._text import to_bytes
import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
from ansible_collections.netapp.ontap.plugins.modules.na_ontap_broadcast_domain \
import NetAppOntapBroadcastDomain as broadcast_domain_module # module under test
if not netapp_utils.has_netapp_lib():
pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
def set_module_args(args):
    """Prepare module arguments so they are picked up during module creation."""
    serialized = json.dumps({'ANSIBLE_MODULE_ARGS': args})
    basic._ANSIBLE_ARGS = to_bytes(serialized)  # pylint: disable=protected-access
class AnsibleExitJson(Exception):
    """Raised by the patched module.exit_json and caught by test cases."""
class AnsibleFailJson(Exception):
    """Raised by the patched module.fail_json and caught by test cases."""
def exit_json(*args, **kwargs):  # pylint: disable=unused-argument
    """Patch for module.exit_json: package the return data into an exception."""
    kwargs.setdefault('changed', False)
    raise AnsibleExitJson(kwargs)
def fail_json(*args, **kwargs):  # pylint: disable=unused-argument
    """Patch for module.fail_json: package the return data into an exception."""
    kwargs['failed'] = True
    raise AnsibleFailJson(kwargs)
class MockONTAPConnection(object):
    """Mock server connection to an ONTAP host."""

    def __init__(self, kind=None, data=None):
        """Remember which canned response kind/data this connection serves."""
        self.type = kind
        self.params = data
        self.xml_in = None
        self.xml_out = None

    def invoke_successfully(self, xml, enable_tunneling):  # pylint: disable=unused-argument
        """Record the request and, for 'broadcast_domain', answer with
        canned broadcast-domain XML; otherwise echo the request."""
        self.xml_in = xml
        if self.type == 'broadcast_domain':
            xml = self.build_broadcast_domain_info(self.params)
        self.xml_out = xml
        return xml

    @staticmethod
    def build_broadcast_domain_info(broadcast_domain_details):
        """Build ZAPI XML describing a single broadcast domain."""
        xml = netapp_utils.zapi.NaElement('xml')
        attributes = {
            'num-records': 1,
            'attributes-list': {
                'net-port-broadcast-domain-info': {
                    'broadcast-domain': broadcast_domain_details['name'],
                    'ipspace': broadcast_domain_details['ipspace'],
                    'mtu': broadcast_domain_details['mtu'],
                    'ports': {
                        'port-info': {
                            'port': 'test_port_1'
                        }
                    }
                }
            }
        }
        xml.translate_struct(attributes)
        return xml
class TestMyModule(unittest.TestCase):
''' a group of related Unit Tests '''
def setUp(self):
self.mock_module_helper = patch.multiple(basic.AnsibleModule,
exit_json=exit_json,
fail_json=fail_json)
self.mock_module_helper.start()
self.addCleanup(self.mock_module_helper.stop)
self.server = MockONTAPConnection()
self.mock_broadcast_domain = {
'name': 'test_broadcast_domain',
'mtu': '1000',
'ipspace': 'Default',
'ports': 'test_port_1'
}
def mock_args(self):
return {
'name': self.mock_broadcast_domain['name'],
'ipspace': self.mock_broadcast_domain['ipspace'],
'mtu': self.mock_broadcast_domain['mtu'],
'ports': self.mock_broadcast_domain['ports'],
'hostname': 'test',
'username': 'test_user',
'password': 'test_pass!'
}
def get_broadcast_domain_mock_object(self, kind=None, data=None):
"""
Helper method to return an na_ontap_volume object
:param kind: passes this param to MockONTAPConnection()
:param data: passes this param to MockONTAPConnection()
:return: na_ontap_volume object
"""
broadcast_domain_obj = broadcast_domain_module()
broadcast_domain_obj.asup_log_for_cserver = Mock(return_value=None)
broadcast_domain_obj.cluster = Mock()
broadcast_domain_obj.cluster.invoke_successfully = Mock()
if kind is None:
broadcast_domain_obj.server = MockONTAPConnection()
else:
if data is None:
broadcast_domain_obj.server = MockONTAPConnection(kind='broadcast_domain', data=self.mock_broadcast_domain)
else:
broadcast_domain_obj.server = MockONTAPConnection(kind='broadcast_domain', data=data)
return broadcast_domain_obj
def test_module_fail_when_required_args_missing(self):
''' required arguments are reported as errors '''
with pytest.raises(AnsibleFailJson) as exc:
set_module_args({})
broadcast_domain_module()
print('Info: %s' % exc.value.args[0]['msg'])
def test_get_nonexistent_net_route(self):
''' Test if get_broadcast_domain returns None for non-existent broadcast_domain '''
set_module_args(self.mock_args())
result = self.get_broadcast_domain_mock_object().get_broadcast_domain()
assert result is None
def test_create_error_missing_broadcast_domain(self):
''' Test if create throws an error if broadcast_domain is not specified'''
data = self.mock_args()
del data['name']
set_module_args(data)
with pytest.raises(AnsibleFailJson) as exc:
self.get_broadcast_domain_mock_object('broadcast_domain').create_broadcast_domain()
msg = 'missing required arguments: name'
assert exc.value.args[0]['msg'] == msg
@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_broadcast_domain.NetAppOntapBroadcastDomain.create_broadcast_domain')
def test_successful_create(self, create_broadcast_domain):
''' Test successful create '''
data = self.mock_args()
set_module_args(data)
with pytest.raises(AnsibleExitJson) as exc:
self.get_broadcast_domain_mock_object().apply()
assert exc.value.args[0]['changed']
create_broadcast_domain.assert_called_with()
def test_create_idempotency(self):
''' Test create idempotency '''
set_module_args(self.mock_args())
obj = self.get_broadcast_domain_mock_object('broadcast_domain')
with pytest.raises(AnsibleExitJson) as exc:
obj.apply()
assert not exc.value.args[0]['changed']
def test_modify_mtu(self):
''' Test successful modify mtu '''
data = self.mock_args()
data['mtu'] = '1200'
set_module_args(data)
with pytest.raises(AnsibleExitJson) as exc:
self.get_broadcast_domain_mock_object('broadcast_domain').apply()
assert exc.value.args[0]['changed']
def test_modify_ipspace_idempotency(self):
''' Test modify ipsapce idempotency'''
data = self.mock_args()
data['ipspace'] = 'Cluster'
set_module_args(data)
with pytest.raises(AnsibleFailJson) as exc:
self.get_broadcast_domain_mock_object('broadcast_domain').apply()
msg = 'A domain ipspace can not be modified after the domain has been created.'
assert exc.value.args[0]['msg'] == msg
@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_broadcast_domain.NetAppOntapBroadcastDomain.add_broadcast_domain_ports')
def test_add_ports(self, add_broadcast_domain_ports):
''' Test successful modify ports '''
data = self.mock_args()
data['ports'] = 'test_port_1,test_port_2'
set_module_args(data)
with pytest.raises(AnsibleExitJson) as exc:
self.get_broadcast_domain_mock_object('broadcast_domain').apply()
assert exc.value.args[0]['changed']
add_broadcast_domain_ports.assert_called_with(['test_port_2'])
@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_broadcast_domain.NetAppOntapBroadcastDomain.delete_broadcast_domain_ports')
def test_delete_ports(self, delete_broadcast_domain_ports):
    ''' Test successful modify ports (delete) '''
    data = self.mock_args()
    # Requesting an empty port list should remove the existing
    # test_port_1 from the mocked domain.
    data['ports'] = ''
    set_module_args(data)
    with pytest.raises(AnsibleExitJson) as exc:
        self.get_broadcast_domain_mock_object('broadcast_domain').apply()
    assert exc.value.args[0]['changed']
    delete_broadcast_domain_ports.assert_called_with(['test_port_1'])
@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_broadcast_domain.NetAppOntapBroadcastDomain.modify_broadcast_domain')
@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_broadcast_domain.NetAppOntapBroadcastDomain.split_broadcast_domain')
@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_broadcast_domain.NetAppOntapBroadcastDomain.get_broadcast_domain')
def test_split_broadcast_domain(self, get_broadcast_domain, split_broadcast_domain, modify_broadcast_domain):
    ''' Test successful split broadcast domain '''
    data = self.mock_args()
    data['from_name'] = 'test_broadcast_domain'
    data['name'] = 'test_broadcast_domain_2'
    data['ports'] = 'test_port_2'
    set_module_args(data)
    current = {
        'name': 'test_broadcast_domain',
        'mtu': '1000',
        'ipspace': 'Default',
        # NOTE(review): a single comma-joined string, unlike the two-element
        # list used in the modify/delete split test below — confirm intended.
        'ports': ['test_port_1,test_port2']
    }
    # First lookup (target name) finds nothing; the following lookups
    # (from_name) return the existing source domain.
    get_broadcast_domain.side_effect = [
        None,
        current,
        current
    ]
    with pytest.raises(AnsibleExitJson) as exc:
        self.get_broadcast_domain_mock_object().apply()
    assert exc.value.args[0]['changed']
    # A pure split: the domain is split, nothing else is modified.
    modify_broadcast_domain.assert_not_called()
    split_broadcast_domain.assert_called_with()
@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_broadcast_domain.NetAppOntapBroadcastDomain.delete_broadcast_domain')
@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_broadcast_domain.NetAppOntapBroadcastDomain.modify_broadcast_domain')
@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_broadcast_domain.NetAppOntapBroadcastDomain.get_broadcast_domain')
def test_split_broadcast_domain_modify_delete(self, get_broadcast_domain, modify_broadcast_domain, delete_broadcast_domain):
    ''' Test split that moves every port and changes mtu '''
    data = self.mock_args()
    data['from_name'] = 'test_broadcast_domain'
    data['name'] = 'test_broadcast_domain_2'
    data['ports'] = ['test_port_1', 'test_port_2']
    data['mtu'] = '1200'
    set_module_args(data)
    current = {
        'name': 'test_broadcast_domain',
        'mtu': '1000',
        'ipspace': 'Default',
        'ports': ['test_port_1', 'test_port2']
    }
    # Target domain does not exist yet; the source domain is found on the
    # subsequent lookups.
    get_broadcast_domain.side_effect = [
        None,
        current,
        current
    ]
    with pytest.raises(AnsibleExitJson) as exc:
        self.get_broadcast_domain_mock_object().apply()
    assert exc.value.args[0]['changed']
    # Every port moves to the new domain, so the source is deleted and the
    # mtu difference triggers a modify on the result.
    delete_broadcast_domain.assert_called_with('test_broadcast_domain')
    modify_broadcast_domain.assert_called_with()
@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_broadcast_domain.NetAppOntapBroadcastDomain.get_broadcast_domain')
def test_split_broadcast_domain_not_exist(self, get_broadcast_domain):
    ''' Test that splitting a non-existent broadcast domain fails '''
    data = self.mock_args()
    data['from_name'] = 'test_broadcast_domain'
    data['name'] = 'test_broadcast_domain_2'
    data['ports'] = 'test_port_2'
    set_module_args(data)
    # Neither the target domain nor the split source exists.
    get_broadcast_domain.side_effect = [
        None,
        None,
    ]
    with pytest.raises(AnsibleFailJson) as exc:
        self.get_broadcast_domain_mock_object().apply()
    msg = 'A domain can not be split if it does not exist.'
    # Bug fix: the original `assert exc.value.args[0]['msg'], msg` used the
    # comma form of assert (msg was only the failure message, nothing was
    # compared). Compare for equality, matching the other failure tests.
    assert exc.value.args[0]['msg'] == msg
@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_broadcast_domain.NetAppOntapBroadcastDomain.split_broadcast_domain')
def test_split_broadcast_domain_idempotency(self, split_broadcast_domain):
    ''' Test split broadcast domain idempotency '''
    data = self.mock_args()
    data['from_name'] = 'test_broadcast_domain'
    # Requested state matches the mocked existing domain exactly, so no
    # split should happen and no change should be reported.
    data['ports'] = 'test_port_1'
    set_module_args(data)
    with pytest.raises(AnsibleExitJson) as exc:
        self.get_broadcast_domain_mock_object('broadcast_domain').apply()
    assert exc.value.args[0]['changed'] is False
    split_broadcast_domain.assert_not_called()
| [
"sifang@cisco.com"
] | sifang@cisco.com |
74377d062ee0d17bd11f3abf5d882d2cd8718a03 | 87e520f16911077e3944f27be142b028110239d9 | /guild/commands/package.py | 05fa5c9ad67a1dc7299619e61860d652e0a2ec07 | [
"Apache-2.0"
] | permissive | cfregly/guild-python-1 | 06c81e5c633be231f18318604f2402e8ac24bce9 | 543889469251e20c1ac55e358100952cdc33e58d | refs/heads/master | 2021-07-12T13:38:31.291333 | 2017-10-16T22:01:17 | 2017-10-16T22:01:17 | 107,252,039 | 0 | 0 | null | 2017-10-17T10:14:47 | 2017-10-17T10:14:47 | null | UTF-8 | Python | false | false | 1,841 | py | # Copyright 2017 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
import click
from guild import click_util
@click.command()
@click.option(
    "-p", "--project", "project_location", metavar="LOCATION",
    help=("Project location (file system directory) of the "
          "project to package. Defaults to current directory."))
@click.option(
    "-d", "--dist-dir", metavar="DIR",
    help="Directory to create the package distribution in.")
@click.option(
    "--upload",
    help="Upload the package distribution to PyPI after creating it.",
    is_flag=True)
@click.option(
    "-s", "--sign",
    help="Sign a package distribution upload with gpg.",
    is_flag=True)
@click.option("-i", "--identity", help="GPG identity used to sign upload.")
@click.option("-u", "--user", help="PyPI user name for upload.")
# NOTE(review): "-p" is already declared for --project above; reusing it here
# for --password makes the short flag ambiguous — confirm and pick another
# letter for one of the two options.
@click.option("-p", "--password", help="PyPI password for upload.")
@click.option("-c", "--comment", help="Comment to include with upload.")
@click.pass_context
@click_util.use_args
def package(ctx, args):
    """Create a package for distribution.

    Packages are built from projects that contain a PACKAGE file that
    describes the package to be built.
    """
    # Imported lazily so loading the CLI definition stays cheap.
    from . import package_impl
    package_impl.create_package(args, ctx)
| [
"g@rre.tt"
] | g@rre.tt |
96af65b743081e61f0620630af27fb2aa2652125 | 8efb4caeafe2cfb024827ce194b5abae6fdfc9a4 | /test/functional/rpc_named_arguments.py | d3557ce2777061179531d4a6c6782b345379400b | [
"MIT"
] | permissive | Worldcoin-Network/worldcoin | cd8ac9631154666cb11603d5f07e3a9dc2e1653a | 4f14d8baadda3f46363c26dc327a68b33f14e28c | refs/heads/master | 2022-03-04T01:50:14.783972 | 2021-10-26T15:21:47 | 2021-10-26T15:21:47 | 156,328,955 | 15 | 9 | MIT | 2021-05-10T16:58:07 | 2018-11-06T05:08:32 | C++ | UTF-8 | Python | false | false | 1,217 | py | #!/usr/bin/env python3
# Copyright (c) 2016-2018 The Worldcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test using named arguments for RPCs."""
from test_framework.test_framework import WorldcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
class NamedArgumentTest(WorldcoinTestFramework):
    """Exercise RPC calls made with named (keyword) arguments."""

    def set_test_params(self):
        # A single node is enough; only RPC argument handling is tested.
        self.num_nodes = 1

    def run_test(self):
        node = self.nodes[0]
        # help() accepts its argument by name.
        h = node.help(command='getblockchaininfo')
        assert(h.startswith('getblockchaininfo\n'))
        # Unknown named parameters are rejected with RPC error code -8.
        assert_raises_rpc_error(-8, 'Unknown named parameter', node.help, random='getblockchaininfo')
        h = node.getblockhash(height=0)
        node.getblock(blockhash=h)
        # echo reflects positional slots: omitted middle arguments become None.
        assert_equal(node.echo(), [])
        assert_equal(node.echo(arg0=0,arg9=9), [0] + [None]*8 + [9])
        assert_equal(node.echo(arg1=1), [None, 1])
        assert_equal(node.echo(arg9=None), [None]*10)
        assert_equal(node.echo(arg0=0,arg3=3,arg9=9), [0] + [None]*2 + [3] + [None]*5 + [9])

if __name__ == '__main__':
    NamedArgumentTest().main()
| [
"quentin.neveu@hotmail.ca"
] | quentin.neveu@hotmail.ca |
6d4c6bfd172e9d8027ccb812a282bd1879722b65 | e6c65e2e354336a4bea5b6a4ccbccd3682915fe2 | /out-bin/py/google/fhir/models/run_locally.runfiles/pypi__numpy_1_15_4/numpy/distutils/system_info.py | aac7a3ff95556cebf69ee7ac4d807b86b06f47ea | [
"Apache-2.0"
] | permissive | rasalt/fhir-datalab | c30ab773d84983dd04a37e9d0ddec8bf2824b8a4 | 3e329fc8b4226d3e3a4a7c23c306a86e7a9ea0de | refs/heads/master | 2021-10-09T05:51:04.593416 | 2018-12-21T18:11:03 | 2018-12-22T05:38:32 | 162,744,237 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135 | py | /home/rkharwar/.cache/bazel/_bazel_rkharwar/0ddaa3627472ad9d1367a008236ce2f5/external/pypi__numpy_1_15_4/numpy/distutils/system_info.py | [
"ruchika.kharwar@gmail.com"
] | ruchika.kharwar@gmail.com |
a235367cf7e0587291907ad9357befc485578b2d | 824f831ce0921b3e364060710c9e531f53e52227 | /Leetcode/Sliding_Window/LC-209. Minimum Size Subarray Sum.py | 09456da62cae8ff3353367758c2559fbab35706a | [] | no_license | adityakverma/Interview_Prepration | e854ff92c10d05bc2c82566ea797d2ce088de00a | d08a7f728c53943e9a27c33f8e4249633a69d1a6 | refs/heads/master | 2020-04-19T19:36:06.527353 | 2019-06-15T23:02:30 | 2019-06-15T23:02:30 | 168,392,921 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,372 | py |
# Given an array of n positive integers and a positive integer s, find the minimal length of a contiguous
# subarray of which the sum ≥ s. If there is not one, return 0 instead.
#
# Example:
#
# Input: s = 7, nums = [2,3,1,2,4,3]
# Output: 2
# Explanation: the subarray [4,3] has the minimal length under the problem constraint.
#
# Follow up:
# If you have figured out the O(n) solution, try coding another solution of which the time complexity is O(n log n).
# ================================================================================================================
class Solution(object):
    def minSubArrayLen(self, s, nums):
        """Return the length of the shortest contiguous subarray of nums
        whose sum is >= s, or 0 if no such subarray exists.

        :type s: int
        :type nums: List[int]
        :rtype: int
        """
        # Sliding window: extend the right edge until the window sum reaches
        # s, then shrink from the left while it still does, recording the
        # best (smallest) window length seen.
        best = len(nums) + 1
        window_sum = 0
        left = 0
        for right, value in enumerate(nums):
            window_sum += value
            while window_sum >= s:
                best = min(best, right - left + 1)
                window_sum -= nums[left]
                left += 1
        return best if best <= len(nums) else 0
'''
class Solution(object):
def minSubArrayLen(self, s, nums):
"""
:type s: int
:type nums: List[int]
:rtyp
# Using Binary Search
result = len(nums) + 1
for idx, n in enumerate(nums[1:], 1):
nums[idx] = nums[idx - 1] + n
left = 0
for right, n in enumerate(nums):
if n >= s:
left = self.find_left(left, right, nums, s, n)
result = min(result, right - left + 1)
return result if result <= len(nums) else 0
def find_left(self, left, right, nums, s, n):
while left < right:
mid = (left + right) // 2
if n - nums[mid] >= s:
left = mid + 1
else:
right = mid
return left
'''
'''
Sliding Window Algorithm:
========================
Trick here is to keep adding numbers from the start of array until you hit the target.
After that we keep adding numbers from the end and subtracting numbers from the start as long as the total is still above target and keep checking if the new array is the minimum length.
The intuition is that for example, a 10 added on the end could replace two 5's from start of array and thus the reduce the number of elements needed to hit target in that subarray.
IMP NOTE: To find maximum substring, we should update maximum after the inner while loop to guarantee that the substring is valid. On the other hand, when asked to find minimum substring, we should update minimum inside the inner while loop.
# https://leetcode.com/problems/minimum-size-subarray-sum/discuss/211775/Python-O(N)-greater-minimum-window-substring-template
# https://leetcode.com/problems/minimum-size-subarray-sum/discuss/59093/Python-O(n)-and-O(n-log-n)-solution
''' | [
"noreply@github.com"
] | adityakverma.noreply@github.com |
2573678e9815d6c0d02d522c1f682042b66018a9 | 5c0a253bf2fb83db01abc99097871c965f4cf565 | /spark/crm/PROC_A_SUBJECT_D004039.py | 4324513e029099964e80d4e9191f6fdd22410f7b | [] | no_license | airuibel/python-1 | 3b16553ede9d069ec56efbb12a89a4de6917a447 | 94f387e2d406fab2128bcfffce6146da720b2ccc | refs/heads/master | 2020-07-05T15:43:00.957221 | 2017-09-17T14:05:48 | 2017-09-17T14:05:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,160 | py | #coding=UTF-8
from pyspark import SparkContext, SparkConf, SQLContext, Row, HiveContext
from pyspark.sql.types import *
from datetime import date, datetime, timedelta
import sys, re, os
st = datetime.now()
conf = SparkConf().setAppName('PROC_A_SUBJECT_D004039').setMaster(sys.argv[2])
sc = SparkContext(conf = conf)
sc.setLogLevel('WARN')
if len(sys.argv) > 5:
    if sys.argv[5] == "hive":
        sqlContext = HiveContext(sc)
    else:
        sqlContext = SQLContext(sc)
# NOTE(review): when fewer than six CLI arguments are passed, sqlContext is
# never assigned and the job fails later — confirm callers always supply it.
hdfs = sys.argv[3]
dbname = sys.argv[4]

# Dates used throughout the job (comments translated from Chinese).
# ETL date (yyyymmdd), taken from the first CLI argument.
etl_date = sys.argv[1]
V_DT = etl_date
# Previous day.
V_DT_LD = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8])) + timedelta(-1)).strftime("%Y%m%d")
# First day of the current month.
V_DT_FMD = date(int(etl_date[0:4]), int(etl_date[4:6]), 1).strftime("%Y%m%d")
# Last day of the previous month.
V_DT_LMD = (date(int(etl_date[0:4]), int(etl_date[4:6]), 1) + timedelta(-1)).strftime("%Y%m%d")
# 10-character date form (yyyy-mm-dd).
V_DT10 = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8]))).strftime("%Y-%m-%d")
V_STEP = 0
#---------------------------------------------------------------------------------------#
V_YEAR_MONTH = etl_date[0:4]+"-" + etl_date[4:6]
v_sub_id = 'D004039';

# Load the three source indicator tables and expose them to Spark SQL.
ACRM_A_TARGET_D004022 = sqlContext.read.parquet(hdfs+'/ACRM_A_TARGET_D004022/*')
ACRM_A_TARGET_D004022.registerTempTable("ACRM_A_TARGET_D004022")
ACRM_A_TARGET_D004023 = sqlContext.read.parquet(hdfs+'/ACRM_A_TARGET_D004023/*')
ACRM_A_TARGET_D004023.registerTempTable("ACRM_A_TARGET_D004023")
ACRM_A_TARGET_D004024 = sqlContext.read.parquet(hdfs+'/ACRM_A_TARGET_D004024/*')
ACRM_A_TARGET_D004024.registerTempTable("ACRM_A_TARGET_D004024")

# Task [21] 001-01::
V_STEP = V_STEP + 1

# Indicator D004039 compares the trend of three source indicators per
# customer: strictly rising -> 0.0, strictly falling -> 2.0, otherwise 1.0.
sql = """
 SELECT
         A.CUST_ID               as CUST_ID
        ,''                      as ORG_ID
        ,'D004039'               as INDEX_CODE
        ,CASE WHEN A.INDEX_VALUE < B.INDEX_VALUE AND B.INDEX_VALUE < C.INDEX_VALUE THEN 0.0
              WHEN A.INDEX_VALUE > B.INDEX_VALUE AND B.INDEX_VALUE > C.INDEX_VALUE THEN 2.0
              ELSE 1.0 END       as INDEX_VALUE
        ,V_YEAR_MONTH            as YEAR_MONTH
        ,V_DT                    as ETL_DATE
        ,A.CUST_TYPE             as CUST_TYPE
        ,A.FR_ID                 as FR_ID
    FROM ACRM_A_TARGET_D004022 A,
         ACRM_A_TARGET_D004023 B,
         ACRM_A_TARGET_D004024 C
   WHERE A.CUST_ID = B.CUST_ID
     AND A.FR_ID = B.FR_ID
     AND A.CUST_TYPE = '2'
     AND A.CUST_ID = C.CUST_ID
     AND A.FR_ID = C.FR_ID
"""
# Substitute the date placeholders into the SQL before running it.
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
sql = re.sub(r"\bV_YEAR_MONTH\b", "'"+V_YEAR_MONTH+"'", sql)
ACRM_A_TARGET_D004039 = sqlContext.sql(sql)
ACRM_A_TARGET_D004039.registerTempTable("ACRM_A_TARGET_D004039")
dfn="ACRM_A_TARGET_D004039/"+V_DT+".parquet"
# Cache before count() so the subsequent write reuses the computed result.
ACRM_A_TARGET_D004039.cache()
nrows = ACRM_A_TARGET_D004039.count()
ACRM_A_TARGET_D004039.write.save(path=hdfs + '/' + dfn, mode='overwrite')
ACRM_A_TARGET_D004039.unpersist()
ACRM_A_TARGET_D004022.unpersist()
ACRM_A_TARGET_D004023.unpersist()
ACRM_A_TARGET_D004024.unpersist()
# Drop the previous day's output partition from HDFS.
ret = os.system("hdfs dfs -rm -r /"+dbname+"/ACRM_A_TARGET_D004039/"+V_DT_LD+".parquet")
et = datetime.now()
# NOTE(review): this `print (...) % (...)` form only parses as intended under
# Python 2 (the % applies to the parenthesized string) — confirm interpreter.
print("Step %d start[%s] end[%s] use %d seconds, insert ACRM_A_TARGET_D004039 lines %d") % (V_STEP, st.strftime("%H:%M:%S"), et.strftime("%H:%M:%S"), (et-st).seconds, nrows)
"cysuncn@126.com"
] | cysuncn@126.com |
90d2e9f95d7f2972285bb2645fbe7b1c72e80b3e | e10a6d844a286db26ef56469e31dc8488a8c6f0e | /ddpm_w_distillation/ddpm_w_distillation/config/i64_w_unet3_distill.py | 087622af189706bd2ae189e6c0d8965bd8a31c4f | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | Jimmy-INL/google-research | 54ad5551f97977f01297abddbfc8a99a7900b791 | 5573d9c5822f4e866b6692769963ae819cb3f10d | refs/heads/master | 2023-04-07T19:43:54.483068 | 2023-03-24T16:27:28 | 2023-03-24T16:32:17 | 282,682,170 | 1 | 0 | Apache-2.0 | 2020-07-26T15:50:32 | 2020-07-26T15:50:31 | null | UTF-8 | Python | false | false | 6,776 | py | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=missing-module-docstring,missing-function-docstring
# pylint: disable=g-no-space-after-comment,g-bad-todo
# pylint: disable=invalid-name,line-too-long
import ml_collections
# Placeholder namespace for sweep helpers (hyper.sweep, hyper.product used in
# get_config below). NOTE(review): presumably injected/replaced by the launch
# tooling at runtime — confirm, since as written these attributes don't exist.
class hyper:
  pass
def D(**kwargs):
  """Shorthand constructor: wrap keyword arguments in an ml_collections.ConfigDict."""
  config = ml_collections.ConfigDict(initial_dictionary=kwargs)
  return config
# added
# end_num_steps = 1 # eventual number of steps in the distilled sampler
# start_num_steps = 1024 # number of steps in baseline sampler
distill_steps_per_iter = 1000000

# Teacher / student checkpoint locations (original TODO: update teacher path).
another_teacher_ckpt_path = 'projects/diffusion/stage1_2048_42848231/1/retained_checkpoints/checkpoint_520000'
# 'projects/diffusion/i64_retrain_snr_1e-4_42469520/1/retained_checkpoints/checkpoint_380000'
teacher_ckpt_path = 'projects/diffusion/retrain_snr_2048_42804866/1/retained_checkpoints/checkpoint_220000'
# 'projects/diffusion/i64_retrain_snr_1e-4_42469520/1/retained_checkpoints/checkpoint_380000'
# 'projects/diffusion/i64_teacher_v_42202910/1/retained_checkpoints/checkpoint_100000'
# 'projects/diffusion/i64_retrain_42445613/1/retained_checkpoints/checkpoint_200000'
single_model_path = 'projects/diffusion/stage1_2048_42848231/1/retained_checkpoints/checkpoint_520000'

# Shared knobs referenced by get_config() below.
eval_sampling_num_steps = 256 #128 #512 #256 #512 #128
train_batch_size = 256 #2048, # 256
use_sample_single_ckpt = True #False
use_retained_ckpt = True #False
train_clip_x = False
# sampler = 'ddim', # 'noisy'
def get_config():
  """Return the experiment ConfigDict (ImageNet-64 w-conditioned UNet distillation)."""
  return D(
      launch=D(
          sweep=hyper.product([
              # hyper.sweep('config.model.args.uncond_prob', [0.01, 0.02, 0.05]),
              # hyper.sweep('config.model.args.uncond_prob', [0.1, 0.2, 0.5]),
              hyper.sweep('config.seed', [0]),  #TODO [1, 2, 3] change to [0]
              hyper.sweep(
                  'config.model.args.uncond_prob', [0.1]
              ),  # NOTE: not used for w_unet model check NOTE change from 0.1 to 0
              # hyper.sweep(config.model.acond_uncond_coefs)
          ]),),
      # added
      distillation=D(
          # teacher checkpoint is used for teacher and initial params of student
          teacher_checkpoint_path=teacher_ckpt_path,
          steps_per_iter=distill_steps_per_iter,  # number of distillation training steps per halving of sampler steps
          only_finetune_temb=False,  #TODO!! False,
          another_teacher_init=True,  #False, #NOTE: change to False #False, #True,
          another_teacher_path=another_teacher_ckpt_path,
          # start_num_steps=start_num_steps,
          # end_num_steps=end_num_steps,
      ),
      # added
      seed=0,
      main_class='Model',
      dataset=D(
          name='ImageNet',
          args=D(
              image_size=64,
              class_conditional=True,
              randflip=True,
          ),
      ),
      sampler='noisy',  #'ddim', # 'noisy', # added
      ##
      #together
      use_sample_single_ckpt=use_sample_single_ckpt,  #True,
      sample_single_ckpt_path=single_model_path,
      #together
      model=D(
          # architecture
          name='w_unet3',
          args=D(
              ch=192,
              emb_ch=768,  # default is ch * 4
              ch_mult=[1, 2, 3, 4],
              num_res_blocks=3,
              attn_resolutions=[8, 16, 32],
              num_heads=None,
              head_dim=64,
              dropout=0.,  #NOTE!! changed previously 0.1,
              logsnr_input_type='inv_cos',
              w_input_type='inv_cos',  # w embedding added
              resblock_resample=True,
              uncond_prob=0.1,  #NOTE: default, but as sweep 0.,
          ),
          teacher_extra_class=True,  #NOTE added
          mean_type='v',  #'eps', #'v', #NOTE: change to v 'eps',
          teacher_mean_type='v',  #"eps", # added
          logvar_type='fixed_medium:0.3',  # TODO: check
          mean_loss_weight_type='snr',  #'constant', #'snr', #'snr_trunc', #note not 'constant', #constant='mse', snr, snr_trunc
          logvar_loss_type='none',
          # logsnr schedule
          train_num_steps=0,  # train in continuous time
          eval_sampling_num_steps=eval_sampling_num_steps,
          train_logsnr_schedule=D(
              name='cosine', logsnr_min=-20., logsnr_max=20.),
          eval_logsnr_schedule=D(
              name='cosine', logsnr_min=-20., logsnr_max=20.),
          eval_clip_denoised=True,
          # added
          train_w_schedule=D(
              name='uniform',
              # logsnr_min=0., logsnr_max=0.5),
              # logsnr_min=0., logsnr_max=1.0),
              # logsnr_min=0., logsnr_max=2.0),
              logsnr_min=0.,
              logsnr_max=4.),
          # NOTE can set logsnr_max=logsnr_min for a single w value
          # sample interpolation
          # cond_uncond_coefs=[1.3, -0.3], # [cond_coef, uncond_coef]
          # eval_cond_uncond_coefs # NOTE: never have it for distillation!, it does not make sense
      ),
      train=D(
          # optimizer
          batch_size=train_batch_size,  #2048, # 256 #2048, # TODO: change back 2048,
          optimizer='adam',
          learning_rate=3e-4,  #edited 3e-4,
          learning_rate_warmup_steps=0,  #edited 10000, # used to be 1k, but 10k helps with stability
          weight_decay=0.0,
          ema_decay=0.9999,
          grad_clip=1.0,
          substeps=10,
          enable_update_skip=False,
          # logging
          log_loss_every_steps=100,
          checkpoint_every_secs=900,  # 15 minutes
          retain_checkpoint_every_steps=20000,  # old checkpoints won't get deleted
          eval_every_steps=20000,
          train_clip_x=train_clip_x,  # NOTE added
          w_conditoned_training=True,  # added
          w_warmup_steps=10000,  #1, #10000, # added to warm up w embedding
      ),
      eval=D(
          batch_size=128,  # TODO change to 128,
          num_inception_samples=50000,
          sampling_fixed_classes=[249, 284],  # visualize malamute, siamese cat
          sampling_fixed_w=[0.1, 0.3, 0.5],
          w_sample_const=4.0,  #0.3, #0.3
          noisy_sampler_interpolation=0.5,  #0.2, # NOTE: need to change
          use_retained_ckpt=use_retained_ckpt,  #True,
      ),
  )
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
f840d8f08c5b3912798557561050b0c4b3506e47 | 70280955a5382d73e58395eba78c119a400f4ce7 | /comp/exawizards2019/test.py | a24b292819c9854baa39c6eac3b842189d260338 | [] | no_license | cohock13/atcoder | a7d0e26a10a4e58690347a2e36839c2f503a79ba | d268aa68fc96203eab94d021bd158cf84bdb00bc | refs/heads/master | 2021-01-03T00:41:31.055553 | 2020-10-27T12:28:06 | 2020-10-27T12:28:06 | 239,839,477 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 360 | py | N = int(input())
S = {}
for i in range(N):
s = input()
if s in S:
S[s] += 1
else:
S[s] = 1
key = list(S.keys())
value = list(S.values())
m = max(value)
Ans = []
for i in range(len(key)):
Ans.append([value[i],key[i]])
Ans.sort()
for i in range(len(key)):
if Ans[i][0] == m:
print(Ans[i][1])
| [
"callout2690@gmail.com"
] | callout2690@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.