blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
616
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
69
| license_type
stringclasses 2
values | repo_name
stringlengths 5
118
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
63
| visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 2.91k
686M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 23
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 213
values | src_encoding
stringclasses 30
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 2
10.3M
| extension
stringclasses 246
values | content
stringlengths 2
10.3M
| authors
listlengths 1
1
| author_id
stringlengths 0
212
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5ba31f6ee551ccdab61849ead5d95db11cb35504
|
7ea5c45401947eaa56c7abb571fc5968aa74abd1
|
/python入门/day_9_函数/9-2-向函数传递信息.py
|
c81f629fd921a2bcc50119839a93ef863318ba1a
|
[] |
no_license
|
jihongsheng/python3
|
a901d47c7a46054360f5efe8087ad0f958981945
|
12e2d5bf29bc8c1d16f05e6afcbc6f70530d0d6d
|
refs/heads/master
| 2020-05-16T22:18:50.210424
| 2019-05-14T00:53:39
| 2019-05-14T00:53:39
| 183,331,780
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 922
|
py
|
# -*- coding: UTF-8 -*-
# With a small change, greet_user() can do more than print "Hello!": it can
# greet the user by name. Add a parameter named username inside the
# parentheses of def greet_user(); the function then accepts whatever value
# is passed for username, so every call must supply one.
def greet_user(username):
    # %-formatting keeps the greeting template in a single string.
    print("Hello,%s!" % username)
# greet_user(username="Wang") calls greet_user() with a keyword argument,
# providing the information the print statement needs.
greet_user(username="Wang")
# Likewise greet_user("Ji") passes "Ji" positionally and prints "Hello,Ji!".
greet_user("Ji")
# greet_user() may be called any number of times; each call prints a
# greeting for whatever name is passed in.
|
[
"6909283@qq.com"
] |
6909283@qq.com
|
2b0ce6210932107f0a5bcb5e5e2b9fcb8b44dfb1
|
62bdde43ce88507530610a2b77d2ce0859eebc8b
|
/BOJ/mathematics/01676-팩토리얼_0의_개수/01676-팩토리얼_0의_개수-jiwoong.py
|
3a451210c54a1ccdfad790cfe2f53a10c021a7f6
|
[] |
no_license
|
j2woong1/algo-itzy
|
6810f688654105cf4aefda3b0876f714ca8cbd08
|
7cf6cd8383dd8e9ca63f605609aab003790e1565
|
refs/heads/master
| 2023-08-21T12:51:54.874102
| 2021-10-03T04:35:21
| 2021-10-03T04:35:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 461
|
py
|
import sys
input = sys.stdin.readline  # fast line reads for competitive programming

# BOJ 1676: count trailing zeros of N! (N read from stdin).
N = int(input())  # N!

def fact(N):
    # Recursive factorial; valid for N >= 1 (caller handles N == 0).
    if N == 1:
        return 1
    else:  # factorial implementation
        num = N * fact(N - 1)
        return num

if N == 0:
    # 0! = 1, which has no trailing zeros
    print(0)
else:
    num = str(fact(N))  # count zeros via string processing
    cnt = 0
    for i in range(len(num) - 1, -1, -1):  # walk backwards from the end
        if num[i] == "0":
            cnt += 1
        else:
            break  # first non-zero digit ends the trailing-zero run
    print(cnt)
|
[
"j2woong1@gmail.com"
] |
j2woong1@gmail.com
|
4d36a1fe52245f8e1c029f1e0d036ca2a1b932c1
|
b3a2ac9eb02a6eef9e6f3504afabc6400f894f56
|
/clld/lib/latex.py
|
325cb2c7397bf818455945818ba172a534d29a06
|
[
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] |
permissive
|
Anaphory/clld
|
9f67c477e228eff05fdc7b7fa4310c703af02108
|
bed1a6d08275a41fd7b5d13a0af19e4e538d186c
|
refs/heads/master
| 2021-01-17T04:50:05.382411
| 2017-02-16T11:20:11
| 2017-02-16T11:20:11
| 66,831,136
| 0
| 0
| null | 2016-08-29T09:43:06
| 2016-08-29T09:43:05
| null |
UTF-8
|
Python
| false
| false
| 15,747
|
py
|
"""
Character translation utilities for LaTeX-formatted text.
Usage:
- unicode(string,'latex')
- ustring.decode('latex')
are both available just by letting "import latex" find this file.
- unicode(string,'latex+latin1')
- ustring.decode('latex+latin1')
where latin1 can be replaced by any other known encoding, also
become available by calling latex.register().
We also make public a dictionary latex_equivalents,
mapping ord(unicode char) to LaTeX code.
D. Eppstein, October 2003.
"""
from __future__ import generators
import codecs
import re
from six import text_type, Iterator, unichr
def register():
    """Enable encodings of the form 'latex+x' where x describes another encoding.

    Unicode characters are translated to or from x when possible, otherwise
    expanded to latex.
    """
    codecs.register(_registry)
def getregentry():
    """Encoding module API: return the codec entry for plain 'latex'."""
    return _registry('latex')  # pragma: no cover
def _registry(encoding):
if encoding == 'latex':
encoding = None # pragma: no cover
elif encoding.startswith('latex+'):
encoding = encoding[6:]
else:
return None # pragma: no cover
class Codec(codecs.Codec):
def encode(self, input, errors='strict'): # pragma: no cover
"""Convert unicode string to latex."""
output = []
for c in input:
if encoding:
try:
output.append(c.encode(encoding))
continue
except:
pass
if ord(c) in latex_equivalents:
output.append(latex_equivalents[ord(c)])
else:
output += ['{\\char', str(ord(c)), '}']
return ''.join(output), len(input)
def decode(self, input, errors='strict'):
"""Convert latex source string to unicode."""
if encoding:
input = text_type(input, encoding, errors)
# Note: we may get buffer objects here.
# It is not permussable to call join on buffer objects
# but we can make them joinable by calling unicode.
# This should always be safe since we are supposed
# to be producing unicode output anyway.
x = map(text_type, _unlatex(input))
return u''.join(x), len(input)
class StreamWriter(Codec, codecs.StreamWriter):
pass
class StreamReader(Codec, codecs.StreamReader):
pass
return (Codec().encode, Codec().decode, StreamReader, StreamWriter)
def _tokenize(tex):  # pragma: no cover
    """Convert latex source into sequence of single-token substrings."""
    start = 0
    try:
        # skip quickly across boring stuff
        pos = next(_stoppers.finditer(tex)).span()[0]
    except StopIteration:
        # no special characters at all: the whole input is one token
        yield tex
        return
    while 1:
        if pos > start:
            yield tex[start:pos]
            if (
                tex[start] == '\\'
                and not (tex[pos - 1].isdigit() and tex[start + 1].isalpha())
            ):
                while pos < len(tex) and tex[pos].isspace():  # skip blanks after csname
                    pos += 1
                while pos < len(tex) and tex[pos] in _ignore:  # pragma: no cover
                    pos += 1  # flush control characters
        if pos >= len(tex):
            return
        start = pos
        if tex[pos:pos + 2] in {'$$': None, '/~': None}:  # protect ~ in urls
            pos += 2  # pragma: no cover
        elif tex[pos].isdigit():
            # a run of digits is one token
            while pos < len(tex) and tex[pos].isdigit():
                pos += 1
        elif tex[pos] == '-':
            # a run of hyphens (en-/em-dash ligatures) is one token
            while pos < len(tex) and tex[pos] == '-':
                pos += 1
        elif tex[pos] != '\\' or pos == len(tex) - 1:
            # ordinary character, or a trailing backslash with nothing after it
            pos += 1
        elif not tex[pos + 1].isalpha():
            # one-character control sequence such as \' or \"
            pos += 2
        else:
            # multi-letter control sequence: consume the whole csname
            pos += 1
            while pos < len(tex) and tex[pos].isalpha():
                pos += 1
            if tex[start:pos] == '\\char' or tex[start:pos] == '\\accent':
                while pos < len(tex) and tex[pos].isdigit():  # pragma: no cover
                    pos += 1
class _unlatex(Iterator):  # pragma: no cover
    """Convert tokenized tex into sequence of unicode strings. Helper for decode()."""

    def __iter__(self):
        """Turn self into an iterator. It already is one, nothing to do."""
        return self

    def __init__(self, tex):
        """Create a new token converter from a string."""
        self.tex = tuple(_tokenize(tex))  # turn tokens into indexable list
        self.pos = 0  # index of first unprocessed token
        self.lastoutput = 'x'  # lastoutput must always be nonempty string

    def __getitem__(self, n):
        """Return token at offset n from current pos."""
        p = self.pos + n
        t = self.tex
        # yields None past the end of the token list
        return p < len(t) and t[p] or None

    def __next__(self):
        """Find and return another piece of converted output."""
        if self.pos >= len(self.tex):
            raise StopIteration
        nextoutput = self.chunk()
        if (
            self.lastoutput[0] == '\\'
            and self.lastoutput[-1].isalpha()
            and nextoutput[0].isalpha()
        ):
            # add extra space to terminate csname
            nextoutput = ' ' + nextoutput  # pragma: no cover
        self.lastoutput = nextoutput
        return nextoutput

    def chunk(self):
        """Grab another set of input tokens and convert them to an output string."""
        for delta, c in self.candidates(0):  # pragma: no cover
            if c in _l2u:
                self.pos += delta
                return unichr(_l2u[c])
            elif len(c) == 2 and c[1] == 'i' and (c[0], '\\i') in _l2u:
                self.pos += delta  # correct failure to undot i
                return unichr(_l2u[(c[0], '\\i')])
            elif len(c) == 1 and c[0].startswith('\\char') and c[0][5:].isdigit():
                self.pos += delta
                return unichr(int(c[0][5:]))
        # nothing matches, just pass through token as-is
        self.pos += 1
        return self[-1]

    def candidates(self, offset):
        """Generate pairs delta,c.

        Where c is a token or tuple of tokens from tex
        (after deleting extraneous brackets starting at pos) and delta
        is the length of the tokens prior to bracket deletion.
        """
        t = self[offset]
        if t in _blacklist:
            return
        elif t == '{':
            # brace-wrapped candidate: require a matching closing brace
            for delta, c in self.candidates(offset + 1):
                if self[offset + delta + 1] == '}':
                    yield delta + 2, c
        elif t == '\\mbox':
            for delta, c in self.candidates(offset + 1):  # pragma: no cover
                yield delta + 1, c
        elif t == '$' and self[offset + 2] == '$':
            yield 3, (t, self[offset + 1], t)  # pragma: no cover
        else:
            q = self[offset + 1]
            if q == '{' and self[offset + 3] == '}':
                # csname applied to a braced single token: \cmd{x}
                yield 4, (t, self[offset + 2])
            elif q:
                yield 2, (t, q)
            yield 1, t
# Mapping from ord(unicode char) to an equivalent LaTeX code fragment.
latex_equivalents = {
    0x0009: ' ',
    0x000a: '\n',
    0x0023: '{\#}',
    0x0026: '{\&}',
    0x00a0: '{~}',
    0x00a1: '{!`}',
    0x00a2: '{\\not{c}}',
    0x00a3: '{\\pounds}',
    0x00a7: '{\\S}',
    0x00a8: '{\\"{}}',
    0x00a9: '{\\copyright}',
    0x00af: '{\\={}}',
    0x00ac: '{\\neg}',
    0x00ad: '{\\-}',
    0x00b0: '{\\mbox{$^\\circ$}}',
    0x00b1: '{\\mbox{$\\pm$}}',
    0x00b2: '{\\mbox{$^2$}}',
    0x00b3: '{\\mbox{$^3$}}',
    0x00b4: "{\\'{}}",
    0x00b5: '{\\mbox{$\\mu$}}',
    0x00b6: '{\\P}',
    0x00b7: '{\\mbox{$\\cdot$}}',
    0x00b8: '{\\c{}}',
    0x00b9: '{\\mbox{$^1$}}',
    0x00bf: '{?`}',
    0x00c0: '{\\`A}',
    0x00c1: "{\\'A}",
    0x00c2: '{\\^A}',
    0x00c3: '{\\~A}',
    0x00c4: '{\\"A}',
    0x00c5: '{\\AA}',
    0x00c6: '{\\AE}',
    0x00c7: '{\\c{C}}',
    0x00c8: '{\\`E}',
    0x00c9: "{\\'E}",
    0x00ca: '{\\^E}',
    0x00cb: '{\\"E}',
    0x00cc: '{\\`I}',
    0x00cd: "{\\'I}",
    0x00ce: '{\\^I}',
    0x00cf: '{\\"I}',
    0x00d1: '{\\~N}',
    0x00d2: '{\\`O}',
    0x00d3: "{\\'O}",
    0x00d4: '{\\^O}',
    0x00d5: '{\\~O}',
    0x00d6: '{\\"O}',
    0x00d7: '{\\mbox{$\\times$}}',
    0x00d8: '{\\O}',
    0x00d9: '{\\`U}',
    0x00da: "{\\'U}",
    0x00db: '{\\^U}',
    0x00dc: '{\\"U}',
    0x00dd: "{\\'Y}",
    0x00df: '{\\ss}',
    0x00e0: '{\\`a}',
    0x00e1: "{\\'a}",
    0x00e2: '{\\^a}',
    0x00e3: '{\\~a}',
    0x00e4: '{\\"a}',
    0x00e5: '{\\aa}',
    0x00e6: '{\\ae}',
    0x00e7: '{\\c{c}}',
    0x00e8: '{\\`e}',
    0x00e9: "{\\'e}",
    0x00ea: '{\\^e}',
    0x00eb: '{\\"e}',
    0x00ec: '{\\`\\i}',
    0x00ed: "{\\'\\i}",
    0x00ee: '{\\^\\i}',
    0x00ef: '{\\"\\i}',
    0x00f1: '{\\~n}',
    0x00f2: '{\\`o}',
    0x00f3: "{\\'o}",
    0x00f4: '{\\^o}',
    0x00f5: '{\\~o}',
    0x00f6: '{\\"o}',
    0x00f7: '{\\mbox{$\\div$}}',
    0x00f8: '{\\o}',
    0x00f9: '{\\`u}',
    0x00fa: "{\\'u}",
    0x00fb: '{\\^u}',
    0x00fc: '{\\"u}',
    0x00fd: "{\\'y}",
    0x00ff: '{\\"y}',
    0x0100: '{\\=A}',
    0x0101: '{\\=a}',
    0x0102: '{\\u{A}}',
    0x0103: '{\\u{a}}',
    0x0104: '{\\c{A}}',
    0x0105: '{\\c{a}}',
    0x0106: "{\\'C}",
    0x0107: "{\\'c}",
    0x0108: "{\\^C}",
    0x0109: "{\\^c}",
    0x010a: "{\\.C}",
    0x010b: "{\\.c}",
    0x010c: "{\\v{C}}",
    0x010d: "{\\v{c}}",
    0x010e: "{\\v{D}}",
    0x010f: "{\\v{d}}",
    0x0112: '{\\=E}',
    0x0113: '{\\=e}',
    0x0114: '{\\u{E}}',
    0x0115: '{\\u{e}}',
    0x0116: '{\\.E}',
    0x0117: '{\\.e}',
    0x0118: '{\\c{E}}',
    0x0119: '{\\c{e}}',
    0x011a: "{\\v{E}}",
    0x011b: "{\\v{e}}",
    0x011c: '{\\^G}',
    0x011d: '{\\^g}',
    0x011e: '{\\u{G}}',
    0x011f: '{\\u{g}}',
    0x0120: '{\\.G}',
    0x0121: '{\\.g}',
    0x0122: '{\\c{G}}',
    0x0123: '{\\c{g}}',
    0x0124: '{\\^H}',
    0x0125: '{\\^h}',
    0x0128: '{\\~I}',
    0x0129: '{\\~\\i}',
    0x012a: '{\\=I}',
    0x012b: '{\\=\\i}',
    0x012c: '{\\u{I}}',
    0x012d: '{\\u\\i}',
    0x012e: '{\\c{I}}',
    0x012f: '{\\c{i}}',
    0x0130: '{\\.I}',
    0x0131: '{\\i}',
    0x0132: '{IJ}',
    0x0133: '{ij}',
    0x0134: '{\\^J}',
    0x0135: '{\\^\\j}',
    0x0136: '{\\c{K}}',
    0x0137: '{\\c{k}}',
    0x0139: "{\\'L}",
    0x013a: "{\\'l}",
    0x013b: "{\\c{L}}",
    0x013c: "{\\c{l}}",
    0x013d: "{\\v{L}}",
    0x013e: "{\\v{l}}",
    0x0141: '{\\L}',
    0x0142: '{\\l}',
    0x0143: "{\\'N}",
    0x0144: "{\\'n}",
    0x0145: "{\\c{N}}",
    0x0146: "{\\c{n}}",
    0x0147: "{\\v{N}}",
    0x0148: "{\\v{n}}",
    0x014c: '{\\=O}',
    0x014d: '{\\=o}',
    0x014e: '{\\u{O}}',
    0x014f: '{\\u{o}}',
    0x0150: '{\\H{O}}',
    0x0151: '{\\H{o}}',
    0x0152: '{\\OE}',
    0x0153: '{\\oe}',
    0x0154: "{\\'R}",
    0x0155: "{\\'r}",
    0x0156: "{\\c{R}}",
    0x0157: "{\\c{r}}",
    0x0158: "{\\v{R}}",
    0x0159: "{\\v{r}}",
    0x015a: "{\\'S}",
    0x015b: "{\\'s}",
    0x015c: "{\\^S}",
    0x015d: "{\\^s}",
    0x015e: "{\\c{S}}",
    0x015f: "{\\c{s}}",
    0x0160: "{\\v{S}}",
    0x0161: "{\\v{s}}",
    0x0162: "{\\c{T}}",
    0x0163: "{\\c{t}}",
    0x0164: "{\\v{T}}",
    0x0165: "{\\v{t}}",
    0x0168: "{\\~U}",
    0x0169: "{\\~u}",
    0x016a: "{\\=U}",
    0x016b: "{\\=u}",
    0x016c: "{\\u{U}}",
    0x016d: "{\\u{u}}",
    0x016e: "{\\r{U}}",
    0x016f: "{\\r{u}}",
    0x0170: "{\\H{U}}",
    0x0171: "{\\H{u}}",
    0x0172: "{\\c{U}}",
    0x0173: "{\\c{u}}",
    0x0174: "{\\^W}",
    0x0175: "{\\^w}",
    0x0176: "{\\^Y}",
    0x0177: "{\\^y}",
    0x0178: '{\\"Y}',
    0x0179: "{\\'Z}",
    # Fixed: U+017A (small z with acute) and U+017C (small z with dot above)
    # previously mapped to the uppercase forms "{\'Z}" / "{\.Z}", which both
    # mis-encoded the lowercase letters and made the inverse table ambiguous.
    0x017a: "{\\'z}",
    0x017b: "{\\.Z}",
    0x017c: "{\\.z}",
    0x017d: "{\\v{Z}}",
    0x017e: "{\\v{z}}",
    0x01c4: "{D\\v{Z}}",
    0x01c5: "{D\\v{z}}",
    0x01c6: "{d\\v{z}}",
    0x01c7: "{LJ}",
    0x01c8: "{Lj}",
    0x01c9: "{lj}",
    0x01ca: "{NJ}",
    0x01cb: "{Nj}",
    0x01cc: "{nj}",
    0x01cd: "{\\v{A}}",
    0x01ce: "{\\v{a}}",
    0x01cf: "{\\v{I}}",
    0x01d0: "{\\v\\i}",
    0x01d1: "{\\v{O}}",
    0x01d2: "{\\v{o}}",
    0x01d3: "{\\v{U}}",
    0x01d4: "{\\v{u}}",
    0x01e6: "{\\v{G}}",
    0x01e7: "{\\v{g}}",
    0x01e8: "{\\v{K}}",
    0x01e9: "{\\v{k}}",
    0x01ea: "{\\c{O}}",
    0x01eb: "{\\c{o}}",
    0x01f0: "{\\v\\j}",
    0x01f1: "{DZ}",
    0x01f2: "{Dz}",
    0x01f3: "{dz}",
    0x01f4: "{\\'G}",
    0x01f5: "{\\'g}",
    0x01fc: "{\\'\\AE}",
    0x01fd: "{\\'\\ae}",
    0x01fe: "{\\'\\O}",
    0x01ff: "{\\'\\o}",
    0x02c6: '{\\^{}}',
    0x02dc: '{\\~{}}',
    0x02d8: '{\\u{}}',
    0x02d9: '{\\.{}}',
    0x02da: "{\\r{}}",
    0x02dd: '{\\H{}}',
    0x02db: '{\\c{}}',
    0x02c7: '{\\v{}}',
    0x03c0: '{\\mbox{$\\pi$}}',
    # consider adding more Greek here
    0xfb01: '{fi}',
    0xfb02: '{fl}',
    0x2013: '{--}',
    0x2014: '{---}',
    0x2018: "{`}",
    0x2019: "{'}",
    0x201c: "{``}",
    0x201d: "{''}",
    0x2020: "{\\dag}",
    0x2021: "{\\ddag}",
    0x2122: "{\\mbox{$^\\mbox{TM}$}}",
    0x2022: "{\\mbox{$\\bullet$}}",
    0x2026: "{\\ldots}",
    0x2202: "{\\mbox{$\\partial$}}",
    0x220f: "{\\mbox{$\\prod$}}",
    0x2211: "{\\mbox{$\\sum$}}",
    0x221a: "{\\mbox{$\\surd$}}",
    0x221e: "{\\mbox{$\\infty$}}",
    0x222b: "{\\mbox{$\\int$}}",
    0x2248: "{\\mbox{$\\approx$}}",
    0x2260: "{\\mbox{$\\neq$}}",
    0x2264: "{\\mbox{$\\leq$}}",
    0x2265: "{\\mbox{$\\geq$}}",
}
# Complete the ASCII range: control characters encode to nothing, printable
# ASCII encodes to itself — unless the explicit table above already covers it.
for _codepoint in range(0x007f):
    if _codepoint not in latex_equivalents:
        latex_equivalents[_codepoint] = '' if _codepoint < 0x0020 else chr(_codepoint)
# Characters that should be ignored and not output in tokenization
_ignore = set([chr(i) for i in list(range(32)) + [127]]) - set('\t\n\r')
# Regexp of chars not in blacklist, for quick start of tokenize
_stoppers = re.compile('[\x00-\x1f!$\\-?\\{~\\\\`\']')
_blacklist = set(' \n\r')
_blacklist.add(None) # shortcut candidate generation at end of data
# Construction of inverse translation table: token (or token tuple) -> ord.
_l2u = {
    '\ ': ord(' ')  # unexpanding space makes no sense in non-TeX contexts
}
for _tex in latex_equivalents:
    # Skip control characters and one-character ASCII entries: those are
    # identity mappings, not LaTeX markup worth inverting.
    if _tex <= 0x0020 or (_tex <= 0x007f and len(latex_equivalents[_tex]) <= 1):
        continue  # boring entry
    _toks = tuple(_tokenize(latex_equivalents[_tex]))
    if _toks[0] == '{' and _toks[-1] == '}':
        _toks = _toks[1:-1]  # strip the outer grouping braces
    if _toks[0].isalpha():
        continue  # don't turn ligatures into single chars
    if len(_toks) == 1 and (_toks[0] == "'" or _toks[0] == "`"):
        continue  # don't turn ascii quotes into curly quotes
    if _toks[0] == '\\mbox' and _toks[1] == '{' and _toks[-1] == '}':
        _toks = _toks[2:-1]  # unwrap \mbox{...}
    if len(_toks) == 4 and _toks[1] == '{' and _toks[3] == '}':
        _toks = (_toks[0], _toks[2])  # \cmd{x} -> (\cmd, x)
    if len(_toks) == 1:
        _toks = _toks[0]  # single token: key is the bare string
    _l2u[_toks] = _tex
# Shortcut candidate generation for certain useless candidates:
# a character is in _blacklist if it can not be at the start
# of any translation in _l2u. We use this to quickly skip through
# such characters before getting to more difficult-translate parts.
# _blacklist is defined several lines up from here because it must
# be defined in order to call _tokenize, however it is safe to
# delay filling it out until now.
# Fill the blacklist with all printable ASCII, then re-allow '{', '$', and
# every character that can begin some translation key in _l2u.
_blacklist.update(chr(_c) for _c in range(0x0020, 0x007f))
_blacklist.remove('{')
_blacklist.remove('$')
for _candidate in _l2u:
    if isinstance(_candidate, tuple):
        if not _candidate or not _candidate[0]:
            continue  # pragma: no cover
        _first = _candidate[0][0]
    else:
        _first = _candidate[0]
    _blacklist.discard(_first)
|
[
"xrotwang@googlemail.com"
] |
xrotwang@googlemail.com
|
458af30fcbf8a51195ccfe3366dbf2663797500d
|
993ef8924418866f932396a58e3ad0c2a940ddd3
|
/Production/python/PrivateSamples/EMJ_UL16APV_mMed-2500_mDark-20_ctau-5_unflavored-down_cff.py
|
a1176e87cc87095c340aa9cf5cc79a9acb526034
|
[] |
no_license
|
TreeMaker/TreeMaker
|
48d81f6c95a17828dbb599d29c15137cd6ef009a
|
15dd7fe9e9e6f97d9e52614c900c27d200a6c45f
|
refs/heads/Run2_UL
| 2023-07-07T15:04:56.672709
| 2023-07-03T16:43:17
| 2023-07-03T16:43:17
| 29,192,343
| 16
| 92
| null | 2023-07-03T16:43:28
| 2015-01-13T13:59:30
|
Python
|
UTF-8
|
Python
| false
| false
| 1,991
|
py
|
import FWCore.ParameterSet.Config as cms

# CMSSW input configuration for the EMJ UL16APV sample
# (mMed-2500, mDark-20, ctau-5, unflavored-down).
# Process every event in the listed files (-1 = no limit).
maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
# Primary and secondary input file lists consumed by the PoolSource below.
readFiles = cms.untracked.vstring()
secFiles = cms.untracked.vstring()
source = cms.Source ("PoolSource",fileNames = readFiles, secondaryFileNames = secFiles)
# MINIAODv2 signal files hosted on the FNAL EOS xrootd endpoint.
readFiles.extend( [
    'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL16APV/step4_MINIAODv2_mMed-2500_mDark-20_ctau-5_unflavored-down_n-500_part-1.root',
    'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL16APV/step4_MINIAODv2_mMed-2500_mDark-20_ctau-5_unflavored-down_n-500_part-10.root',
    'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL16APV/step4_MINIAODv2_mMed-2500_mDark-20_ctau-5_unflavored-down_n-500_part-2.root',
    'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL16APV/step4_MINIAODv2_mMed-2500_mDark-20_ctau-5_unflavored-down_n-500_part-3.root',
    'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL16APV/step4_MINIAODv2_mMed-2500_mDark-20_ctau-5_unflavored-down_n-500_part-4.root',
    'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL16APV/step4_MINIAODv2_mMed-2500_mDark-20_ctau-5_unflavored-down_n-500_part-5.root',
    'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL16APV/step4_MINIAODv2_mMed-2500_mDark-20_ctau-5_unflavored-down_n-500_part-6.root',
    'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL16APV/step4_MINIAODv2_mMed-2500_mDark-20_ctau-5_unflavored-down_n-500_part-7.root',
    'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL16APV/step4_MINIAODv2_mMed-2500_mDark-20_ctau-5_unflavored-down_n-500_part-8.root',
    'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL16APV/step4_MINIAODv2_mMed-2500_mDark-20_ctau-5_unflavored-down_n-500_part-9.root',
] )
|
[
"enochnotsocool@gmail.com"
] |
enochnotsocool@gmail.com
|
7a2be79cbb91cadb4e2dae87334243513712b700
|
b9ccff33b9003c666d940dcab81e812f3cd9fa36
|
/test.py
|
866d57684b553cf78e50dbc2a407cdbfd85fe52e
|
[] |
no_license
|
willdoescode/nim-math-computations-python
|
10850ef4c50fdc5245c365c1a40ff69dc6fa7667
|
e40b4ab335a32e8ba84b60a4dc1fbb1c98dd1fea
|
refs/heads/main
| 2023-03-06T05:42:33.698655
| 2021-02-22T05:55:42
| 2021-02-22T05:55:42
| 341,094,723
| 16
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 345
|
py
|
import math_computations

# Smoke test for the math_computations module: exercises add, the fib
# iterator, the fib function, and is_prime with a few sample inputs.
print('--- Add')
print(math_computations.add(5, 5))
print('--- Fib iterator')
for i in math_computations.fib(10):
    print(i)
print('--- Fib func')
print(math_computations.fibF(10))
print('--- Is Prime')
print(math_computations.is_prime(2))
print(math_computations.is_prime(4))
print(math_computations.is_prime(7))
|
[
"williamlane923@gmail.com"
] |
williamlane923@gmail.com
|
c8eaaeff0e10a34dc8797ff71d4ae9cab3cf709f
|
b7400cf7509d8cc2383fe379b3cdf39155350510
|
/AI/ML_practice/DiigitRecognition/DIgitRec.py
|
7a25639c3add71e67451b34a03df6a7a99eaa2cc
|
[] |
no_license
|
Rajesh-Khanna/Currently-Learning
|
869f2637f70a14d8691b5675aa201e1d355bf578
|
bddfa0257c5ce1b5c981adefe4fd4e5baffba89f
|
refs/heads/master
| 2021-05-05T17:09:05.182197
| 2018-06-15T14:41:57
| 2018-06-15T14:41:57
| 117,344,246
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,665
|
py
|
from flask import Flask,render_template,request
from scipy.misc import imsave, imread, imresize
from keras.models import load_model
import numpy as np
import tensorflow as tf
import base64
import cv2
import codecs,re
app = Flask(__name__)
def convertImage(imgData1):
    """Extract the base64 payload from a data-URI byte string and write the
    decoded bytes to 'output.png' in the working directory."""
    match = re.search(b'base64,(.*)', imgData1)
    decoded = base64.b64decode(match.group(1))
    with open('output.png', 'wb') as out_file:
        out_file.write(decoded)
# Load the pre-trained MNIST convnet once at startup and capture the default
# TF graph so request handlers can run predictions against it (TF1-style API).
model = load_model("MNISTCONV4.h5")
model.summary()  # log the architecture at startup
graph = tf.get_default_graph()
@app.route('/')
def index():
    """Serve the digit-drawing front-end page."""
    return render_template("DigitsRecognition.html")
@app.route('/predict/',methods=['GET','POST'])
def PREDICT():
    """Decode the posted image, normalize it to MNIST-style 28x28 input,
    and return the model's raw prediction vector as a string."""
    img = request.get_data() #.decode('utf-8')
    convertImage (img)  # writes the decoded image to output.png
    x = imread('output.png',mode='P')
    img = np.invert(x)  # invert so the digit is bright on dark
    # img = 255*np.reshape(x,(28,28))
    img = img.astype(np.uint8)
    _,img = cv2.threshold(img,127,255,cv2.THRESH_BINARY)
    # crop to the bounding box of the drawn digit
    x,y,w,h = cv2.boundingRect(img)
    img = img[y:y+h,x:x+w]
    # pad the crop toward a square before resizing
    r = h-w
    if r > 0:
        r//=2
        h = 0
    else:
        r = 0
        # NOTE(review): r was reset to 0 on the previous line, so h is
        # always 0 here - likely meant to use the pre-reset r; confirm.
        h = -r//2
    img = cv2.copyMakeBorder(img,2+h,2+h,r+2,r+2,cv2.BORDER_CONSTANT)
    img = imresize(img,(28,28))
    kernel = np.ones((3,3),np.uint8)
    img = cv2.dilate(img, kernel, iterations=1)  # thicken the strokes
    img = cv2.blur(img,(3,3))
    x = img.astype(np.float64)
    img = 255-x
    # x = np.ceil(x/255)
    # print(len(x))
    imsave('op.png',img)  # debug snapshot of the final model input
    x = np.reshape(x,(1,28,28,1))  # batch of one 28x28 single-channel image
    # x = x/255
    # x = np.array([np.ceil(x[0])])
    # x = np.array([x,])
    # TF1: predictions must run inside the graph captured at startup
    with graph.as_default():
        out = model.predict(x)
    # response = np.array_str(np.argmax(out,axis=1))
    # print(out)
    return np.array2string(out[0])

if __name__ == "__main__":
    # Development server on port 8080.
    app.run(debug=True,port=8080)
|
[
"chrajeshkhanna82798@gmail.com"
] |
chrajeshkhanna82798@gmail.com
|
8d920587db820c1cc9ea9d02fb0bc8dfdba4d76b
|
85fbaedbca8013b2af6ca1a7610865cd154d957f
|
/procurement/models.py
|
65e390bae97c629f9c7102a2f05940a9e24589eb
|
[] |
no_license
|
AshikurRahman-sec/Code_Breakers_NSU
|
63be3789f7c161a66e758ecad33948de4ff73702
|
f129963ca8da7fcb11f0ee04903fabaf25db7719
|
refs/heads/master
| 2022-12-22T11:28:38.215571
| 2020-09-24T01:49:30
| 2020-09-24T01:49:30
| 297,012,093
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,678
|
py
|
from django.db import models
# Create your models here.
from django.db import models
from django_currentuser.db.models import CurrentUserField
# Create your models here.
class Sales_Manager(models.Model):
    """An order record captured by a sales manager, with product details."""

    user = CurrentUserField()  # user who created the record
    order_code = models.CharField(max_length=11, blank=True, null=True)
    product_name = models.CharField(max_length=11, blank=True, null=True)
    product_quantity = models.PositiveIntegerField(blank=True, null=True)
    product_picture = models.ImageField(upload_to='image/')
    product_colour = models.CharField(max_length=11, blank=True, null=True)
    description = models.TextField()
    # NOTE(review): 'deliverd' is misspelled but kept - renaming the field
    # would require a schema migration.
    deliverd = models.BooleanField(default=False,blank=True, null=True)

    def __str__(self):
        """Display records by their order code."""
        return self.order_code
class Order_Status(models.Model):
    """Per-stage progress tracker linked one-to-one to a Sales_Manager order."""

    # Two-state progress flag shared by every stage field below.
    STATUS_CHOICES = (
        ('D', 'Done'),
        ('P', 'Pending'),
    )
    user = CurrentUserField()  # user who created the record
    sales_manager = models.OneToOneField(Sales_Manager, on_delete=models.CASCADE,blank=True, null=True, verbose_name="Order Code")
    # NOTE(review): 'order_recived' is misspelled but kept - renaming the
    # field would require a schema migration.
    order_recived = models.CharField(max_length=2, choices=STATUS_CHOICES, blank=False, default='D', verbose_name="Order Received")
    raw_materials_collection = models.CharField(max_length=2, choices=STATUS_CHOICES, blank=False, default='D')
    production_stage1 = models.CharField(max_length=2, choices=STATUS_CHOICES, blank=False, default='D')
    production_stage2 = models.CharField(max_length=2, choices=STATUS_CHOICES, blank=False, default='D')
    packing = models.CharField(max_length=2, choices=STATUS_CHOICES, blank=False, default='D')
    shipping = models.CharField(max_length=2, choices=STATUS_CHOICES, blank=False, default='D')
|
[
"ashik129603@gmail.com"
] |
ashik129603@gmail.com
|
1a27b7cb18413d5522bf3d1a3fb9298b4be330c4
|
6810a482759afd585db7bb0b85fd0416f0450e6d
|
/Open Kattis/sibice.py
|
f2d6c7c2fb3679fd80f435ba5541e145a8be4611
|
[] |
no_license
|
BenRStutzman/kattis
|
01b000ac2353c8b8000c6bddec3698f66b0198ef
|
005720f853e7f531a264227d0d9aaa19d4d7cf1b
|
refs/heads/master
| 2020-07-15T23:52:45.785021
| 2019-11-09T03:28:06
| 2019-11-09T03:28:06
| 205,675,532
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 292
|
py
|
import sys
import math

# Kattis "sibice": a match fits in the box iff its length does not exceed
# the box diagonal. Input: first line "num box_length box_width", then one
# match length per line. Comparing squared lengths avoids a float sqrt.
[num, box_length, box_width] = [int(num) for num in sys.stdin.readline().split()]
diag = box_length**2 + box_width**2
for i in range(num):
    match_length = int(sys.stdin.readline())
    if match_length**2 <= diag:
        print("DA")  # the match fits
    else:
        print("NE")  # too long
|
[
"tysorx89@gmail.com"
] |
tysorx89@gmail.com
|
9d1544e03e6517060106ba3d8555c94351c4e3c9
|
b5fb45288ed2a204692051ab78e72d8aa6e5accd
|
/argo_data_scripts/util/count_concurrent.py
|
d13a0c91034e0cdcecb1c9531b3779cf08de3da0
|
[
"Apache-2.0"
] |
permissive
|
nithinksath96/MMdetection_TensorRT_FP16
|
d4987f003798f5d6d4fe5bde2f30dd5ee2e8596d
|
c8379b209d4deeff9350baf5bbedfc95fb8941f4
|
refs/heads/master
| 2023-02-13T20:00:21.834541
| 2021-01-06T09:24:20
| 2021-01-06T09:24:20
| 327,260,988
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,792
|
py
|
# given rt_ series input, count the max number of concurrent jobs
# current implementation only applies to inf results, where processing starts immediately
import argparse, json, pickle
from os.path import join, isfile
from tqdm import tqdm
import numpy as np
from pycocotools.coco import COCO
import sys; sys.path.insert(0, '..'); sys.path.insert(0, '.')
from util import mkdir2
def parse_args():
    """Parse command-line options for the concurrent-job counter."""
    ap = argparse.ArgumentParser()
    ap.add_argument('--annot-path', type=str, required=True)
    ap.add_argument('--fps', type=float, default=30)
    ap.add_argument('--result-dir', type=str, required=True)
    ap.add_argument('--out-dir', type=str, default=None)
    ap.add_argument('--type', type=str, default='det')
    ap.add_argument('--overwrite', action='store_true', default=False)
    return ap.parse_args()
def main():
    """Compute, per sequence, the max number of concurrently running jobs
    implied by the recorded timestamps, then report and save the counts."""
    opts = parse_args()
    out_dir = mkdir2(opts.out_dir) if opts.out_dir else opts.result_dir
    db = COCO(opts.annot_path)
    seqs = db.dataset['sequences']
    seq_dirs = db.dataset['seq_dirs']
    n_concurrent = []
    for sid, seq in enumerate(tqdm(seqs)):
        # NOTE(review): frame_list is computed but never used below.
        frame_list = [img for img in db.imgs.values() if img['sid'] == sid]
        results = pickle.load(open(join(opts.result_dir, seq + '.pkl'), 'rb'))
        # use raw results when possible in case we change class subset during evaluation
        if opts.type == 'det':
            timestamps = results['timestamps']
            input_fidx = results['input_fidx']
        else:
            # two-detector results: merge both timestamp/input streams
            det1_timestamps = results['det1_timestamps']
            det2_timestamps = results['det2_timestamps']
            det1_input_fidx = results['det1_input_fidx']
            det2_input_fidx = results['det2_input_fidx']
            timestamps = np.concatenate((det1_timestamps, det2_timestamps))
            input_fidx = np.concatenate((det1_input_fidx, det2_input_fidx))
        # A job starts when its frame arrives (fidx/fps) and ends at its
        # recorded timestamp; sweep all events in time order.
        t_start = np.asarray(input_fidx)/opts.fps
        t_end = np.asarray(timestamps)
        t_all = np.concatenate((t_start, t_end))
        order = np.argsort(t_all)
        n_output = len(t_start)
        n_current = 0
        max_current = 0
        # indices < n_output are start events, the rest are end events
        for i in order:
            if i < n_output:
                # start
                n_current += 1
                max_current = max(max_current, n_current)
            else:
                # end
                n_current -= 1
        n_concurrent.append(max_current)
    print(f'Max number of concurrent jobs {max(n_concurrent)}')
    out_path = join(out_dir, 'n_concurrent.pkl')
    if opts.overwrite or not isfile(out_path):
        pickle.dump(n_concurrent, open(out_path, 'wb'))

if __name__ == '__main__':
    main()
|
[
"nsathish@compute-0-32.local"
] |
nsathish@compute-0-32.local
|
9efc6155568b3d9233b7ce966d249f50faa2e6f5
|
d0fe73d675e7a7a4b1c5ccfbe3fce46f62af4b27
|
/plugin/__init__.py
|
2f5b4348bd8f74b3b4921f1f9c23e71d2c5a9ba4
|
[] |
no_license
|
nguyenvulebinh/OCR-Seq2Seq-Language-Model
|
872f99fc2c161e78669c27976e66f9a0fd5195aa
|
92198cfce04578e794ccc34ae0aacb33f0033bac
|
refs/heads/master
| 2022-06-28T22:51:54.819120
| 2020-05-12T04:01:58
| 2020-05-12T04:01:58
| 188,793,839
| 9
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 107
|
py
|
import plugin.criterions
import plugin.models
import plugin.modules
import plugin.tasks
import plugin.data
|
[
"nguyenvulebinh@gmail.com"
] |
nguyenvulebinh@gmail.com
|
60e9e905c443d1eefe82333a9c6785ebea1e4fd7
|
30a8319e110be273752024ba053fbec9c04556ef
|
/posts/migrations/0001_initial.py
|
267ac7e0a8fd4a33c04c3ec1e515020b4f80cd07
|
[] |
no_license
|
Sighovie/django-blog
|
534a35c31fdc74cfbb177f60d1a8c5e6d3d95308
|
62c6de8ae3d409d45c6ece1a7e90a3913c569760
|
refs/heads/master
| 2020-03-22T04:16:29.185117
| 2018-07-04T12:21:28
| 2018-07-04T12:21:28
| 139,486,591
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,045
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-16 00:53
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial migration: creates the Post model/table.

    Auto-generated by Django 1.11.5; applied migrations should not be
    hand-edited.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('content', models.TextField()),
                ('created_date', models.DateTimeField(auto_now_add=True)),
                ('published_date', models.DateTimeField(blank=True, default=django.utils.timezone.now, null=True)),
                ('views', models.IntegerField(default=0)),
                ('tag', models.CharField(blank=True, max_length=30, null=True)),
                # upload_to as bytes (b'img') is a Python 2 artifact; left
                # unchanged because altering it would alter the migration.
                ('image', models.ImageField(blank=True, null=True, upload_to=b'img')),
            ],
        ),
    ]
|
[
"ighovie@yahoo.com"
] |
ighovie@yahoo.com
|
f2c1c16ed0e8f4f8e07a11d1f183c67fd64566e0
|
79817c4585739a2dcec53ed92937f050a58e526f
|
/scripts/workflow_batch_nuclear.py
|
b9877571f845b9139d2663fb52958e9ce3e81423
|
[
"MIT"
] |
permissive
|
nickborodinov/simsdata
|
acf47ed508d63065052cea7a999c6cde07a97f49
|
618f9952b8978624c85f4e3a4c080c4ce9f10a1c
|
refs/heads/master
| 2020-07-24T23:26:14.295536
| 2019-09-12T15:28:34
| 2019-09-12T15:28:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,979
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 16 15:26:08 2016
@author: iv1
"""
import datetime
#%%This code won’t work from Python console
#!!! Clean import procedure with __init__.py file etc.
from SIMSmodel import SIMSmodel
from SIMSdata import SIMSdata
verbose = True  # toggle progress messages
#%%
#print('start time = %s'%(str(timestamps['start'])))
if __name__ == '__main__':
    # Number of worker processes handed to the converter.
    cores = 8
    #Path to file and prefix
    path = r'/home/cades/data/good_brain/SIMS'
    prefix_list = ['rubber e (rubber e)-#1 of 2-(-)', 'rubber e (rubber e)-#1 of 2-(+)', 'rubber f (rubber f)-#1 of 2-(-)', 'rubber f (rubber f)-#1 of 2-(+)']
    # Convert each raw SIMS dataset in turn.
    for prefix in prefix_list:
        #Initialize converter class
        if verbose:
            print('Initializing data handler...')
        sims_data = SIMSdata()
        #sims_data.load_h5(h5_path)
        #Load SIMS measurement data from raw datafile
        if verbose:
            print('Loading raw data...')
        sims_data.load_raw(path, prefix, nuke=True)
        #Preparing SIMS conversion model
        if verbose:
            print('Intiailizing data converter...')
        model = SIMSmodel(xy_bins=1, z_bins=3, counts_threshold=1000000, tof_resolution=64) #Minimal overhead
        model.convert_all_peaks()
        #model.enable_shift_correction()
        if verbose:
            print('Converting data...')
        data2d = sims_data.convert_data('all', model, cores)
        # if verbose:
        #     print('Loading converted data...')
        # sims_data.load_converted_data(1)
        # if verbose:
        #     print('Calculating PCA...')
        # sims_data.PCA(comp_num=20, spatial_range=(slice(30), -1, -1))
        # if verbose:
        #     print('Calculating NMF...')
        # sims_data.NMF(20)
        #Plotting averaged data
        #sims_data.plot_ave_data()
        if verbose:
            print('Finished converting, closing model.')
        sims_data.close()
|
[
"ni1@ornl.gov"
] |
ni1@ornl.gov
|
a5c96eee31895f0ff5c1ced0b9c2665be4c5955b
|
74c40d538ed19e5ab5a3859c000dd89c5d13719e
|
/python/problem12.py
|
478b984d9aa3570d47505a163832014f616e8c4a
|
[] |
no_license
|
ataylor89/projecteuler
|
1fb8489d7526a00fb29cd1536e13ba0d2db65716
|
6dd84c25bec2454def48c3de41a55452e9b692e8
|
refs/heads/master
| 2021-01-19T06:59:00.002284
| 2020-01-14T02:29:07
| 2020-01-14T02:29:07
| 31,574,040
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 273
|
py
|
#!/usr/bin/python
import math
def num_divisors(n):
    """Return the number of positive divisors of n.

    Walks trial divisors up to sqrt(n); each hit x contributes the pair
    (x, n // x), counted once when x * x == n.

    Fixes two bugs in the original: range(1, m) excluded m itself, so the
    pair containing int(sqrt(n)) was never counted (e.g. 12 -> 4 instead
    of 6), and perfect squares would have double-counted their root.
    """
    num = 0
    m = int(math.sqrt(n))
    for x in range(1, m + 1):
        if n % x == 0:
            # x*x == n means x pairs with itself: count it once.
            num += 1 if x * x == n else 2
    return num
# Project Euler 12: find the first triangular number with more than 500 divisors.
t = 0         # current triangular number
incr = 1      # next natural number to add (t becomes 1, 3, 6, 10, ...)
found = False
while not found:
    t += incr
    incr += 1
    if num_divisors(t) > 500:
        found = True
# print(t) is valid in both Python 2 and 3; the original `print t`
# was a syntax error under Python 3.
print(t)
|
[
"rolyata47@gmail.com"
] |
rolyata47@gmail.com
|
cb808db14099e13ce15d190e7cae0d30f64b264c
|
cfd7d6fca2d547d0bee617ed22ffcb31a655cc43
|
/machine-learning/AdaBoost.py
|
5b6fd7cb95cb6b75d79cf5dcc3b485e346f40bac
|
[] |
no_license
|
AnonymousMB/Machine-Learning-in-action
|
cada512dbecf87c2bd74eb4d3a6a085579e9fc32
|
d1e0b1dc8be5e959d62e563bf54e3851aa5ae7d9
|
refs/heads/master
| 2020-03-19T15:56:37.695438
| 2017-08-14T03:29:09
| 2017-08-14T03:29:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,073
|
py
|
# coding=utf-8
from numpy import *
from matplotlib import pyplot as plt
import math
class Data:
    """One training sample plus its current AdaBoost distribution weight."""

    # Shared bookkeeping, populated by loadDataSet() as a side effect.
    total = None       # number of samples in the training set
    featureNum = None  # number of input features per sample

    def __init__(self, weight, num, feature, label):
        """Store weight, sample index, feature row and class label."""
        self.weight = weight    # distribution weight D(i)
        self.num = num          # position of this sample in the data set
        self.feature = feature  # 1 x featureNum feature matrix
        self.label = label      # class label (+1 / -1)
        self.tempWeight = None  # scratch slot, unused until a weight update
class Tree:
    """One-level decision stump used as the AdaBoost base learner."""

    def __init__(self):
        # Label predicted on each side of the threshold.
        self.leftLabel = None
        self.rightLabel = None
        # Weight (alpha) of this stump in the final additive classifier.
        self.weight = None
        # Threshold value used for the split.
        self.splitFeatureValue = None
        # Index of the feature the split tests.
        self.splitFeatureOrder = None
        # Weighted classification error of this stump on the training set.
        self.err = None

    def classifier(self, feature):
        """Classify a 1 x n feature matrix with this stump.

        :type feature: matrix
        """
        observed = feature[0, self.splitFeatureOrder]
        return self.rightLabel if observed > self.splitFeatureValue else self.leftLabel
class Classifier:
    """Additive ensemble of weighted decision stumps."""

    def __init__(self, treeSet):
        self.treeSet = treeSet  # iterable of trained Tree stumps

    def classify(self, feature):
        """Return the weighted sum of every stump's vote on *feature*."""
        return sum(tree.weight * tree.classifier(feature) for tree in self.treeSet)
def loadDataSet(path):
    """Read a whitespace-separated data file into a 1 x m matrix of Data objects.

    Each line holds the feature values followed by the class label in the
    last column. Labels other than 1 are remapped to -1, and every sample
    starts with the uniform weight 1/m.

    Side effects: sets Data.featureNum and Data.total class attributes,
    which the rest of the module relies on.
    """
    fr = open(path, 'r')
    dataSet = []
    labelSet = []
    for line in fr.readlines():
        lineArr = line.strip().split()
        dataSet.append(lineArr[0:-1])   # all but last column: features
        labelSet.append(lineArr[-1])    # last column: label
    dataMat = mat(dataSet, float)
    labelMat = mat(labelSet, float)
    count = 0
    m, n = shape(dataMat)
    Data.featureNum = n
    Data.total = m
    dataSet = []
    weight = float(1.0 / m)
    # Initialise the weight distribution: uniform 1/m for every sample.
    for count in range(m):
        if labelMat[0, count] != 1:
            labelMat[0, count] = -1  # binary labels normalised to {+1, -1}
        feature = dataMat[count, :]
        data = Data(weight, count, feature, labelMat[0, count])
        dataSet.append(data)
        count += 1
    dataSet = mat(dataSet)
    return dataSet
# dataSet is a 1 x (number of training samples) matrix of Data objects
def loadSimpleData():
    """Load the default demo data set (the mlia testSetRBF file)."""
    path1 = r'data/plrx/simpleTest.txt'          # alternative data set, unused
    path2 = r'data/mlia/Ch06/testSetRBF.txt'
    return loadDataSet(path2)
def chooseSplitValue(dataSet, featureOrder):
    """Find the best split threshold for one feature.

    Tries every sample's value of feature *featureOrder* as a candidate
    threshold, with both label orientations (-1|+1 and +1|-1), and keeps
    the combination with the lowest weighted classification error.

    :param dataSet: 1 x N matrix of Data samples
    :param featureOrder: index of the feature to split on
    :return: dict with keys 'err', 'order', 'value', 'labelList'
    """
    labelList = []
    i = 0
    err = inf
    # Initialise the best error to +infinity.
    leftLabel = 0
    rightLabel = 0
    # Initialise the left and right labels.
    featureInf = {"err": 0, "order": 0, "value": 0, "labelList": None}
    # Initialise the returned split description.
    for i in range(2):
        # Try both orientations: (left=-1, right=+1) and (left=+1, right=-1).
        leftLabelTemp = float(-1 + i * 2)
        rightLabelTemp = -leftLabelTemp
        dataNum = 0
        for dataNum in range(Data.total):
            # Use each sample's feature value as the candidate threshold.
            count = 0
            left = set()
            right = set()
            for count in range(Data.total):
                # Partition sample indices by comparing against sample dataNum's
                # value of this feature; the sets hold sample numbers.
                data = dataSet[0, count]
                if data.feature[0, featureOrder] <= dataSet[0, dataNum].feature[0, featureOrder]:
                    left.add(data.num)
                else:
                    right.add(data.num)
                count += 1
            leftErr = 0
            count = 0
            for count in left:
                # Sum the weights of misclassified samples on the left side.
                data = dataSet[0, count]
                if data.label != leftLabelTemp:
                    leftErr += data.weight
                count += 1
            count = 0
            rightErr = 0
            for count in right:
                # Sum the weights of misclassified samples on the right side.
                data = dataSet[0, count]
                if data.label != rightLabelTemp:
                    rightErr += data.weight
                count += 1
            if leftErr + rightErr < err:
                # Current split beats the best seen so far: record it.
                err = leftErr + rightErr
                leftLabel = leftLabelTemp
                rightLabel = rightLabelTemp
                featureInf['value'] = dataSet[0, dataNum].feature[0, featureOrder]
            dataNum += 1
        i += 1
    labelList.append(leftLabel)
    labelList.append(rightLabel)
    featureInf['err'] = err
    featureInf['order'] = featureOrder
    featureInf['labelList'] = labelList
    return featureInf
def chooseFeature(dataSet):
    """Pick the feature whose best split has the lowest weighted error.

    Evaluates chooseSplitValue() for every feature and returns the winning
    split description.

    Cleanup: removed the unused local `stdList` and the manual counter
    increments that were redundant with `range()`.

    :param dataSet: 1 x N matrix of Data samples
    :return: dict with keys 'err', 'order', 'value', 'labelList'
    """
    err = inf
    featureInf = None
    for order in range(Data.featureNum):
        candidate = chooseSplitValue(dataSet, order)
        if candidate['err'] < err:
            err = candidate['err']
            featureInf = candidate
    return featureInf
def treeGenerate(dataSet):
    # Train one decision stump on the (weighted) data set.
    # The stump's ensemble weight is alpha = 0.5 * ln((1 - err) / err);
    # a zero-error stump gets a fixed weight of 1 to avoid log-of-infinity.
    tree = Tree()
    featureInf = chooseFeature(dataSet)
    print featureInf
    labelList = featureInf['labelList']
    tree.leftLabel = labelList[0]
    tree.rightLabel = labelList[1]
    if featureInf['err'] != 0:
        tree.weight = 0.5 * math.log((1.0 - featureInf['err']) / featureInf['err'])
    else:
        tree.weight = 1
    tree.splitFeatureOrder = featureInf['order']
    tree.splitFeatureValue = featureInf['value']
    tree.err = featureInf['err']
    return tree
def updateWeight(dataSet, tree):
    """Re-weight every sample after adding *tree* (standard AdaBoost update).

    Each weight is scaled by exp(-alpha * y_i * h(x_i)) and the whole
    distribution is renormalised by the partition factor Z.
    """
    samples = [dataSet[0, k] for k in range(Data.total)]
    # Unnormalised new weights, one per sample.
    scaled = [s.weight * exp(-tree.weight * s.label * tree.classifier(s.feature))
              for s in samples]
    Z = sum(scaled)  # normalisation constant
    for s, w in zip(samples, scaled):
        s.weight = w / Z
    return dataSet
def terminate(classifier, dataSet, minErr):
    # Stopping test: evaluate the current ensemble on the training set and
    # report the accuracy. Returns False once the training error drops to
    # minErr or below (i.e. "stop"), True otherwise ("keep iterating").
    total = 0
    correctCount = 0
    for i in range(Data.total):
        data = dataSet[0, i]
        value = classifier.classify(data.feature)
        # Threshold the additive score at 0 to get a hard +1/-1 prediction.
        if value >= 0:
            value = 1
        else:
            value = -1
        if value == data.label:
            correctCount += 1
        total += 1
    correctRate = float(correctCount) / total
    print "迭代完成,正确率为", correctRate
    if 1 - correctRate <= minErr:
        return False
    else:
        return True
def adaBoost(dataSet, iterNum, minErr):
    # AdaBoost main loop: trains up to iterNum stumps, or until the
    # training error falls to minErr (terminate() returns False).
    count = 0
    classifier = None
    treeSet = set()
    flag = True
    # Run condition starts as True.
    while (count < iterNum and flag is True):
        tree = treeGenerate(dataSet)
        # Train one decision stump on the current weight distribution.
        dataSet = updateWeight(dataSet, tree)
        # Update the sample weight distribution.
        treeSet.add(tree)
        # Add the stump to the ensemble.
        classifier = Classifier(treeSet)
        # Rebuild the combined classifier.
        flag = terminate(classifier, dataSet, minErr)
        # Check the stopping condition.
        count += 1
    if flag is True:
        print "迭代超过最大次数,终止迭代并返回分类器"
    return classifier
def draw(dataSet, classifier):
    # Plot the training samples (green = label -1, blue = label +1) and
    # each stump's decision threshold as a line. The +0.1 offset matches the
    # original implementation; presumably a visual tweak — TODO confirm.
    fig, ax = plt.subplots(1, 1)
    i = 0
    for i in range(Data.total):
        data = dataSet[0, i]
        if data.label != 1:
            ax.scatter(data.feature[0, 0], data.feature[0, 1], s=20, marker='o', c='g')
        else:
            ax.scatter(data.feature[0, 0], data.feature[0, 1], s=20, marker='o', c='b')
        i += 1
    for tree in classifier.treeSet:
        axis = tree.splitFeatureOrder
        value = tree.splitFeatureValue
        otherAxis = arange(-1, 1, 0.1)
        if axis == 0:
            # Split on feature 0: draw a vertical line at the threshold.
            value = otherAxis * 0 + value + 0.1
            ax.plot(value, otherAxis)
        else:
            # Split on feature 1: draw a horizontal line at the threshold.
            value = otherAxis * 0 + value + 0.1
            ax.plot(otherAxis, value)
    plt.show()
# Script entry: train AdaBoost on the demo data (up to 100 stumps,
# stopping at zero training error) and visualise the result.
dataSet = loadSimpleData()
classifier = adaBoost(dataSet, 100, 0)
draw(dataSet, classifier)
|
[
"1304137697@qq.com"
] |
1304137697@qq.com
|
a27806e252e67dc407b440d4781e0d23bf86fc34
|
f3827ae39e077daf5507959a13d1ac4a782fe084
|
/src/accounts/urls.py
|
c29e329060e8daed823e775738b48945589f62da
|
[] |
no_license
|
achiengcindy/ExtendingDjangoAuth
|
c6bc2c5360d90378d7d96efb3132506ad10349d9
|
19214ef7ef9ccdcc66e4ec15fa9e22e5fd5e24f3
|
refs/heads/master
| 2020-03-26T20:09:34.687774
| 2018-08-28T19:32:46
| 2018-08-28T19:32:46
| 145,308,095
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 416
|
py
|
from django.urls import path
from .views import register, home, edit, activate, account_activation_sent
# URL routes for the accounts app.
urlpatterns = [
    path('', home, name='home'),
    path('register/', register, name='register'),
    path('edit/', edit, name='edit'),
    # Fixed: the pattern previously contained a stray ')' after <slug:token>,
    # which forced a literal ')' into every activation URL and broke links
    # generated with reverse('activate', ...).
    path('activate/<slug:uidb64>/<slug:token>/', activate, name='activate'),
    path('account_activation_sent/', account_activation_sent, name='account_activation_sent')
]
|
[
"achiengcindy36@gmail.com"
] |
achiengcindy36@gmail.com
|
702fb4c5e14061b0357f79b59aab9e34ee74c5ae
|
e708ae6207ca775fe6f5106ea971c994b29ffac0
|
/examples/shrec17/dataset.py
|
a5c07eee3fcfc1e87c9f3610656ea664095326d1
|
[
"MIT"
] |
permissive
|
arewellborn/s2cnn
|
6f4304cd22456bfaefe7623e53ac7b8afda22471
|
fd734b845f1d313ce13a6f1161c8fa383b28a9e5
|
refs/heads/master
| 2021-05-23T14:24:31.276613
| 2021-01-20T04:37:53
| 2021-01-20T04:37:53
| 253,338,038
| 0
| 0
|
MIT
| 2020-04-05T21:43:51
| 2020-04-05T21:43:51
| null |
UTF-8
|
Python
| false
| false
| 11,432
|
py
|
# pylint: disable=E1101,R,C
import csv
import glob
import os
import re
import numpy as np
import torch
import torch.utils.data
import trimesh
import logging
logging.getLogger('pyembree').disabled = True
def rotmat(a, b, c, hom_coord=False):  # apply to mesh using mesh.apply_transform(rotmat(a,b,c, True))
    """
    Create a rotation matrix with an optional fourth homogeneous coordinate

    :param a, b, c: ZYZ-Euler angles
    """
    def rot_z(t):
        ct, st = np.cos(t), np.sin(t)
        return np.array([[ct, st, 0, 0],
                         [-st, ct, 0, 0],
                         [0, 0, 1, 0],
                         [0, 0, 0, 1]])

    def rot_y(t):
        ct, st = np.cos(t), np.sin(t)
        return np.array([[ct, 0, st, 0],
                         [0, 1, 0, 0],
                         [-st, 0, ct, 0],
                         [0, 0, 0, 1]])

    # Compose Z(a) * Y(b) * Z(c) in homogeneous coordinates.
    full = rot_z(a) @ rot_y(b) @ rot_z(c)
    return full if hom_coord else full[:3, :3]
def make_sgrid(b, alpha, beta, gamma):
    """Build the (2b)^2 x 3 Cartesian sampling grid on S2, rotated by ZYZ angles."""
    from lie_learn.spaces import S2

    theta, phi = S2.meshgrid(b=b, grid_type='SOFT')
    # Spherical -> Cartesian coordinates, flattened to one point per row.
    points = S2.change_coordinates(np.c_[theta[..., None], phi[..., None]], p_from='S', p_to='C')
    points = points.reshape((-1, 3))
    rot = rotmat(alpha, beta, gamma, hom_coord=False)
    return np.einsum('ij,nj->ni', rot, points)
def render_model(mesh, sgrid):
    """Ray-cast *mesh* from the sphere grid and build a 3-channel image.

    Channels: ray travel distance, normal·ray, and |normal x ray| per grid
    point. Rays originate at each sgrid point and aim at the origin.
    """
    # Cast rays
    # triangle_indices = mesh.ray.intersects_first(ray_origins=sgrid, ray_directions=-sgrid)
    index_tri, index_ray, loc = mesh.ray.intersects_id(
        ray_origins=sgrid, ray_directions=-sgrid, multiple_hits=False, return_locations=True)
    loc = loc.reshape((-1, 3))  # fix bug if loc is empty

    # Each ray is in 1-to-1 correspondence with a grid point. Find the position of these points
    grid_hits = sgrid[index_ray]
    grid_hits_normalized = grid_hits / np.linalg.norm(grid_hits, axis=1, keepdims=True)

    # Compute the distance from the grid points to the intersection points
    dist = np.linalg.norm(grid_hits - loc, axis=-1)

    # For each intersection, look up the normal of the triangle that was hit
    normals = mesh.face_normals[index_tri]
    normalized_normals = normals / np.linalg.norm(normals, axis=1, keepdims=True)

    # Construct spherical images; unhit grid points keep distance 1.
    dist_im = np.ones(sgrid.shape[0])
    dist_im[index_ray] = dist
    # dist_im = dist_im.reshape(theta.shape)

    # shaded_im = np.zeros(sgrid.shape[0])
    # shaded_im[index_ray] = normals.dot(light_dir)
    # shaded_im = shaded_im.reshape(theta.shape) + 0.4

    n_dot_ray_im = np.zeros(sgrid.shape[0])
    # n_dot_ray_im[index_ray] = np.abs(np.einsum("ij,ij->i", normals, grid_hits_normalized))
    n_dot_ray_im[index_ray] = np.einsum("ij,ij->i", normalized_normals, grid_hits_normalized)

    # |n x r| via the explicit cross-product component formula.
    nx, ny, nz = normalized_normals[:, 0], normalized_normals[:, 1], normalized_normals[:, 2]
    gx, gy, gz = grid_hits_normalized[:, 0], grid_hits_normalized[:, 1], grid_hits_normalized[:, 2]
    wedge_norm = np.sqrt((nx * gy - ny * gx) ** 2 + (nx * gz - nz * gx) ** 2 + (ny * gz - nz * gy) ** 2)
    n_wedge_ray_im = np.zeros(sgrid.shape[0])
    n_wedge_ray_im[index_ray] = wedge_norm

    # Combine channels to construct final image
    # im = dist_im.reshape((1,) + dist_im.shape)
    im = np.stack((dist_im, n_dot_ray_im, n_wedge_ray_im), axis=0)

    return im
def rnd_rot():
    """Draw a random rotation as a 4x4 homogeneous matrix."""
    alpha = np.random.rand() * 2 * np.pi
    z = np.random.rand() * 2 - 1          # cos(beta) drawn uniformly from [-1, 1]
    gamma = np.random.rand() * 2 * np.pi
    return rotmat(alpha, np.arccos(z), gamma, True)
class ToMesh:
    """Load a mesh file, clean it, and normalise it into the unit ball.

    Optional augmentation: a random rotation and/or a random translation
    of up to *random_translation* along a random direction.
    """
    def __init__(self, random_rotations=False, random_translation=0):
        self.rot = random_rotations      # apply a random rotation if True
        self.tr = random_translation     # max magnitude of random translation

    def __call__(self, path):
        mesh = trimesh.load_mesh(path)
        # Clean up degenerate geometry before normalising.
        mesh.remove_degenerate_faces()
        mesh.fix_normals()
        mesh.fill_holes()
        mesh.remove_duplicate_faces()
        mesh.remove_infinite_values()
        mesh.remove_unreferenced_vertices()

        # Centre at the origin and scale to the unit ball.
        mesh.apply_translation(-mesh.centroid)

        r = np.max(np.linalg.norm(mesh.vertices, axis=-1))
        mesh.apply_scale(1 / r)

        if self.tr > 0:
            # Translate along a random direction: rotate, shift along x,
            # then undo the rotation unless a random rotation is wanted anyway.
            tr = np.random.rand() * self.tr
            rot = rnd_rot()
            mesh.apply_transform(rot)
            mesh.apply_translation([tr, 0, 0])

            if not self.rot:
                mesh.apply_transform(rot.T)

        if self.rot:
            mesh.apply_transform(rnd_rot())

        # Rescale to slightly inside the unit ball (translation may have
        # pushed vertices out).
        r = np.max(np.linalg.norm(mesh.vertices, axis=-1))
        mesh.apply_scale(0.99 / r)

        return mesh

    def __repr__(self):
        return self.__class__.__name__ + '(rotation={0}, translation={1})'.format(self.rot, self.tr)
class ProjectOnSphere:
    """Render a mesh (and its convex hull) onto a spherical grid.

    Produces a 6 x 2b x 2b image: three channels from the mesh and three
    from its convex hull, each normalised per channel.
    """
    def __init__(self, bandwidth):
        self.bandwidth = bandwidth
        # Unrotated sampling grid, built once and reused for every mesh.
        self.sgrid = make_sgrid(bandwidth, alpha=0, beta=0, gamma=0)

    def __call__(self, mesh):
        im = render_model(mesh, self.sgrid)
        im = im.reshape(3, 2 * self.bandwidth, 2 * self.bandwidth)

        from scipy.spatial.qhull import QhullError  # pylint: disable=E0611
        try:
            convex_hull = mesh.convex_hull
        except QhullError:
            # Degenerate geometry: fall back to the mesh itself.
            convex_hull = mesh

        hull_im = render_model(convex_hull, self.sgrid)
        hull_im = hull_im.reshape(3, 2 * self.bandwidth, 2 * self.bandwidth)

        im = np.concatenate([im, hull_im], axis=0)
        assert len(im) == 6

        # Per-channel (x - mean) / std normalisation; the constants are
        # presumably precomputed statistics of the training set — TODO confirm.
        im[0] -= 0.75
        im[0] /= 0.26
        im[1] -= 0.59
        im[1] /= 0.50
        im[2] -= 0.54
        im[2] /= 0.29
        im[3] -= 0.52
        im[3] /= 0.19
        im[4] -= 0.80
        im[4] /= 0.18
        im[5] -= 0.51
        im[5] /= 0.25

        im = im.astype(np.float32)  # pylint: disable=E1101

        return im

    def __repr__(self):
        return self.__class__.__name__ + '(bandwidth={0})'.format(self.bandwidth)
class CacheNPY:
    """Cache the result of an expensive transform as .npy files next to the input.

    For input ``dir/name.ext`` the cached variants are
    ``dir/<prefix>name_<i>.npy`` for i in range(repeat). With
    ``pick_randomly`` one of the variants is returned (computing and caching
    a missing one on demand); otherwise all ``repeat`` variants are returned
    as a list.

    Fix: the bare ``except:`` in check_trans also intercepted
    KeyboardInterrupt/SystemExit; narrowed to ``except Exception``.
    """

    def __init__(self, prefix, repeat, transform, pick_randomly=True):
        self.transform = transform
        self.prefix = prefix
        self.repeat = repeat
        self.pick_randomly = pick_randomly

    def check_trans(self, file_path):
        """Apply the transform, logging which file caused any failure."""
        print("transform {}...".format(file_path))
        try:
            return self.transform(file_path)
        except Exception:
            print("Exception during transform of {}".format(file_path))
            raise

    def __call__(self, file_path):
        head, tail = os.path.split(file_path)
        root, _ = os.path.splitext(tail)
        npy_path = os.path.join(head, self.prefix + root + '_{0}.npy')

        exists = [os.path.exists(npy_path.format(i)) for i in range(self.repeat)]

        if self.pick_randomly and all(exists):
            i = np.random.randint(self.repeat)
            try:
                return np.load(npy_path.format(i))
            except OSError:
                # Corrupt/unreadable cache entry: mark it missing and regenerate below.
                exists[i] = False

        if self.pick_randomly:
            img = self.check_trans(file_path)
            np.save(npy_path.format(exists.index(False)), img)
            return img

        output = []
        for i in range(self.repeat):
            try:
                img = np.load(npy_path.format(i))
            except OSError:
                # Covers FileNotFoundError (a subclass of OSError) and bad files.
                img = self.check_trans(file_path)
                np.save(npy_path.format(i), img)
            output.append(img)

        return output

    def __repr__(self):
        return self.__class__.__name__ + '(prefix={0}, transform={1})'.format(self.prefix, self.transform)
class Shrec17(torch.utils.data.Dataset):
    '''
    Download SHREC17 and output valid obj files content

    Fix: download() used `os.errno.EEXIST`, but the `os.errno` alias was
    removed in Python 3.6+, so creating the root directory raised
    AttributeError; replaced with os.makedirs(..., exist_ok=True).
    '''

    url_data = 'http://3dvision.princeton.edu/ms/shrec17-data/{}.zip'
    url_label = 'http://3dvision.princeton.edu/ms/shrec17-data/{}.csv'

    def __init__(self, root, dataset, perturbed=True, download=False, transform=None, target_transform=None):
        """Index the .obj files of one split ('train'/'test'/'val').

        :raises ValueError: unknown split name
        :raises RuntimeError: data missing and download=False
        """
        self.root = os.path.expanduser(root)

        if dataset not in ["train", "test", "val"]:
            raise ValueError("Invalid dataset")

        self.dir = os.path.join(self.root, dataset + ("_perturbed" if perturbed else ""))
        self.transform = transform
        self.target_transform = target_transform

        if download:
            self.download(dataset, perturbed)

        if not self._check_exists():
            raise RuntimeError('Dataset not found.' +
                               ' You can use download=True to download it')

        self.files = sorted(glob.glob(os.path.join(self.dir, '*.obj')))
        if dataset != "test":
            # CSV maps file id -> (label, sublabel); skip the header row.
            with open(os.path.join(self.root, dataset + ".csv"), 'rt') as f:
                reader = csv.reader(f)
                self.labels = {}
                for row in [x for x in reader][1:]:
                    self.labels[row[0]] = (row[1], row[2])
        else:
            self.labels = None

    def __getitem__(self, index):
        img = f = self.files[index]
        if self.transform is not None:
            img = self.transform(img)

        if self.labels is not None:
            i = os.path.splitext(os.path.basename(f))[0]
            target = self.labels[i]

            if self.target_transform is not None:
                target = self.target_transform(target)

            return img, target
        else:
            return img

    def __len__(self):
        return len(self.files)

    def _check_exists(self):
        # The split counts as present when at least one .obj file is there.
        files = glob.glob(os.path.join(self.dir, "*.obj"))
        return len(files) > 0

    def _download(self, url):
        """Stream *url* into self.root; skip if already downloaded."""
        import requests

        filename = url.split('/')[-1]
        file_path = os.path.join(self.root, filename)

        if os.path.exists(file_path):
            return file_path

        print('Downloading ' + url)

        r = requests.get(url, stream=True)
        with open(file_path, 'wb') as f:
            for chunk in r.iter_content(chunk_size=16 * 1024 ** 2):
                if chunk:  # filter out keep-alive new chunks
                    f.write(chunk)
                    f.flush()

        return file_path

    def _unzip(self, file_path):
        """Extract the archive into self.root and delete the zip."""
        import zipfile

        if os.path.exists(self.dir):
            return

        print('Unzip ' + file_path)

        zip_ref = zipfile.ZipFile(file_path, 'r')
        zip_ref.extractall(self.root)
        zip_ref.close()
        os.unlink(file_path)

    def _fix(self):
        """Strip texture/normal indices from face lines so trimesh can parse them."""
        print("Fix obj files")

        r = re.compile(r'f (\d+)[/\d]* (\d+)[/\d]* (\d+)[/\d]*')

        path = os.path.join(self.dir, "*.obj")
        files = sorted(glob.glob(path))

        c = 0
        for i, f in enumerate(files):
            with open(f, "rt") as x:
                y = x.read()
                yy = r.sub(r"f \1 \2 \3", y)
                if y != yy:
                    c += 1
                    with open(f, "wt") as x:
                        x.write(yy)
            print("{}/{}  {} fixed    ".format(i + 1, len(files), c), end="\r")

    def download(self, dataset, perturbed):
        """Fetch and unpack the dataset archive (and its labels) into self.root."""
        if self._check_exists():
            return

        # os.errno was removed from the stdlib; exist_ok handles EEXIST for us.
        os.makedirs(self.root, exist_ok=True)

        url = self.url_data.format(dataset + ("_perturbed" if perturbed else ""))
        file_path = self._download(url)
        self._unzip(file_path)
        self._fix()

        if dataset != "test":
            url = self.url_label.format(dataset)
            self._download(url)

        print('Done!')
|
[
"geiger.mario@gmail.com"
] |
geiger.mario@gmail.com
|
d8832437a0ce7966d9012653aa4dfd7f4afcf599
|
647d4205fdf7e576f51b7b78e1a7a1eb757bfe0a
|
/config-template.py
|
368fc1b3820ddaa269eecda6facba2dc27dbdb1c
|
[
"MIT"
] |
permissive
|
starspritechippy/dino-crawler
|
7bcde25ec90ab6430ace167ab87bdec80054872f
|
b8b6fdcacaaacd71b8e114b5ae352418f5f12f5d
|
refs/heads/main
| 2023-07-18T08:24:38.546225
| 2021-08-23T18:45:01
| 2021-08-23T18:45:01
| 399,184,818
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 163
|
py
|
# Discord webhook endpoint the crawler posts new comics to.
# Replace the <webhook ID>/<webhook token> placeholders with real values.
webhook_url = "https://discord.com/api/webhooks/<webhook ID>/<webhook token>"
# URL of the most recently posted comic; used to detect when a new one appears.
last_comic = "https://dinosandcomics.com/wp-content/uploads/2021/07/664-768x768.png"
|
[
"maxhey4@gmail.com"
] |
maxhey4@gmail.com
|
a64677e2553fb31f8afef9181eea2a53a382be70
|
50be66fb32a6a0a7ac2575ff801bfa3dbe229f7f
|
/res/frmMainConfiguration.py
|
9918dc5f1442a2bf013700ad5343085b585d4d6b
|
[] |
no_license
|
Neurobotics/PyCorder
|
dab6005e191709248017f85d07d5cb6618b5c744
|
cac419156ce556a224c5bfed65b61247340c1cc1
|
refs/heads/main
| 2023-06-09T23:34:09.850604
| 2023-06-06T11:44:46
| 2023-06-06T11:44:46
| 650,094,955
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,091
|
py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'frmMainConfiguration.ui'
#
# Created: Wed Jun 05 12:00:50 2013
# by: PyQt4 UI code generator 4.5.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
class Ui_frmConfiguration(object):
    """Auto-generated PyQt4 UI for the Configuration dialog.

    Generated from frmMainConfiguration.ui by pyuic4 — do not edit by hand;
    regenerate from the .ui file instead (see the header warning).
    """
    def setupUi(self, frmConfiguration):
        """Build the widget tree on the given dialog instance."""
        frmConfiguration.setObjectName("frmConfiguration")
        frmConfiguration.setWindowModality(QtCore.Qt.ApplicationModal)
        frmConfiguration.resize(861, 743)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(":/icons/process.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        frmConfiguration.setWindowIcon(icon)
        self.gridLayout = QtGui.QGridLayout(frmConfiguration)
        self.gridLayout.setObjectName("gridLayout")
        self.tabWidget = QtGui.QTabWidget(frmConfiguration)
        self.tabWidget.setObjectName("tabWidget")
        self.tab1 = QtGui.QWidget()
        self.tab1.setObjectName("tab1")
        self.gridLayout1 = QtGui.QGridLayout(self.tab1)
        self.gridLayout1.setObjectName("gridLayout1")
        self.tabWidget.addTab(self.tab1, "")
        self.gridLayout.addWidget(self.tabWidget, 0, 0, 1, 1)
        self.buttonBox = QtGui.QDialogButtonBox(frmConfiguration)
        self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Ok)
        self.buttonBox.setObjectName("buttonBox")
        self.gridLayout.addWidget(self.buttonBox, 1, 0, 1, 1)

        self.retranslateUi(frmConfiguration)
        self.tabWidget.setCurrentIndex(0)
        # Close the dialog when OK is pressed.
        QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL("accepted()"), frmConfiguration.accept)
        QtCore.QMetaObject.connectSlotsByName(frmConfiguration)

    def retranslateUi(self, frmConfiguration):
        """Set all user-visible strings (translation hook)."""
        frmConfiguration.setWindowTitle(QtGui.QApplication.translate("frmConfiguration", "Configuration", None, QtGui.QApplication.UnicodeUTF8))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab1), QtGui.QApplication.translate("frmConfiguration", "Tab 1", None, QtGui.QApplication.UnicodeUTF8))
import resources_rc
|
[
"a.belokopytov@neurobotics.ru"
] |
a.belokopytov@neurobotics.ru
|
893e8fe174a50abd522cf3c9b3ee2d02621afd1a
|
bc611aa0e9e41d764135c54a99f8dc2ef4889a6b
|
/17.knights/cavalry.py
|
817be18274a43c9a1e6f1bfeec124ca811237c33
|
[] |
no_license
|
Albert13-Cao/AMT
|
0f691ac28d2c1f2704776de029664644300ac23a
|
4b5265b0dcd5947885bb57928c90405204be009f
|
refs/heads/main
| 2023-06-25T22:28:10.185340
| 2021-07-23T12:04:31
| 2021-07-23T12:04:31
| 384,116,248
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 761
|
py
|
# 1. Read all lines into a dict: key = number on the line, value = repetition count.
#    The first line holds the number of troops; the rest hold one number each.
with open("cavalryin.txt") as cavalryin:
    current_line = 0
    troops = 0
    cavalry_dict = {}
    for line in cavalryin:
        current_line += 1
        if current_line == 1:
            troops = int(line)
        else:
            try:
                cavalry_dict[int(line)] += 1
            except KeyError:
                cavalry_dict[int(line)] = 1

# 2. The answer is YES iff each number's repetition count is divisible by
#    the number itself (value % key == 0 for every entry).
result = "YES"
for key, value in cavalry_dict.items():
    if value % key != 0:
        result = "NO"
        break

# 3. Save the result into an output file.
with open("cavalryout.txt", "w") as cavalryout:
    cavalryout.write(result)
|
[
"84270297+Albert13-Cao@users.noreply.github.com"
] |
84270297+Albert13-Cao@users.noreply.github.com
|
8a8977321a0f440069acd184f18d33fa63785950
|
8385ac9da3f7802877957ac03724558fb51cef91
|
/map-marketingcloud-worker/venv/lib/python3.8/site-packages/kasasa_common/aws/s3/__init__.py
|
e2654b34999491263f674cabb53db3db173b92d3
|
[] |
no_license
|
databar-team/nbc
|
eb0487db6a9b9adcd16e79db11f3cfea0624bc60
|
7076953c0014a540e379ca4d239ca4d87d9d709e
|
refs/heads/master
| 2023-03-26T16:48:18.527998
| 2020-06-09T16:51:17
| 2020-06-09T16:51:17
| 261,843,904
| 0
| 1
| null | 2021-03-25T23:49:54
| 2020-05-06T18:20:03
|
Python
|
UTF-8
|
Python
| false
| false
| 117
|
py
|
from .s3_helpers import get_s3, get_bucket_and_key, get_secrets

# __all__ must list names as *strings*; the original listed the function
# objects themselves, which breaks `from ... import *` (TypeError) and
# tooling that inspects the public API.
__all__ = ["get_s3", "get_bucket_and_key", "get_secrets"]
|
[
"Vladimir.Zivkovic@nbcuni.com"
] |
Vladimir.Zivkovic@nbcuni.com
|
688b268d42bcb22b3788bb577d61ca6e4f601485
|
f7a9c306b07ad116f3a5d9826725702156e5e9a9
|
/libinit.py
|
6ee14571f8420f6f3ee256b374441fcbd92c6924
|
[] |
no_license
|
Leocodefocus/pylib
|
43c76fa2ca0747ccfa65a87d9688c9d5484a10d1
|
71ea60faf3bbcd8bc2cc1333e30cb7f0b9834c30
|
refs/heads/master
| 2021-01-15T12:02:16.646120
| 2017-11-21T09:42:24
| 2017-11-21T09:42:24
| 99,640,977
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,201
|
py
|
#!/usr/bin/python
#!-*-coding:utf-8-*-
def init_cve():
    """Build and return an empty CVE record template.

    Fix: the original built the dict and then discarded it (no return),
    making the function a no-op for callers. Returning the dict is
    backward-compatible — callers that ignored the result are unaffected.
    """
    item = {}
    item['vul_id'] = ''
    item['type'] = ''
    return item
class logs:
    """Collects one action-log record and renders it as an INSERT statement."""

    def __init__(self):
        self.logs = {}
        self.init_log()
        self.sql_ = "insert into actionlogs(ymdtime,vulname,action,starttime,endtime,ps,exception,excpinfo) values('{}','{}','{}','{}','{}','{}','{}','{}')"

    def init_log(self):
        """Reset every record field to the empty string."""
        for field in ('ymdtime', 'vulname', 'action', 'starttime',
                      'endtime', 'ps', 'exception', 'excpinfo'):
            self.logs[field] = ''

    def set_log(self, time, vulname, action, starttime, endtime, ps, exception, excpinfo):
        """Populate all record fields at once."""
        self.logs['ymdtime'] = time
        self.logs['vulname'] = vulname
        self.logs['action'] = action
        self.logs['starttime'] = starttime
        self.logs['endtime'] = endtime
        self.logs['ps'] = ps
        self.logs['exception'] = exception
        self.logs['excpinfo'] = excpinfo

    def get_sql(self):
        """Render the INSERT; single quotes in values become double quotes.

        NOTE(review): values are interpolated into SQL via string formatting;
        prefer parameterised queries if any field can carry untrusted input.
        """
        order = ('ymdtime', 'vulname', 'action', 'starttime',
                 'endtime', 'ps', 'exception', 'excpinfo')
        escaped = [self.logs[k].replace("'", '"') for k in order]
        return self.sql_.format(*escaped)
class nvd_sql():
    """Holds the INSERT statement templates for the NVD vulnerability tables.

    Templates use old-style %-placeholders filled in by the caller.
    NOTE(review): string-formatted SQL — prefer parameterised queries if any
    value can carry untrusted input.
    """
    def __init__(self):
        # One template per target table: main record, CVSS scores,
        # references, and tags.
        self.vuln_insert = "insert into vuln_nvd(cve_id,vuln_desc,vuln_score,cwe_id,last_modified_time,published_time,nvd_url,create_user,create_time,update_user,update_time) values('%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s')"
        self.vuln_cvss = "insert into vuln_nvd_cvss(cve_id,score,av_en,ac_en,au_en,c_en,i_en,a_en,source,generated_on_time,create_user,create_time)values('%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s')"
        self.vuln_refer = "insert into vuln_nvd_refer(cve_id,refer_type,source,content,link,create_user,create_time)values('%s','%s','%s','%s','%s','%s','%s')"
        self.vuln_tag = "insert into vuln_nvd_tag(cve_id,tag_id,create_user,create_time)values('%s','%s','%s','%s')"
|
[
"noreply@github.com"
] |
Leocodefocus.noreply@github.com
|
90528c4992035d806fe712ac226293dbf241b7b2
|
0ad9bff18e87f4b65cf78c3ca1c3de3d283c12f9
|
/pygame_basic/2_background.py
|
2fd0219ea29b36429449ad865fee13e1aef3aa05
|
[] |
no_license
|
ggpp0909/practice
|
3fe80a8c1b276718c98c6d832ff694bbb8bca2d2
|
3208a44ef6e66a4e78c221e87d1a539e36cf99d6
|
refs/heads/main
| 2023-06-02T17:35:43.859266
| 2021-06-18T16:18:30
| 2021-06-18T16:18:30
| 367,926,260
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 969
|
py
|
import pygame
pygame.init()  # initialise pygame (required before anything else)

# Screen size setup
screen_width = 480   # width in pixels
screen_height = 640  # height in pixels
screen = pygame.display.set_mode((screen_width,screen_height))

# Window title setup
pygame.display.set_caption("JYN")  # game window title

# Load the background image
background = pygame.image.load("C:/Users/Administrator/Desktop/python workspace/pygame_basic/background.png")

# Event loop
running = True  # is the game still running?
while running :
    for event in pygame.event.get():  # which events occurred?
        if event.type == pygame.QUIT:  # was the window-close event fired?
            running = False  # stop the game loop

    screen.blit(background,(0,0))  # draw the background
    # screen.fill((R,G,B)) could fill with a solid colour instead of an image

    pygame.display.update()  # redraw the frame

# shut pygame down
pygame.quit()
|
[
"noreply@github.com"
] |
ggpp0909.noreply@github.com
|
93f62cf5f45ff291e916702ee1bf494550bd12fe
|
bcfbec72fc84b618adcab3aa15e4cff281668286
|
/crypto_track/settings.py
|
afe88c3075ccd194b6fd4197d8d064498b8eb054
|
[] |
no_license
|
anshuUnity/crypto-price-tracking-app
|
93c93f9b52d88598a234e1946b5e1173b8fc4c82
|
c34af627b792ae6b645b11c643074151777ac4f5
|
refs/heads/main
| 2023-08-29T03:20:33.879980
| 2021-10-18T13:55:13
| 2021-10-18T13:55:13
| 376,506,134
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,794
|
py
|
"""
Django settings for crypto_track project.
Generated by 'django-admin startproject' using Django 3.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-q*_x1!z+2tdu=qv1gah+)$4opx$a=@a-d*20#i)bkeddh5k-td'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'channels',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'crypto'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'crypto_track.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates')
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
ASGI_APPLICATION = 'crypto_track.asgi.application'
# LEARN CHANNELS
CHANNEL_LAYERS = {
"default": {
"BACKEND": "channels_redis.core.RedisChannelLayer",
"CONFIG": {
"hosts": [("localhost", 6379)],
},
},
}
# Databas
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# CELERY TASK STUFF
BROKER_URL = 'redis://127.0.0.1:6379'
CELERY_RESULT_BACKEND = 'redis://127.0.0.1:6379'
CELERY_ACCEPT_CONTENT = ['application/json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_TIMEZONE = 'UTC'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
|
[
"50448283+Ankit2099@users.noreply.github.com"
] |
50448283+Ankit2099@users.noreply.github.com
|
dccd970756dc1a92c51f3b0d83dbe879beba3315
|
0f708fc31d7fa92cbbf02696cf911e9f7a409aef
|
/build/lib/TEF/auto_set_dtypes.py
|
4dd124f8e3f74f0e4e610040a227b824a0761e8d
|
[
"MIT"
] |
permissive
|
tll549/TEF
|
6af08d72ec0eaa3443986aab1d724d6d60b9799f
|
8dc50b0d0966b782b04fcd9fc846c0756820782f
|
refs/heads/master
| 2020-06-30T06:34:24.257028
| 2019-09-17T04:16:56
| 2019-09-17T04:16:56
| 200,756,742
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,342
|
py
|
import numpy as np
import pandas as pd
import re
import io
def convert_column_list(columns, idx_or_labels, to_idx_only=False, to_label_only=False):
    """Normalise a mixed list of column positions and labels.

    :param columns: array-like of column labels (e.g. ``df.columns``)
    :param idx_or_labels: list mixing int positions and str labels
    :param to_idx_only: keep ints as ints and convert strs to int positions
    :param to_label_only: keep strs as strs and convert ints to labels
    :return: list in the same order as *idx_or_labels* (items that are
        neither int nor str are silently dropped, as before)

    Cleanup: removed the dead commented-out earlier implementation.
    """
    converted = []
    for item in idx_or_labels:
        if isinstance(item, int):
            # int -> label, unless positions were explicitly requested
            converted.append(item if to_idx_only else columns[item])
        elif isinstance(item, str):
            # label -> int position, unless labels were explicitly requested
            converted.append(item if to_label_only else np.where(columns == item)[0][0])
    return converted
def auto_set_dtypes(df, max_lev=10,
    set_datetime=[], set_category=[], set_int=[], set_float=[], set_object=[], set_bool=[],
    set_datetime_by_pattern=r'\d{4}-\d{2}-\d{2}',
    verbose=1):
    """Return a copy of *df* with columns cast to better dtypes.

    Columns can be forced to a dtype by listing them (by index or label)
    in the ``set_*`` arguments; all remaining columns are handled by
    heuristics: a datetime-looking first value triggers datetime
    conversion, {False, True}-valued columns become bool, and object
    columns with at most ``max_lev`` distinct values become category.

    ``verbose`` = 1/'summary' prints dtype counts before/after;
    >= 2/'detailed' prints a per-column table; any non-zero value also
    suggests likely identifier and category columns.

    NOTE(review): the mutable default arguments ([] lists) are read-only
    in this body, so they are harmless here — but fragile if the function
    is ever changed to mutate them.
    """
    df = df.copy()  # need this or will change original df
    if verbose:
        # Snapshot dtypes and the df.info() summary line before casting.
        record = pd.DataFrame({'before': df.dtypes}).transpose()
        buffer = io.StringIO()
        df.info(verbose=False, buf=buffer)
        s = buffer.getvalue()
        dtypes_before = s.split('\n')[-3]
    for c in range(df.shape[1]):  # for every column, by position
        cur = df.iloc[:, c]
        # Explicit requests win; each set_* list may hold indices or labels,
        # so both forms are checked.
        if c in set_object or c in convert_column_list(df.columns, set_object, to_idx_only=True):
            df.iloc[:, c] = df.iloc[:, c].astype('object')  # don't know why if its already datetime it doesn't change
        elif c in set_datetime or c in convert_column_list(df.columns, set_datetime, to_idx_only=True):
            df.iloc[:, c] = pd.to_datetime(cur, errors='coerce')
        elif c in set_bool or c in convert_column_list(df.columns, set_bool, to_idx_only=True):
            df.iloc[:, c] = cur.astype('bool')
        elif c in set_category or c in convert_column_list(df.columns, set_category, to_idx_only=True):
            df.iloc[:, c] = cur.astype('category')
        elif c in set_int or c in convert_column_list(df.columns, set_int, to_idx_only=True):
            df.iloc[:, c] = cur.astype('Int64')  # use 'Int64' instead of int to ignore nan, can't handle object type
        elif c in set_float or c in convert_column_list(df.columns, set_float, to_idx_only=True):
            df.iloc[:, c] = cur.astype(float)
        else:
            # Heuristics: only the first non-null value is checked against
            # the datetime pattern.
            if set_datetime_by_pattern:
                if sum(cur.notnull()) > 0:  # not all are null
                    fisrt_possible_date = cur[cur.notnull()].iloc[0]  # use the first not null
                    if re.match(set_datetime_by_pattern, str(fisrt_possible_date)):
                        df.iloc[:, c] = pd.to_datetime(cur, errors='coerce')
                        continue
            if set(cur.unique().tolist()) == set([False, True]):
                # 0, 1 will becomes bool here
                df.iloc[:, c] = cur.astype('bool')
            elif len(cur.unique()) <= max_lev and cur.dtype.name == 'object':  # only change to category from object, in case changing for example int (2, 3, 4, 5) to category
                df.iloc[:, c] = cur.astype('category')
    if verbose:
        if verbose == 'summary' or verbose == 1:
            buffer = io.StringIO()
            df.info(verbose=False, buf=buffer)
            s = buffer.getvalue()
            dtypes_after = s.split('\n')[-3]
            print('before', dtypes_before)
            print('after ', dtypes_after)
        elif verbose == 'detailed' or verbose >= 2:
            # Per-column before/after table plus a 3-row sample.
            record = record.append(pd.DataFrame({'after': df.dtypes}).transpose(), sort=False)
            record.loc['idx'] = list(range(df.shape[1]))
            record = record.append(df.sample(3), sort=False)
            pd.set_option('display.max_columns', record.shape[1])
            print(record)
    # check possible id cols and category
    if verbose != 0:
        l = df.columns.tolist()
        # Name-based guesses: contains an id-like word, excludes known
        # false positives, and has a high unique ratio.
        check_list = ['id', 'key', 'number']
        ignore_list = ['bid', 'accident']
        possible_id_list = [c for c in range(len(l)) if
                            any([x in l[c].lower() for x in check_list]) and
                            all([x not in l[c].lower() for x in ignore_list]) and
                            df.iloc[:, c].dtype.name != 'object' and  # ignore current object
                            df.iloc[:, c].nunique() / df.iloc[:, c].notnull().sum() > 0.5]  # number of unique should be high enough
        if len(possible_id_list) > 0:
            print()
            print(f'possible identifier cols: {", ".join([str(c)+" "+l[c] for c in possible_id_list])}')
            print(f'consider using set_object={possible_id_list}')
        # Numeric columns with few distinct values look like categories.
        possible_cat_list = [c for c in range(len(l)) if
                             ('int' in df.iloc[:, c].dtype.name or 'float' in df.iloc[:, c].dtype.name) and
                             df.iloc[:, c].nunique() < max_lev]
        if len(possible_cat_list) > 0:
            print()
            str_list = [f'{c} {l[c]} ({df.iloc[:, c].nunique()} levls)' for c in possible_cat_list]
            print(f'possible category cols: {", ".join(str_list)}')
            print(f'consider using set_category={possible_cat_list}')
    return df
|
[
"tll549@yahoo.com.tw"
] |
tll549@yahoo.com.tw
|
49db68fb8220b97f65446eeba1685fd45bd3468d
|
78f5c537cb4d95771956f434a54d2cddb6078ff1
|
/trial.py
|
31e11ffec6b3ae933ed348d18289746742aedb46
|
[] |
no_license
|
sushmitapatil77/MachineLearing
|
2dc569e4562a4196a868db0ce23f4fecea0285c8
|
407808322e4bec8327a959e582e6288f4484e15e
|
refs/heads/master
| 2021-08-07T15:37:46.288680
| 2020-10-10T22:19:30
| 2020-10-10T22:19:30
| 226,447,196
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,447
|
py
|
import cv2
import numpy as np
#import joblib
#import glob
#from tensorflow.keras.models import load_model
def test_model(x_test,facenet,clf,classifier_name):
    """Classify one face crop and map the prediction to an access decision.

    x_test          -- image array (resized here to 160x160)
    facenet         -- embedding model with a .predict() method
    clf             -- fitted classifier over the L2-normalized embedding
    classifier_name -- 1: use clf.predict() directly; otherwise take
                       argmax over clf.predict() probabilities, +1
    Returns (message, BGR color) for on-screen display.
    """
    x_test = cv2.resize(x_test, (160, 160))
    # Standardize and add a leading batch dimension for the network.
    x_test = np.expand_dims(prewhiten(x_test),axis=0)
    new_x_test = l2_normalize(facenet.predict(x_test))
    if classifier_name == 1:
        y_predicted = clf.predict(new_x_test)
    else:
        y_predicted = np.argmax(clf.predict(new_x_test)) + 1
    print("prediction for ",y_predicted)
    # NOTE(review): when classifier_name == 1, clf.predict returns an
    # array; the comparison below relies on it being length-1 — confirm.
    # Presumably IDs above 200 are authorized personnel — verify against
    # the enrollment scheme.
    if y_predicted > 200:
        return "Access Granted", (0, 255, 0)
    else:
        return "Access Denied", (0, 0, 255)
def prewhiten(x):
    """Standardize an image (rank 3) or batch of images (rank 4).

    Subtracts the mean and divides by the standard deviation, computed
    per image; the divisor is clamped from below by 1/sqrt(pixel_count)
    so a constant image does not divide by zero.
    """
    if x.ndim == 3:
        reduce_axes, pixel_count = (0, 1, 2), x.size
    elif x.ndim == 4:
        reduce_axes, pixel_count = (1, 2, 3), x[0].size
    else:
        raise ValueError('Dimension should be 3 or 4')
    mean = np.mean(x, axis=reduce_axes, keepdims=True)
    spread = np.maximum(np.std(x, axis=reduce_axes, keepdims=True),
                        1.0 / np.sqrt(pixel_count))
    return (x - mean) / spread
def l2_normalize(x, axis=-1, epsilon=1e-10):
    """Scale *x* to unit L2 norm along *axis* (epsilon guards against /0)."""
    squared_norm = np.sum(np.square(x), axis=axis, keepdims=True)
    return x / np.sqrt(np.maximum(squared_norm, epsilon))
if __name__ == '__main__':
    # NOTE(review): test_model takes four arguments (x_test, facenet, clf,
    # classifier_name); calling it with a single path string raises
    # TypeError, and a path is not an image anyway. This entry point
    # looks stale — confirm before relying on it.
    datasetpath = './'
    test_model(datasetpath)
|
[
"sushmitapatil@mymail.mines.edu"
] |
sushmitapatil@mymail.mines.edu
|
e426afbf4816f4bfae3dada43aff40672af1d85d
|
21b733c24abe27a806632eb1db213d7898887e89
|
/pvCrawler/esConnector.py
|
b98412295de3142464f170d8fbb962c2f193367a
|
[] |
no_license
|
Insomnia1437/PV_crawler
|
9ce26adffe7440fe61f2650b77df676153eee8b9
|
5b0102f965ea8c83e1e769e367e1468a9a4c473c
|
refs/heads/master
| 2020-11-25T09:22:01.392563
| 2020-05-27T14:21:58
| 2020-05-27T14:21:58
| 228,593,705
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,278
|
py
|
# -*- coding: utf-8 -*-
# @Time : 2020-05-15
# @File : esConnector
# @Software: PyCharm
# @Author : Di Wang(KEK Linac)
# @Email : sdcswd@gmail.com
from elasticsearch import Elasticsearch
def _generate_bulk_body(index, doc: list):
es_body = []
index_api = {"index": {"_index": index}}
for item in doc:
es_body.append(index_api)
es_body.append(item)
return es_body
class ESConnector:
    """Thin wrapper around an Elasticsearch client for the PV crawler.

    Creates an index whose analyzer splits EPICS PV names on the
    ':', '-', '_' and ' ' characters, and bulk-inserts PV documents.
    """
    def __init__(self, url):
        # Single-node client; *url* e.g. "http://localhost:9200".
        self.es = Elasticsearch([url])
    def es_ok(self):
        """Return True when the cluster answers a ping."""
        return self.es.ping()
    def index_exist(self, index):
        """Return True (and log) if *index* already exists."""
        if self.es.indices.exists(index):
            print('Info: index already exists! %s' % index)
            return True
        return False
    def index_create(self, index):
        """Create *index* with the PV-name analyzer and field mappings.

        ignore=400 silently skips "index already exists" errors.
        """
        settings = {"settings": {
            "number_of_shards": 1,
            "number_of_replicas": 0,
            "analysis": {
                "analyzer": {
                    # Custom analyzer: split PV names into components,
                    # then lowercase each token.
                    "epics_pv_analyzer": {
                        "type": "custom",
                        "tokenizer": "pv_tokenizer",
                        "filter": [
                            "lowercase"
                        ]
                    }
                },
                "tokenizer": {
                    # PV names look like "SYS:SUB-DEV_SIG"; tokenize on
                    # the usual separators.
                    "pv_tokenizer": {
                        "type": "char_group",
                        "tokenize_on_chars": [
                            ":",
                            "-",
                            "_",
                            " "
                        ]
                    }
                }
            }
        },
            "mappings": {
                "properties": {
                    "PVNAME": {
                        "type": "text",
                        "analyzer": "epics_pv_analyzer",
                        "search_analyzer": "epics_pv_analyzer"
                    },
                    "PVTYPE": {
                        "type": "keyword",
                        "ignore_above": 256
                    },
                    "DTYP": {
                        "type": "text"
                    },
                    "NELM": {
                        "type": "integer"
                    },
                    "PINI": {
                        "type": "keyword"
                    },
                    # Forward link is a PV name too, so it shares the analyzer.
                    "FLNK": {
                        "type": "text",
                        "analyzer": "epics_pv_analyzer",
                        "search_analyzer": "epics_pv_analyzer"
                    },
                    "FTVL": {
                        "type": "keyword"
                    }
                }
            }
        }
        self.es.indices.create(index=index, ignore=400, body=settings)
        print('Created Index %s' % index)
    def insert_data(self, index, doc, ):
        """
        add data into the elastic search database
        :param index: index name
        :param doc: list of documents to index
        :return: response json data from elasticsearch
        """
        body = _generate_bulk_body(index, doc)
        # add bulk API timeout value: 300 seconds
        return self.es.bulk(body=body, index=index, request_timeout=300)
if __name__ == '__main__':
    # Ad-hoc smoke test against a local node; the commented calls are
    # kept for manual experimentation.
    es = ESConnector("http://localhost:9200")
    # es.es.cat.indices()
    # print(es.es.ping())
|
[
"sdcswd@gmail.com"
] |
sdcswd@gmail.com
|
3c71a6ee23e30a3ca1c6838f6b7c3492e7a01f7e
|
1892324f7bcb2273543a5b08d5dbf8a101d67803
|
/examples/tiling_7.py
|
30a37689e4124d0de32656c8af0fca09d4e2b82f
|
[
"MIT"
] |
permissive
|
byewokko/penrose
|
b5b5266585969d5528e2f04b0a985c150f492e2b
|
f339a6f0a2c9b0486f0b46a517bb9b8907c4be07
|
refs/heads/master
| 2023-02-21T11:51:29.732515
| 2020-12-24T23:33:11
| 2020-12-24T23:33:11
| 302,706,319
| 12
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,166
|
py
|
import multigrid
from ntiling import TilingBuilder
from drawing.pil_draw_simple import Draw
# Pastel palette: the last entry doubles as the background colour and the
# second-to-last as the edge colour (see negative indexing below).
palette = [
    "#ffee7d",
    "#b767ff",
    "#44fadd",
    "#fe91ca",
    "#ffe0f7",
]
# Alternative darker palette kept for experimentation.
# palette = [
#     "#7fb414",
#     "#df0e62",
#     "#127681",
#     "#fac70b",
#     "#092a35",
# ]
draw = Draw(scale=90, width=4*1280, height=4*1280, bg_color=palette[-1])
draw.line_color = None
index_range = (-4, 4)
# Seven grid offsets for a 7-fold multigrid (de Bruijn style construction).
offsets = list(map(float, "0.04885131 0.38705046 0.15540683 0.37524718 0.09360688 0.04554864 0.0424169".split(" ")))
grid = multigrid.Multigrid(7, offsets)
# grid = multigrid.Multigrid(7, [0.11071195, 0.40178219, 0.38167641, 0.05840904, 0.26593674, 0.30876262, 0.40169052])
tiling_builder = TilingBuilder(grid)
tiling_builder.prepare_grid(index_range)
tiling_builder.generate_rhomb_list()
# NOTE(review): iterates the builder's private _rhombs dict directly —
# a public accessor would be safer if one exists.
for rhomb in tiling_builder._rhombs.values():
    # Map rhomb type to palette index 0/1/2: types 1 and 6 -> 2,
    # types 2 and 5 -> 1, everything else -> 0.
    c = (rhomb.type() in (1,6,2,5)) + (rhomb.type() in (1,6))
    # c = (rhomb.type() in (1, 2, 5, 6)) + (rhomb.type() in (1, 6))
    draw.polygon(rhomb.xy(), color=palette[c])
    for a, b in rhomb.get_edges():
        draw.edge(a.get_xy(homogenous=False), b.get_xy(homogenous=False), color=palette[-2], width=6)
draw.show()
|
[
"byewokko@seznam.cz"
] |
byewokko@seznam.cz
|
153c199e641bb7e75c52b7a481fbad9d3693ffa3
|
d5bea21ab2005d4a0c42edbc5df8ddf52b859547
|
/src/sbahn_ticker.py
|
4cbf6532c318c43597435cde560dd490ce72adea
|
[] |
no_license
|
tmbo/alexa-vbb-skill
|
3eb0a5a7cab1149fab6cdb880fba900685518a0f
|
3d18c49bb1f62f2ae4d3c6b7a80a9df561ef5fb1
|
refs/heads/master
| 2021-03-27T16:02:30.717173
| 2018-04-19T14:17:02
| 2018-04-19T14:17:02
| 110,363,321
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,300
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
from src import utils
from src import vbb
from src import speech
# --------------- Functions that control the skill's behavior ------------------
def get_welcome_response():
    """Build the greeting played when the skill is launched bare."""
    card_title = "Welcome"
    greeting = ("Die S-Bahn lässt grüßen. "
                "Frage nach der nächsten Abfahrt, zum Beispiel mit: "
                "'Alexa, frage bahn info wann die nächste bahn fährt'")
    # Played again when the user stays silent or says something
    # the skill does not understand.
    reprompt = "Bitte frage doch nach der nächsten Bahnfahrt."
    return speech.response({}, card_title, greeting, reprompt, False)
def handle_session_end_request():
    """Say goodbye and close the session."""
    farewell = "Sänk you for traveling wis se S-Bahn!"
    # should_end_session=True makes Alexa exit the skill after this reply.
    return speech.response({}, "Session Ended", farewell, None, True)
def set_station_in_session(intent, session):
    """Store the user's station in the session attributes.

    Reads the ``station_name`` slot from *intent*; on success the name
    is echoed back and kept as a session attribute, otherwise the user
    is asked to repeat it.  Returns a speech.response payload.

    Fixes: the docstring previously claimed this sets "the color" (a
    copy-paste from the Alexa sample), and the reprompt text carried
    several dead empty-string continuation lines.
    """
    card_title = intent['name']
    session_attributes = {}
    should_end_session = False
    if 'station_name' in intent['slots']:
        station_name = intent['slots']['station_name']['value']
        session_attributes = {"station_name": station_name}
        speech_output = "Ok, deine Station " + \
                        station_name + \
                        " ist gespeichert. Du kannst mich nach dem nächsten " \
                        "Zug fragen indem du folgendes sagst: " \
                        "wann kommt der nächste zug?"
        reprompt_text = "Du kannst mich nach dem nächsten Zug fragen indem du " \
                        "folgendes sagst: " \
                        "wann kommt der nächste zug?"
    else:
        speech_output = "Ich habe deine Station nicht verstanden. " \
                        "Bitte versuche es noch einmal."
        reprompt_text = "Ich habe deine Station nicht verstanden. " \
                        "Bitte sag mir deine Station, indem du zum Beispiel " \
                        "folgendes sagest: " \
                        "meine station ist griebnitzsee"
    return speech.response(session_attributes, card_title, speech_output,
                           reprompt_text, should_end_session)
def next_train_speech_output():
    """Compose the German spoken summary of upcoming departures.

    NOTE(review): the station is hard-coded to Griebnitzsee and
    vbb.fetch_next_trains_for_stop() is called without arguments — the
    station_name stored in the session is never used here; confirm this
    is intended.
    """
    station = "Griebnitzsee"
    departures = vbb.fetch_next_trains_for_stop()
    # "<line> nach <direction> in <wait-time>", joined into natural speech.
    formatted_departures = utils.voice_join([
        "{} nach {} in {}".format(
            d["name"], d["direction"], utils.wait_time_str(d["time"]))
        for d in departures
    ])
    # Singular vs plural verb form depending on the number of departures.
    if len(departures) == 1:
        return "Von {} fährt: {}.".format(
            station, formatted_departures)
    elif len(departures) > 1:
        return "Von {} fahren: {}.".format(
            station, formatted_departures)
    else:
        return "Entschuldige, ich konnte keine Abfahrten finden."
def get_next_train(intent, session):
    """Answer a departure request and end the session.

    The reprompt is None, so the session simply closes if the user
    does not respond to the answer.
    """
    spoken_answer = next_train_speech_output()
    return speech.response({}, intent['name'], spoken_answer, None, True)
# --------------- Events ------------------
def on_session_started(session_started_request, session):
    """Log the session start; no state is initialized here."""
    print(f"on_session_started requestId={session_started_request['requestId']}"
          f", sessionId={session['sessionId']}")
def on_launch(launch_request, session):
    """Handle a bare skill launch (no explicit intent).

    If the session already carries a station_name, jump straight to the
    departure answer; otherwise greet the user.
    """
    print("on_launch requestId=" + launch_request['requestId'] +
          ", sessionId=" + session['sessionId'])
    # Dispatch to your skill's launch
    if session.get('attributes', {}) and "station_name" in session.get(
            'attributes', {}):
        # BUG FIX: get_next_train() was called without its required
        # (intent, session) arguments, which raised TypeError at runtime.
        # Supply a synthetic intent carrying the name used as card title.
        return get_next_train({'name': 'request_train'}, session)
    else:
        return get_welcome_response()
def on_intent(intent_request, session):
    """Dispatch an IntentRequest to the matching handler."""
    print(f"on_intent requestId={intent_request['requestId']}"
          f", sessionId={session['sessionId']}")
    intent = intent_request['intent']
    intent_name = intent['name']
    if intent_name == "set_station":
        return set_station_in_session(intent, session)
    if intent_name == "request_train":
        return get_next_train(intent, session)
    if intent_name == "AMAZON.HelpIntent":
        return get_welcome_response()
    if intent_name in ("AMAZON.CancelIntent", "AMAZON.StopIntent"):
        return handle_session_end_request()
    raise ValueError("Invalid intent")
def on_session_ended(session_ended_request, session):
    """Log session teardown.

    Not invoked when the skill itself returns should_end_session=True;
    add cleanup logic here if ever needed.
    """
    print(f"on_session_ended requestId={session_ended_request['requestId']}"
          f", sessionId={session['sessionId']}")
# --------------- Main handler ------------------
def lambda_handler(event, context):
    """ Route the incoming request based on type (LaunchRequest, IntentRequest,
    etc.) The JSON body of the request is provided in the event parameter.
    """
    # Reject events from any other skill: the application id is
    # hard-coded to this skill's id.
    if (event['session']['application']['applicationId'] !=
            "amzn1.ask.skill.a15c6fcd-13de-4d4e-bb9a-46759f860fb6"):
        raise ValueError("Invalid Application ID")
    if event['session']['new']:
        on_session_started({'requestId': event['request']['requestId']},
                           event['session'])
    # Dispatch by request type; unknown types fall through and return None.
    if event['request']['type'] == "LaunchRequest":
        return on_launch(event['request'], event['session'])
    elif event['request']['type'] == "IntentRequest":
        return on_intent(event['request'], event['session'])
    elif event['request']['type'] == "SessionEndedRequest":
        return on_session_ended(event['request'], event['session'])
if __name__ == '__main__':
    # Manual smoke test: print the composed departure text and a VBB
    # station lookup (requires network access to the VBB API).
    logging.basicConfig(level="DEBUG")
    print(next_train_speech_output())
    print(vbb.fetch_stops_by_name("Berlin"))
|
[
"tom@rasa.ai"
] |
tom@rasa.ai
|
5f61972788ba3a81ffa79a70e9b1f057fcd4a44b
|
ad12aa57066c2dc9623ea49bd670548d144acddc
|
/Framework for ML/ML/CLS_Attendence.py
|
e6ff1084e765c6bbdc8d0386695f2c28f21afe67
|
[] |
no_license
|
youssefsamy2014/MyProgML
|
343233496e925bbdbbadcb2ba351f2f2aa767fcf
|
894dea63a6d1ab43593e541f1d8b1dc19717916f
|
refs/heads/master
| 2023-08-25T10:13:39.499943
| 2021-10-18T15:42:26
| 2021-10-18T15:42:26
| 337,539,990
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,915
|
py
|
from models import Entity_list_user,Entity_list_Attendance
from DB import db
from cv2 import cv2
import datetime
import time
class CLS_Attendance():
    """Face-recognition attendance taking.

    Reads a video, recognizes known faces with an LBPH recognizer, and
    records In/Out rows in the Entity_list_Attendance table, then returns
    the list of students marked 'In' today.
    """
    def __init__(self,name):
        # *name* selects the video file Assets/avideos/<name>.webm.
        self.name=name
    def Attendence(self):
        """Process the video and record attendance.

        NOTE(review): the trailing if/else below sits inside the
        ``while True`` loop, so the method returns after processing the
        FIRST frame only — confirm whether scanning the whole video was
        intended.
        """
        recognizer = cv2.face.LBPHFaceRecognizer_create()
        recognizer.read("Assets\\TrainingImageLabel\\trainer.yml")
        harcascadePath = "Assets\\haarcascade\\haarcascade_frontalface_default.xml"
        faceCascade = cv2.CascadeClassifier(harcascadePath)
        cam = cv2.VideoCapture("Assets\\avideos\\"+self.name+".webm")
        #add='http://192.168.1.12:8080/video'
        #cam.open(add)
        font = cv2.FONT_HERSHEY_SIMPLEX
        cam.set(3, 640)  # set video width
        cam.set(4, 480)  # set video height
        # Minimum window size to be considered a face (10% of the frame).
        minW = 0.1 * cam.get(3)
        minH = 0.1 * cam.get(4)
        while True:
            _, img = cam.read()
            #img = cv2.flip(img, -1) # Flip vertically
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            faces = faceCascade.detectMultiScale(
                gray,
                scaleFactor=1.2,
                minNeighbors=5,
                minSize=(int(minW), int(minH)),
            )
            for (x, y, w, h) in faces:
                cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
                id, confidence = recognizer.predict(gray[y:y + h, x:x + w])
                ts = time.time()
                date = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d')
                timeStamp = datetime.datetime.fromtimestamp(ts).strftime('%H:%M:%S')
                # LBPH confidence is a distance: lower is better;
                # < 100 is treated as a recognized face.
                if (confidence < 100):
                    user = Entity_list_user.query.filter_by(FacultyID=id).first()
                    Name = user.FirstName + '_' + user.LastName
                    confidence = " {0}%".format(round(100 - confidence))
                    # NOTE(review): check_user is filtered on InOut='In',
                    # so check_user.InOut can never be 'Out' and the inner
                    # condition is always true — every later sighting adds
                    # another 'Out' row. Verify the intended semantics.
                    check_user = Entity_list_Attendance.query.filter_by(FacultyID=id, Date=date, InOut='In').first()
                    if check_user :
                        if not check_user.InOut=='Out':
                            new_user = Entity_list_Attendance(FacultyID=id, Name=Name, Time=timeStamp, Date=date, InOut='Out')
                            db.session.add(new_user)
                            db.session.commit()
                    else :
                        new_user = Entity_list_Attendance(FacultyID=id, Name=Name, Time=timeStamp, Date=date, InOut='In')
                        db.session.add(new_user)
                        db.session.commit()
                else:
                    # Unrecognized face: nothing recorded.
                    confidence = " {0}%".format(round(100 - confidence))
            ts = time.time()
            date = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d')
            # NOTE(review): strftime never returns an empty string, so
            # this branch is dead and the else always runs.
            if not date:
                return {'message': 'no one of student attend today'}
            else:
                students = Entity_list_Attendance.query.filter_by(Date=date, InOut='In').all()
                output = []
                for student in students:
                    Students_attend = {}
                    Students_attend['FacultyID'] = student.FacultyID
                    Students_attend['Name'] = student.Name
                    output.append(Students_attend)
                return output ,True
|
[
"49346290+youssefsamy2014@users.noreply.github.com"
] |
49346290+youssefsamy2014@users.noreply.github.com
|
b13a9ba21ec33550166e6db7b778ded67b72f63b
|
cdaa13ca7971ba3fa4ef279d181407db1d104677
|
/162. Find Peak Element.py
|
bb0efb1e510e3abd54671274ddc01d5bd3baad6d
|
[] |
no_license
|
jingweiwu26/Practice_code
|
e319726c960aea4caf6da16eb47a4ba6ab944207
|
3bab0224256c206fc459e53384fd3abf4a711fe2
|
refs/heads/master
| 2022-01-31T15:13:10.059963
| 2022-01-27T18:46:37
| 2022-01-27T18:46:37
| 81,012,813
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 765
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 12 23:38:31 2018
@author: Wu Jingwei
"""
class Solution(object):
    def findPeakElement(self, nums):
        """Return an index whose value is strictly greater than both
        neighbours; the array ends count as peaks (linear scan, O(n)).

        :type nums: List[int]
        :rtype: int
        """
        n = len(nums)
        for idx in range(1, n - 1):
            if nums[idx - 1] < nums[idx] > nums[idx + 1]:
                return idx
        # No interior peak found: one of the two ends must be a peak.
        if n == 1 or nums[0] > nums[1]:
            return 0
        return n - 1
class Solution(object):
    def findPeakElement(self, nums):
        """Binary-search variant, O(log n).

        Always moves toward the rising side, so when the interval closes
        left == right is a peak index.

        :type nums: List[int]
        :rtype: int
        """
        left = 0
        right = len(nums) - 1
        while left < right:
            # BUG FIX: use floor division — in Python 3, '/' yields a
            # float and floats cannot be used as list indices.
            mid = (left + right) // 2
            if nums[mid] < nums[mid + 1]:
                left = mid + 1
            else:
                right = mid
        return left
|
[
"noreply@github.com"
] |
jingweiwu26.noreply@github.com
|
89a8fbb62948846a95711b7126edd32b131ca3a9
|
bc0a7540636d9366c658e2446445467f98001cd9
|
/tests/test_loadmat.py
|
4414fc44cbe27a567585cb3851699df9ad95da85
|
[
"MIT"
] |
permissive
|
Austin-Crabtree/matfiles
|
03c3b9511a531bdf210b7c1b3d0c69f642f56c7c
|
d95e987d358513181ef2a9b83e5709845187f4ab
|
refs/heads/main
| 2023-03-20T05:31:07.733618
| 2021-03-20T02:14:10
| 2021-03-20T02:14:10
| 349,523,679
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 605
|
py
|
import unittest
import numpy as np
from matfiles import loadmat
class MyTestCase(unittest.TestCase):
    def test_simpleload(self):
        """loadmat should round-trip the known contents of simple.mat."""
        case = {
            'a': np.array([[116], [101], [115], [116]]),
            'b': np.array([[ 1.], [ 2.], [ 3.], [ 4.], [ 5.], [ 6.], [ 7.], [ 8.], [ 9.], [10.]]),
            's': {
                'a': np.array([[116], [101], [115], [116]]),
                'b': np.array([[ 1.], [ 2.], [ 3.], [ 4.], [ 5.], [ 6.], [ 7.], [ 8.], [ 9.], [10.]])}
        }
        contents = loadmat('../test-data/simple.mat')
        # BUG FIX: the expected values were built but never compared, so
        # this test could not fail. Compare every variable recursively.
        self._assert_matches(case, contents)

    def _assert_matches(self, expected, actual):
        """Recursively compare dicts of arrays with numpy equality."""
        for key, value in expected.items():
            self.assertIn(key, actual)
            if isinstance(value, dict):
                self._assert_matches(value, actual[key])
            else:
                np.testing.assert_array_equal(actual[key], value)
if __name__ == '__main__':
    # Allow running this test module directly: python test_loadmat.py
    unittest.main()
|
[
"crabbyinatree@protonmail.com"
] |
crabbyinatree@protonmail.com
|
15e1353fdb1e0ca9c9a0029db24fc43e80b32a97
|
e4386f2f2e48eb22287ed9cec8d51445f695fe96
|
/stack.py
|
29aa7947e15dd6b817870527180c055001d57909
|
[] |
no_license
|
sreenath-kada/Python
|
e1b726c984accfab2cb5415b8d5e4ec22b0ad2c0
|
f232db76809126b145ed34604a964dc0a0498eea
|
refs/heads/master
| 2020-05-04T10:37:59.337755
| 2019-04-03T19:33:57
| 2019-04-03T19:33:57
| 179,091,867
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 813
|
py
|
class Stack:
    """Minimal LIFO stack that also counts push/pop operations
    ("iterations") for display in stack_peek()."""

    def __init__(self):
        self.stack = []       # underlying storage, top of stack at the end
        self.iterations = 0   # number of push/pop calls performed so far

    def stack_push(self, data):
        """Push *data* on top of the stack."""
        self.stack.append(data)
        self.iterations += 1

    def stack_pop(self):
        """Pop and return the top element.

        Prints a message and returns None when the stack is empty.
        BUG FIX: the emptiness test was ``self.stack is None`` — never
        true for a list — so popping an empty stack raised IndexError
        instead of printing "Stack is empty".
        """
        self.iterations += 1
        if not self.stack:
            print("Stack is empty")
            return None
        return self.stack.pop()

    def stack_peek(self):
        """Print the stack contents and the operation count."""
        print(f"Stack looks like this after")
        print(self.iterations)
        print("iterations")
        for data in self.stack:
            print(data)
# Demo run: push four weekdays, peeking and popping along the way.
# After three pops only 'Mon' remains on the stack.
stack = Stack()
stack.stack_push('Mon')
stack.stack_push('Tue')
stack.stack_peek()
stack.stack_push('Wed')
stack.stack_push('Thu')
stack.stack_peek()
stack.stack_pop()
stack.stack_pop()
stack.stack_pop()
stack.stack_peek()
|
[
"noreply@github.com"
] |
sreenath-kada.noreply@github.com
|
76ec0bfa00c7191eecde64d7a553a7998771bae9
|
4b1a3db3249a51bcd2b9699edcb60c2716b2889e
|
/discharge.py
|
e851bc189eee5ad877ce01278d2da3087be6cfb8
|
[] |
no_license
|
shohei/tank-simulation
|
799e7c8db543f639984dbadb1b41b83fc818b831
|
d74b8d09eb1d5e1fa1fa61327f4fff35754ac5eb
|
refs/heads/master
| 2021-01-02T09:14:46.709964
| 2015-07-29T13:48:52
| 2015-07-29T13:48:52
| 39,488,510
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 949
|
py
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
#*** TANK PRESSURE SIMULATION ***
#*** AUTHOR: SHOHEI AOKI ********
#*** PROGRAMMED: 22 JUL 2015 ****
from pylab import *
##### PARAMETER AREA ############
gamma = 1.4  # ratio of specific heats for air
de = 2 #[mm] nozzle exit diameter
Ae = pi*(de*0.001/2)*(de*0.001/2)#[m2] exit area (pi comes from pylab here)
T0i = (24 + 273.15) # Room temperature [K]
R = 289 # J/(kg dot K)
V = 1/1000.0 #1L as [m3] (rebound by the loop below)
sigma = sqrt(gamma*((2/(gamma+1))**((gamma+1)/(gamma-1)))) # critical flow coefficient
##### SIMULATION FOR AIR DISCHARGE FROM TANK ##
t = arange(0.01,100,0.01) # 100 sec
# NOTE(review): this rebinds `pi` (imported from pylab) to the initial
# tank pressure; Ae above was computed first so the math is unaffected,
# but a clearer name would avoid the shadowing.
pi = 0.1013*5*(10**6) # 5[MPa]
V_array = [1/1000.0,10/1000.0,20/1000.0]
# Closed-form isentropic blow-down: tank pressure p0(t) for each volume.
for V in V_array:
    p0 = (1 + (((gamma - 1)/2)*(Ae*sigma*sqrt(R * T0i))/V)*t)**((-1)*2*gamma/(gamma-1))*pi
    plot(t,p0*(10**-6))
##### VISUALIZATION AREA #########
title('TANK PRESSURE TRANSITION BY AIR DISCHARGE')
# NOTE(review): passing the legend location positionally is deprecated
# in newer matplotlib; loc='upper right' is the keyword form.
legend(('1L','10L','20L'),'upper right')
xlabel('t [sec]')
ylabel('p0 [MPa]')
savefig('./image/tank-discharge.png')
show()
|
[
"shoaok@gmail.com"
] |
shoaok@gmail.com
|
3fb764293b9870d147abe9b85e9cd11943995550
|
9849e98406768caba0cb9c9ac9300688fd012179
|
/doc_gen/apps.py
|
380b518525739e9a7d8a348feca6275b06dc5f92
|
[] |
no_license
|
Trevahok/Extractinator-9000
|
3cb4e1c6d20d355cef156c8e7f2ca57137b38188
|
131122fb25a6513dc765495fbe0622ad67ca29cc
|
refs/heads/master
| 2020-04-29T10:29:21.700611
| 2019-05-18T11:17:56
| 2019-05-18T11:17:56
| 176,063,513
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 88
|
py
|
from django.apps import AppConfig
class DocGenConfig(AppConfig):
    """Django application configuration for the doc_gen app."""
    name = 'doc_gen'
|
[
"vighneshss@gmail.com"
] |
vighneshss@gmail.com
|
05a92429c41bbe7dfce417ff86c07367317dc447
|
973c2a5fd8c37497e91487a1cbc34a489a0d0108
|
/bin/home.py
|
b6cc7a8204c700706c6dd305f0fba2869e72ce26
|
[] |
no_license
|
BBATools/PWCode
|
24d3b730cf06b2ee2aa5edb59ea806c2a62b1ea3
|
6d2696962d65a799359395ee5528766cec5e0d13
|
refs/heads/master
| 2021-02-15T04:34:38.025770
| 2020-08-18T11:45:33
| 2020-08-18T11:45:33
| 244,863,145
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 25,970
|
py
|
# MIT License
# Original work Copyright (c) 2018 François Girault
# Modified work Copyright 2020 Morten Eek
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# from console import ConsoleUi, Processing
from common.xml_settings import XMLSettings
import inspect
import commands
import os
import webbrowser
import pickle
import shutil
import tkinter as tk
from tkinter import ttk
# from tkinter import filedialog
from settings import COLORS
from gui.dialog import multi_open
import pathlib
class HomeTab(ttk.Frame):
    def __init__(self, parent, app):
        """Build the Home tab: a heading plus a left column (Start and
        Recent links) and a right column (Help or a project form)."""
        super().__init__(parent, style="Home.TFrame", padding=[56, 12, 8, 8])
        self.heading = ttk.Label(self, text=app.settings.name, style="Heading.TLabel")
        self.heading.pack(side=tk.TOP, anchor=tk.W)
        # NOTE(review): module-level global shared with show_help();
        # an instance attribute would be less surprising.
        global subsystem_frames
        subsystem_frames = []
        self.system_dir = None
        self.project_dir_created = False
        frame = ttk.Frame(self, style="Home.TFrame")
        frame.pack(fill=tk.BOTH, expand=1, pady=12)
        frame.pack_propagate(False)
        self.left_frame = ttk.Frame(frame, style="Home.TFrame")
        self.left_frame.pack(side=tk.LEFT, fill=tk.BOTH, expand=1)
        self.right_frame = ttk.Frame(frame, style="Home.TFrame")
        self.right_frame.pack(side=tk.RIGHT, fill=tk.BOTH, expand=1, padx=(0, 56))
        self.show_start(app)
        self.show_help(app)
    def open_home_url(self):
        """Open the project's GitHub page in a new browser tab."""
        webbrowser.open('https://github.com/BBATools/PWCode', new=2)
    def show_help(self, app):
        """Show the description texts and reset the right column to the
        Help links, discarding any project form currently shown."""
        self.subheading = ttk.Label(self, text=app.settings.desc, style="SubHeading.TLabel")
        self.subheading.pack(side=tk.TOP, anchor=tk.W, after=self.heading)
        self.description = ttk.Label(self, text=app.settings.long_desc, style="Text.TLabel")
        self.description.pack(side=tk.TOP, anchor=tk.W, after=self.subheading)
        # Clear the right column (project forms etc.) before rebuilding.
        for widget in self.right_frame.winfo_children():
            widget.destroy()
        subsystem_frames.clear()
        self.project_dir_created = False
        LinksFrame(
            self.right_frame,
            "Help",
            (
                ("GitHub repository", self.open_home_url),
            ),
        ).pack(side=tk.TOP, anchor=tk.W, pady=12)
    def show_start(self, app):
        """Populate the left column with the Start actions and the
        recently-used links."""
        LinksFrame(
            self.left_frame,
            "Start",
            (
                ("Export Data", lambda: self.export_data_project(app)),
                ("Convert Files", lambda: self.convert_files_project(app)),  # TODO: only show this entry on PWLinux — add a check
                ("New File", app.command_callable("new_file")),
                ("Open File ...", app.command_callable("open_file")),
                ("Open Folder ...", app.command_callable("open_folder")),
            ),
        ).pack(side=tk.TOP, anchor=tk.W, pady=12)
        # NOTE(review): .pack() returns None, so this attribute is always
        # None — probably meant to keep the RecentLinksFrame itself.
        self.recent_links_frame = RecentLinksFrame(self.left_frame, app).pack(side=tk.TOP, anchor=tk.W, pady=12)
    def system_entry_check(self, app):  # TODO: merge with run_plugin? (pass a "run" flag?) — this logic is also duplicated in the plugin main
        """Validate the entered system name and create its project dir.

        Returns 'ok' on success, None otherwise (with the reason shown
        in the module-level msg_label created by reset_rhs()).
        """
        system_name = self.project_frame.name_entry.get()
        if not system_name:
            msg = 'Missing system name'
            msg_label.config(text=msg)
            return
        else:
            msg_label.config(text='')
        self.system_dir = app.data_dir + system_name + '_'  # --> projects/[system_]
        system_dir = self.system_dir
        # Archive path drops the trailing '_' marker from the directory name.
        archive = system_dir[:-1] + '/' + system_name + '.tar'
        # TODO: more checks? also validate against the config XML file?
        # TODO: reuse the folder if it exists and is empty, or only holds certain content?
        if os.path.isfile(archive):
            msg = "'" + archive + "' already exists"
            msg_label.config(text=msg)
            return
        ok = self.create_project_dir(system_dir, system_name)
        if not ok:
            return
        return 'ok'
    def create_project_dir(self, path, project_name):
        """Create the project directory (plus its .pwcode subfolder) and
        lock the name entry. Returns 'ok' on success, None on failure."""
        if not self.project_dir_created:
            try:
                os.mkdir(path)
                self.project_dir_created = True
            except OSError:
                msg = "Can't create destination directory '%s'" % (path)
                msg_label.config(text=msg)
                return
        # Hidden workspace folder used by run_plugin for generated scripts.
        pathlib.Path(path + '/.pwcode').mkdir(parents=True, exist_ok=True)
        self.project_frame.configure(text=' ' + project_name + ' ')
        # Name is fixed once the directory exists.
        self.project_frame.name_entry.configure(state=tk.DISABLED)
        return 'ok'
    def reset_rhs(self, header):
        """Clear the right column and rebuild its header row.

        Also (re)creates the module-level msg_label widget that the
        validation methods use to surface error messages.
        """
        global msg_label
        self.project_dir_created = False
        self.subheading.pack_forget()
        self.description.pack_forget()
        for widget in self.right_frame.winfo_children():
            widget.destroy()
        frame = ttk.Frame(self.right_frame, style="SubHeading.TLabel")
        frame.pack(side=tk.TOP, anchor=tk.W, pady=12, fill=tk.X)
        header_label = ttk.Label(frame, text=header, style="SubHeading.TLabel")
        header_label.pack(side=tk.LEFT, anchor=tk.N, pady=4, padx=1, fill="both", expand="yes")
        msg_label = ttk.Label(frame, text="", style="Links.TButton")
        msg_label.pack(side=tk.LEFT, anchor=tk.E, pady=4, padx=(0, 12))
    def config_init(self, def_name):
        """Start a fresh XML config for *def_name* under the PWCode tmp
        dir, deleting any stale file. Returns (settings, config_dir)."""
        config_dir = os.environ["pwcode_config_dir"]  # Get PWCode config path
        config_path = config_dir + '/tmp/' + def_name + '.xml'
        if os.path.isfile(config_path):
            os.remove(config_path)
        return XMLSettings(config_path), config_dir
    def run_plugin(self, app, project_name, config_dir, def_name):
        """Copy the plugin's files into the project's .pwcode folder,
        open its (renamed) main script in the editor, and run it.

        NOTE(review): *path* is only bound when a 'main.py' exists in the
        plugin folder; app.model.open_file(path) would raise NameError
        otherwise — presumably every plugin ships a main.py; confirm.
        """
        base_path = app.data_dir + project_name
        # export_data projects use the trailing-underscore directory.
        if def_name == 'export_data':
            base_path = app.data_dir + project_name + '_'
        for filename in os.listdir(config_dir + def_name):
            new_path = base_path + '/.pwcode/' + filename
            if filename == 'main.py':
                # main.py is renamed <project>_<plugin>.py and remembered.
                new_path = base_path + '/.pwcode/' + project_name + '_' + def_name + '.py'
                path = new_path
            shutil.copy(config_dir + def_name + '/' + filename, new_path)
        app.model.open_file(path)
        tab_id = app.editor_frame.path2id[path]
        file_obj = app.editor_frame.id2path[tab_id]
        text_editor = app.editor_frame.notebook.nametowidget(tab_id)
        # Restore the Help pane before executing the generated script.
        self.show_help(app)
        text_editor.run_file(file_obj, False)
    def export_data(self, app):
        """Validate and launch the export_data plugin for the project.

        NOTE(review): self.export_check() is not defined in this part of
        the file — presumably it validates the form and returns the
        config dir, mirroring convert_files(); verify.
        """
        # The plugin name is taken from this method's own name.
        def_name = inspect.currentframe().f_code.co_name
        config_dir = self.export_check(app)
        if config_dir:
            project_name = self.project_frame.name_entry.get()
            self.run_plugin(app, project_name, config_dir, def_name)
            # TODO: read from the tmp XML first, then copy the XML into the project folder. Still correct?
def convert_files(self, app):
    """Validate the convert-files form, write its config and run the plugin.

    Validation failures are reported through the shared ``msg_label`` and
    abort the run.
    """
    def_name = inspect.currentframe().f_code.co_name
    config, config_dir = self.config_init(def_name)
    # At least one folder must have been added via 'Add Folder'.
    if not hasattr(self.project_frame, 'folders_frame'):
        msg_label.config(text='No folders added')
        return
    project_name = self.project_frame.name_entry.get()
    if not project_name:
        msg_label.config(text='Missing project name')
        return
    ok = self.create_project_dir(app.data_dir + project_name, project_name)
    if ok:
        msg_label.config(text='')
    else:
        return
    # Persist the form state for the plugin script to read.
    config.put('name', self.project_frame.name_entry.get())
    config.put('options/merge', self.project_frame.merge_option.get())
    i = 1
    for frame, path in self.project_frame.folders_frame.folders.items():
        # frame.remove_button.configure(state=tk.DISABLED)
        config.put('folders/folder' + str(i), path)
        i += 1
    # self.project_frame.merge_option_frame.configure(state=tk.DISABLED)
    # self.project_frame.name_frame.folder_button.configure(state=tk.DISABLED)
    config.save()
    self.run_plugin(app, project_name, config_dir, def_name)
def convert_files_project(self, app):
    """Build the right-hand 'Convert Files' view: project form, Run button
    and the merge-subfolders option."""
    self.reset_rhs("Convert Files")
    self.project_frame = Project(self.right_frame, app, self, "Project Name:", text=" New Data Project ", relief=tk.GROOVE)
    self.project_frame.pack(side=tk.TOP, anchor=tk.W, fill="both", expand=1, pady=12)
    name_frame = self.project_frame.name_frame
    name_frame.folder_button = ttk.Button(name_frame, text='Add Folder', style="Entry.TButton", command=lambda: self.project_frame.choose_folder(app))
    name_frame.folder_button.pack(side=tk.RIGHT, anchor=tk.N, pady=3, padx=(0, 12))
    run_button = ttk.Button(name_frame, text='Run', style="Run.TButton", command=lambda: self.convert_files(app))
    run_button.pack(side=tk.RIGHT, anchor=tk.N, pady=3, padx=(0, 12))
    options_frame = ttk.Frame(self.project_frame, style="SubHeading.TLabel")
    options_frame.pack(side=tk.TOP, anchor=tk.W, fill=tk.X)
    # options_label = ttk.Label(options_frame, text="Options:", width=16)
    # options_label.pack(side=tk.LEFT, anchor=tk.N, padx=(8, 0), pady=3)
    merge_label = ttk.Label(options_frame, text="Merge Subfolders:")
    merge_label.pack(side=tk.LEFT, anchor=tk.N, pady=3, padx=(8, 0))
    # First (empty) entry is the OptionMenu placeholder; default is 'False'.
    options = ['', 'False', 'True']
    self.project_frame.merge_option = tk.StringVar()
    self.project_frame.merge_option.set(options[1])
    merge_option = ttk.OptionMenu(options_frame, self.project_frame.merge_option, *options)
    merge_option.pack(side=tk.LEFT, anchor=tk.N, pady=3, padx=(0, 55))
    merge_option.configure(width=4)
    # self.project_frame.merge_option = var
    self.project_frame.merge_option_frame = merge_option
def export_data_project(self, app):
    """Build the right-hand 'Export Data' view: system form, Run button,
    and database options (memory allocation, DDL generation)."""
    self.reset_rhs("Export Data")
    self.project_frame = Project(self.right_frame, app, self, "System Name:", text=" New Data Project ", relief=tk.GROOVE)
    self.project_frame.pack(side=tk.TOP, anchor=tk.W, fill="both", expand=1, pady=12)
    name_frame = self.project_frame.name_frame
    subsystem_button = ttk.Button(name_frame, text='Add Subsystem', style="Entry.TButton", command=lambda: self.subsystem_entry(app))
    subsystem_button.pack(side=tk.RIGHT, anchor=tk.N, pady=3, padx=(0, 12))
    # TODO: Create def export_data(self, app):
    run_button = ttk.Button(name_frame, text='Run', style="Run.TButton", command=lambda: self.export_data(app))
    run_button.pack(side=tk.RIGHT, anchor=tk.N, pady=3, padx=(0, 12))
    options_frame = ttk.Frame(self.project_frame, style="SubHeading.TLabel")
    options_frame.pack(side=tk.TOP, anchor=tk.W, fill=tk.X, pady=(0, 20))
    options_label = ttk.Label(options_frame, text="Database Options:", width=16)
    options_label.pack(side=tk.LEFT, anchor=tk.N, padx=(8, 0), pady=3)
    # # TODO: Move this line up to system level
    # # TODO: Add a check against duplicate folders --> in choose_folder code?
    memory_label = ttk.Label(options_frame, text="Allocated memory:")
    memory_label.pack(side=tk.LEFT, anchor=tk.N, pady=3)
    # First (empty) entry is the OptionMenu placeholder; default '4 Gb'.
    options = ['', '3 Gb', '4 Gb', '5 Gb', '6 Gb', '7 Gb', '8 Gb']
    self.project_frame.memory_option = tk.StringVar()
    self.project_frame.memory_option.set(options[2])
    memory_option = ttk.OptionMenu(options_frame, self.project_frame.memory_option, *options)
    memory_option.pack(side=tk.LEFT, anchor=tk.N, pady=3, padx=(0, 55))
    memory_option.configure(width=4)
    ddl_label = ttk.Label(options_frame, text="DDL Generation:")
    ddl_label.pack(side=tk.LEFT, anchor=tk.N, pady=3)
    # DDL generator choice; default 'Native'.
    options = ['', 'Native', 'SQL Workbench']
    self.project_frame.ddl_option = tk.StringVar()
    self.project_frame.ddl_option.set(options[1])
    ddl_option = ttk.OptionMenu(options_frame, self.project_frame.ddl_option, *options)
    ddl_option.pack(side=tk.LEFT, anchor=tk.N, pady=3)
    ddl_option.configure(width=12)
def subsystem_entry(self, app):
    """Validate the current form state and append a new SubSystem form.

    The first subsystem only requires a valid system name; later ones
    re-run the full export validation.  On the first subsystem the
    project frame is repacked so it stops expanding.
    """
    ok = None
    if len(subsystem_frames) == 0:
        ok = self.system_entry_check(app)
    else:
        ok = self.export_check(app)  # TODO: Is 'ok' correct here?
    if ok:
        if len(subsystem_frames) == 0:
            self.project_frame.pack_forget()
            self.project_frame.pack(side=tk.TOP, anchor=tk.W, fill="both", expand=0, pady=(0, 12))
        subsystem_frame = SubSystem(self.right_frame, app, self, text=" New Subsystem ", relief=tk.GROOVE)
        subsystem_frame.pack(side=tk.TOP, anchor=tk.W, fill="both", expand=1, pady=12)
        subsystem_frames.append(subsystem_frame)
def export_check(self, app):
    """Validate the export form and write the 'pwcode' config file.

    Each subsystem is given a name: ``<db_name>_<db_schema>`` when the
    database fields are filled in, otherwise ``files<N>`` when only
    folders were added.  On a validation error the message is shown in
    ``msg_label`` and None is returned; on success the config is saved
    and the config directory path is returned.
    """
    # TODO: Check connection, or that the name can be used as a folder
    # name, when the subsystem is a database and not just files.
    config, config_dir = self.config_init('pwcode')
    config.put('name', self.project_frame.name_entry.get())
    config.put('options/memory', self.project_frame.memory_option.get())
    config.put('options/ddl', self.project_frame.ddl_option.get())
    i = 0
    subsystem_names = []
    for subsystem in subsystem_frames:
        subsystem_name = None
        folder_paths = []
        for frame, path in subsystem.folders_frame.folders.items():
            folder_paths.append(path)
        db_name = subsystem.db_name_entry.get().lower()
        db_schema = subsystem.db_schema_entry.get().lower()
        msg = None
        if (len(db_name) == 0 or len(db_schema) == 0):
            if folder_paths:
                # File-only subsystem: generate a sequential name.
                subsystem_name = 'files' + str(i)
                i += 1
            else:
                msg = 'Missing subsystem name'
        else:
            # BUGFIX: the duplicate check previously ran *before*
            # subsystem_name was assigned, so it always compared None
            # against the list and duplicates were never detected.
            subsystem_name = db_name + '_' + db_schema
            if subsystem_name in subsystem_names:
                msg = 'Duplicate subsystem name'
        if msg:
            msg_label.config(text=msg)
            # WAIT: Delete system folder here if empty? Also on cancel?
            return
        msg_label.config(text='')
        subsystem_names.append(subsystem_name)
        subsystem.configure(text=' ' + subsystem_name + ' ')
        config.put('subsystems/' + subsystem_name + '/db_name', db_name)
        config.put('subsystems/' + subsystem_name + '/schema_name', db_schema)
        j = 0
        for path in folder_paths:
            config.put('subsystems/' + subsystem_name + '/folders/folder' + str(j), path)
            j += 1
    config.save()
    return config_dir
class Project(ttk.LabelFrame):
    """Labeled frame holding the common 'new project' form widgets.

    Builds the name-entry row with a Discard button; plugin-specific
    option widgets (merge/memory/ddl) are attached afterwards by the
    view-builder methods and start out as None here.
    """

    def __init__(self, parent, app, grandparent, entry_text, *args, **kwargs):
        super().__init__(parent, *args, **kwargs, style="Links.TFrame")
        self.grandparent = grandparent
        # Placeholders for option widgets/variables set by the callers.
        self.merge_option = None
        self.merge_option_frame = None
        self.memory_option = None
        self.ddl_option = None
        self.name_frame = ttk.Frame(self, style="SubHeading.TLabel")
        self.name_frame.pack(side=tk.TOP, anchor=tk.W, fill=tk.X)
        self.name_label = ttk.Label(self.name_frame, text=entry_text, width=16)
        self.name_label.pack(side=tk.LEFT, anchor=tk.N, padx=(8, 0), pady=(4, 3))
        self.name_entry = make_entry(self.name_frame, app, 56)
        self.name_entry.pack(side=tk.LEFT, anchor=tk.N, pady=(4, 3))
        self.name_entry.focus()
        # 'Discard' returns to the help view without saving anything.
        self.cancel_button = ttk.Button(self.name_frame, text='Discard', style="Links.TButton", command=lambda: self.grandparent.show_help(app))
        self.cancel_button.pack(side=tk.RIGHT, anchor=tk.N, pady=3, padx=(0, 12))

    def choose_folder(self, app):
        """Ask the user for a folder and append it to the folders list."""
        # Lazily create the folder-list container on first use.
        if not hasattr(self, 'folders_frame'):
            self.folders_frame = LinksFrame(self)
            self.folders_frame.pack(side=tk.TOP, anchor=tk.N, padx=(8, 0), pady=3, fill=tk.X)
        path = multi_open(app.data_dir, mode='dir')
        self.folders_frame.add_folder(path, lambda p=path: app.command_callable("open_folder")(p), 70)
class SubSystem(ttk.LabelFrame):
    """Form for one export subsystem: database connection details plus an
    optional list of extra file folders."""

    def __init__(self, parent, app, grandparent, *args, **kwargs):
        super().__init__(parent, *args, **kwargs, style="Links.TFrame")
        self.grandparent = grandparent
        # Row 1: db/schema names plus Discard and Add Folder buttons.
        self.frame1 = ttk.Frame(self, style="SubHeading.TLabel")
        self.frame1.pack(side=tk.TOP, anchor=tk.W, fill=tk.X)
        self.db_name_label = ttk.Label(self.frame1, text="DB Name:", width=8)
        self.db_name_label.pack(side=tk.LEFT, anchor=tk.N, padx=(8, 0), pady=(4, 3))
        self.db_name_entry = make_entry(self.frame1, app, 25)
        self.db_name_entry.pack(side=tk.LEFT, anchor=tk.N, pady=(4, 3))
        self.db_name_entry.focus()
        self.db_schema_label = ttk.Label(self.frame1, text="Schema Name:", width=12)
        self.db_schema_label.pack(side=tk.LEFT, anchor=tk.N, padx=(12, 0), pady=(4, 3))
        self.db_schema_entry = make_entry(self.frame1, app, 25)
        self.db_schema_entry.pack(side=tk.LEFT, anchor=tk.N, pady=(4, 3))
        self.cancel_button = ttk.Button(self.frame1, text='Discard', style="Links.TButton", command=lambda: self.subsystem_remove())
        self.cancel_button.pack(side=tk.RIGHT, anchor=tk.N, pady=3, padx=(0, 12))
        self.folder_button = ttk.Button(self.frame1, text='Add Folder', style="Entry.TButton", command=lambda: self.choose_folder(app))
        self.folder_button.pack(side=tk.RIGHT, anchor=tk.N, pady=3, padx=(0, 12))
        # Row 2: JDBC connection URL.
        self.frame2 = ttk.Frame(self, style="SubHeading.TLabel")
        self.frame2.pack(side=tk.TOP, anchor=tk.W, fill=tk.X)
        self.jdbc_url_label = ttk.Label(self.frame2, text="JDBC Url:", width=8)
        self.jdbc_url_label.pack(side=tk.LEFT, anchor=tk.N, padx=(8, 0), pady=3)
        self.jdbc_url_entry = make_entry(self.frame2, app, 64)
        self.jdbc_url_entry.pack(side=tk.LEFT, anchor=tk.N, pady=3)
        # Row 3: database credentials.
        self.frame3 = ttk.Frame(self, style="SubHeading.TLabel")
        self.frame3.pack(side=tk.TOP, anchor=tk.W, fill=tk.X)
        self.db_user_label = ttk.Label(self.frame3, text="DB User:", width=8)
        self.db_user_label.pack(side=tk.LEFT, anchor=tk.N, padx=(8, 0), pady=3)
        self.db_user_entry = make_entry(self.frame3, app, 25)
        self.db_user_entry.pack(side=tk.LEFT, anchor=tk.N, pady=3)
        self.db_pwd_label = ttk.Label(self.frame3, text="DB Password:", width=12)
        self.db_pwd_label.pack(side=tk.LEFT, anchor=tk.N, padx=(12, 0), pady=3)
        self.db_pwd_entry = make_entry(self.frame3, app, 25)
        self.db_pwd_entry.pack(side=tk.LEFT, anchor=tk.N, pady=3)
        # Row 5: exclude/include table filter (option menu + entry).
        self.frame5 = ttk.Frame(self, style="SubHeading.TLabel")
        self.frame5.pack(side=tk.TOP, anchor=tk.W, fill=tk.X)
        options = ['', 'Exclude Tables (comma separated)', 'Include Tables (comma separated)']
        self.var = tk.StringVar()
        # Display only the first two words ("Exclude Tables:") as label.
        self.var.set(' '.join(options[1].split(' ')[:2]) + ':')
        self.var.trace("w", self.get_option)
        self.tables_option = ttk.OptionMenu(self.frame5, self.var, *options)
        self.tables_option.pack(side=tk.LEFT, anchor=tk.N, pady=3, padx=(8, 0))
        self.tables_option.configure(width=12)
        self.tables_entry = make_entry(self.frame5, app, 57)
        self.tables_entry.pack(side=tk.LEFT, anchor=tk.N, pady=3)
        # Row 6: tables to overwrite.
        self.frame6 = ttk.Frame(self, style="SubHeading.TLabel")
        self.frame6.pack(side=tk.TOP, anchor=tk.W, fill=tk.X)
        self.overwrite_label = ttk.Label(self.frame6, text="Overwrite Tables:", width=15)
        self.overwrite_label.pack(side=tk.LEFT, anchor=tk.N, padx=(8, 0), pady=3)
        self.overwrite_entry = make_entry(self.frame6, app, 57)
        self.overwrite_entry.pack(side=tk.LEFT, anchor=tk.N, pady=(3, 6))
        # List of extra folders belonging to this subsystem.
        self.folders_frame = LinksFrame(self)
        self.folders_frame.pack(side=tk.TOP, anchor=tk.N, padx=(8, 0), pady=3, fill=tk.X)

    def choose_folder(self, app):
        """Prompt for a folder and add it to this subsystem's folder list."""
        path = multi_open(app.data_dir, mode='dir')
        self.folders_frame.add_folder(path, lambda p=path: app.command_callable("open_folder")(p), 70)

    def get_option(self, *args):
        """Trace callback: shorten the chosen option text to its first two
        words plus ':' (e.g. 'Exclude Tables:') for compact display."""
        value = ' '.join(self.var.get().split(' ')[:2]) + ':'
        self.var.set(value)
        self.tables_option.configure(state=tk.NORMAL)  # Just for refreshing widget

    def subsystem_remove(self):
        """Remove this subsystem form; re-expand the project frame when it
        was the last one."""
        subsystem_frames.remove(self)
        self.destroy()
        if len(subsystem_frames) == 0:
            self.grandparent.project_frame.pack_forget()
            self.grandparent.project_frame.pack(side=tk.TOP, anchor=tk.W, fill="both", expand=1)
class LinksFrame(ttk.Frame):
    """ A container of links and label that packs vertically"""

    def __init__(self, parent, title=None, links=None):
        """Optionally set a *title* label and an iterable of
        ``(label, action)`` pairs; a pair with a falsy action becomes a
        plain label instead of a button."""
        super().__init__(parent, style="Links.TFrame")
        # Maps each folder row frame -> its path string.
        self.folders = {}
        if title:
            self.title = ttk.Label(self, text=title, style="SubHeading.TLabel")
            self.title.pack(side=tk.TOP, anchor=tk.W, pady=4, padx=1)
        if links:
            for label, action in links:
                if action:
                    self.add_link(label, action)
                else:
                    self.add_label(label)

    def add_link(self, label, action):
        """Append a clickable link button."""
        ttk.Button(self, text=label, style="Links.TButton", command=action).pack(side=tk.TOP, anchor=tk.W)

    def add_folder(self, path, action, width):
        """Append a folder row (open button + 'x' remove button).

        Rejects empty paths and duplicates, reporting via ``msg_label``.
        """
        if not path:
            msg_label.config(text='Not a valid path.')
            return
        if path in self.folders.values():
            msg_label.config(text='Duplicate folder')
            return
        label = 'Folder: ' + path
        folder_frame = ttk.Frame(self, style="SubHeading.TLabel")
        folder_frame.pack(side=tk.TOP, anchor=tk.W, fill=tk.X)
        self.folders[folder_frame] = path
        folder_frame.folder = ttk.Button(folder_frame, text=label, style="SideBar.TButton", command=action, width=width)
        folder_frame.folder.pack(side=tk.LEFT, anchor=tk.N, pady=(1, 0))
        folder_frame.remove_button = ttk.Button(folder_frame, text=' x', style="SideBar.TButton", command=lambda: self.remove_folder(folder_frame))
        folder_frame.remove_button.pack(side=tk.LEFT, anchor=tk.N, pady=(1, 0))
        # Clear any previous validation message.
        msg_label.config(text='')

    def remove_folder(self, folder_frame):
        """Forget the row widget and drop its path from the registry."""
        del self.folders[folder_frame]
        folder_frame.pack_forget()

    def add_label(self, text):
        """Append a non-clickable text label."""
        ttk.Label(self, text=text, style="Links.TLabel").pack(side=tk.TOP, anchor=tk.W)
class RecentLinksFrame(LinksFrame):
    """A frame display a list of last opened in the model"""

    def __init__(self, parent, app):
        super().__init__(parent, "Open Recent")
        self.app = app
        # Observe the model so open/close events refresh the list.
        app.model.add_observer(self)
        # Restore the recent-files list persisted by a previous session.
        if os.path.exists(self.app.tmp_dir + "/recent_files.p"):
            self.app.recent_links = pickle.load(open(self.app.tmp_dir + "/recent_files.p", "rb"))
        self.update_recent_links(None)

    def update_recent_links(self, new_file_obj):
        """Rebuild the link list; *new_file_obj* (if given) is moved to the
        most-recent position first."""
        if new_file_obj:
            # Re-inserting moves the path to the end (= most recent).
            if new_file_obj.path in self.app.recent_links.keys():
                del self.app.recent_links[new_file_obj.path]
            self.app.recent_links.update({new_file_obj.path: new_file_obj})
        # Drop the old link buttons (labels are kept).
        for widget in self.winfo_children():
            if isinstance(widget, ttk.Button):
                widget.destroy()
        # Most recent first; skip deleted files and files already open.
        for path, file_obj in reversed(self.app.recent_links.items()):
            if os.path.isfile(file_obj.path):
                # Garbage-collect empty, auto-created 'Untitled' buffers.
                if 'PWCode/bin/tmp/Untitled-' in file_obj.path:
                    if os.path.getsize(file_obj.path) == 0:
                        os.remove(file_obj.path)
                        continue
                if file_obj in self.app.model.openfiles:
                    continue
                self.add_link(file_obj.basename, lambda p=path: self.app.command_callable("open_file")(p))

    def on_file_closed(self, file_obj):
        """model callback"""
        self.update_recent_links(file_obj)

    def on_file_open(self, file_obj):
        """model callback"""
        self.update_recent_links(None)
def make_entry(parent, app, width):
    """Create a flat tk.Entry styled to match the sidebar color scheme.

    Returns the (unpacked) entry widget; the caller packs it.
    """
    entry = tk.Entry(parent,
                     font=app.font,
                     bg=COLORS.sidebar_bg,
                     disabledbackground=COLORS.sidebar_bg,
                     fg=COLORS.fg,
                     disabledforeground=COLORS.sidebar_fg,
                     bd=0,
                     insertbackground=COLORS.link,  # cursor color
                     insertofftime=0,  # non-blinking cursor
                     width=width,
                     highlightthickness=0,
                     )
    return entry
|
[
"mortenee@gmail.com"
] |
mortenee@gmail.com
|
df28bf7bc78ed48e41e24ccb8453cd99dda0ca8b
|
f543bd15eb3e746b379c735e6a1e44b32ca52e26
|
/pokeswsh-raidreset.py
|
eea6e1bd3d27c74381a9b46321aeb04cad89c3bf
|
[] |
no_license
|
suhamaHZK/pokeswsh-autotool
|
899e004ebfb44f0731c733a49bde1d51c0d23e42
|
8c029431b6f6fe1dc508b637bfa6822b6c994c6b
|
refs/heads/master
| 2021-01-04T18:17:13.703578
| 2020-02-23T04:51:18
| 2020-02-23T04:51:18
| 240,705,218
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,031
|
py
|
################################################################
# レイドバトルが始まり次第ホスト機をリセットします。 #
# インターネットに接続して「みんなで 挑戦!」を選ぶ直前まで #
# 手動で進めてから、本スクリプトを起動してください。 #
# #
# Switch本体やソフトの遅延によりうまく動かない場合があります。 #
# その場合は、適宜sleepの秒数を伸ばしてください。 #
# #
# 動作確認環境 #
# Python 3.7.5 #
# pyserial 3.4 #
# macOS 10.14.6 #
################################################################
# memo
# バックアップあか始めるオプションをつけたい -b
# →同時押しができない!
import argparse
import serial
from time import sleep
def macro(load_backup=False, pin=''):
    """Host raids in a loop, resetting the game after each battle starts.

    Start the script from the point just before selecting "みんなで 挑戦!"
    (invite others).  At every prompt, pressing plain Enter continues;
    typing anything before Enter ends the loop.

    load_backup: pause so the user can boot from the backup save (up+B+X)
        manually on each reset.
    pin: optional 4-digit Link Code to set before every raid ('' = none).
    """
    # BUGFIX: this guard previously used `break` outside any loop, which
    # is a SyntaxError; `return` matches the intent of aborting early.
    if pin != '':
        if wait_enter('パスワード '+pin+' を設定します。Enterを押してください...'):
            return
    while True:
        if pin != '':
            set_pin(pin)
            print('パスワードは '+pin+' です。')
        # Start from just before selecting "みんなで 挑戦!"
        if wait_enter('Enterで募集開始します...'): break  # wait for Enter; any other input exits
        send('Button A', 0.1)  # select "invite others"
        if wait_enter('2台目の参加を待っています...'): break  # wait for the 2nd console to join
        send('HAT TOP', 0.1)  # move cursor to "Ready to Battle!"
        send('HAT CENTER', 0.5)
        send('Button A', 0.1)  # press it
        sleep(0.5)
        send('Button A', 0.1)  # select "Start the battle"
        sleep(0.5)
        send('Button A', 0.1)  # dismiss "not enough participants!"
        sleep(0.5)
        send('Button A', 0.1)  # confirm filling empty slots with support trainers
        sleep(0.1)
        if wait_enter('バトル開始を待っています...'): break  # wait until the battle has started
        # Reset the host: quit to HOME and relaunch so the den is kept.
        send('Button HOME', 0.1)  # go to the HOME menu
        sleep(1.2)
        send('Button X', 0.1)  # close software
        sleep(1.2)
        send('Button A', 0.1)  # confirm close
        sleep(3)  # wait for shutdown
        send('Button A', 0.1)  # select the game
        sleep(2)
        send('Button A', 0.1)  # select the user
        sleep(20)  # license check, boot, opening movie
        if load_backup:
            wait_enter('上とBを押しながらXを押してください。ロード画面になったらEnter')
            # hold('HAT TOP', 0.1)
            # hold('Button B', 0.1)
            # hold('Button X', 0.1)
            # send('HAT CENTER', 0.1)
            # send('RELEASE')  # up+B+X starts the game from the backup save
            # sleep(3)
            send('Button A', 0.1)  # press "start" on the confirmation screen
            sleep(3)
            send('Button A', 0.1)  # dismiss the notice
            sleep(10)  # wait for the save data to load
        else:
            send('Button A', 0.1)  # start the game
            sleep(10)  # wait for the save data to load
        send('Button Y', 0.1)  # open Y-Comm
        sleep(2)
        send('Button START', 0.1)  # connect to the internet
        sleep(10)  # wait for the connection
        send('Button B', 0.1)  # dismiss "connected"
        sleep(0.5)
        send('Button B', 0.1)  # close Y-Comm
        sleep(2)
        send('Button A', 0.1)  # interact with the den
        sleep(5)  # waiting for communication
        # Back at the point just before "みんなで 挑戦!" — loop again.
def set_pin(pin):
    """Enter the Link Code *pin* on the in-game PIN pad.

    The pad is navigated with D-pad taps from the cursor's current grid
    position; '0' sits below the 3x3 digit grid.  Each digit of *pin*
    (a string of decimal digits) is entered with A, then START confirms.
    """
    # PIN pad layout as [x, y] grid coordinates (x = column, y = row).
    pinpad = [
        [1, 3],                  # 0
        [0, 0], [1, 0], [2, 0],  # 1 2 3
        [0, 1], [1, 1], [2, 1],  # 4 5 6
        [0, 2], [1, 2], [2, 2],  # 7 8 9
    ]

    def move_axis(steps, positive, negative):
        # Tap the D-pad |steps| times toward *positive* (steps > 0) or
        # *negative* (steps < 0), re-centering the hat after each tap.
        if steps > 0:
            for _ in range(steps):
                send(positive, 0.1)
                send('HAT CENTER', 0.1)
        else:
            for _ in range(steps * -1):
                send(negative, 0.1)
                send('HAT CENTER', 0.1)

    pos = [0, 0]  # cursor starts on "1" (top-left)
    send('Button START', 0.1)  # open the PIN entry screen
    sleep(1)
    for digit_char in pin:
        pindigit = int(digit_char)
        nextpos = pinpad[pindigit]
        xmove = nextpos[0] - pos[0]
        ymove = nextpos[1] - pos[1]
        if pindigit == 0:
            # "0" lies below the grid: move sideways first, then down.
            move_axis(xmove, 'HAT RIGHT', 'HAT LEFT')
            move_axis(ymove, 'HAT BOTTOM', 'HAT TOP')
        else:
            # Within the 3x3 grid: move vertically first, then sideways.
            move_axis(ymove, 'HAT BOTTOM', 'HAT TOP')
            move_axis(xmove, 'HAT RIGHT', 'HAT LEFT')
        pos = nextpos
        send('Button A', 0.1)  # enter the digit
        sleep(0.2)
    send('Button START', 0.1)  # confirm
    sleep(1)
    send('Button A', 0.1)  # "OK?" dialog (A-press count may vary by language)
    sleep(0.5)
# Command-line interface: serial port (positional), -b/--backup to boot
# from the backup save on each reset, -p/--pin to set a 4-digit Link Code.
parser = argparse.ArgumentParser()
parser.add_argument('port')
parser.add_argument('-b', '--backup', action='store_true')
parser.add_argument('-p', '--pin', default='')
args = parser.parse_args()
def send(msg, duration=0):
    """Send controller command *msg* over the serial link, hold it for
    *duration* seconds, then release all inputs."""
    print('SEND '+msg)
    ser.write(f'{msg}\r\n'.encode('utf-8'));
    sleep(duration)
    ser.write(b'RELEASE\r\n');
def hold(msg, duration=0):
    """Send controller command *msg* and keep it held (no RELEASE is
    sent), sleeping *duration* seconds afterwards."""
    print('HOLD '+msg)
    ser.write(f'{msg}\r\n'.encode('utf-8'));
    sleep(duration)
def wait_enter(notice):
    """Print *notice* and block until the user presses Enter.

    Returns the typed line; callers treat a non-empty return value
    (anything typed before Enter) as a request to stop the loop.
    """
    print(notice)
    r = input()
    return r
# Open the serial link to the controller board and run the raid-reset
# loop; Ctrl-C releases all inputs and closes the port cleanly.
ser = serial.Serial(args.port, 9600)
try:
    # First Link
    send('Button L', 0.1)
    sleep(1)
    macro(load_backup=args.backup, pin=args.pin)
    send('RELEASE')
    sleep(0.5)
    ser.close()
except KeyboardInterrupt:
    # Make sure no button stays pressed when the user aborts.
    send('RELEASE')
    sleep(0.5)
    ser.close()
|
[
"suama.hiseki+github@gmail.com"
] |
suama.hiseki+github@gmail.com
|
5b9b6079ef1dc9a06651c23622225e22548e2446
|
a637aef1a43af42d220bbc24e1ffca7667c355c4
|
/2A- REST-Single-Site-KV-Store/test_assignment2a.py
|
30b0b573ac9984811710e9541d2ad1ebc1131367
|
[] |
no_license
|
ebasaldu/Distrubuted-Systems-128
|
03fce33d32e9f91a0a49c438201c987ab2650671
|
f0de41952154819ecc115b2e5c82a4892cd99b0f
|
refs/heads/master
| 2021-01-07T17:29:55.555216
| 2020-02-20T02:30:04
| 2020-02-20T02:30:04
| 241,769,010
| 0
| 0
| null | 2020-02-20T02:11:41
| 2020-02-20T01:48:33
|
Python
|
UTF-8
|
Python
| false
| false
| 4,715
|
py
|
###################
# Course: CMPS128
# Date: Spring 2019
# Assignment: #2A
# Author: Reza NasiriGerdeh
# Email: rnasirig@ucsc.edu
###################
import unittest
import subprocess
import requests
import sys
import random
import time
# Service endpoint under test; the server must be running before the suite.
hostname = 'localhost'  # Windows and Mac users can change this to the docker vm ip
portNumber = '8082'
baseUrl = 'http://' + hostname + ":" + portNumber
class TestHW2(unittest.TestCase):
    """End-to-end tests of the single-site key-value store REST API.

    Requires a live server at ``baseUrl``.  Methods are named a..j so
    unittest's alphabetical ordering runs them as one GET/PUT/DELETE
    scenario over the key 'subject1'.
    """

    def test_a_get_nonexisting_key(self):
        """GET on a missing key -> 404 with an error body."""
        response = requests.get( baseUrl + '/key-value-store/subject1')
        responseInJson = response.json()
        self.assertEqual(response.status_code, 404)
        self.assertEqual(responseInJson['doesExist'], False)
        self.assertEqual(responseInJson['message'], 'Error in GET')
        self.assertEqual(responseInJson['error'], 'Key does not exist')

    def test_b_delete_nonexisting_key(self):
        """DELETE on a missing key -> 404 with an error body."""
        response = requests.delete( baseUrl + '/key-value-store/subject1')
        responseInJson = response.json()
        self.assertEqual(response.status_code, 404)
        self.assertEqual(responseInJson['doesExist'], False)
        self.assertEqual(responseInJson['message'], 'Error in DELETE')
        self.assertEqual(responseInJson['error'], 'Key does not exist')

    def test_c_put_nonexistent_key(self):
        """PUT of a new key -> 201 and replaced == False."""
        response = requests.put(baseUrl + '/key-value-store/' + "subject1", json={'value': "Data Structures"})
        responseInJson = response.json()
        self.assertEqual(response.status_code, 201)
        self.assertEqual(responseInJson['message'], 'Added successfully')
        self.assertEqual(responseInJson['replaced'], False)

    def test_d_get_after_put_nonexisting_key(self):
        """GET after the PUT above returns the stored value."""
        response = requests.get( baseUrl + '/key-value-store/subject1')
        responseInJson = response.json()
        self.assertEqual(response.status_code, 200)
        self.assertEqual(responseInJson['doesExist'], True)
        self.assertEqual(responseInJson['message'], 'Retrieved successfully')
        self.assertEqual(responseInJson['value'], 'Data Structures')

    def test_e_put_existent_key(self):
        """PUT of an existing key -> 200 and replaced == True."""
        response = requests.put(baseUrl + '/key-value-store/' + "subject1", json={'value': "Distributed Systems"})
        responseInJson = response.json()
        self.assertEqual(response.status_code, 200)
        self.assertEqual(responseInJson['message'], 'Updated successfully')
        self.assertEqual(responseInJson['replaced'], True)

    def test_f_get_after_put_existing_key(self):
        """GET reflects the updated value."""
        response = requests.get( baseUrl + '/key-value-store/subject1')
        responseInJson = response.json()
        self.assertEqual(response.status_code, 200)
        self.assertEqual(responseInJson['doesExist'], True)
        self.assertEqual(responseInJson['message'], 'Retrieved successfully')
        self.assertEqual(responseInJson['value'], 'Distributed Systems')

    def test_g_delete_existing_key(self):
        """DELETE of an existing key -> 200."""
        response = requests.delete( baseUrl + '/key-value-store/subject1')
        responseInJson = response.json()
        self.assertEqual(response.status_code, 200)
        self.assertEqual(responseInJson['doesExist'], True)
        self.assertEqual(responseInJson['message'], 'Deleted successfully')

    def test_h_get_after_delete_existing_key(self):
        """GET after DELETE -> 404 again."""
        response = requests.get( baseUrl + '/key-value-store/subject1')
        responseInJson = response.json()
        self.assertEqual(response.status_code, 404)
        self.assertEqual(responseInJson['doesExist'], False)
        self.assertEqual(responseInJson['message'], 'Error in GET')
        self.assertEqual(responseInJson['error'], 'Key does not exist')

    def test_i_put_key_too_long(self):
        """PUT with a key over the length limit -> 400."""
        tooLongKey = '6TLxbmwMTN4hX7L0QX5_NflWH0QKfrTlzcuM5PUQHS52___lCizKbEMxLZHhtfww3KcMoboDLjB6mw_wFfEz5v_TtHqvGOZnk4_8aqHga79BaHXzpU9_IRbdjYdQutAU0HEuji6Ny1Ol_MSaBF4JdT0aiG_N7xAkoPH3AlmVqDN45KDGBz7_YHrLnbLEK11SQxZcKXbFomh9JpH_sbqXIaifqOy4g06Ab0q3WkNfVzx7H0hGhNlkINf5PF12'
        value = "haha"
        response = requests.put( baseUrl + '/key-value-store/' + tooLongKey, json={'value': value})
        responseInJson = response.json()
        self.assertEqual(response.status_code, 400)
        self.assertEqual(responseInJson['message'], 'Error in PUT')
        self.assertEqual(responseInJson['error'], 'Key is too long')

    def test_j_put_key_with_no_value(self):
        """PUT without a 'value' field -> 400."""
        response = requests.put(baseUrl + '/key-value-store/subject1', json={})
        responseInJson = response.json()
        self.assertEqual(response.status_code, 400)
        self.assertEqual(responseInJson['message'], 'Error in PUT')
        self.assertEqual(responseInJson['error'], 'Value is missing')
# Run the suite when the file is executed directly.
if __name__ == '__main__':
    unittest.main()
|
[
"noreply@github.com"
] |
ebasaldu.noreply@github.com
|
c96dffd7ff8bf676991e5042380792fef8003cfb
|
44b968d41a0b18cc23be9f74fd1783208422d548
|
/backend/env/bin/sqlformat
|
0c9977ed4ca2512d976e9e9e0071f5b95d320168
|
[] |
no_license
|
cbovino/CrossfitCMS
|
2c42b50122f7cbce639e4204851f86d9926730e2
|
5488faa5d079643f8e0b342a9c42e1b7a3c5e377
|
refs/heads/master
| 2023-01-08T14:34:03.105586
| 2020-02-27T19:27:42
| 2020-02-27T19:27:42
| 218,548,075
| 0
| 0
| null | 2023-01-05T08:30:21
| 2019-10-30T14:35:53
|
Python
|
UTF-8
|
Python
| false
| false
| 254
|
#!/Users/connorbovino/djreact/backend/env/bin/python3
# -*- coding: utf-8 -*-
# Auto-generated console-script wrapper (pip entry point) that launches
# sqlparse's ``sqlformat`` command.
import re
import sys

from sqlparse.__main__ import main

if __name__ == '__main__':
    # Strip setuptools wrapper suffixes ('-script.py', '.exe') from
    # argv[0] so usage/help output shows the plain command name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"connor.bovino@gmail.com"
] |
connor.bovino@gmail.com
|
|
ae71635fa891932e6bb80ee24c7fe03119cda96c
|
3d17d0da384c12b9232d4acd3174305e27dece6e
|
/analyse.py
|
c31b18cf0df2beac53ab9b39dac6a245121e8a0c
|
[] |
no_license
|
takuto-mikada/openpose-golf
|
7a794b20998e9c987635c30db9158019829a74ae
|
362130fb2c99c2aa8ef374e4bee140cb87c59804
|
refs/heads/main
| 2023-02-11T04:23:18.141440
| 2020-12-16T13:23:03
| 2020-12-16T13:23:03
| 321,701,723
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,090
|
py
|
import json
import cv2
import math
import numpy as np
import matplotlib.pyplot as plt
import os
# Swing clips to process (basenames of video/<name>.mp4).
nameList = ["hideki_01", "hideki_02",
            "mcllroy_01", "mcllroy_02", "mcllroy_03", "mcllroy_04",
            "rose_01", "rose_02",
            "spieth_01", "spieth_02", "spieth_03",
            "thomas_02",
            "tiger_03"
            ]
# NOTE(review): this second assignment overrides the list above, so only
# the clips below are actually processed.
nameList = ["hideki_01", "hideki_02",
            "mcllroy_01","mcllroy_03", "mcllroy_04",
            "spieth_02", "spieth_03",
            "thomas_02",
            "tiger_03"
            ]
# For each clip: overlay the tracked wrist/center points onto an output
# video, track the wrist-midpoint angle around the body center, segment
# the swing into phases, and save address/top/finish key frames.
for filename in nameList:
    print(filename)
    codec = cv2.VideoWriter_fourcc(*'mp4v')
    video = cv2.VideoWriter("video/est/%s.mp4"%(filename), codec, 30, (1920, 1080))
    # Per-frame keypoints: DATA[frame][keypoint_id] = [x, y, ...]
    # (presumably OpenPose ids: 1=neck, 4/7=wrists, 8/11=hips — confirm).
    DATA = json.load(open("data/analyse/%s.json"%(filename), 'r'))
    cap = cv2.VideoCapture("video/%s.mp4"%(filename))
    if not os.path.exists("images/analyse/%s"%(filename)):
        os.mkdir("images/analyse/%s"%(filename))
    time = []          # frame indices with a valid angle sample
    angleRow = []      # wrist-midpoint angle (degrees) per sample
    changePhase = []   # frame indices where the swing phase changed
    phase = 0
    changePhase.append(0)
    center = (0, 0)
    for i in range(int(cap.get(cv2.CAP_PROP_FRAME_COUNT))):
        ret, img = cap.read()
        oriImg = img
        # Body center: midpoint of neck (1) and the hip midpoint (8, 11).
        if "1" in DATA["%s"%(i)].keys() and "8" in DATA["%s"%(i)].keys() and "11" in DATA["%s"%(i)].keys():
            cx1 = int((DATA["%s"%(i)]["8"][0] + DATA["%s"%(i)]["11"][0])/2)
            cy1 = int((DATA["%s"%(i)]["8"][1] + DATA["%s"%(i)]["11"][1])/2)
            cx2 = int((DATA["%s"%(i)]["1"][0] + cx1)/2)
            cy2 = int((DATA["%s"%(i)]["1"][1] + cy1)/2)
            center = (cx2, cy2)
        # NOTE(review): both operands test center[0]; the second was
        # presumably meant to be center[1] — confirm.
        if center[0]!=0 and center[0]!=0:
            cv2.circle(img, (cx2, cy2), 10, (0, 255, 0), thickness=-1)
        if "4" in DATA["%s"%(i)].keys():
            rist = (int(DATA["%s"%(i)]["4"][0]), int(DATA["%s"%(i)]["4"][1]))
            cv2.circle(img, rist, 10, (0, 0, 255), thickness=-1)
        if "7" in DATA["%s"%(i)].keys():
            rist = (int(DATA["%s"%(i)]["7"][0]), int(DATA["%s"%(i)]["7"][1]))
            cv2.circle(img, rist, 10, (255, 0, 0), thickness=-1)
        # Both wrists visible: record the midpoint angle and advance the
        # phase state machine.
        if "4" in DATA["%s"%(i)].keys() and "7" in DATA["%s"%(i)].keys():
            rist = (int((DATA["%s"%(i)]["4"][0] + DATA["%s"%(i)]["7"][0])/2), int((DATA["%s"%(i)]["4"][1] + DATA["%s"%(i)]["7"][1])/2))
            cv2.circle(img, rist, 10, (0, 255, 0), thickness=-1)
            cv2.line(img, rist, (cx2, cy2), (0, 255, 0), thickness=5, lineType=cv2.LINE_4)
            video.write(img)
            # Angle of the wrist midpoint around the body center, in
            # degrees; y is flipped to mathematical orientation.
            angle = (rist[0] - cx2, -(rist[1] - cy2))
            angle = math.atan2(angle[1], angle[0]) * 180 / math.pi
            time.append(i)
            angleRow.append(angle)
            # NOTE(review): rewritten on every frame; could move after the loop.
            with open("data/analyse_result/%s.json"%(filename), 'w') as f:
                json.dump(angleRow, f, indent=4)
            cv2.imwrite("images/analyse/%s/%04d.jpg"%(filename, i), img)
            # img = cv2.resize(img, (1280, 640))
            # cv2.imshow("A", img)
            # cv2.waitKey(30)
            # Phase transitions on angle thresholds (degrees).
            if phase==0 and angle<-150:
                phase = 1
                changePhase.append(i)
            elif phase==1 and 0<=angle and angle<150:
                phase = 2
                changePhase.append(i)
            elif phase==2 and 150<angle:
                phase = 3
                changePhase.append(i)
            elif phase==3 and -150<angle and angle<=0:
                phase = 4
                changePhase.append(i)
    video.release()
    changePhase.append(i)
    print(changePhase)
    # Save the angle-vs-frame curve for inspection.
    plt.plot(time, angleRow)
    plt.savefig("data/analyse_result/%s.png"%(filename))
    # plt.show()
    plt.gca().clear()
    # top pose detection: the minimum angle inside phase 2.
    # NOTE(review): angleRow is indexed by frame number here, but samples
    # were only appended for frames with both wrists detected — indices
    # may be misaligned if any frame was skipped; confirm.
    topPhase = {}
    for i in range(changePhase[2], changePhase[3]):
        topPhase.update({i : angleRow[i]})
    score = sorted(topPhase.items(), key=lambda x:x[1])
    print("top : ", score[0][0])
    # Frame-to-frame angle deltas, used to find still poses.
    diff = []
    preAngle = 0
    for i in range(len(angleRow)):
        angle = angleRow[i]
        diff.append(angle - preAngle)
        preAngle = angle
    # plt.plot(diff)
    # plt.ylim(-20, 20)
    # plt.show()
    # plt.gca().clear()
    # plt.savefig("diff.png")
    # Address pose: a stable sample (|delta| < 10) in the first phase.
    addressPhase = []
    for i in range(changePhase[0], changePhase[1]):
        if np.absolute(diff[i])<10:
            addressPhase.append(i)
    print("address : ", addressPhase[int(len(addressPhase)/2)])
    # Finish pose: stable with angle above 90 after the last transition.
    finishPhase = []
    for i in range(changePhase[4], changePhase[-1]):
        if np.absolute(diff[i])<10 and angleRow[i]>90:
            finishPhase.append(i)
    print("finish : ", finishPhase[int(len(finishPhase)/2)])
    # Re-read the clip and dump the three detected key frames as images.
    cap = cv2.VideoCapture("video/%s.mp4"%(filename))
    for i in range(int(cap.get(cv2.CAP_PROP_FRAME_COUNT))):
        ret, img = cap.read()
        if i==score[0][0]:
            cv2.imwrite("images/analyse/%s_top.jpg"%(filename), img)
        elif i==addressPhase[int(len(addressPhase)/2)]:
            cv2.imwrite("images/analyse/%s_address.jpg"%(filename), img)
        elif i==finishPhase[int(len(finishPhase)/2)]:
            cv2.imwrite("images/analyse/%s_finish.jpg"%(filename), img)
|
[
"tmdu.mikada@gmail.com"
] |
tmdu.mikada@gmail.com
|
67cf38dc2257b719682391c2d54996982c9b8db1
|
c7faef6d4c0a965032a37953f34373816ce092c1
|
/Products/MetaPublisher2/library/compatibility/historical.py
|
f458599fd708f8b30dc7109289df426da19247b0
|
[
"ZPL-2.1"
] |
permissive
|
sfluehnsdorf/MetaPublisher2
|
2d52a6baf1822a94ba12f66c86760953dd1cead4
|
4688baa9182919a8f8da8a0afbd68997e4453708
|
refs/heads/master
| 2019-01-01T23:22:04.677296
| 2013-06-23T12:03:46
| 2013-06-23T12:03:46
| null | 0
| 0
| null | null | null | null |
ISO-8859-3
|
Python
| false
| false
| 37,321
|
py
|
# -*- coding: iso-8859-15 -*-
# ============================================================================
#
# M e t a P u b l i s h e r 2
#
# ----------------------------------------------------------------------------
# Copyright (c) 2002-2013, Sebastian Lühnsdorf - Web-Solutions and others
# For more information see the README.txt file or visit www.metapulisher.org
# ----------------------------------------------------------------------------
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL).
#
# A copy of the ZPL should accompany this distribution.
#
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
# ============================================================================
__doc__ = """Historical Compatability
To ensure to continued operation of deprecated resources, this module provides
wrappers and handlers for these outdated resources. It also provides an API for
logging calls to deprecated resources.
$Id: library/compatibility/historical.py 11 2013-05-09 22:54:03Z sfluehnsdorf $
"""
__version__ = '$Revision: 2.3 $'[11:-2]
# ============================================================================
# Module Imports
from Products.MetaPublisher2.bases.field.legacyfield import LegacyFieldPlugin as FieldPlugin
from Products.MetaPublisher2.bases.frontend.legacyfrontend import LegacyFrontendPlugin as InterfacePlugin
from Products.MetaPublisher2.bases.widget.legacywidget import LegacyWidgetPlugin as WidgetPlugin
from Products.MetaPublisher2.bases.storage.legacystorage import LegacyStoragePlugin as StoragePlugin
from Products.MetaPublisher2.bases.plugin.legacyplugin import all_plugintypes
from Products.MetaPublisher2.library.application import permission_access_configuration, permission_access_entries, permission_change_configuration, permission_change_entries, permission_create_entries, permission_manage, permission_manage_designs, permission_manage_frontends, permission_manage_presets, permission_publish_frontends, permission_save_presets, permission_zmi
from Products.MetaPublisher2.library.common import ClassSecurityInfo, InitializeClass, Folder, OrderedFolder, true, false
from Products.MetaPublisher2.library.compatibility.deprecation import deprecated_form, deprecated_method
# ============================================================================
# Module Exports
__all__ = [
'HistoricalCompatibility',
'InterfacesFolder',
'standard_form_footer',
'standard_form_header',
'TestError',
]
# ============================================================================
# Legacy Field Value Test Exception
class TestError(Exception):
    """!TXT! Legacy Field Value Test Error

    Every keyword argument supplied at construction time is attached to the
    instance as an attribute, mirroring the historical MetaPublisher2 API.
    """

    def __init__(self, **args):
        # Expose each keyword argument as an instance attribute.
        for name, value in args.items():
            setattr(self, name, value)
# ============================================================================
# Interface Default Layout
# DTML snippet rendered above every legacy interface page (standard Zope
# header plus the object's title as a heading).
standard_form_header = '''\
<dtml-var standard_html_header>
<h2><dtml-var title_or_id></h2>
'''
# DTML snippet rendered below every legacy interface page; appends a
# "Powered by MetaPublisher2" badge before the standard Zope footer.
standard_form_footer = '''\
<p align="center">
<a target="_blank" href="http://metapublisher.org">
<img src="<dtml-var BASEPATH1>/misc_/MetaPublisher2/MP2Powered.gif" border="0" alt="Powered by MetaPublisher2">
</a>
</p>
<dtml-var standard_html_footer>
'''
# ============================================================================
# InterfacesFolder Base Class
class InterfacesFolder(Folder):
    """InterfacesFolder Base Class

    Legacy container type kept so that persisted instances of the old
    'InterfacesFolder' meta type can still be loaded; beyond the distinct
    meta_type it adds nothing to a plain OFS Folder.
    """
    # Zope meta_type string under which old instances were registered.
    meta_type = 'InterfacesFolder'
# ----------------------------------------------------------------------------
# Class Security
InitializeClass(InterfacesFolder)
# ============================================================================
# Historical Compatibility Mix-In Class
class HistoricalCompatibility:
"""!TXT! Historical Compatibility Mix-In Class"""
security = ClassSecurityInfo()
# ------------------------------------------------------------------------
# MetaPublisher2
security.declarePublic('zmp2')
def zmp2(self):
"""DEPRECATED: !TXT! Return this instance of MetaPublisher2"""
deprecated_method('zmp2')
return self.get_MetaPublisher2()
security.declarePublic('get_MetaPublisher2_url')
def get_MetaPublisher2_url(self):
"""DEPRECATED: !TXT! Return this instance's absolute url"""
deprecated_method('get_MetaPublisher2_url')
return self.get_MetaPublisher2_url()
# ------------------------------------------------------------------------
# UserInterface
security.declarePublic('manage_zmp2_css')
def manage_zmp2_css(self, REQUEST=None):
"""DEPRECATED: !TXT! MetaPublisher2 CSS"""
deprecated_form('manage_zmp2_css')
return self.manage_MetaPublisher2_css(self, REQUEST)
security.declarePublic('sp')
def sp(self, w=1, h=1, **kw):
"""DEPRECATED: !TXT! Return a tag for a spacer image with specifiable dimensions"""
deprecated_method('sp')
params = ''
for key in kw.keys():
params = params + ' %s="%s"' % (key, kw[key])
tag = '<img src="p_/sp" width="%s" height="%s" border="0" alt=""%s/>'
return tag % (self.REQUEST.BASEPATH1, w, h, params)
# ------------------------------------------------------------------------
# Configuration Constraints
# ------------------------------------------------------------------------
# Configuration Fields
security.declareProtected(permission_access_configuration, 'manage_fieldsBrowserForm')
def manage_fieldsBrowserForm(self, REQUEST=None):
"""DEPRECATED: !TXT!"""
deprecated_form('manage_fieldsBrowserForm')
return self.fields_form(self, REQUEST)
security.declareProtected(permission_change_configuration, 'manage_fieldsNewForm')
def manage_fieldsNewForm(self, REQUEST=None):
"""DEPRECATED: !TXT!"""
deprecated_form('manage_fieldsNewForm')
return self.add_field_form(self, REQUEST)
security.declareProtected(permission_access_configuration, 'getField')
def getField(self, storageId, fieldId):
"""DEPRECATED: !TXT!"""
deprecated_method('getField')
return self.get_field(self, storageId, fieldId)
security.declareProtected(permission_access_configuration, 'fieldIds')
def fieldIds(self, storageId):
"""DEPRECATED: !TXT!"""
deprecated_method('fieldIds')
return self.field_ids(storageId)
security.declareProtected(permission_access_configuration, 'fieldItems')
def fieldItems(self, storageId):
"""DEPRECATED: !TXT!"""
deprecated_method('fieldItems')
return self.field_items(storageId)
security.declareProtected(permission_access_configuration, 'fieldValues')
def fieldValues(self, storageId):
"""DEPRECATED: !TXT!"""
deprecated_method('fieldValues')
return self.field_values(storageId)
security.declareProtected(permission_change_configuration, 'manage_fieldsNew')
def manage_fieldsNew(self, storageId, fieldType, REQUEST=None):
"""DEPRECATED: !TXT!"""
deprecated_method('manage_fieldsNew')
return self.add_field(storageId, fieldType, REQUEST)
security.declareProtected(permission_change_configuration, 'delField')
def delField(self, storageId, fieldId):
"""DEPRECATED: !TXT!"""
deprecated_method('delField')
return self.delete_field(storageId, fieldId)
security.declareProtected(permission_change_configuration, 'manage_fieldsDelete')
def manage_fieldsDelete(self, storageId, ids=[], REQUEST=None):
"""DEPRECATED: !TXT!"""
deprecated_method('manage_fieldsDelete')
return self.delete_fields(storageId, ids, REQUEST)
security.declareProtected(permission_change_configuration, 'delFields')
def delFields(self, storageId, fieldIds=[]):
"""DEPRECATED: !TXT!"""
deprecated_method('delFields')
return self.delete_fields(storageId, fieldIds)
security.declareProtected(permission_change_configuration, 'renameField')
def renameField(self, storageId, fieldId, newId):
"""DEPRECATED: !TXT!"""
deprecated_method('renameField')
return self.rename_field(storageId, fieldId, newId)
security.declareProtected(permission_access_configuration, 'fieldsSortable')
def fieldsSortable(self, storageId):
"""DEPRECATED: !TXT!"""
deprecated_method('fieldsSortable')
return self.are_fields_sortable(storageId)
security.declareProtected(permission_change_configuration, 'manage_fieldsMoveTop')
def manage_fieldsMoveTop(self, storageId, fieldId, REQUEST=None):
"""DEPRECATED: !TXT!"""
deprecated_method('manage_fieldMoveTop')
return self.move_fields_to_top(storageId, fieldId, REQUEST)
security.declareProtected(permission_change_configuration, 'moveFieldTop')
def moveFieldTop(self, storageId, fieldId):
"""DEPRECATED: !TXT!"""
deprecated_method('moveFieldTop')
return self.move_fields_to_top(storageId, fieldId)
security.declareProtected(permission_change_configuration, 'manage_fieldsMoveUp')
def manage_fieldsMoveUp(self, storageId, fieldId, REQUEST=None):
"""DEPRECATED: !TXT!"""
deprecated_method('manage_fieldMoveUp')
return self.move_fields_up(storageId, fieldId, REQUEST)
security.declareProtected(permission_change_configuration, 'moveFieldUp')
def moveFieldUp(self, storageId, fieldId):
"""DEPRECATED: !TXT!"""
deprecated_method('moveFieldUp')
return self.move_fields_up(storageId, fieldId)
security.declareProtected(permission_change_configuration, 'manage_fieldsMoveDown')
def manage_fieldsMoveDown(self, storageId, fieldId, REQUEST=None):
"""DEPRECATED: !TXT!"""
deprecated_method('manage_fieldMoveDown')
return self.move_fields_down(storageId, fieldId, REQUEST)
security.declareProtected(permission_change_configuration, 'moveFieldDown')
def moveFieldDown(self, storageId, fieldId):
"""DEPRECATED: !TXT!"""
deprecated_method('moveFieldDown')
return self.move_fields_down(storageId, fieldId)
security.declareProtected(permission_change_configuration, 'manage_fieldsMoveBottom')
def manage_fieldsMoveBottom(self, storageId, fieldId, REQUEST=None):
"""DEPRECATED: !TXT!"""
deprecated_method('manage_fieldMoveBottom')
return self.move_fields_to_bottom(storageId, fieldId, REQUEST)
security.declareProtected(permission_change_configuration, 'moveFieldBottom')
def moveFieldBottom(self, storageId, fieldId):
"""DEPRECATED: !TXT!"""
deprecated_method('moveFieldBottom')
return self.move_fields_to_bottom(storageId, fieldId)
# ------------------------------------------------------------------------
# Configuration Identifiers
security.declareProtected(permission_access_configuration, 'newEntryId')
def newEntryId(self, storageId):
"""DEPRECATED: !TXT!"""
deprecated_method('newEntryId')
return self.new_entry_id(storageId)
# ------------------------------------------------------------------------
# Configuration Indexing
security.declareProtected(permission_access_configuration, 'manage_storagesIndexForm')
def manage_storagesIndexForm(self, REQUEST=None):
"""DEPRECATED: !TXT! Indexing Form"""
deprecated_form('manage_storagesIndexForm')
return self.indexing_form(self, REQUEST)
# ------------------------------------------------------------------------
# Configuration Inheritance
# ------------------------------------------------------------------------
# Configuration Relations
# ------------------------------------------------------------------------
# Configuration Storages
security.declareProtected(permission_change_configuration, 'manage_storagesBrowserForm')
def manage_storagesBrowserForm(self, REQUEST=None):
"""DEPRECATED: !TXT! Storages Form"""
deprecated_form('manage_storagesBrowserForm')
return self.storages_form(self, REQUEST)
security.declareProtected(permission_change_configuration, 'manage_storagesNewForm')
def manage_storagesNewForm(self, REQUEST=None):
"""DEPRECATED: !TXT! New Storage Form"""
deprecated_form('manage_storagesNewForm')
return self.add_storage_form(self, REQUEST)
security.declareProtected(permission_access_configuration, 'getStorage')
def getStorage(self, storageId):
"""DEPRECATED: !TXT!"""
deprecated_method('getStorage')
return self.get_storage(storageId)
security.declareProtected(permission_access_configuration, 'storageIds')
def storageIds(self):
"""DEPRECATED: !TXT!"""
deprecated_method('storageIds')
return self.storage_ids()
security.declareProtected(permission_access_configuration, 'storageItems')
def storageItems(self):
"""DEPRECATED: !TXT!"""
deprecated_method('storageItems')
return self.storage_items()
security.declareProtected(permission_access_configuration, 'storageValues')
def storageValues(self):
"""DEPRECATED: !TXT!"""
deprecated_method('storageValues')
return self.storage_values()
security.declareProtected(permission_change_configuration, 'manage_storagesNew')
def manage_storagesNew(self, REQUEST=None):
"""DEPRECATED: !TXT!"""
deprecated_method('manage_storagesNew')
return self.add_storage(REQUEST)
security.declareProtected(permission_change_configuration, 'manage_storagesDelete')
def manage_storagesDelete(self, ids=[], REQUEST=None):
"""DEPRECATED: !TXT!"""
deprecated_method('manage_storagesDelete')
return self.delete_storages(ids, REQUEST)
# ------------------------------------------------------------------------
# Configuration Triggers
# ------------------------------------------------------------------------
# Data Entries
security.declareProtected(permission_access_entries, 'manage_entriesBrowserForm')
def manage_entriesBrowserForm(self, REQUEST=None):
"""DEPRECATED: !TXT! management form for entries"""
deprecated_form('manage_entriesBrowserForm')
return self.entries_form(self, REQUEST)
security.declareProtected(permission_create_entries, 'manage_entriesNewForm')
def manage_entriesNewForm(self, REQUEST=None):
"""DEPRECATED: !TXT! management form for adding entries"""
deprecated_form('manage_entriesNewForm')
return self.entries_form(self, REQUEST)
security.declareProtected(permission_create_entries, 'manage_entriesNew')
def manage_entriesNew(self, REQUEST=None):
"""DEPRECATED: !TXT! add entry to storage"""
deprecated_method('manage_entriesNew')
source = REQUEST.get('storage_id', REQUEST.get('storageId', self.get_profile_variable(REQUEST, 'storage_id')))
entry_id = REQUEST.get('entry_id', REQUEST.get('entryId', None))
entry_id = self.add_entry(source, entry_id, REQUEST.form)
self.redirect(
REQUEST,
'entries_form',
message='!TXT! Entry "%s" in Storage "%s" added.' % (entry_id, source)
)
security.declareProtected(permission_create_entries, 'manage_entriesNewMore')
def manage_entriesNewMore(self, REQUEST=None):
"""DEPRECATED: !TXT! add entry to storage"""
deprecated_method('manage_entriesNewMore')
source = REQUEST.get('storage_id', REQUEST.get('storageId', self.get_profile_variable(REQUEST, 'storage_id')))
entry_id = REQUEST.get('entry_id', REQUEST.get('entryId', None))
entry_id = self.add_entry(source, entry_id, REQUEST.form)
self.redirect(
REQUEST,
'add_entry_form',
message='!TXT! Entry "%s" in Storage "%s" added.' % (entry_id, source)
)
security.declareProtected(permission_change_entries, 'manage_entriesEdit')
def manage_entriesEdit(self, REQUEST=None):
"""DEPRECATED: !TXT! edit entry in storage"""
source = REQUEST.get('storage_id', REQUEST.get('storageId', self.get_profile_variable(REQUEST, 'storage_id')))
entry_id = REQUEST.get('entry_id', REQUEST['entryId'])
self.edit_entry(source, entry_id, REQUEST.form, REQUEST)
security.declareProtected(permission_change_entries, 'manage_entriesDelete')
def manage_entriesDelete(self, storageId, ids=[], REQUEST=None):
"""DEPRECATED: !TXT! delete entries from storage"""
deprecated_method('manage_entriesDelete')
return self.delete_entries(storageId, ids, REQUEST)
security.declareProtected(permission_access_entries, 'entryIds')
def entryIds(self, storageId):
"""DEPRECATED: !TXT! retrieve list of ids of entries in storage"""
deprecated_method('entryIds')
return self.entry_ids(storageId)
security.declareProtected(permission_access_entries, 'entryItems')
def entryItems(self, storageId):
"""DEPRECATED: !TXT! retrieve list of id, value tuples of entries in storage"""
deprecated_method('entryItems')
return self.entry_items(storageId)
security.declareProtected(permission_access_entries, 'entryValues')
def entryValues(self, storageId):
"""DEPRECATED: !TXT! retrieve list of values of entries in storage"""
deprecated_method('entryValues')
return self.entry_values(storageId)
security.declareProtected(permission_access_entries, 'getEntry')
    def getEntry(self, storageId, entryId):
        """DEPRECATED: !TXT! retrieve entry from storage"""
        deprecated_method('getEntry')
        # NOTE(review): passes self as an explicit first argument on top of the
        # bound call (self.get_entry(self, ...)); getStorage/fieldIds do not.
        # Looks like a copy/paste from the DTML form wrappers - verify
        # get_entry's signature before relying on this.
        return self.get_entry(self, storageId, entryId)
security.declareProtected(permission_create_entries, 'addEntry')
def addEntry(self, storageId, entryId, entryData={}, **args):
"""DEPRECATED: !TXT! add entry to storage"""
deprecated_method('addEntry')
entryData.update(args)
return self.add_entry(self, storageId, entryId, entryData)
security.declareProtected(permission_change_entries, 'delEntries')
def delEntries(self, storageId, entryIds=[]):
"""DEPRECATED: !TXT! delete entries from storage"""
deprecated_method('delEntries')
return self.delete_entries(storageId, entryIds)
security.declareProtected(permission_change_entries, 'delEntry')
    def delEntry(self, storageId, entryId):
        """DEPRECATED: !TXT! delete entry from storage"""
        deprecated_method('delEntry')
        # NOTE(review): delegates to self.del_entry, while the sibling wrappers
        # map to the modern snake_case API (delete_entries, delete_field, ...);
        # presumably this should target delete_entry - confirm del_entry exists.
        return self.del_entry(storageId, entryId)
security.declareProtected(permission_change_entries, 'editEntry')
def editEntry(self, storageId, entryId, entryData={}, **args):
"""DEPRECATED: !TXT! edit entry in storage"""
deprecated_method('editEntry')
entryData.update(args)
return self.edit_entry(storageId, entryId, entryData)
security.declareProtected(permission_change_entries, 'renameEntry')
def renameEntry(self, entryId, newId):
"""DEPRECATED: !TXT! rename entry in storage"""
deprecated_method('renameEntry')
return self.rename_entry(entryId, newId)
security.declareProtected(permission_change_entries, 'manage_entriesMoveBottom')
def manage_entriesMoveBottom(self, storageId, entryId, REQUEST=None):
"""DEPRECATED: !TXT! move entry to bottom"""
deprecated_method('manage_entriesMoveBottom')
return self.move_entry_to_bottom(storageId, entryId, REQUEST)
security.declareProtected(permission_change_entries, 'manage_entriesMoveDown')
def manage_entriesMoveDown(self, storageId, entryId, REQUEST=None):
"""DEPRECATED: !TXT! move entry down"""
deprecated_method('manage_entriesMoveDown')
return self.move_entry_down(storageId, entryId, REQUEST)
security.declareProtected(permission_change_entries, 'manage_entriesMoveTop')
def manage_entriesMoveTop(self, storageId, entryId, REQUEST=None):
"""DEPRECATED: !TXT! move entry to top"""
deprecated_method('manage_entriesMoveTop')
return self.move_entry_to_top(storageId, entryId, REQUEST)
security.declareProtected(permission_change_entries, 'manage_entriesMoveToPosition')
def manage_entriesMoveToPosition(self, storageId, entryId, position, REQUEST=None):
"""DEPRECATED: !TXT! move entry to position"""
deprecated_method('manage_entriesMoveToPosition')
return self.move_entry(storageId, entryId, position, REQUEST)
security.declareProtected(permission_change_entries, 'manage_entriesMoveUp')
def manage_entriesMoveUp(self, storageId, entryId, REQUEST=None):
"""DEPRECATED: !TXT! move entry up"""
deprecated_method('manage_entriesMoveUp')
return self.move_entry_up(storageId, entryId, REQUEST)
security.declareProtected(permission_change_entries, 'moveEntryBottom')
def moveEntryBottom(self, storageId, entryId):
"""DEPRECATED: !TXT! move entry to bottom"""
deprecated_method('moveEntryBottom')
return self.move_entry_to_bottom(storageId, entryId)
security.declareProtected(permission_change_entries, 'moveEntryDown')
def moveEntryDown(self, storageId, entryId):
"""DEPRECATED: !TXT! move entry down"""
deprecated_method('moveEntryDown')
return self.move_entry_down(storageId, entryId)
security.declareProtected(permission_change_entries, 'moveEntryTop')
def moveEntryTop(self, storageId, entryId):
"""DEPRECATED: !TXT! move entry to top"""
deprecated_method('moveEntryTop')
return self.move_entry_to_top(storageId, entryId)
security.declareProtected(permission_change_entries, 'moveEntryToPosition')
def moveEntryToPosition(self, storageId, entryId, position):
"""DEPRECATED: !TXT! move entry to position"""
deprecated_method('moveEntryToPosition')
return self.move_entry(storageId, entryId, position)
security.declareProtected(permission_change_entries, 'moveEntryUp')
def moveEntryUp(self, storageId, entryId):
"""DEPRECATED: !TXT! move entry up"""
deprecated_method('moveEntryUp')
return self.move_entry_up(storageId, entryId)
security.declareProtected(permission_access_entries, 'getEntryPosition')
def getEntryPosition(self, storageId, entryId):
"""DEPRECATED: !TXT! retrieve position of entry"""
deprecated_method('getEntryPosition')
return self.get_entry_position(storageId, entryId)
security.declareProtected(permission_access_entries, 'getEntryField')
def getEntryField(self, storageId, entryId, fieldId, default=None):
"""DEPRECTAED: !TXT! Get the value of an Entry's Field."""
deprecated_method('getEntryField')
self.get_entry_field(storageId, entryId, fieldId, default)
security.declareProtected(permission_change_entries, 'setEntryField')
def setEntryField(self, storageId, entryId, fieldId, value):
"""DEPRECTAED: !TXT! Set the value of an Entry's Field."""
deprecated_method('setEntryField')
self.set_entry_field(storageId, entryId, fieldId, value)
# ------------------------------------------------------------------------
# Data Exports
# ------------------------------------------------------------------------
# Data Expressions
# ------------------------------------------------------------------------
# Data Imports
# ------------------------------------------------------------------------
# Data Queries
security.declareProtected(permission_access_entries, 'manage_entriesQueriesForm')
def manage_entriesQueriesForm(self, REQUEST=None):
"""DEPRECTAED: !TXT!"""
return self.queries_form(self, REQUEST)
# ------------------------------------------------------------------------
# Data Reports
# ------------------------------------------------------------------------
# Data Search
# ------------------------------------------------------------------------
# Data Transfer
# ------------------------------------------------------------------------
# Publisher Audit
# ------------------------------------------------------------------------
# Publisher Caching
security.declareProtected(permission_manage_designs, 'manage_storagesCacheForm')
def manage_storagesCacheForm(self, REQUEST=None):
"""DEPRECATED: !TXT!"""
deprecated_form('manage_storagesCacheForm')
return self.caching_form(self, REQUEST)
# ------------------------------------------------------------------------
# Publisher Designs
security.declareProtected(permission_manage_designs, 'manage_interfacesStylesForm')
def manage_interfacesStylesForm(self, REQUEST=None):
"""DEPRECATED: !TXT!"""
deprecated_form('manage_interfacesStylesForm')
return self.designs_form(self, REQUEST)
# ------------------------------------------------------------------------
# Publisher Frontends
security.declareProtected(permission_manage_frontends, 'manage_interfacesDelete')
def manage_interfacesDelete(self, ids=[], REQUEST=None):
"""DEPRECATED: !TXT! Delete specified Frontends"""
deprecated_method('manage_interfacesDelete')
return self.del_frontends(ids, REQUEST)
security.declareProtected(permission_manage_frontends, 'interfaceValues')
def interfaceValues(self):
"""DEPRECATED: !TXT! Return values of Frontends"""
deprecated_method('interfaceValues')
return self.frontend_values()
security.declareProtected(permission_manage_frontends, 'interfaceItems')
def interfaceItems(self):
"""DEPRECATED: !TXT! Return tuples of id, value of Frontends"""
deprecated_method('interfaceItems')
return self.frontend_items()
security.declareProtected(permission_manage_frontends, 'interfaceIds')
def interfaceIds(self):
"""DEPRECATED: !TXT! Return ids of Frontends"""
deprecated_method('interfaceIds')
return self.frontend_paths()
security.declareProtected(permission_manage_frontends, 'getInterface')
def getInterface(self, interfaceId):
"""DEPRECATED: !TXT! Return the specified Frontend"""
deprecated_method('getInterface')
return self.get_frontend(interfaceId)
security.declareProtected(permission_manage_frontends, 'getInterfacePaths')
def getInterfacePaths(self):
"""DEPRECATED: !TXT! Return all Frontend object paths recursively"""
deprecated_method('getInterfacePaths')
return self.get_frontend_parents()
security.declareProtected(permission_manage_frontends, 'manage_interfacesBrowserForm')
def manage_interfacesBrowserForm(self, REQUEST=None):
"""DEPRECATED: !TXT! Frontends Form"""
deprecated_form('manage_interfacesStylesForm')
return self.frontends_form(self, REQUEST)
security.declareProtected(permission_manage_frontends, 'manage_interfacesNewForm')
def manage_interfacesNewForm(self, REQUEST=None):
"""DEPRECATED: !TXT! Add Frontend Form"""
deprecated_form('manage_interfacesNewForm')
return self.add_frontend_form(self, REQUEST)
security.declareProtected(permission_manage_frontends, 'manage_interfacesNew')
def manage_interfacesNew(self, REQUEST=None):
"""DEPRECATED: !TXT! Call specified Frontend's factory"""
deprecated_method('manage_interfacesNew')
return self.add_frontend_type(REQUEST)
# ------------------------------------------------------------------------
# Publisher Languages
# ------------------------------------------------------------------------
# Publisher Renderer
security.declareProtected(permission_publish_frontends, 'manage_interfacesRender')
def manage_interfacesRender(self, ids=[], REQUEST=None):
"""DEPRECATED: !TXT!"""
deprecated_method('manage_interfacesRender')
self.render_frontends(ids)
security.declareProtected(permission_publish_frontends, 'manage_interfacesRenderForm')
def manage_interfacesRenderForm(self, REQUEST=None):
"""DEPRECATED: !TXT! Render Frontends Form"""
deprecated_form('manage_interfacesRenderForm')
return self.renderer_form(self, REQUEST)
# ------------------------------------------------------------------------
# Publisher Widgets
security.declareProtected(permission_manage_frontends, 'getWidgetsForField')
def getWidgetsForField(self, formTypeId, fieldTypeId):
"""DEPRECATED: !TXT!"""
deprecated_method('getWidgetsForField')
return self.get_widgets_for_field(formTypeId, fieldTypeId)
# ------------------------------------------------------------------------
# Service Assistant
security.declareProtected(permission_manage, 'manage_aboutAssistantForm')
def manage_aboutAssistantForm(self, REQUEST=None):
"""DEPRECATED: !TXT! Assistant Form"""
deprecated_form('manage_aboutAssistantForm')
return self.assistant_form(self, REQUEST)
# ------------------------------------------------------------------------
# Service Community
# ------------------------------------------------------------------------
# Service Feedback
security.declareProtected(permission_zmi, 'manage_aboutFeedbackForm')
def manage_aboutFeedbackForm(self, REQUEST=None):
"""DEPRECATED: !TXT! Feedback Form"""
deprecated_form('manage_aboutFeedbackForm')
return self.feedback_form(self, REQUEST)
# ------------------------------------------------------------------------
# Service Help
# ------------------------------------------------------------------------
# Service Manual
security.declareProtected(permission_zmi, 'manage_aboutManualForm')
def manage_aboutManualForm(self, REQUEST=None):
"""DEPRECATED: !TXT! Manual Form"""
deprecated_form('manage_aboutManualForm')
return self.manual_form(self, REQUEST)
# ------------------------------------------------------------------------
# Service Reference
# ------------------------------------------------------------------------
# Service Release
security.declareProtected(permission_zmi, 'manage_aboutReleaseForm')
def manage_aboutReleaseForm(self, REQUEST=None):
"""DEPRECATED: !TXT! Release Form"""
deprecated_form('manage_aboutReleaseForm')
return self.release_form(self, REQUEST)
security.declareProtected(permission_zmi, 'manage_aboutVersion')
def manage_aboutVersion(self):
"""DEPRECATED: !TXT! Return the contents of the VERSION.txt file"""
deprecated_method('manage_aboutVersion')
return self.read_release_version_file()
security.declareProtected(permission_zmi, 'manage_aboutReadMe')
def manage_aboutReadMe(self):
"""DEPRECATED: !TXT! Return the contents of the README.txt file"""
deprecated_method('manage_aboutReadMe')
return self.read_release_readme_file()
security.declareProtected(permission_zmi, 'manage_aboutLicense')
def manage_aboutLicense(self):
"""DEPRECATED: !TXT! Return the contents of the LICENSE.txt file"""
deprecated_method('manage_aboutLicense')
return self.read_release_license_file()
# ------------------------------------------------------------------------
# System Events
# ------------------------------------------------------------------------
# System Integrity
security.declareProtected(permission_manage, 'manage_systemIntegrityForm')
def manage_systemIntegrityForm(self, REQUEST=None):
"""DEPRECATED: !TXT! Integrity Form"""
deprecated_form('manage_systemIntegrityForm')
return self.integrity_form(self, REQUEST)
# ------------------------------------------------------------------------
# System Plugins
security.declareProtected(permission_manage, 'manage_systemPluginsForm')
def manage_systemPluginsForm(self, REQUEST=None):
"""DEPRECATED: !TXT! Plugins Form"""
deprecated_form('manage_systemPluginsForm')
return self.plugins_form(self, REQUEST)
security.declareProtected(permission_manage, 'pluginIds')
def pluginIds(self, pluginTypes=[]):
"""DEPRECATED: !TXT! Return ids of installed MetaPublisher2 plugins"""
deprecated_method('pluginIds')
return map(lambda item: item[0], self.pluginItems(pluginTypes))
security.declareProtected(permission_manage, 'pluginItems')
def pluginItems(self, pluginTypes=[]):
"""DEPRECATED: !TXT! Return tuples of id, value of installed MetaPublisher2 plugins"""
deprecated_method('pluginItems')
result = []
if isinstance(pluginTypes, str):
pluginTypes = [pluginTypes, ]
elif len(pluginTypes) == 0:
pluginTypes = validPluginTypes
for plugin_type in pluginTypes:
for id, plugin in self.plugin_items():
if plugin.get('visibility', None) in pluginTypes:
result.append((id, plugin))
return result
security.declareProtected(permission_manage, 'pluginValues')
def pluginValues(self, pluginTypes=[]):
"""DEPRECATED: !TXT! Return values of installed MetaPublisher2 plugins"""
deprecated_method('pluginValues')
return map(lambda item: item[1], self.pluginItems(pluginTypes))
security.declareProtected(permission_manage, 'getPlugin')
def getPlugin(self, pluginType):
"""DEPRECATED: !TXT! Return the specified MetaPublisher2 plugin"""
deprecated_method('getPlugin')
return self.get_plugin(pluginType)
# ------------------------------------------------------------------------
# System Presets
security.declareProtected(permission_manage_presets, 'manage_presetsBrowserForm')
def manage_presetsBrowserForm(self, REQUEST=None):
"""DEPRECATED: !TXT! Presets Form"""
deprecated_form('manage_presetsBrowserForm')
return self.presets_form(self, REQUEST)
security.declareProtected(permission_save_presets, 'manage_presetsNewForm')
def manage_presetsNewForm(self, REQUEST=None):
"""DEPRECATED: !TXT! Add Preset Form"""
deprecated_form('manage_presetsNewForm')
return self.save_preset_form(self, REQUEST)
# ------------------------------------------------------------------------
# System Profiles
security.declareProtected(permission_manage, 'manage_systemProfilesForm')
def manage_systemProfilesForm(self, REQUEST=None):
"""DEPRECATED: !TXT! Profiles Form"""
deprecated_form('manage_systemProfilesForm')
return self.profiles_form(self, REQUEST)
# ------------------------------------------------------------------------
# System Settings
security.declareProtected(permission_manage, 'manage_systemSettingsForm')
def manage_systemSettingsForm(self, REQUEST=None):
"""DEPRECATED: !TXT! Settings Form"""
deprecated_form('manage_systemSettingsForm')
return self.settings_form(self, REQUEST)
# ------------------------------------------------------------------------
# System Tools
security.declareProtected(permission_manage, 'manage_systemToolsForm')
def manage_systemToolsForm(self, REQUEST=None):
"""DEPRECATED: !TXT! Tools Form"""
deprecated_form('manage_systemToolsForm')
return self.tools_form(self, REQUEST)
# ----------------------------------------------------------------------------
# Class Security
InitializeClass(HistoricalCompatibility)
# !!! historical.py - handle request vars (entryId instead of entry_id, etc.)
|
[
"sfl@luehnsdorf.com"
] |
sfl@luehnsdorf.com
|
3d8932d7ae7a374fb4a9079e524d480316a6f5d4
|
817ff801938d25776b2564b3087c8a3c674da1a7
|
/NUP153_AnalyseComplex/WT_Minimization/chainBCP/WT_chainBCP_Minimization_6.py
|
93b4c101aa994d29251e3c4bc96938fd94e7bad9
|
[] |
no_license
|
yanghaobojordan/HIV1-Capsid
|
b22e21a9ad530ae11f128f409e298c5ab68871ee
|
f44f04dc9886e660c1fe870936c48e0e5bb5adc6
|
refs/heads/main
| 2023-04-09T01:27:26.626676
| 2021-04-23T18:17:07
| 2021-04-23T18:17:07
| 360,968,418
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,838
|
py
|
from pyrosetta import *
from pyrosetta import PyMOLMover
from pyrosetta.toolbox import cleanATOM
from pyrosetta.toolbox import get_secstruct
from pyrosetta.teaching import *
from pyrosetta.toolbox import get_hbonds
from pyrosetta.toolbox import mutate_residue
from pyrosetta.rosetta.protocols.relax import *
from pyrosetta.rosetta.protocols.simple_moves import *
from pyrosetta.rosetta.core.fragment import *
from pyrosetta.rosetta.protocols.moves import *
from pyrosetta.rosetta.protocols.rigid import *
from pyrosetta.rosetta.protocols.docking import *
import sys
init()
def _log_fields(out, *fields):
    """Write *fields* to *out* as one space-separated line."""
    out.write(' '.join(str(f) for f in fields) + '\n')

def _run_phase(label, mover, rounds, mc, work_pose, ref_pose, scorefxn, out):
    """Run *mover* for *rounds* Monte Carlo iterations and log each round.

    Each round applies the mover, lets the MonteCarlo object accept or
    reject, and logs "<round> <score> <CA-RMSD-to-ref>".  Afterwards the
    lowest-scoring pose seen is recovered into *work_pose*, a summary is
    printed, and "<label> Complete <score>" is appended to *out*.
    """
    for i in range(rounds):
        mover.apply(work_pose)
        mc.boltzmann(work_pose)
        _log_fields(out, i, scorefxn(work_pose), CA_rmsd(ref_pose, work_pose))
    mc.recover_low(work_pose)
    print (label + ' Complete')
    print ('Lowest Score ', scorefxn(work_pose))
    print (mc.show_scores())
    print (mc.show_counters())
    print (mc.show_state())
    _log_fields(out, label + ' Complete', scorefxn(work_pose))

def main():
    """Repack and minimize the structure in the PDB named by sys.argv[1].

    Performs 20 rounds of side-chain repacking followed by 10000 rounds of
    dfpmin gradient minimization under Monte Carlo acceptance (kT=1), then
    writes the lowest-scoring pose to WT_chainBCP_Minimization_6.pdb and a
    score/RMSD trajectory to WT_chainBCP_Minimization_6.txt.
    """
    pose = pose_from_pdb(sys.argv[1])
    test = Pose()
    test.assign(pose)
    scorefxn = get_fa_scorefxn()
    dumpfile = 'WT_chainBCP_Minimization_6.pdb'
    txtfile = 'WT_chainBCP_Minimization_6.txt'
    # fix: use a context manager so the log is closed even if a mover raises
    with open(txtfile, "w") as newfile:
        _log_fields(newfile, scorefxn(test))
        kT = 1
        mc = MonteCarlo(test, scorefxn, kT)
        # Minimizer over all backbone and side-chain torsions.
        min_mover = MinMover()
        mm = MoveMap()
        mm.set_bb(True)
        mm.set_chi(True)
        min_mover.movemap(mm)
        min_mover.score_function(scorefxn)
        min_mover.min_type("dfpmin")
        min_mover.tolerance(0.001)
        # Rotamer repacking restricted to the input residue identities.
        task_pack = standard_packer_task(test)
        task_pack.restrict_to_repacking()
        task_pack.or_include_current(True)
        pack_mover = PackRotamersMover(scorefxn, task_pack)
        _run_phase('Repacking', pack_mover, 20, mc, test, pose, scorefxn, newfile)
        _run_phase('Minimization', min_mover, 10000, mc, test, pose, scorefxn, newfile)
        _log_fields(newfile, 'RMSD', CA_rmsd(pose, test))
    test.dump_pdb(dumpfile)
main()
|
[
"yanghaobojordan@gmail.com"
] |
yanghaobojordan@gmail.com
|
9f4a4daa608a920aafce684a30429bf510d9d867
|
b381b5ce79ec03e281cba7e6ea253b286205fba1
|
/openstack/map_reduce/v1/job.py
|
2ab8baab2c33c86bccb1719887437cb769f0ca27
|
[
"Apache-2.0"
] |
permissive
|
sun363587351/python-openstacksdk
|
972eedc24199c3b8a15bd21accd29a6ec70febdb
|
f9e055300b1c79637d7b6a791168427f27322d73
|
refs/heads/master
| 2020-12-02T19:23:01.771376
| 2017-07-05T13:24:47
| 2017-07-05T13:24:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,901
|
py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack import resource2 as resource
from openstack.auto_scaling import auto_scaling_service
from openstack.auto_scaling.v1 import get_next_marker
from openstack.map_reduce import map_reduce_service
class ExecutableJob(resource.Resource):
    """Executable Job

    Combined representation of a job and its job-execution parameters
    (the attribute sets of the two resource types differ, hence this
    merged request body).
    """
    #: Properties
    #: Job name
    job_name = resource.Body("job_name")
    #: Job type, supports: ``MapReduce``, ``Spark``, ``Hive``, ``hql``,
    #: ``DistCp``, ``SparkScript``, ``SparkSql``
    job_type = resource.Body("job_type")
    #: Path of the .jar package or .sql file for job execution
    jar_path = resource.Body("jar_path")
    #: Key parameter for job execution
    arguments = resource.Body("arguments")
    #: Path for inputting data which must start with ``/`` or ``s3a://``
    #: (attribute name shadows the builtin ``input``; kept for API parity)
    input = resource.Body("input")
    #: Path for outputting data, which must start with / or s3a://
    output = resource.Body("output")
    #: Path for storing job logs that record job running status.
    #: This path must start with / or s3a://
    job_log = resource.Body("job_log")
    #: Whether to delete the cluster after the jobs are complete
    shutdown_cluster = resource.Body("shutdown_cluster")
    #: Data import and export
    file_action = resource.Body("file_action")
    #: whether to submit the job when the cluster is ready.
    submit_job_once_cluster_run = resource.Body(
        "submit_job_once_cluster_run", type=bool)
    #: HiveQL statement
    hql = resource.Body("hql")
    #: SQL program path
    hive_script_path = resource.Body("hive_script_path")
    #: Reserved attribute, is job protected
    is_protected = resource.Body("is_protected", type=bool)
    #: Reserved attribute, is job public
    is_public = resource.Body("is_public", type=bool)
class Job(resource.Resource):
    """A stored Map-Reduce job definition (``/jobs`` resource)."""
    resource_key = "job"
    resources_key = "jobs"
    base_path = "/jobs"
    service = map_reduce_service.MapReduceService()

    # capabilities: full CRUD plus list; updates are sent as PATCH
    allow_create = True
    allow_update = True
    patch_update = True
    allow_list = True
    allow_get = True
    allow_delete = True

    _query_mapping = resource.QueryParameters(
        "sort_by"
    )

    #: Properties
    #: Job name
    name = resource.Body("name")
    #: Job type, supports: ``MapReduce``, ``Spark``, ``Hive``, ``hql``,
    #: ``DistCp``, ``SparkScript``, ``SparkSql``
    type = resource.Body("type")
    #: A list of programs to be executed by the job
    mains = resource.Body("mains", type=list)
    #: A list of job-binaries required by the job
    libs = resource.Body("libs", type=list)
    #: Reserved attribute, user customer interfaces
    interface = resource.Body("interface", type=list)
    #: Job description
    description = resource.Body("description")
    #: Reserved attribute, is job protected
    is_protected = resource.Body("is_protected", type=bool)
    #: Reserved attribute, is job public
    is_public = resource.Body("is_public", type=bool)
    #: UTC date and time of the job created time
    created_at = resource.Body("created_at")
    #: UTC date and time of the job last updated time
    updated_at = resource.Body("updated_at")
    #: The tenant this job belongs to
    tenant_id = resource.Body("tenant_id")
|
[
"iampurse@vip.qq.com"
] |
iampurse@vip.qq.com
|
dbbb78de7586a3fd69f564f7384cb29ca7f56999
|
5793b470eea39ba99ff4d16325d462440647b77d
|
/System/Threads/thread-count.py
|
e0c36329cec628396bc4ee866d2c6f9daca41c6c
|
[] |
no_license
|
zhongjiezheng/python
|
01a99438fc4681817d4d0e623673afa1e488864c
|
5c5725ad0e75d07e016b64d79eddf3d88a524fa0
|
refs/heads/master
| 2021-01-17T11:57:12.373343
| 2015-07-08T09:04:48
| 2015-07-08T09:04:48
| 35,549,549
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 501
|
py
|
"""
thread basics: start 5 copies of a function running in parallel;
uses time.sleep so that the main thread doesn't die too earlly--
this kills all other threads on some platforms; stdout is shared:
thread output may be intermixd in this version arbitrarily.
"""
import _thread as thread, time
def counter(myId, count):
    """Print `count` numbered status lines tagged with this thread's id,
    pausing one second before each line so the threads' output interleaves."""
    tick = 0
    while tick < count:
        time.sleep(1)
        print('[%s] => %s' % (myId, tick))
        tick += 1
# Spawn five counter threads, each printing five ticks one second apart.
for i in range(5):
    thread.start_new_thread(counter,(i, 5))

# Sleep past the workers' ~5 seconds of output; exiting the main thread
# early can kill the remaining threads on some platforms (see docstring).
time.sleep(6)
print('Main thread exiting.')
|
[
"zzjie1991@gmail.com"
] |
zzjie1991@gmail.com
|
3d58e2a7aa91b20b65c15e42b5bcc9ed7e707215
|
04e58ca58d96355e6439d60b3b632b36d82b1855
|
/Framestore/main.py
|
329c156972b969d3c251791f6283428086b900dd
|
[] |
no_license
|
tommyhooper/legacy_python_utils
|
325f2b7590e30ec2248f9d4159fb4e824307e88e
|
a8734fa74171d928b08cfe783fa0e02ac6185a4d
|
refs/heads/master
| 2020-12-05T13:05:00.809230
| 2020-02-13T21:11:16
| 2020-02-13T21:11:16
| 66,403,853
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 22,054
|
py
|
#!/usr/bin/env python
import glob
import os
import stat
import sys
import time
import re
import traceback
import Queue
import threading
import commands
from datetime import datetime
from wiretap import Wiretap
from A52.utils import print_array
from A52.utils import diskutil
from A52.utils import numberutil
from A52.utils import fileutil
from A52.dlLibrary import dlLibrary
class FramestoreException(Exception):
    """Raised for framestore-specific failures (e.g. a missing mount path)."""
    pass
class Framestore(object,Wiretap):
    """
    Main class to manipulate the Framestore class
    """
    # Static map: host -> partition number -> stone label and SAN mount path.
    FRAMESTORES = { 'conform01':{ 4: {
                        'name':'SANSTONE08',
                        'path':'/Volumes/F5412SATA02/conform01/p4'
                        }
                    },
                    'flame01': { 4: {
                        'name':'SANSTONE01',
                        'path':'/Volumes/F6500SAS01/flame01/p4'
                        },
                        5: {
                        'name':'SANSTONE10',
                        'path':'/Volumes/F6500SAS02/flame01/p5'
                        }
                    },
                    'flame02': { 4: {
                        'name':'SANSTONE02',
                        'path':'/Volumes/F6412SAS05/flame02/p4'
                        },
                        5: {
                        'name':'SANSTONE09',
                        'path':'/Volumes/F6500Rental02/flame02/p5'
                        }
                    },
                    'flame03': { 4: {
                        'name':'SANSTONE03',
                        'path':'/Volumes/F6412SAS04/flame3/p4'
                        }
                    },
                    'flame04': { 4: {
                        'name':'SANSTONE04',
                        'path':'/Volumes/F6412SAS03/flame04/p4'
                        }
                    },
                    'flame06': { 4: {
                        'name':'SANSTONE11',
                        'path':'/Volumes/DH3723SAS01/flame06/p4'
                        }
                    },
                    'smack01': { 4: {
                        'name':'SANSTONE06',
                        'path':'/Volumes/F5412SATA01/smack01/p4'
                        }
                    },
                    'smoke01': { 4: {
                        'name':'SANSTONE05',
                        'path':'/Volumes/F6412SAS02/smoke01/p4'
                        },
                        5: {
                        'name':'SANSTONE07',
                        'path':'/Volumes/F6500Rental01/smoke01/p5'
                        }
                    },
    }
    def __init__(self,host,partition):
        # run the wiretap init
        #super(Framestore,self).__init__()
        self.host = host
        self.partition = partition
        self.volume = 'stonefs%d' % partition
        self.name = self.FRAMESTORES[host][partition]['name']
        self.path = self.FRAMESTORES[host][partition]['path']
        self.locks = {}
        # stat objects for each project:
        self.pstats = {}
        self.pstat_totals = {
            'frames_self':0,
            'frames_shared':0,
            'frames_total':0,
            'bytes_self':0,
            'bytes_shared':0,
            'bytes_total':0,
            'dsp_bytes_self':'',
            'dsp_bytes_shared':'',
            'dsp_bytes_total':'',
            }
        # stat dictionaries for project groups
        self.pstat_groups = {}
    def __getattr__(self,name):
        # Disk-usage attributes are computed lazily on first access.
        # NOTE(review): format_df() is not defined in this class's visible
        # code -- presumably provided by Wiretap or a mixin; confirm.  It is
        # expected to set the attribute, otherwise this recurses.
        if name == 'bytes_used':
            self.format_df()
            return self.bytes_used
        if name == 'bytes_free':
            self.format_df()
            return self.bytes_free
        if name == 'bytes_total':
            self.format_df()
            return self.bytes_total
        if name == 'dsp_bytes_used':
            self.format_df()
            return self.dsp_bytes_used
        if name == 'dsp_bytes_free':
            self.format_df()
            return self.dsp_bytes_free
        if name == 'dsp_bytes_total':
            self.format_df()
            return self.dsp_bytes_total
        if name == 'percent_used':
            self.format_df()
            return self.percent_used
        if name == 'fraction_used':
            self.format_df()
            return self.fraction_used
        # fall back to the Wiretap attribute machinery
        return super(Framestore,self).__getattr__(name)
        #message = "'%s' has no attribute %s" % (__name__,name)
        #raise AttributeError,message
    def scan_libraries(self):
        """
        Scan libraries and sync them
        to the database.
        """
        # build a list of libraries
        # found on the filesystem
        fs_list = []
        host = self.host
        volume = self.volume
        proj_home = '/hosts/%s/usr/discreet/clip/%s' % (host,volume)
        projects = glob.glob('%s/*' % proj_home)
        for project in projects:
            project_name = os.path.basename(project)
            # scan for libraries
            libs = glob.glob('%s/*.000.clib' % (project))
            for lib in libs:
                library_name = os.path.basename(lib)
                mod_date = fileutil.mod_date(lib)
                fs_list.append((project_name,library_name,mod_date))
        # build an array of libraries
        # found in the db
        db_array = dlLibrary.get_libraries(host=host,volume=volume)
        # reset the refcounts
        dlLibrary.reset_refcounts(host,volume)
        # insert / update db records
        for project,lib,mod_date in fs_list:
            if not dlLibrary.is_excluded(lib):
                try:
                    # existing record: just refresh its mod date
                    uid = db_array[project][lib]['uid']
                    dlLibrary.update_mod_date(uid,mod_date)
                except:
                    # missing from the db: create it with refcount 1
                    obj = dlLibrary( host=host,
                            volume=volume,
                            project=project,
                            name=lib,
                            date_modified=mod_date,
                            refcount=1)
                    obj.save()
        # purge zero refcount libraries
        dlLibrary.purge_refcounts(host,volume)
    def get_libraries(self,dl_project_name,source='wiretap'):
        """
        Get the libraries for 'dl_project' on the framestore
        """
        # NOTE(review): 'source' is accepted but unused here.
        parent = "%s/%s" % (self.volume,dl_project_name)
        return self._get_children(node=parent,node_type='LIBRARY')
    def get_users(self,source='wiretap'):
        """
        Get the users on the framestore
        if source is 'db' get the users from the db
        """
        # NOTE(review): 'source' is accepted but unused; only wiretap is
        # ever queried here.
        editing = self._get_children(node='/%s/users/editing' % self.volume,node_type='USER')
        effects = self._get_children(node='/%s/users/effects' % self.volume,node_type='USER')
        users = {'editing':{},'effects':{}}
        for i in editing:
            users['editing'][i] = editing[i]
        for i in effects:
            users['effects'][i] = effects[i]
        return users
    def get_projects(self,source='wiretap'):
        """
        Get the projects on the framestore
        """
        if source == 'wiretap':
            return self._get_children(node=self.volume,node_type='PROJECT')
        elif source == 'db':
            # in order to find the current projects from the db we
            # need to use the dl_libraries table since that is
            # kept up to date
            #return dlProject.find(framestore_uid=self.data['uid'])
            return dlLibrary.get_current_projects(host=self.host,volume=self.volume)
    def find_project(self,dl_project_name):
        """
        Get the projects on the framestore
        """
        # Linear scan of the PROJECT nodes for an exact name match;
        # returns None when not found.
        nodes = self._get_children(node=self.volume,node_type='PROJECT')
        if type(nodes) is tuple:
            return nodes
        projects = []
        for i in nodes:
            if nodes[i]['name'] == dl_project_name:
                return nodes[i]
        return None
    def find_user(self,category,user):
        """
        Find 'user' on the framestore.
        Searches under both categories
        ('effects' and 'editing')
        """
        # NOTE(review): despite the docstring, only the passed 'category'
        # branch is searched.
        parent = "%s/users/%s" % (self.volume,category)
        print "\nSearching for %s on /%s/%s %s\n" % (user,self.host,parent,'USER')
        nodes = self._get_children(node=parent,node_type='USER')
        if type(nodes) is tuple:
            return nodes
        for i in nodes:
            if nodes[i]['name'] == user:
                print "Found user %s" % nodes[i]['name']
                return nodes[i]
        return None
    def create_user(self,user_category,user,xmlstream):
        """
        Create a user on the given framestore
        """
        # create the user node
        parent = "/%s/users/%s" % (self.volume.strip('/'),user_category)
        return self._create_node(parent,'USER',user,xmlstream=xmlstream)
    def create_project(self,dl_project_name,xmlstream):
        """
        Create a project on the given framestore
        """
        # create the project node
        return self._create_node(self.volume,'PROJECT',dl_project_name,xmlstream=xmlstream)
    def create_library(self,dl_project_name,library):
        """
        Create a library 'library' for the project 'dl_project_name' on this framestore
        """
        # NOTE(review): this definition is immediately shadowed by the
        # duplicate create_library below (which prefixes the parent with
        # '/'); only the second one is ever callable.
        # create the project node
        parent = "%s/%s" % (self.volume,dl_project_name)
        return self._create_node(parent,'LIBRARY',library)
    def create_library(self,dl_project_name,library):
        """
        Create a library for the given project.
        If the library exists nothing new will happen.
        """
        parent = "/%s/%s" % (self.volume,dl_project_name)
        return self._create_node(parent,'LIBRARY',library)
    def df(self,mount_base=None):
        """
        Get the disk free for this framestore.
        'mount_base' can be specified if the df
        is going through a different mount point
        such as an automount path.
        e.g. /hosts/meta01/Volumes/...
        """
        # NOTE(review): reads/writes self.data, which __init__ above never
        # creates -- presumably supplied by Wiretap; confirm.
        # mount name
        if mount_base:
            mount_dir = "/%s/Volumes/%s" % (mount_base,self.data['mount_name'])
        else:
            mount_dir = "/Volumes/%s" % (self.data['mount_name'])
        if not os.path.exists(mount_dir):
            raise FramestoreException,"Mount path does not exist: %s" % mount_dir
        df = diskutil.df(mount_dir)
        self.data['bytes_total'] = df['bytes_total']
        self.data['bytes_free'] = df['bytes_free']
        return df
    #@staticmethod
    def get_stones(host=None):
        """
        Get the stonefs nodes listed by the wiretap server
        for the host: 'host'.
        If 'host' is None all known hosts will be
        listed.
        NOTE: not all nodes will be valid.
        """
        # NOTE(review): Framestore() / Framestore(host=...) do not satisfy
        # __init__(self,host,partition) above -- this legacy static helper
        # appears broken against the current constructor; confirm before use.
        fs = Framestore()
        objs = []
        if host:
            stones = fs._get_children()
            for stone in stones:
                cls = Framestore(host=host)
                for key in stones[stone]:
                    if key == 'name':
                        cls.data['volume'] = stones[stone][key]
                objs.append(cls)
        return objs
    get_stones = staticmethod(get_stones)
    def du(self,verbose=False):
        # Delegate to the filesystem-based disk-usage scan.
        self._du_fs(verbose=verbose)
        #return self._du_db()
    def _du_fs(self,verbose=False):
        """
        Estimate the 'total' size of each 'production' project on
        the framestore. This involves analyzing every user project
        for a specific project in one group.
        """
        frame_map = FrameMap(Framestore=self)
        dlprojects = self.get_projects()
        for i,dlp in dlprojects.iteritems():
            # NOTE(review): hard-coded to a single project -- looks like
            # leftover debugging; confirm before relying on this scan.
            if dlp['name'] == '12A142--Visa_100_Jesse':
                #print "PROJECT:",dlp['name']
                frame_map.add_project(dlp['name'])
#               frame_map.add_project(dlp['name'])
        start = datetime.today()
        frame_map.poll(verbose=verbose)
        end = datetime.today()
        delta = end-start
        #print "DELTA:",delta
    def get_clips(self,node):
        """
        Get clips out of a library node.
        """
        # Recurse into REEL children; collect CLIP children.
        clips = []
        children = self._get_children(node)
        for x,y in children.iteritems():
            if y['type'] == 'REEL':
                clips.extend(self.get_clips(y['node']))
            elif y['type'] == 'CLIP':
                clips.append(y)
        return clips
    def get_refcount(self):
        """
        Analyze the ref counts and see
        who has a lock on the framestore.
        """
        # NOTE(review): 'reflog' and 'date' are computed but unused, and the
        # .ref.rec file handle is never closed.
        reflog = '/hosts/%s/usr/discreet/clip/%s/.ref.log' % (self.data['host'],self.data['volume'])
        rec = '/hosts/%s/usr/discreet/clip/%s/.ref.rec' % (self.data['host'],self.data['volume'])
        locks = {}
        f = open(rec)
        for line in f.readlines():
            host,hexid,pid,utime,count,name = line.split()
            date = datetime.fromtimestamp(float(utime))
            if not locks.has_key(hexid):
                locks[hexid] = { 'name':name,
                        'host':host,
                        'count':int(count),
                        'pid':pid,
                        'log':{utime:int(count)}
                        }
            else:
                # accumulate counts per hexid and keep a per-timestamp log
                locks[hexid]['count']+= int(count)
                locks[hexid]['log'][utime] = int(count)
        self.locks = locks
        return self.locks
    def show_locks(self):
        """
        Show the current locks on this framestore.
        """
        # get or update the current ref count
        self.get_refcount()
        if self.is_locked():
            print "\n%-28s%-16s%12s" % ('Locks on %s/%s' % (self.data['host'],self.data['volume']),'host','PID')
            print "-"*56
            for hexid,info in self.locks.iteritems():
                if info['count'] >=1:
                    print "%-28s%-16s%12s" % (info['name'],info['host'],str(info['pid']).lstrip('0'))
            print "-"*56
        else:
            print "\nNo locks on %s/%s" % (self.data['host'],self.data['volume'])
    def is_locked(self):
        """
        Return True if the framestore has an active lock
        and False if not.
        """
        self.get_refcount()
        for hexid,info in self.locks.iteritems():
            if info['count'] >=1:
                return True
        return False
class StatThread(threading.Thread):
    """Worker thread that stats queued MappedFrame objects until drained."""
    def __init__(self,queue):
        threading.Thread.__init__(self)
        self.queue = queue
    def run(self):
        # Consume frames forever; the creator daemonizes this thread, so it
        # dies with the main thread once the queue has been drained.
        while True:
            frame_obj = self.queue.get()
            frame_obj.framestat()
            #print " Stat:%s\r" % (frame_obj.path)
            self.queue.task_done()
class FrameMap:
    """
    A full or partial representation
    of the media cache on a Framestore.
    """
    def __init__(self,Framestore):
        self.Framestore = Framestore
        # frame path -> MappedFrame, shared across all MappedProjects
        self.frames = {}
        self.projects = []
        self.tree = {}
        self.frame_count_self = 0
        self.frame_count_shared = 0
        self.bytes_self = 0
        self.bytes_shared = 0
        self.bytes_total = 0
    def __getattr__(self,name):
        # Derived totals are computed on demand from the running counters.
        if name == 'total_frames':
            return self.frame_count_self + self.frame_count_shared
        if name == 'total_bytes':
            return self.bytes_self + self.bytes_shared
        if name == 'dsp_bytes':
            total_bytes = self.bytes_self + self.bytes_shared
            return numberutil.humanize(total_bytes,scale='bytes')
        if name == 'dsp_bytes_self':
            return numberutil.humanize(self.bytes_self,scale='bytes')
        if name == 'dsp_bytes_shared':
            return numberutil.humanize(self.bytes_shared,scale='bytes')
        message = "'%s' has no attribute %s" % (__name__,name)
        raise AttributeError,message
    def add_project(self,project):
        """
        Add a MappedProject to the FrameMap
        which will be polled for frame
        counts and sizes.
        """
        # see if we already have this project in the map
        if len([p for p in self.projects if p.full_project_name == project]) > 0:
            return
        # add a MappedProject to this FrameMap
        obj = MappedProject(project,self.Framestore,self)
        self.projects.append(obj)
    def poll(self,verbose=False):
        """
        Get framecounts from the wiretap server
        and get stats on each frame discovered.
        """
        for project in self.projects:
            project.du(verbose=verbose)
        # collect stats for shared vs unique frames
        self.calculate_sizes(verbose=verbose)
        # NOTE(review): self.bytes_total is never incremented anywhere in
        # this class (calculate_sizes only updates bytes_shared/bytes_self
        # and the per-project totals), so this first line prints 0; confirm.
        print "\n\nTotal FrameMap size:",numberutil.humanize(self.bytes_total,scale='bytes')
        print "Total Shared FrameMap size:",numberutil.humanize(self.bytes_shared,scale='bytes')
        print "Total Self FrameMap size:",numberutil.humanize(self.bytes_self,scale='bytes')
        print "\n\n"
        if verbose:
            for project in self.projects:
                print "Project:",project.full_project_name
                print "  bytes total:",numberutil.humanize(project.bytes_total,scale='bytes')
                print "  bytes shared:",numberutil.humanize(project.bytes_shared,scale='bytes')
                print "  bytes self:",numberutil.humanize(project.bytes_self,scale='bytes')
                print "  frames self:",project.frame_count_self
                print "  frames shared:",project.frame_count_shared
                print "  frames total:",len(project.frames)
        # store the results in the db
        #self.store_results()
    def get_frame_stats(self):
        """
        Get frame stats from doing a
        series of long listings in the
        media cache directory
        """
        # NOTE(review): body is stubbed out -- only prints the path.
        path = self.Framestore.path
        print "PATH:",path
#       for _dir in glob.glob("%s/*" % path):
#           for frame in commands.getoutput('ls -lf %s' % _dir):
#               print ">",frame
        pass
    def get_frame_stats_threaded(self,threads=12):
        """
        Get the framestats using the
        StatThread to speed things up.
        """
        self.queue = Queue.Queue()
        # spawn the threads
        for i in range(threads):
            print "Starting thread",i
            t = StatThread(self.queue)
            t.setDaemon(True)
            t.start()
        # get frame stats and calculate
        # the master framesize
        for frame in self.frames.values():
            self.queue.put(frame)
        ttl = self.queue.qsize()
        # busy-wait progress display while the workers drain the queue
        while not self.queue.empty():
            crnt = ttl - self.queue.qsize()
            pct = int(round(crnt/float(ttl)*100))
            print "  %d of %d [%d%%]\r" % (crnt,ttl,pct),
            sys.stdout.flush()
        print "Queue is empty"
#       self.queue.join()
#       i = 1
#       count = len(self.frames.values())
#       for frame in self.frames.values():
#           frame.framestat()
#           pct = int(round(i/float(count)*100))
#           if verbose:
#               print "  %d of %d [%d%%]\r" % (i,count,pct),
#               sys.stdout.flush()
#           self.bytes_total+=frame.st_size
#           i+=1
    def calculate_sizes(self,verbose=False):
        """
        Add up all of the frame sizes
        for each project stored in self.projects
        separated by shared vs unique.
        """
        start = time.time()
        print "  Getting frame sizes..."
        #self.get_frame_stats()
        #print "DONE: Elapsed time:",time.time() - start
        print "\n  Calculating project sizes..."
        for frame in self.frames.values():
            if frame.refcount > 1:
                self.bytes_shared+=frame.st_size
                # get the master(s) for the projects
                # this frame is referenced to.
                # if this frame goes cross-project it
                # will have more than one master.
                # if there is no master for a project
                # then the project is it's own master
                masters = []
                for project in frame.projects:
                    master = self.find_master(project)
                    if master not in masters:
                        masters.append(master)
                for master in masters:
                    master.frame_count_shared+=1
                    master.bytes_shared+=frame.st_size
            else:
                self.bytes_self+=frame.st_size
                project = frame.projects[0]
                project.frame_count_self+=1
                project.bytes_self+=frame.st_size
            # add this frame size to the running project
            # total for each project
            for project in frame.projects:
                project.bytes_total+=frame.st_size
        return
    def find_master(self,mapped_project):
        """
        Find the master for the given MappedProject object.
        """
        # Returns None implicitly when no master shares the project's key.
        if mapped_project.master:
            return mapped_project
        for master in [p for p in self.projects if p.master]:
            if master.key == mapped_project.key:
                return master
class MappedProject:
    """
    A project inside a FrameMap.
    Holds mapped frames associated with the project.
    """
    def __init__(self,project,Framestore,FrameMap):
        self.full_project_name = project
        self.Framestore = Framestore
        self.FrameMap = FrameMap
        self.frames = {}
        self.master = False
        self.parse_project(project)
        self.frame_count_self = 0
        self.frame_count_shared = 0
        self.bytes_self = 0
        self.bytes_shared = 0
        self.bytes_total = 0
    def parse_project(self,project):
        """
        Split up the project into it's
        3 parts: job_number, project_name, extension
        The 'job_key' is the job_number and project_name
        (no extension). This is also the project 'group'
        that subprojects (user projects) fall into.
        """
        # the regx we're going to use to identify
        # project names. I could pull the name out
        # of the dl_projects table but this grouping
        # is somewhat arbitrary and if someone happens
        # to create a project name manually I want to
        # catch it. (I also avoid import issues with
        # the dlProject class since it is already
        # importing the framestore class)
        regx = re.compile('.*([0-9]{2}[A-Z][0-9]{3})[-_]*(.*)(_[A-Z,a-z]*)$')
        try:
            job_num,name,ext = regx.search(project).groups()
            project_key = "%s-%s" % (job_num,name)
        except:
            # anything that doesn't match the job naming scheme is 'misc'
            job_num,name,ext = ('misc',project,None)
            project_key = "misc"
        # set the 'master' flag if this project is a master
        if project[-7:] == '_MASTER':
            self.master = True
        self.job_number = job_num
        self.project_name = name
        self.extension = ext
        self.key = project_key
    def du(self,verbose=False):
        """
        Collect the unique frames from a project
        then calculate the size
        NOTE: ** experimental **
        Not sure how accurate the size estimate will be since it's
        difficult to tell if the API is giving us all the frames
        """
        dl_project = self.full_project_name
        if verbose: print "[41mP[m %s" % dl_project
        libraries = self.Framestore.get_libraries(dl_project)
        for i,lib in libraries.iteritems():
            if verbose: print "  [42mL[m Getting clips for: %s" % lib['name']
            for clip in self.Framestore.get_clips(lib['node']):
                frame_count = self.Framestore._get_frame_count(clip['node'])
                # NOTE: pulling the metadata for still frames seems
                # to crash the wiretapd so we'll skip them for now.
                if frame_count < 2:
                    continue
                if verbose:
                    print "    [44mC[m Getting %s frames for: %s" % (frame_count.__int__(),clip['name'])
                try:
                    _frames = self.Framestore.get_frames(clip['node'],stream='DMXEDL')
                    #_frames = self.Framestore.get_frames(clip['node'],stream='XML')
                except Exception,error:
                    # clips in Lost_+_Found are apparently
                    # not wiretap friendly
                    if lib['name'] != 'Lost_+_Found':
                        print "get_frames failed for %s" % clip['name']
                        for k,v in clip.iteritems():
                            print "\t%s: %s" % (k,v)
                        print "Error:",str(error)
                except KeyboardInterrupt:
                    raise
                else:
                    for frame in _frames:
                        self.add_frame(frame)
    def add_frame(self,frame):
        """
        Add a frame to this mapped project
        and also to the master frame list
        """
        # if this frame is not already in the master
        # frame map, add it.
        if not self.FrameMap.frames.has_key(frame):
            self.FrameMap.frames[frame] = MappedFrame(frame,self)
        else:
            # if the frame is already in the master map
            # then add this project to the MappedFrame
            self.FrameMap.frames[frame].add_project(self)
        # store the MappedFrame in this MappedProject as well
        if not self.frames.has_key(frame):
            frame_obj = self.FrameMap.frames[frame]
            self.frames[frame] = frame_obj
            if len(frame_obj.projects) == 1:
                self.frame_count_self+=1
            elif len(frame_obj.projects) > 1:
                self.frame_count_shared+=1
class MappedFrame:
    """
    A frame inside of a FrameMap.
    Stores information about an individual
    frame in the media cache like size
    and # of hardlinks etc...
    """
    def __init__(self,path,MappedProject):
        self.path = path
        self.st_size = 0
        # NOTE(review): initialized as 'nlink' but framestat() below sets
        # 'st_nlink' -- the attribute names do not match; confirm intent.
        self.nlink = 0
        self.projects = [MappedProject]
    def __getattr__(self,name):
        # refcount == number of projects referencing this frame
        if name == 'refcount':
            return len(self.projects)
        message = "'%s' has no attribute %s" % (__name__,name)
        raise AttributeError,message
    def add_project(self,MappedProject):
        """
        Adds a project to the list
        of projects that reference
        this frame.
        """
        if MappedProject.full_project_name not in [n.full_project_name for n in self.projects]:
            self.projects.append(MappedProject)
    def framestat(self,verbose=False):
        """
        Get frame stats
        """
        if verbose: print "\t\t\t[43mF[m %s\r" % self.path,
        try:
            stat = fileutil.stat(self.path)
            self.st_size = stat.st_size
            self.st_nlink = stat.st_nlink
        except:
            # best-effort: leave st_size at 0 when the stat fails
            if verbose: print "\t\t\t[41mERROR[m: could not get size for %s" % self.path
        if verbose:print
if __name__ == '__main__':
    # Ad-hoc manual test driver; alternative invocations are left
    # commented out below for reference.
    #f = Framestore.find(host='flame03',volume='stonefs4')[0]
    #fs = Framestore.find(status='active')
    fs = Framestore('flame01',4)
    # print fs.get_projects()
    fs.du(verbose=True)
    # fs.du(source='db',verbose=True)
    # print fs.pstats
    # a = fs.scan_libraries()
    # s4 = fs.get_projects()
    # print "\nSTONEFS5"
    # for i,p in s4.iteritems():
    # print "\t",p
    # f = Framestore.find(uid=10)[0]
    # fs = Framestore.find(status='active')
    # for f in fs:
    # f.sync_libraries()
    # f.show_locks()
    pass
|
[
"tommy.hooper@a52.com"
] |
tommy.hooper@a52.com
|
54d19de6307807e13be2af90d2467614f5e2ac64
|
be8f53156948411266926ea17fb6d9f22722e877
|
/Orders/logic/order_logic.py
|
aae666f78186d805f06717e73d90685fa119766c
|
[] |
no_license
|
vrusi/booking-backend
|
ba38932df2d90c257a2ce438123e4c1c17156c7d
|
d4ef6871fbc48fdbd86370ca404f1769f17dec1a
|
refs/heads/master
| 2023-04-12T07:08:21.756997
| 2021-04-29T23:56:59
| 2021-04-29T23:56:59
| 353,716,378
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,468
|
py
|
from Orders.models.models import *
from Orders.parsers.order_parser import OrderParser
class OrderLogic:
    """Business logic for reading and mutating a user's orders."""

    parser = OrderParser()

    def get_list_orders(self, user):
        """Return every order booked by *user* in API form."""
        try:
            queryset = Order.objects.filter(booked_by=user)
        except Exception as e:
            raise e
        return [self.parser.parse_order(o) for o in queryset]

    def get_order(self, user, order_id):
        """Return a single order belonging to *user* in API form."""
        try:
            match = Order.objects.filter(booked_by=user, id=order_id)[0]
        except Exception as e:
            raise e
        return self.parser.parse_order(match)

    def create_order(self, user, acc_id):
        """Book accommodation *acc_id* for *user*."""
        try:
            Order.objects.create(acc_booked_id=acc_id, booked_by=user)
        except Exception as e:
            raise e

    def update_order(self, user, order_id, data):
        """Re-point an order at the accommodation given in data['acc_id']."""
        try:
            match = Order.objects.filter(booked_by=user, id=order_id)[0]
        except Exception as e:
            raise e
        match.acc_booked_id = data['acc_id']
        match.save()
        return self.parser.parse_order(match)

    def delete_order(self, user, order_id):
        """Remove *user*'s order identified by *order_id*."""
        try:
            match = Order.objects.filter(booked_by=user, id=order_id)[0]
        except Exception as e:
            raise e
        match.delete()
|
[
"me@vrusinkova.com"
] |
me@vrusinkova.com
|
44a276ef4ae63143025b692f8fde27074f5976e9
|
dc723ba04d723b077a723bebb02813b8009f6e9e
|
/system/services/frontend/infrastructure/healthcheck-scheduler/job.py
|
490be2831c4e6c2e3dfb7ff1382c5c0d4ba14f38
|
[] |
no_license
|
ngelsevier/preprint
|
e33247cb589d3de505f219d0242a3738d5455648
|
0a6a57bc962c29e277f105070977867280381d85
|
refs/heads/master
| 2020-03-20T23:42:53.825949
| 2018-06-19T08:06:36
| 2018-06-19T08:06:36
| 137,859,617
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,054
|
py
|
import requests
SEARCH_STRING = 'Effects of the Real Plan on the Brazilian Banking System'
EXPECTED_RESULT_STRING = '<em>Effects</em> <em>of</em> <em>the</em> <em>Real</em> <em>Plan</em> <em>on</em> ' + \
'<em>the</em> <em>Brazilian</em> <em>Banking</em> <em>System</em>'
def lambda_handler(event, context):
    """Health-check SSRN fast search by querying for a known paper.

    Raises Exception when the HTTP status is not 200, when the response has
    no paper results section, or when the expected highlighted title is not
    present in the results.  *event* and *context* (Lambda invocation
    arguments) are unused.
    """
    # NOTE(security): credentials are hard-coded in source; move them to
    # environment variables or a secrets store.
    response = requests.get('https://www.ssrn.com/n/fastsearch',
                            params={'query': SEARCH_STRING},
                            auth=('ssrn-els', 'gEP8FuBY'))

    if response.status_code != 200:
        raise Exception('Expected response code was 200, but got "{actual_status_code}"'
                        .format(actual_status_code=response.status_code))

    # fix: str.index raised a bare ValueError when the results section was
    # missing entirely; fail with the same descriptive message instead.
    marker_pos = response.text.find('class="paper"')
    if marker_pos == -1:
        raise Exception('Unable to find known paper with "{search_string}"'
                        .format(search_string=SEARCH_STRING))

    extractedResult = response.text[marker_pos:]

    if EXPECTED_RESULT_STRING not in extractedResult:
        raise Exception('Unable to find known paper with "{search_string}"'
                        .format(search_string=SEARCH_STRING))
if __name__ == '__main__':
    # Allow running the health check locally, outside the Lambda runtime.
    lambda_handler(None, None)
|
[
"r.ng@elsevier.com"
] |
r.ng@elsevier.com
|
371c7cef5ddbf0037f98ef6c94c7bbf264689628
|
97f79eb45dbc420f6d37f7c13f2c32011aa2a0fe
|
/flickr/models.py
|
be01fd2ab9ee0a6e42e961cfe5a0c6e7e453e5e1
|
[] |
no_license
|
pfig/feedify
|
312264a8de58ef0411aee215c34e7486c3819977
|
dc87f0e0fec1cb647750f7f14917c965f7d2635e
|
refs/heads/master
| 2021-01-16T20:59:49.224137
| 2012-11-27T22:50:29
| 2012-11-27T22:50:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,843
|
py
|
from django.db import models, IntegrityError
from django.conf import settings
from django.core.cache import cache
import urlparse
import urllib
import datetime
import oauth2
import uuid
import json
import time
EXTRAS = "date_upload,date_taken,owner_name,icon_server,original_format,description,geo,tags,machine_tags,o_dims,media,path_alias,url_t,url_s,url_m,url_z,url_l,url_o"
class FlickrException(Exception):
    """Error returned by the Flickr API.

    Carries the numeric Flickr error *code* alongside the human-readable
    *message*.
    """
    def __init__(self, code, message):
        self.code = code
        # fix: store the message explicitly instead of relying on
        # BaseException.message, which is deprecated since Python 2.6 and
        # removed in Python 3 -- __unicode__ below reads self.message.
        self.message = message
        super(FlickrException, self).__init__(message)

    def __unicode__(self):
        return u"%s: %s" % (self.code, self.message)
class RequestToken(models.Model):
    """OAuth 1.0a temporary (request) token issued during authorization."""
    key = models.CharField(max_length=100, null=False, blank=False, unique=True)
    secret = models.CharField(max_length=100, null=False, blank=False)
    created = models.DateTimeField(default=datetime.datetime.utcnow)
    def __str__(self):
        """Serialize as a URL-encoded oauth_token/oauth_token_secret pair."""
        data = {"oauth_token": self.key, "oauth_token_secret": self.secret}
        return urllib.urlencode(data)
    @classmethod
    def from_string(cls, string):
        """Create (or fetch an existing) token from a provider response body.

        *string* is the query-string-formatted body returned by the OAuth
        request-token endpoint.
        """
        data = dict(urlparse.parse_qsl(string))
        try:
            return cls.objects.create(key = data["oauth_token"], secret = data["oauth_token_secret"])
        except IntegrityError:
            # token already stored (unique key): return the existing row
            return cls.objects.get(key = data["oauth_token"])
    def token(self):
        """Return this token as an oauth2.Token usable for request signing."""
        return oauth2.Token(self.key, self.secret)
class AccessToken(models.Model):
    """A Flickr OAuth access token for one user, plus profile and feed metadata."""
    # OAuth credential pair issued by Flickr.
    key = models.CharField(max_length=100, null=False, blank=False, unique=True)
    secret = models.CharField(max_length=100, null=False, blank=False)
    # Bookkeeping timestamps (UTC, naive).
    created = models.DateTimeField(default=datetime.datetime.utcnow, null=False, blank=False)
    fetched = models.DateTimeField(null=True)
    updated = models.DateTimeField(blank=False, null=False)
    # Flickr identity of the token's owner; nsid is Flickr's stable user id.
    username = models.CharField(max_length=100, null=False, blank=False)
    nsid = models.CharField(max_length=20, null=False, blank=False, unique=True)
    fullname = models.CharField(max_length=100, null=False, blank=False)
    # Random secret embedded in the user's private feed URL (set in save()).
    feed_secret = models.CharField(max_length=13, null=False, blank=False, unique=True)
    def __str__(self):
        # Serialise the token pair in OAuth form-encoded format.
        data = {"oauth_token": self.key, "oauth_token_secret": self.secret}
        return urllib.urlencode(data)
    @classmethod
    def from_string(cls, string):
        """Create or update an AccessToken from a form-encoded OAuth response.

        Handles two collision cases: the same token key re-appearing (must
        belong to the same nsid, otherwise this raises) and a known nsid
        arriving with a fresh token (credentials are refreshed in place).
        """
        data = dict(urlparse.parse_qsl(string))
        properties = dict(
            key = data["oauth_token"],
            secret = data["oauth_token_secret"],
            username=data["username"],
            nsid=data["user_nsid"],
            fullname = data.get("fullname", data["username"]),
            updated = datetime.datetime.utcnow(),
        )
        try:
            return cls.objects.create(**properties)
        except IntegrityError:
            try:
                token = cls.objects.get(key=properties["key"])
                if token.nsid != properties["nsid"]:
                    raise Exception("token re-used for another user. BAD THING.")
            except cls.DoesNotExist:
                # Same user, new token key: refresh the stored credentials.
                token = cls.objects.get(nsid=properties["nsid"])
            for k, v in properties.items():
                setattr(token, k, v)
            token.save()
            return token
    def token(self):
        # The oauth2 library's representation of this token.
        return oauth2.Token(self.key, self.secret)
    def save(self, *args, **kwargs):
        # Lazily generate the private feed secret on first save.
        if not self.feed_secret:
            self.feed_secret = str(uuid.uuid4())[:13]
        return super(AccessToken, self).save(*args, **kwargs)
    def call(self, method, name, **kwargs):
        """Perform a signed Flickr API call and return the parsed response.

        :param method: "get" for a GET request, anything else POSTs
        :param name: Flickr API method name, e.g. "flickr.photos.getContactsPhotos"
        :param kwargs: additional request parameters
        :raises FlickrException: on a non-200 response or an API-level error
        """
        consumer = oauth2.Consumer(key=settings.FLICKR_API_KEY, secret=settings.FLICKR_API_SECRET)
        client = oauth2.Client(consumer, self.token())
        args = dict(
            method = name,
            format = "json",
            nojsoncallback = "1",
        )
        args.update(kwargs)
        params = urllib.urlencode(args)
        start = time.time()
        if method == "get":
            resp, content = client.request("%s?%s"%(settings.FLICKR_API_URL, params), "GET")
        else:
            resp, content = client.request(settings.FLICKR_API_URL, "POST", body=params)
        # Wall-clock duration of the last call, kept for diagnostics.
        self.last_time = time.time() - start
        if resp['status'] != '200':
            raise FlickrException(0, "flickr API error : %s %s"%(resp["status"], content))
        if args["format"] == "json":
            data = json.loads(content)
            if data["stat"] != "ok":
                raise FlickrException(data["code"], data["message"])
            return data
        return content
    def recent_photos(self, no_instagram=False, just_friends=False, include_self=False):
        """Return recent photos from the user's contacts, cached for 120 seconds.

        Each photo dict is post-processed: description flattened, a web link
        built, the upload timestamp parsed to a datetime, tags split to a list.
        """
        self.last_time = None
        cache_key = 'flickr_items_%s_%s_%s_%s'%(self.id, no_instagram, just_friends, include_self)
        photos = cache.get(cache_key)
        if not photos:
            response = self.call("get", "flickr.photos.getContactsPhotos",
                count = 50,
                extras = EXTRAS,
                just_friends = (just_friends and "1" or "0"),
                include_self = (include_self and "1" or "0"),
            )
            photos = response["photos"]["photo"]
            def filter_instagram(p):
                # Instagram cross-posts carry this machine tag.
                mt = p["machine_tags"].split()
                return not "uploaded:by=instagram" in mt
            if no_instagram:
                photos = filter(filter_instagram, photos)
            cache.set(cache_key, photos, 120)
        # NOTE(review): this loop mutates the photo dicts after caching; on a
        # cache hit p["description"] is presumably already flattened, so
        # p["description"]["_content"] would fail unless the cache backend
        # returns copies -- confirm against the configured cache backend.
        for p in photos:
            p["description"] = p["description"]["_content"]
            p["link"] = "http://flickr.com/photos/%s/%s"%(p["pathalias"] or p["owner"], p['id'])
            p["upload_date"] = datetime.datetime.utcfromtimestamp(float(p["dateupload"]))
            p["tags"] = p["tags"].split()
        return photos
    def touch(self):
        # Record that this token's feed has just been fetched.
        self.fetched = datetime.datetime.utcnow()
        self.save()
|
[
"tom@movieos.org"
] |
tom@movieos.org
|
ddb8374329c0329407245961714ca9183b79aea4
|
0cdd45b2c9ad9153ddae010978e2ff1fc1b9c71e
|
/danci/settings.py
|
c61f326ef6434a6b92d0963e5034d77bea76fbd7
|
[] |
no_license
|
kiss3256/danci
|
3777f4f22b36b310331a27f3bfae63288431f2ec
|
660a368e9a14a1c997035b736806d7f9662d1dd3
|
refs/heads/master
| 2020-05-03T14:36:07.302490
| 2019-05-08T01:41:45
| 2019-05-08T01:41:45
| 178,682,078
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,057
|
py
|
# -*- coding: utf-8 -*-
# Scrapy settings for danci project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
# Scrapy settings for the danci project.
# Only commonly tuned settings are listed; the full reference lives at
# https://doc.scrapy.org/en/latest/topics/settings.html

BOT_NAME = 'danci'

SPIDER_MODULES = ['danci.spiders']
NEWSPIDER_MODULE = 'danci.spiders'

# Identify the crawler (and your site) in the User-Agent header.
#USER_AGENT = 'danci (+http://www.yourdomain.com)'

# Respect robots.txt directives on crawled sites.
ROBOTSTXT_OBEY = True

# Concurrency / politeness knobs (Scrapy defaults apply while commented out):
#CONCURRENT_REQUESTS = 32
#DOWNLOAD_DELAY = 3
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Cookies and the telnet console are enabled unless switched off here.
#COOKIES_ENABLED = False
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Spider / downloader middleware and extension hooks (all disabled):
#SPIDER_MIDDLEWARES = {
#    'danci.middlewares.DanciSpiderMiddleware': 543,
#}
#DOWNLOADER_MIDDLEWARES = {
#    'danci.middlewares.DanciDownloaderMiddleware': 543,
#}
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Every scraped item is routed through DanciPipeline.
ITEM_PIPELINES = {
    'danci.pipelines.DanciPipeline': 300,
}

# AutoThrottle extension (disabled by default):
#AUTOTHROTTLE_ENABLED = True
#AUTOTHROTTLE_START_DELAY = 5
#AUTOTHROTTLE_MAX_DELAY = 60
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
#AUTOTHROTTLE_DEBUG = False

# HTTP response caching (disabled by default):
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
[
"leytonxu@gmail.com"
] |
leytonxu@gmail.com
|
7630c8e792dffc9e7609a9b24099e964e7a48df0
|
c29a713e84be273b8e0b91634c0045ed40de9105
|
/contrib/config-example.py
|
7a51046ddca630a43091b64fe513b0ff31a956e5
|
[] |
no_license
|
jane-hnatiuk/tranliterator
|
2e6b79631cb4850660d945662e83e70e15427f4f
|
6f2797bbe4efe38044c08e8840ccfb07b92c4619
|
refs/heads/master
| 2021-01-19T12:03:33.670853
| 2017-01-24T20:05:10
| 2017-01-24T20:05:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 325
|
py
|
class Config(object):
    """Base configuration shared by every environment."""
    DEBUG = False
    CSRF_ENABLED = True


class ProductionConfig(Config):
    """Production settings; real translator credentials belong here."""
    DEBUG = False
    TRANSLATOR_ID = 'id_prod'
    TRANSLATOR_SECRET = 'secret_prod'


class DevelopmentConfig(Config):
    """Local development settings with debugging switched on."""
    DEVELOPMENT = True
    DEBUG = True
    TRANSLATOR_ID = 'id_dev'
    TRANSLATOR_SECRET = 'secret_dev'
|
[
"k.scherban@gmail.com"
] |
k.scherban@gmail.com
|
ee31fca9e13eace1152029a6fef01b99c5d8aadf
|
1b3eb66d1c8e9947849bf14870e736d14488d8be
|
/TM_des_cbc_0_vis.py
|
c810df25e58308f9382e0996485f76245186f5b6
|
[] |
no_license
|
AntonAlbertovich/cipher-visualization
|
87721d6c47c9d9a17111cc52c06337e2b4d1aed3
|
2aa8022daaad136e423514e93a9824c5b28a8074
|
refs/heads/master
| 2020-08-03T14:20:19.827975
| 2019-10-08T20:30:03
| 2019-10-08T20:30:03
| 211,783,118
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 883
|
py
|
import matplotlib.pyplot as plt; plt.rcdefaults()
import numpy as np
import matplotlib.pyplot as plt
objects = []        # two-character labels read from the dump
performance = []    # instance counts paired with each label

# The dump alternates an integer count line with a character-label line;
# pair them up. Fixes vs the original: the file is opened in text mode
# (it was "rb", which made every axis tick render as a bytes literal like
# b'AB'), and the redundant f.close() inside the with-block is gone.
with open("TM_des_cbc_0.txt", "r") as f:
    have_count = False
    count = ""
    for line in f:
        print(line)
        try:
            count = int(line)
            have_count = True
        except ValueError:
            # A non-numeric line is a label; record it only when a count
            # line directly preceded it.
            if have_count:
                performance.append(count)
                objects.append(line[:2])
                have_count = False

y_pos = np.arange(len(objects))

plt.bar(y_pos, performance, align='center', alpha=1.0)
plt.xticks(y_pos, objects, rotation=90)
plt.ylabel('Instances in Document')
plt.title('Individual Characters')
plt.show()

print(len(objects))
print(len(performance))
print(objects)
print(performance)
|
[
"noreply@github.com"
] |
AntonAlbertovich.noreply@github.com
|
2d25dc2c818efe033ad59ee1eb11f2c8ccfde452
|
1ea966542e28e24b2f3f7d5e0352cbdc110a979a
|
/Algorithm/Programmers/Programmers_2개이하로다른비트.py
|
9a1ef5bccf76215078cf0ce46a5b4b707c54eb9b
|
[] |
no_license
|
yunhacho/SelfStudy
|
9ff7002362f6e9d8fe7d1ca3ccf94ee96726f635
|
99912af3df014a6864893c9274dbf83ff9ed05a8
|
refs/heads/main
| 2023-08-25T06:56:21.116419
| 2021-10-28T07:35:09
| 2021-10-28T07:35:09
| 360,470,454
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 242
|
py
|
def solution(numbers):
    """For each number, return the next larger value differing in at most two bits.

    Strategy: prepend a '0' to the binary form, locate the rightmost '0',
    then either set it (when it is the lowest bit) or replace it and the
    bit below with '10'.
    """
    answers = []
    for number in numbers:
        bits = '0' + bin(number)[2:]
        last_zero = bits.rfind('0')  # always >= 0 thanks to the prefix
        if last_zero == len(bits) - 1:
            answers.append(int(bits[:last_zero] + '1', 2))
        else:
            answers.append(int(bits[:last_zero] + '10' + bits[last_zero + 2:], 2))
    return answers
|
[
"jyh_5530@naver.com"
] |
jyh_5530@naver.com
|
a5bdaa8e9a727c27b7d64b956d252fd66588dfe7
|
b516a1d0791bba8010ad0a8616c4be259589ce5c
|
/anidb/model.py
|
5bc42d2d56d81688209b35883f85b3d6baba68c2
|
[] |
no_license
|
p0psicles/pyanihttp
|
769cd585a9351deea6ca5c7ad8af5aa32fcaee37
|
6a67899e4491afd6b020edb7781e37b2781ad1df
|
refs/heads/master
| 2021-01-17T19:59:33.580955
| 2012-02-26T20:09:55
| 2012-02-26T20:09:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,563
|
py
|
# Public API of this module.
__all__ = ["Anime", "Category", "Title", "Episode", "Tag"]
class Entity(object):
    """Base class for AniDB objects identified by a numeric id."""
    def __init__(self, id):
        self._id = int(id)
    @property
    def id(self):
        """The numeric id of this entity"""
        return self._id
    @id.setter
    def id(self, value):
        # Consistency fix: coerce to int like __init__ does, so the id is
        # always an int regardless of how it was assigned.
        self._id = int(value)
class Titled(object):
    """
    Base class for all classes with a `titles` attribute
    (a mapping of language code -> list of titles).
    """
    def __init__(self):
        self.titles = {}
    def add_title(self, title):
        """
        Add a new title to this entity, grouped under its language.

        :param title: The title to add (must have `lang` and `title` set)
        """
        assert title.lang is not None
        assert title.title is not None
        self.titles.setdefault(title.lang, []).append(title)
class Typed(object):
    """
    Base class for all classes with a `type` attribute
    """
    def __init__(self):
        self._type = None

    @property
    def type(self):
        """The type of this object (None until assigned)."""
        return self._type

    @type.setter
    def type(self, new_type):
        self._type = new_type
class Named(object):
    """
    Base class for all classes with a `name` attribute
    """
    def __init__(self):
        # Routes through the property setter below.
        self.name = None

    @property
    def name(self):
        """The name of this object (None until assigned)."""
        return self._name

    @name.setter
    def name(self, new_name):
        self._name = new_name
class Described(object):
    """
    Base class for all classes with a `description` attribute
    """
    def __init__(self):
        self._description = None

    @property
    def description(self):
        """The description of this object (None until assigned)."""
        return self._description

    @description.setter
    def description(self, new_description):
        self._description = new_description
class Anime(Entity, Titled, Typed, Described):
    """
    An anime. Identified by an `aid`
    """
    def __init__(self, aid):
        Entity.__init__(self, aid)
        Titled.__init__(self)
        Typed.__init__(self)
        Described.__init__(self)
        self._type = None
        self._episodecount = None
        self._episodes = {}      # epno -> Episode
        self._startdate = None
        self._enddate = None
        self._categories = []
        self._tags = []
        # One slot per rating kind, filled in by set_rating().
        self._ratings = {
            "permanent":
                {"count": None, "rating": None},
            "temporary":
                {"count": None, "rating": None},
            "review":
                {"count": None, "rating": None}
        }
    def add_category(self, category):
        """
        Add a new category to this anime

        :param category: The category to add
        :raises TypeError: if *category* is not a :class:`Category`
        """
        if not isinstance(category, Category):
            raise TypeError("Category expected")
        self._categories.append(category)
    def add_episode(self, episode):
        """
        Adds an episode to this anime

        :param episode: :class:`anidb.model.Episode`
        :raises TypeError: if *episode* is not an :class:`Episode`
        """
        if not isinstance(episode, Episode):
            raise TypeError("Episode expected")
        # Bug fix: the original stored the Episode *class* object here
        # (``self._episodes[episode.epno] = Episode``) instead of the
        # instance that was passed in.
        self._episodes[episode.epno] = episode
    def set_rating(self, which, count, rating):
        """
        Set the rating of this anime

        :param which: Which rating. Either `temporary`, `permanent` or
            `review`
        :param count: The number of votes
        :param rating: The rating
        :raises ValueError: if *which* is not a known rating kind
        """
        if which not in self._ratings:
            raise ValueError("Unknown kind of rating")
        # NOTE(review): the vote count is coerced to float while the rating
        # is stored untouched; Episode.set_rating does the reverse
        # (int votes, float rating). Kept as-is for compatibility --
        # confirm the intended types before changing.
        self._ratings[which]["count"] = float(count)
        self._ratings[which]["rating"] = rating
    def add_tag(self, tag):
        """
        Adds a tag to this anime

        :param tag: A :class:`anidb.model.Tag`
        """
        self._tags.append(tag)
    @property
    def episodecount(self):
        """The declared number of episodes (int)."""
        return self._episodecount
    @episodecount.setter
    def episodecount(self, value):
        self._episodecount = int(value)
    @property
    def startdate(self):
        """The date the anime started airing."""
        return self._startdate
    @startdate.setter
    def startdate(self, value):
        self._startdate = value
    @property
    def enddate(self):
        """The date the anime stopped airing."""
        return self._enddate
    @enddate.setter
    def enddate(self, value):
        self._enddate = value
    @property
    def ratings(self):
        """Mapping of rating kind -> {"count": ..., "rating": ...}."""
        return self._ratings
    @property
    def episodes(self):
        """Mapping of episode number -> Episode."""
        return self._episodes
    @property
    def categories(self):
        """List of Category objects attached to this anime."""
        return self._categories
    @property
    def tags(self):
        """List of Tag objects attached to this anime."""
        return self._tags
class Episode(Entity, Titled):
    """
    An episode. Identified by an `id`
    """
    def __init__(self, id):
        Entity.__init__(self, id)
        Titled.__init__(self)
        self._length = None
        self._airdate = None
        self._epno = None
        self._rating = None

    def set_rating(self, votes, rating):
        """Store the rating as an (int votes, float rating) pair."""
        self._rating = (int(votes), float(rating))

    @property
    def epno(self):
        """The episode number within its anime."""
        return self._epno

    @epno.setter
    def epno(self, number):
        self._epno = number

    @property
    def airdate(self):
        """The date this episode first aired."""
        return self._airdate

    @airdate.setter
    def airdate(self, when):
        self._airdate = when

    @property
    def length(self):
        """The length of this episode."""
        return self._length

    @length.setter
    def length(self, duration):
        self._length = duration
class Category(Entity, Named, Described):
    """
    An AniDB category
    """
    def __init__(self, id):
        Entity.__init__(self, id)
        Named.__init__(self)
        Described.__init__(self)
        self._hentai = False
        self._weight = 0
        # Redundant with Named/Described.__init__, kept for parity with
        # the original initialisation order.
        self._name = None
        self._description = None
        self._parentid = None

    @property
    def hentai(self):
        """
        Whether or not this category contains hentai material

        :rtype: Boolean
        """
        return self._hentai

    @hentai.setter
    def hentai(self, flag):
        self._hentai = flag

    @property
    def weight(self):
        """The weight of this category."""
        return self._weight

    @weight.setter
    def weight(self, new_weight):
        self._weight = new_weight

    @property
    def parentid(self):
        """The id of the parent category."""
        return self._parentid

    @parentid.setter
    def parentid(self, new_parent):
        self._parentid = new_parent
class Title(Typed):
    """A single title of an anime, in a specific language."""

    def __init__(self, lang, type=None, title=None, exact=False):
        Typed.__init__(self)
        assert lang is not None
        self._lang = lang
        self._type = type
        self._title = title
        self._exact = exact

    def __str__(self):
        return self._title

    @property
    def lang(self):
        """
        The language of the title. A complete list is available at `the
        AniDB wiki <http://wiki.anidb.net/w/User:Eloyard/anititles_dump>`_
        """
        return self._lang

    @lang.setter
    def lang(self, new_lang):
        self._lang = new_lang

    @property
    def title(self):
        """The title text itself."""
        return self._title

    @title.setter
    def title(self, new_title):
        self._title = new_title

    @property
    def exact(self):
        """
        Whether or not this is an exact match, in case the corresponding
        :class:`anidb.model.Anime` object was retrieved via a search.

        :rtype: Boolean
        """
        return self._exact

    @exact.setter
    def exact(self, flag):
        self._exact = flag
class Tag(Entity, Named, Described):
    """A user-assigned AniDB tag."""

    def __init__(self, id):
        Entity.__init__(self, id)
        Named.__init__(self)
        Described.__init__(self)
        self._spoiler = False
        self._approval = None
        self._count = None

    @property
    def count(self):
        """How many times this tag was applied (int)."""
        return self._count

    @count.setter
    def count(self, new_count):
        self._count = int(new_count)

    @property
    def spoiler(self):
        """Whether this tag spoils the plot."""
        return self._spoiler

    @spoiler.setter
    def spoiler(self, flag):
        self._spoiler = flag

    @property
    def approval(self):
        """The approval score of this tag (int)."""
        return self._approval

    @approval.setter
    def approval(self, new_approval):
        self._approval = int(new_approval)
|
[
"themineo@gmail.com"
] |
themineo@gmail.com
|
7fa32c3ea39a48ed635213b68a5c69b5931edf48
|
5211ed7c50556c904fce461cfc412e3679934feb
|
/bin/thresholder.py
|
5e97b186aa11169b860bcbf4878f6837262e7218
|
[] |
no_license
|
WalterBrito/Social-Website
|
1d4cb261656bc0e5326a5cc0bef723766610103e
|
988a5c9335861f3e5a288e131d6c6184acba9ad8
|
refs/heads/master
| 2021-01-09T20:33:03.217470
| 2016-07-20T13:42:13
| 2016-07-20T13:42:13
| 62,829,063
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,893
|
py
|
#!/home/fedora/Projetos/env/bin/python3
#
# The Python Imaging Library
# $Id$
#
# this demo script illustrates how a 1-bit BitmapImage can be used
# as a dynamically updated overlay
#
try:
from tkinter import *
except ImportError:
from Tkinter import *
from PIL import Image, ImageTk
import sys
#
# an image viewer
class UI(Frame):
    """Tk frame that displays *im* with a live, thresholded 1-bit overlay.

    A horizontal slider selects the threshold; pixels >= the threshold are
    painted green on top of the grayscale backdrop.
    """
    def __init__(self, master, im, value=128):
        """Build the canvas and slider.

        :param master: parent Tk widget
        :param im: PIL image to display (expected mode "L" by the caller)
        :param value: initial threshold (0-255)
        """
        Frame.__init__(self, master)
        self.image = im
        self.value = value
        self.canvas = Canvas(self, width=im.size[0], height=im.size[1])
        # Keep a reference to the PhotoImage so Tk doesn't garbage-collect it.
        self.backdrop = ImageTk.PhotoImage(im)
        self.canvas.create_image(0, 0, image=self.backdrop, anchor=NW)
        self.canvas.pack()
        # NOTE: scale.set() below fires update_scale (and thus redraw)
        # immediately, so the widget order here matters.
        scale = Scale(self, orient=HORIZONTAL, from_=0, to=255,
                      resolution=1, command=self.update_scale, length=256)
        scale.set(value)
        scale.bind("<ButtonRelease-1>", self.redraw)
        scale.pack()
        # uncomment the following line for instant feedback (might
        # be too slow on some platforms)
        # self.redraw()
    def update_scale(self, value):
        # Slider callback: remember the new threshold and repaint.
        # NOTE(review): since this already calls redraw() on every change,
        # the <ButtonRelease-1> binding above looks redundant -- confirm.
        self.value = float(value)
        self.redraw()
    def redraw(self, event=None):
        # create overlay (note the explicit conversion to mode "1")
        im = self.image.point(lambda v, t=self.value: v >= t, "1")
        self.overlay = ImageTk.BitmapImage(im, foreground="green")
        # update canvas
        self.canvas.delete("overlay")
        self.canvas.create_image(0, 0, image=self.overlay, anchor=NW,
                                 tags="overlay")
# --------------------------------------------------------------------
# main
# Entry point: load the image named on the command line, force grayscale,
# and show it inside the thresholding UI.
if len(sys.argv) != 2:
    print("Usage: thresholder file")
    sys.exit(1)

root = Tk()

image = Image.open(sys.argv[1])
if image.mode != "L":
    image = image.convert("L")
# image.thumbnail((320, 200))

UI(root, image).pack()
root.mainloop()
|
[
"fedora@localhost.localdomain"
] |
fedora@localhost.localdomain
|
2bae3b5a04eecfdea7f941579f8042efea19fa28
|
ef1b77f02623178fd6a24fe459d452f17c47009c
|
/posts/views.py
|
b35b2157e8f9a585bdb8d9e247cdb648cf8247f3
|
[] |
no_license
|
praveen-stox/mb-app
|
24bb2404e6c9ce6b666f1419ae94699f0e942b19
|
2c24873f3be575e48e223bc6609f4e68839b91fd
|
refs/heads/master
| 2023-06-25T02:32:54.277101
| 2021-07-19T17:03:11
| 2021-07-19T17:03:11
| 387,536,082
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 248
|
py
|
from django.shortcuts import render
from django.views.generic import ListView
from .models import Post
class HomePageView(ListView):
    """Render the home page with every Post, exposed as `all_posts_list`."""
    model = Post
    template_name = 'home.html'
    context_object_name = 'all_posts_list'
# Create your views here.
|
[
"praveen.matlab@gmail.com"
] |
praveen.matlab@gmail.com
|
924a3f8ab9aff1788f8b5f5653b579ab4b7aebbb
|
e93f117a0c538341b2fc49009149a64009fad3f5
|
/uw_gws/utils.py
|
df681f867b26e8714ec0c18b407e089bb0467a62
|
[] |
no_license
|
abztrakt/uw-gws
|
8cf8bafe4875bc0e95dfd117de9b47e2d97eee66
|
cfe88c0575cb9d63ff48be10b8a69cd9fcb1d713
|
refs/heads/master
| 2021-01-01T18:41:34.903300
| 2015-06-17T18:34:57
| 2015-06-17T18:34:57
| 10,850,175
| 0
| 0
| null | 2013-09-12T21:37:43
| 2013-06-21T19:03:43
|
Python
|
UTF-8
|
Python
| false
| false
| 4,213
|
py
|
import httplib
from django.conf import settings
from django.contrib.auth.models import User,Group
import xml.dom.minidom
def get_group_info(group):
    '''
    Returns the information of a group via Groups Web Service in XHTML.

    @return (dictionary, boolean) -- ({'group_info': body}, True) on
    success, ({'error': message}, False) when the service returns 404.
    '''
    url = settings.URL + 'group/%s/' % group
    conn = httplib.HTTPSConnection(settings.GWS_HOST, settings.GWS_PORT,
                                   settings.KEY_FILE, settings.CERT_FILE)
    conn.request(settings.METHOD, url)
    response = conn.getresponse()
    payload = response.read()
    conn.close()
    if response.status == 404:
        return ({'error':"Error: The group '%s' was not found in the Groups Web Service, instead a 404 error was returned." % group}, False)
    return ({'group_info':payload},True)
def get_group_members(group):
    '''
    Returns the members of a group via Groups Web Service.

    @return (dictionary, boolean) -- ({'group_members': [...]}, True) on
    success, ({'error': message}, False) when the service returns 404.
    '''
    url = settings.URL + 'group/%s/member' % group
    conn = httplib.HTTPSConnection(settings.GWS_HOST, settings.GWS_PORT,
                                   settings.KEY_FILE, settings.CERT_FILE)
    conn.request(settings.METHOD, url)
    response = conn.getresponse()
    payload = response.read()
    conn.close()
    if response.status == 404:
        return ({'error':"Error: The group '%s' was not found in the Groups Web Service, instead a 404 error was returned." % group}, False)
    # The service answers with XHTML; members are <li> items whose first
    # child carries class="member".
    members = []
    dom = xml.dom.minidom.parseString(payload)
    for item in dom.getElementsByTagName('li'):
        child = item.firstChild
        if child is not None and child.getAttribute('class') == 'member':
            members.append(child.firstChild.data)
    return ({'group_members':members},True)
def update_group_members(group):
    '''
    Updates the users of a group for use in the django databases.

    Synchronises the Django ``Group`` named *group* with the membership
    reported by the Groups Web Service, creating missing users and adding/
    removing group memberships. Returns a summary dict with the keys
    ``current_users``, ``created_users``, ``removed_users``, ``added_users``
    and ``is_updated``.

    NOTE(review): when the memberships already match, ``current_users``
    holds username *strings*; otherwise it holds ``User`` objects, and
    ``removed_users`` always holds username strings -- confirm which shape
    callers rely on before changing.
    '''
    # Grab all of the members of the group from the group web service. If the group doesn't exist, stop further processing.
    result, group_exists = get_group_members(group)
    if group_exists:
        # If the group exists in the web service, get or create the group.
        django_group,django_group_created = Group.objects.get_or_create(name=group)
        # Grab all of the members of the group in django.
        users = sorted([user.username for user in django_group.user_set.all()])
        result = sorted(result['group_members'])
        # Primarily used for testing, these lists will be populated with any updates to the database regarding groups.
        current_users = []
        created_users = []
        removed_users = []
        added_users = []
        if users == result:
            # Membership already in sync; nothing to change.
            current_users = users
        else:
            # Grab or create the users from the group and add them to the group.
            for member in result:
                user,user_created = User.objects.get_or_create(username=member)
                if user_created:
                    # Create the user. Set default permissions.
                    user.is_staff = False
                    user.is_superuser = False
                    user.save()
                    created_users.append(user)
                if user.username in users:
                    current_users.append(user)
                elif not user_created and user.username not in users:
                    added_users.append(user)
                # Now add them to the group.
                user.groups.add(django_group)
                user.save()
            #Now check which people weren't in the group and remove them.
            removed_users = [user for user in users if user not in result]
            for member in removed_users:
                user = User.objects.get(username=member)
                user.groups.remove(django_group)
        result = {
            'current_users': current_users,
            'created_users': created_users,
            'removed_users': removed_users,
            'added_users': added_users,
            'is_updated': True,
        }
        return result
    else:
        # Propagate the service error payload, flagged as not updated.
        result['is_updated'] = False
        return result
|
[
"mnnguyen@uw.edu"
] |
mnnguyen@uw.edu
|
745b47f4b9653e1adb5938a611487ad9e3201e35
|
303bac96502e5b1666c05afd6c2e85cf33f19d8c
|
/solutions/python3/944.py
|
bd687a861c6ef672174fcc914271cffea1314b06
|
[
"MIT"
] |
permissive
|
jxhangithub/leetcode
|
5e82f4aeee1bf201e93e889e5c4ded2fcda90437
|
0de1af607557d95856f0e4c2a12a56c8c57d731d
|
refs/heads/master
| 2022-05-22T12:57:54.251281
| 2022-03-09T22:36:20
| 2022-03-09T22:36:20
| 370,508,127
| 1
| 0
|
MIT
| 2022-03-09T22:36:20
| 2021-05-24T23:16:10
| null |
UTF-8
|
Python
| false
| false
| 138
|
py
|
class Solution:
    def minDeletionSize(self, A):
        """Count the columns of A that are not sorted top-to-bottom.

        A column counts once as soon as any adjacent pair of rows is out
        of order in that column (LeetCode 944).
        """
        unsorted_columns = 0
        for col in range(len(A[0])):
            for upper, lower in zip(A, A[1:]):
                if upper[col] > lower[col]:
                    unsorted_columns += 1
                    break
        return unsorted_columns
|
[
"cenkay.arapsagolu@gmail.com"
] |
cenkay.arapsagolu@gmail.com
|
4da391e6845015007b01093614347747e5b52720
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/data/p4VQE/R4/benchmark/startPyquil309.py
|
b9a32c54bbe98a26e0fe98284c0b30b521a3eef5
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,195
|
py
|
# qubit number=4
# total number=12
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
# NOTE(review): this QVM connection is created but never used below --
# presumably leftover from an earlier version; confirm before removing.
conn = QVMConnection()
def make_circuit() -> Program:
    """Assemble the fixed 4-qubit demo program.

    Gate order matches the generated original (slots 2-11): Hadamards on
    qubits 1-3, then interleaved Y gates on qubit 3 and SWAPs of qubits 1,0.
    """
    prog = Program()  # circuit begin
    for gate in (H(1), H(2), H(3), Y(3),
                 SWAP(1, 0), SWAP(1, 0),
                 Y(3), Y(3),
                 SWAP(1, 0), SWAP(1, 0)):
        prog += gate
    # circuit end
    return prog
def summrise_results(bitstrings) -> dict:
    """Tally measurement bitstrings into a {bitstring: occurrences} dict.

    (Name kept as-is, typo included, since callers depend on it.)
    """
    counts = {}
    for bits in bitstrings:
        counts[bits] = counts.get(bits, 0) + 1
    return counts
if __name__ == '__main__':
    # Run the demo circuit 1024 times on a 4-qubit QVM and dump the
    # bitstring frequency table to CSV.
    program = make_circuit()
    qvm = get_qc('4q-qvm')
    results = qvm.run_and_measure(program, 1024)
    bitstrings = np.vstack([results[qubit] for qubit in qvm.qubits()]).T
    bitstrings = [''.join(map(str, bits)) for bits in bitstrings]
    with open("../data/startPyquil309.csv", "w") as writefile:
        print(summrise_results(bitstrings), file=writefile)
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
99bd86ca470ce752e248ec45820f5d1e977e210f
|
4d6db4761fe07f2bfc2801400608cb0b6a814dfe
|
/copy_file.py
|
185a9b067de0c8156660745d001a010cad243279
|
[] |
no_license
|
jacekfigiel/Organize-your-folders
|
2b7266d44c227a151d8a77705a99a43c69e2eb19
|
4942d3ef788a23feeee711805e03bddbbdd3622e
|
refs/heads/main
| 2023-07-16T22:54:53.316051
| 2021-08-30T10:16:01
| 2021-08-30T10:16:01
| 401,299,620
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 298
|
py
|
from pathlib import Path
import shutil
import os
# Move every file of a given type from one folder to another.
# Change the glob pattern below to move a different file type.
source_folder = r"C:"
target_folder = r"C:"

for file_path in Path(source_folder).glob("*.txt"):
    # Bug fix: Path.glob already yields paths that include source_folder,
    # so the original os.path.join(source_folder, file_name) doubled the
    # folder prefix and produced a broken path for any normal directory.
    shutil.move(str(file_path), target_folder)
|
[
"jacekfigo1988@gmail.com"
] |
jacekfigo1988@gmail.com
|
9c2e5b9526c6eadce1fc38a03bb4c1f15495d7bc
|
551b75f52d28c0b5c8944d808a361470e2602654
|
/huaweicloud-sdk-vpc/huaweicloudsdkvpc/v2/model/neutron_create_security_group_option.py
|
0f2264525c7e8c7ce51ffcb365afd1fd1693468f
|
[
"Apache-2.0"
] |
permissive
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
9d6597ce8ab666a9a297b3d936aeb85c55cf5877
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
refs/heads/master
| 2023-05-08T21:32:31.920300
| 2021-05-26T08:54:18
| 2021-05-26T08:54:18
| 370,898,764
| 0
| 0
|
NOASSERTION
| 2021-05-26T03:50:07
| 2021-05-26T03:50:07
| null |
UTF-8
|
Python
| false
| false
| 3,837
|
py
|
# coding: utf-8
import pprint
import re
import six
class NeutronCreateSecurityGroupOption:
    """Request-body model for creating a security group via the Neutron API.

    Attributes:
        openapi_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the definition.
    """

    sensitive_list = []

    openapi_types = {
        'description': 'str',
        'name': 'str'
    }

    attribute_map = {
        'description': 'description',
        'name': 'name'
    }

    def __init__(self, description=None, name=None):
        """NeutronCreateSecurityGroupOption - a model defined in huaweicloud sdk"""
        self._description = None
        self._name = None
        self.discriminator = None
        if description is not None:
            self.description = description
        if name is not None:
            self.name = name

    @property
    def description(self):
        """The description of this security group (0-255 characters).

        :rtype: str
        """
        return self._description

    @description.setter
    def description(self, description):
        """Set the description (0-255 characters).

        :type: str
        """
        self._description = description

    @property
    def name(self):
        """The security group name (0-255 characters; must not be "default").

        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Set the name (0-255 characters; must not be "default").

        :type: str
        """
        self._name = name

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [item.to_dict() if hasattr(item, "to_dict") else item
                                for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                                for k, v in value.items()}
            elif attr in self.sensitive_list:
                result[attr] = "****"
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return (isinstance(other, NeutronCreateSecurityGroupOption)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
b78c69393969e3f18725cef9e420f118fdf82b4b
|
66b1a09e3e8b7ece4c703bf02a33824c57c105f1
|
/Skyline.py
|
8ba36699c801c7a04044fa7136af8a191db11238
|
[] |
no_license
|
jordibosch20/telegram_bot
|
9bb0e3f53a2dcd1e6e3a14d9ab2fe53060cf26b1
|
88ab088f37ec46085e779c477a0dcfa0d27da852
|
refs/heads/master
| 2022-09-24T19:19:22.611328
| 2020-05-30T00:15:29
| 2020-05-30T00:15:29
| 265,609,975
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,766
|
py
|
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib
import time
class Skyline():
    """A skyline: a collection of buildings, each a (xmin, height, xmax) tuple.

    Supports translation, replication, mirroring, union and intersection of
    skylines, plus plotting the merged silhouette with matplotlib.
    """

    def __init__(self, edificis=None):
        """Create a skyline from a list of (xmin, height, xmax) buildings.

        Fix: the original signature used a mutable default (``edificis=[]``),
        which is shared across every instance created without an argument;
        use ``None`` as the sentinel and build a fresh list per instance.
        """
        self.edificis = [] if edificis is None else edificis

    # Two static helpers used as sort keys below.
    @staticmethod
    def order_minim(x):
        """Sort key: a building's left edge (xmin)."""
        return x[0]

    @staticmethod
    def order_maxim(x):
        """Sort key: a building's right edge (xmax)."""
        return x[2]

    def ordenar(self):
        """Sort buildings left-to-right; compute punt_minim / punt_maxim.

        punt_maxim is the rightmost edge of any building (found by sorting
        on xmax descending), punt_minim the leftmost edge (after the final
        ascending sort on xmin).
        """
        self.edificis.sort(key=self.order_maxim, reverse=True)
        self.punt_maxim = self.edificis[0][2]
        self.edificis.sort(key=self.order_minim)
        self.punt_minim = self.edificis[0][0]

    def suma_numero(self, num):
        """Return a new Skyline translated +num units along the x axis."""
        edificis1 = self.edificis.copy()
        for counter, valor in enumerate(self.edificis):
            edificis1[counter] = (valor[0] + num, valor[1], valor[2] + num)
        return Skyline(edificis1)

    def resta_numero(self, num):
        """Return a new Skyline translated -num units along the x axis."""
        edificis1 = self.edificis.copy()
        for counter, valor in enumerate(self.edificis):
            edificis1[counter] = (valor[0] - num, valor[1], valor[2] - num)
        return Skyline(edificis1)

    def multiplicar_numero(self, num):
        """Return a new Skyline with this one replicated num times.

        Each replica is shifted by the full width of the original skyline so
        the copies sit side by side.
        """
        self.ordenar()
        desplacament = self.punt_maxim - self.punt_minim
        # Copy first: appending to a fresh list avoids growing the list we
        # are iterating over (self.edificis).
        edificis1 = self.edificis.copy()
        for valor in self.edificis:
            for j in range(1, num):
                edificis1.append((valor[0] + j * desplacament,
                                  valor[1],
                                  valor[2] + j * desplacament))
        return Skyline(edificis1)

    def reflectir(self):
        """Return a new Skyline mirrored about the center of this one."""
        self.ordenar()  # needed so punt_minim / punt_maxim are defined
        edificis1 = self.edificis.copy()
        desplacament = self.punt_maxim - self.punt_minim
        for counter, valor in enumerate(self.edificis):
            # When mirrored, each building's right edge becomes its new left
            # edge and vice versa.
            xminfinal = valor[2] + desplacament - 2 * (valor[2] - self.punt_minim)
            xmaxfinal = valor[0] + desplacament - 2 * (valor[0] - self.punt_minim)
            edificis1[counter] = (xminfinal, valor[1], xmaxfinal)
        return Skyline(edificis1)

    def unio_sky(self, sky_aux):
        """Return the union Skyline: this skyline's buildings plus sky_aux's.

        Overlapping buildings are NOT merged here; see unio()/merge() for the
        overlap-free outline.
        """
        return Skyline(self.edificis + sky_aux.edificis)

    def unio(self, edtotal):
        """Compute and return self.linia: the outline of edtotal, overlap-free.

        Classic divide and conquer: split the buildings in half, solve each
        half recursively, then merge the two partial outlines.
        """
        if not edtotal:
            self.linia = []
            return self.linia
        if len(edtotal) == 1:
            # A single building's outline: rise at xmin, drop to 0 at xmax.
            self.linia = [[edtotal[0][0], edtotal[0][1]], [edtotal[0][2], 0]]
            return self.linia
        mig = len(edtotal) // 2
        esquerra = self.unio(edtotal[:mig])
        dreta = self.unio(edtotal[mig:])
        self.linia = self.merge(esquerra, dreta)
        return self.linia

    def merge(self, esquerra, dreta):
        """Merge two sorted outlines (lists of [x, height] points) into one,
        emitting a point only where the silhouette height changes."""
        a1, a2 = 0, 0    # current height of the left / right outline
        i, j = 0, 0      # cursor into each outline
        res = [[0, 0]]   # sentinel so res[-1][1] is always defined
        while i < len(esquerra) and j < len(dreta):
            x0 = esquerra[i][0]
            x1 = dreta[j][0]
            if x0 <= x1:
                a1 = esquerra[i][1]
                i += 1
            if x1 <= x0:
                a2 = dreta[j][1]
                j += 1
            # Only record a point when the combined height actually changes;
            # otherwise it would be a redundant vertex.
            if max(a1, a2) != res[-1][1]:
                res.append([min(x0, x1), max(a1, a2)])
        # One of the two outlines is exhausted; append whatever remains of
        # the other (exactly one of these extends is non-empty).
        res.extend(dreta[j:])
        res.extend(esquerra[i:])
        return res[1:]  # drop the sentinel

    def intersec_rectangles(self, a1, a2):
        """Return (overlaps, longer): whether two buildings overlap on the x
        axis, and which one extends further right (1 = first, 2 = second)."""
        a, b = a1[0], a1[2]
        c, d = a2[0], a2[2]
        p1 = not ((b <= c) or (d <= a))
        if max(b, d) == b:
            p2 = 1
        else:
            p2 = 2
        return p1, p2

    def interseccio(self, sky1):
        """Return a new Skyline that is the intersection of self and sky1.

        Sweeps the two sorted building lists in lockstep; each overlapping
        pair contributes the overlapping x-span at the lower of the two
        heights.  (Debug prints from the original were removed.)
        """
        self.ordenar()
        sky1.ordenar()
        i = 0
        j = 0
        ed0 = self.edificis
        ed1 = sky1.edificis
        res = []
        while (i < len(ed0) and j < len(ed1)):
            x0 = ed0[i][0]
            x1 = ed1[j][0]
            if x0 <= x1:
                p1, p2 = self.intersec_rectangles(ed0[i], ed1[j])
                if (p1):
                    res.append((max(ed0[i][0], ed1[j][0]), min(ed0[i][1], ed1[j][1]),
                                min(ed0[i][2], ed1[j][2])))
                    # Advance the building that ends first: the longer one
                    # may still intersect the other list's next building.
                    if (p2 == 1):
                        j += 1
                    else:
                        i += 1
                else:
                    i += 1
            else:
                p1, p2 = self.intersec_rectangles(ed0[i], ed1[j])
                if (p1):
                    res.append((max(ed0[i][0], ed1[j][0]), min(ed0[i][1], ed1[j][1]),
                                min(ed0[i][2], ed1[j][2])))
                    if (p2 == 1):
                        j += 1
                    else:
                        i += 1
                else:
                    j += 1
        return Skyline(res)

    def area_alcada(self):
        """Return (area, height) summed over the raw building list.

        NOTE: overlapping buildings are each counted in full here; dibuixar()
        computes the area of the merged outline instead.
        """
        area = 0
        alcada = 0
        for edifici in self.edificis:
            area += (edifici[2] - edifici[0]) * edifici[1]
            alcada = max(alcada, edifici[1])
        return area, alcada

    def dibuixar(self):
        """Plot the skyline to skyline.png and write the merged outline's
        area and height to area_alcada.txt."""
        self.ordenar()
        self.unio(self.edificis)  # fills self.linia with the merged outline
        fig, ax = plt.subplots()
        area = 0
        alcada = 0
        for counter, value in enumerate(self.linia):
            if counter < (len(self.linia) - 1):
                index = counter + 1
                ax.add_patch(
                    patches.Rectangle(
                        (value[0], 0),
                        (self.linia[index][0] - value[0]),
                        value[1],
                        edgecolor='red',
                        facecolor='red',
                        fill=True
                    ))
                area += (self.linia[index][0] - value[0]) * (value[1])
                alcada = max(alcada, value[1])
        # Fix: use a context manager so the file is closed even on error.
        with open('area_alcada.txt', 'w') as f:
            f.write("area: " + str(area) + "\n" + "alçada: " + str(alcada))
        ax.plot()
        plt.savefig('skyline.png', bbox_inches="tight")
|
[
"jordiboschbosch@hotmail.com"
] |
jordiboschbosch@hotmail.com
|
29b676ce8dc99a99fd0ba0e02214b71f254f7431
|
88bd64de181325ea877a644f2e99cfba54e8168c
|
/old-version/influence/closed_forms.py
|
43908c4b4b87efeeb68fd297f18af11945289934
|
[
"MIT"
] |
permissive
|
joyivan/46927-Project
|
bb3132d627837a6066106466096d8ff32e620e31
|
366693a13d8ee1daad91364640792524f4664c04
|
refs/heads/master
| 2020-04-18T10:06:35.034984
| 2018-05-05T17:40:05
| 2018-05-05T17:40:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,401
|
py
|
import numpy as np
def I_loss_LinearRegressionCf(X_tr, Y_tr, X_te, Y_te):
    """Closed-form influence of each training point on each test-point loss.

    Fits OLS on (X_tr, Y_tr), then for every (train i, test j) pair computes
    grad_i^T H^{-1} grad_j, where the gradients are of the squared loss at
    the fitted coefficients and H is the (scaled) Hessian X^T X.

    Returns an (n_tr, n_te) array of influence values.
    """
    n_tr = X_tr.shape[0]
    n_te = X_te.shape[0]
    gram = X_tr.T.dot(X_tr)
    beta_hat = np.linalg.inv(gram).dot(X_tr.T).dot(Y_tr)
    # Inverse Hessian of the mean squared loss: H = (2/n) X^T X.
    H_inv = (n_tr / 2) * np.linalg.inv(gram)
    I_loss = np.zeros((n_tr, n_te))
    for i in range(n_tr):
        x_i, y_i = X_tr[i:i + 1, :], Y_tr[i:i + 1, :]
        # Training-point gradient does not depend on j: hoist it.
        grad_i = -2 * x_i.T.dot(y_i - x_i.dot(beta_hat))
        for j in range(n_te):
            x_j, y_j = X_te[j:j + 1, :], Y_te[j:j + 1, :]
            grad_j = -2 * x_j.T.dot(y_j - x_j.dot(beta_hat))
            I_loss[i, j] = grad_i.T.dot(H_inv).dot(grad_j)
    return I_loss
def LOO_diff_LinearRegression(X_tr, Y_tr, X_te, Y_te):
    """Exact leave-one-out loss differences for linear regression.

    For each training index i, refits OLS without row i and records, for
    every test point, (full-model loss) - (LOO-model loss).

    Returns an (n_tr, n_te) array of loss differences.
    """
    def l2_loss(x, y, b):
        # Squared residual of predicting y from x with coefficients b.
        return (y - x.dot(b)) ** 2

    n_tr = X_tr.shape[0]
    n_te = X_te.shape[0]
    beta_hat = np.linalg.inv(X_tr.T.dot(X_tr)).dot(X_tr.T).dot(Y_tr)
    L_full = l2_loss(X_te, Y_te, beta_hat)
    loss_diff = np.zeros((n_tr, n_te))
    for i in range(n_tr):
        keep = [k for k in range(n_tr) if k != i]
        X_loo = X_tr[keep, :]
        Y_loo = Y_tr[keep, :]
        beta_loo = np.linalg.inv(X_loo.T.dot(X_loo)).dot(X_loo.T).dot(Y_loo)
        loss_diff[i, :] = (L_full - l2_loss(X_te, Y_te, beta_loo)).T
    return loss_diff
|
[
"zed.yang@outlook.com"
] |
zed.yang@outlook.com
|
48b6b7cb9368a9db6760084e0982e05ee92758d6
|
04142fdda9b3fb29fb7456d5bc3e504985f24cbe
|
/mmcv/ops/masked_conv.py
|
919702e9cbd04b9e1f5c93147bcced8a1be38c61
|
[
"Apache-2.0"
] |
permissive
|
open-mmlab/mmcv
|
419e301bbc1d7d45331d67eccfd673f290a796d5
|
6e9ee26718b22961d5c34caca4108413b1b7b3af
|
refs/heads/main
| 2023-08-31T07:08:27.223321
| 2023-08-28T09:02:10
| 2023-08-28T09:02:10
| 145,670,155
| 5,319
| 1,900
|
Apache-2.0
| 2023-09-14T02:37:16
| 2018-08-22T07:05:26
|
Python
|
UTF-8
|
Python
| false
| false
| 4,851
|
py
|
# Copyright (c) OpenMMLab. All rights reserved.
import math
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.nn.modules.utils import _pair
from ..utils import ext_loader
# Load the compiled C++/CUDA extension that provides the masked
# im2col / col2im kernels used by the forward pass below.
ext_module = ext_loader.load_ext(
    '_ext', ['masked_im2col_forward', 'masked_col2im_forward'])
class MaskedConv2dFunction(Function):
    """autograd Function computing a 2-D convolution only at masked positions.

    Forward-only: backward() returns no gradients (see below).
    """
    @staticmethod
    def symbolic(g, features, mask, weight, bias, padding, stride=1):
        # ONNX export: emit the custom mmcv::MMCVMaskedConv2d node.
        return g.op(
            'mmcv::MMCVMaskedConv2d',
            features,
            mask,
            weight,
            bias,
            padding_i=padding,
            stride_i=stride)
    @staticmethod
    def forward(ctx,
                features: torch.Tensor,
                mask: torch.Tensor,
                weight: torch.nn.Parameter,
                bias: torch.nn.Parameter,
                padding: int = 0,
                stride: int = 1) -> torch.Tensor:
        # Only batch size 1 is supported, and the mask must match the
        # input's spatial size exactly.
        assert mask.dim() == 3 and mask.size(0) == 1
        assert features.dim() == 4 and features.size(0) == 1
        assert features.size()[2:] == mask.size()[1:]
        pad_h, pad_w = _pair(padding)
        stride_h, stride_w = _pair(stride)
        if stride_h != 1 or stride_w != 1:
            raise ValueError(
                'Stride could not only be 1 in masked_conv2d currently.')
        out_channel, in_channel, kernel_h, kernel_w = weight.size()
        if features.device.type == 'npu':
            # Ascend NPU path: run a dense convolution, then zero out the
            # positions the mask excludes.
            import torch_npu
            output = torch_npu.npu_conv2d(
                features,
                weight,
                bias,
                stride=(stride_h, stride_w),
                padding=(pad_h, pad_w),
                dilation=(1, 1),
                groups=1)
            if mask.size()[1:] != output.size()[2:]:
                raise ValueError(
                    'The mask is inconsistent with the shape of output_conv.')
            mask = mask > 0
            mask = mask.type(output.dtype)
            output = output * mask
            return output
        batch_size = features.size(0)
        # Standard convolution output size (stride is always 1 here).
        out_h = int(
            math.floor(
                torch.true_divide((features.size(2) + 2 * pad_h -
                                   (kernel_h - 1) - 1), stride_h) + 1))
        out_w = int(
            math.floor(
                torch.true_divide((features.size(3) + 2 * pad_w -
                                   (kernel_w - 1) - 1), stride_w) + 1))
        # (row, col) coordinates of the active (mask > 0) positions.
        mask_inds = torch.nonzero(mask[0] > 0, as_tuple=False)
        output = features.new_zeros(batch_size, out_channel, out_h, out_w)
        if mask_inds.numel() > 0:
            mask_h_idx = mask_inds[:, 0].contiguous()
            mask_w_idx = mask_inds[:, 1].contiguous()
            # Gather one im2col column per active position ...
            data_col = features.new_zeros(in_channel * kernel_h * kernel_w,
                                          mask_inds.size(0))
            ext_module.masked_im2col_forward(
                features,
                mask_h_idx,
                mask_w_idx,
                data_col,
                kernel_h=kernel_h,
                kernel_w=kernel_w,
                pad_h=pad_h,
                pad_w=pad_w)
            # ... multiply by the flattened weights and add the bias.
            # NOTE(review): this uses the deprecated positional beta/alpha
            # form of torch.addmm (removed in newer PyTorch releases) --
            # confirm the supported torch version range.
            masked_output = torch.addmm(1, bias[:, None], 1,
                                        weight.view(out_channel, -1), data_col)
            # Scatter the per-position results back into the dense output.
            ext_module.masked_col2im_forward(
                masked_output,
                mask_h_idx,
                mask_w_idx,
                output,
                height=out_h,
                width=out_w,
                channels=out_channel)
        return output
    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output: torch.Tensor) -> tuple:
        # No gradients for any of the 5 forward inputs: this op is
        # inference-only.
        return (None, ) * 5
masked_conv2d = MaskedConv2dFunction.apply
class MaskedConv2d(nn.Conv2d):
    """Conv2d variant that can restrict computation to masked positions.

    When a mask is supplied, forward delegates to ``masked_conv2d`` (which
    currently only supports stride 1 and has no backward); without a mask
    it behaves exactly like a standard ``nn.Conv2d``.
    """

    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 kernel_size: Union[int, Tuple[int, ...]],
                 stride: int = 1,
                 padding: int = 0,
                 dilation: int = 1,
                 groups: int = 1,
                 bias: bool = True):
        super().__init__(in_channels, out_channels, kernel_size, stride,
                         padding, dilation, groups, bias)

    def forward(self,
                input: torch.Tensor,
                mask: Optional[torch.Tensor] = None) -> torch.Tensor:
        # No mask supplied: fall back to a plain dense convolution.
        if mask is None:
            return super().forward(input)
        # Masked path: compute outputs only at mask > 0 locations.
        return masked_conv2d(input, mask, self.weight, self.bias,
                             self.padding)
|
[
"noreply@github.com"
] |
open-mmlab.noreply@github.com
|
190a872e028d3bf0081c8c7d8339ab39d3feadfb
|
a7b345e3115b97f4a221eb7b68eef8499dcddde7
|
/DNbookproject/settings.py
|
847e5400ad5f5f73a700ece9198eabc783d16a9a
|
[] |
no_license
|
2hwa-jang/SystemDesign2019
|
776b961899a4909d42c5b44194f9548f77d4ea76
|
8138e911a2d6fd7bcc5451e0a8d285435c521639
|
refs/heads/master
| 2023-04-06T18:38:11.168936
| 2019-09-21T01:59:47
| 2019-09-21T01:59:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,228
|
py
|
"""
Django settings for DNbookproject project.
Generated by 'django-admin startproject' using Django 2.1.8.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'l)_=05l01nmfun^$ymr=+fq!4zda-qim2@*6tnmss_26kh!04*'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'main.apps.MainConfig',
'bookmap.apps.BookmapConfig',
'others.apps.OthersConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'DNbookproject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['DNbookproject/templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'DNbookproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
|
[
"twoflowersj@likelion.org"
] |
twoflowersj@likelion.org
|
72405dd20f4b5a3f7da69513ae47ad06ab5a9a70
|
91d8fdf776930ef6bbc895281d10d466426f8dc8
|
/lab_3/main/urls.py
|
fc723cb75091e03c50cd4a1d5877f7a14a668e57
|
[] |
no_license
|
Vetal-V/IK-31-Vrublevskyi
|
07f26d2c9c2522dd43523b810c97d128bb907837
|
e2185e90f4dc64ca1113ececa2692ce3c0511dcc
|
refs/heads/master
| 2022-12-12T03:13:15.213288
| 2019-11-25T22:27:05
| 2019-11-25T22:27:05
| 206,574,884
| 1
| 5
| null | 2022-12-08T06:47:32
| 2019-09-05T13:48:01
|
Python
|
UTF-8
|
Python
| false
| false
| 155
|
py
|
from django.urls import path
from . import views
# URL routes for this app: the index page and a health-check endpoint
# (both handled by views in the sibling views module).
urlpatterns = [
    path('', views.main, name='main'),
    path('health/', views.health, name='health')
]
|
[
"54945817+Vetal-V@users.noreply.github.com"
] |
54945817+Vetal-V@users.noreply.github.com
|
2d9c12e09ff90491664a3c8d3d90a9344e80deb5
|
5deeb3618189cca6f66e91d1ddcb3ce63dede8ff
|
/sample/migrate_rt.py
|
ed0011eddd2cc8ae4f2a36454b4aa83d5b7c6be4
|
[
"MIT"
] |
permissive
|
AzureAD/microsoft-authentication-library-for-python
|
f157efc1ec6c6d91a132f3dc8dc4742d7a309b78
|
bba6b146d6fca64d43eaf313da654c0570ccd497
|
refs/heads/dev
| 2023-09-03T14:34:20.487126
| 2023-08-23T08:10:18
| 2023-08-23T08:10:18
| 67,243,113
| 717
| 188
|
NOASSERTION
| 2023-09-12T16:46:21
| 2016-09-02T17:45:26
|
Python
|
UTF-8
|
Python
| false
| false
| 2,851
|
py
|
"""
The configuration file would look like this:
{
"authority": "https://login.microsoftonline.com/organizations",
"client_id": "your_client_id",
"scope": ["User.ReadBasic.All"],
// You can find the other permission names from this document
// https://docs.microsoft.com/en-us/graph/permissions-reference
}
You can then run this sample with a JSON configuration file:
python sample.py parameters.json
"""
import sys # For simplicity, we'll read config file from 1st CLI param sys.argv[1]
import json
import logging
import msal
# Optional logging
# logging.basicConfig(level=logging.DEBUG) # Enable DEBUG log for entire script
# logging.getLogger("msal").setLevel(logging.INFO) # Optionally disable MSAL DEBUG logs
def get_preexisting_rt_and_their_scopes_from_elsewhere():
    """Stand-in for your existing token store.

    In a real migration you would extract refresh tokens (RTs) from whatever
    your previous app persisted -- e.g. an ADAL-powered cache -- and convert
    each v1 resource into v2 scopes (often by appending "/.default").
    See:
    https://docs.microsoft.com/azure/active-directory/develop/azure-ad-endpoint-comparison#scopes-not-resources
    https://docs.microsoft.com/azure/active-directory/develop/v2-permissions-and-consent#the-default-scope

    Returns:
        A list of (refresh_token, scopes) pairs.
    """
    legacy_entries = [
        ("old_rt_1", ["scope1", "scope2"]),
        ("old_rt_2", ["scope3", "scope4"]),
    ]
    return legacy_entries
# We will migrate all the old RTs into a new app powered by MSAL
# We will migrate all the old RTs into a new app powered by MSAL.
# Fix: open the config file via a context manager so the handle is closed
# deterministically (the original json.load(open(...)) leaked it).
with open(sys.argv[1]) as config_file:
    config = json.load(config_file)
app = msal.PublicClientApplication(
    config["client_id"], authority=config["authority"],
    # token_cache=...  # Default cache is in memory only.
                       # You can learn how to use SerializableTokenCache from
                       # https://msal-python.readthedocs.io/en/latest/#msal.SerializableTokenCache
    )
# We choose a migration strategy of migrating all RTs in one loop.
for old_rt, scopes in get_preexisting_rt_and_their_scopes_from_elsewhere():
    result = app.acquire_token_by_refresh_token(old_rt, scopes)
    if "error" in result:
        print("Discarding unsuccessful RT. Error: ", json.dumps(result, indent=2))
print("Migration completed")
# From now on, those successfully-migrated RTs are saved inside MSAL's cache,
# and becomes available in normal MSAL coding pattern, which is NOT part of migration.
# You can refer to:
# https://github.com/AzureAD/microsoft-authentication-library-for-python/blob/1.2.0/sample/device_flow_sample.py#L42-L60
|
[
"rayluo@microsoft.com"
] |
rayluo@microsoft.com
|
a5603e0af4ea367bc20eb9e4e323f02202057014
|
2976c5e9c534ace3e25674ace113ed3d920cc05c
|
/segment/tests/test_video_segment.py
|
79e414c4fc2170204cced6bb9221cbc46aefa015
|
[] |
no_license
|
nstanger/process_podcast
|
cc1d5d3813dc002b52a2cf169bf2d378bfb83952
|
16761c7dc62b036decedd67c24529d198b9d2a85
|
refs/heads/master
| 2021-08-28T15:47:49.280134
| 2021-08-23T10:30:30
| 2021-08-23T10:30:30
| 68,779,493
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,185
|
py
|
import unittest
from segment import VideoSegment
from segment.tests import SegmentSharedTestCase
class VideoSegmentTestCase(SegmentSharedTestCase):
    """Test the VideoSegment class."""
    # Expected fixture constants specific to video segments.
    EXPECTED_TYPE = "video"
    EXPECTED_TRIM = "trim"
    EXPECTED_SETPTS = "setpts"
    def setUp(self):
        """Set up for test: build a VideoSegment from the shared fixture values."""
        super().setUp()
        self.segment = VideoSegment(
            file=self.EXPECTED_INPUT_FILE,
            punch_in=self.EXPECTED_PUNCH_IN,
            punch_out=self.EXPECTED_PUNCH_OUT,
            input_stream=self.EXPECTED_INPUT_STREAM)
        # A video segment maps the chosen input stream's video track ("{n}:v").
        self.expected_output_options = [
            "-map", "{n}:v".format(n=self.segment.input_stream)
        ]
    def test_init(self):
        """Test segment initialises correctly (shared checks plus the
        video-specific temp-frame-file default)."""
        super().test_init()
        self.assertEqual(
            self.segment._temp_frame_file, "",
            msg="temp frame file = ")
    # Tricky to test get_last_frame_number() and generate_frame()
    # because they use pexpect.
# Remove SegmentSharedTestCase from the namespace so we don't run
# the shared tests twice. See <https://stackoverflow.com/a/22836015>.
del(SegmentSharedTestCase)
|
[
"nigel.stanger@otago.ac.nz"
] |
nigel.stanger@otago.ac.nz
|
9d8f4764c428aeb806bc2a0181ea73ec56947e39
|
25c1df7ee36377eb98e197da79005fc80c6d37a7
|
/imageprocessing/noise.py
|
b22603ff3777d3af477161fba0c211779daf8cfb
|
[] |
no_license
|
hbzhang/computervisionclass
|
08f628877da84a2e9b198663a940b4da9f1cb900
|
78222ba8fe43641bf663daf334bfbe20b3863929
|
refs/heads/master
| 2020-09-05T12:10:06.936668
| 2019-11-19T12:15:11
| 2019-11-19T12:15:11
| 218,754,623
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 504
|
py
|
from scipy import misc
import numpy as np
import matplotlib.pyplot as plt
from scipy import ndimage
# Load the sample grayscale image and crop a small patch.
# NOTE(review): scipy.misc.face was deprecated and later moved to
# scipy.datasets.face -- confirm the pinned SciPy version.
face = misc.face(gray=True).astype(float)
face = face[230:290, 220:320]
# Add a constant brightness offset (0.5*std) plus uniform [0, 1) noise.
# NOTE(review): the common recipe scales the noise by the std instead
# (face + 0.5*face.std()*np.random.random(...)) -- presumably intended; verify.
face = face + 0.5*face.std()+ np.random.random(face.shape)
smooth = ndimage.gaussian_filter(face, 2)  # Gaussian blur, sigma = 2
filtered = ndimage.median_filter(face,2)   # median filter, size = 2
# Show the noisy, smoothed and median-filtered images side by side.
plt.figure()
plt.subplot(131)
plt.imshow(face,cmap=plt.cm.gray)
plt.subplot(132)
plt.imshow(smooth, cmap=plt.cm.gray)
plt.subplot(133)
plt.imshow(filtered,cmap=plt.cm.gray)
plt.show()
|
[
"hbzhang@vt.edu"
] |
hbzhang@vt.edu
|
e2fc3bb25435503cc25f03d7a8b9f78264677b50
|
23cf8f26d17d760fc3a937ad3a56ebc72520d458
|
/find digits.py
|
3118e71fee3a2c15027bdd1bb438ffc0162b0e7e
|
[] |
no_license
|
ab6995/algorithm-codes-in-python
|
2157f122e0578798c48c6c37356355e1f74963af
|
445d8e75dda2af4ff0e64453bbea8555c444a982
|
refs/heads/master
| 2021-01-21T20:33:38.676200
| 2017-06-19T01:54:36
| 2017-06-19T01:54:36
| 92,246,959
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 492
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 5 15:08:12 2017
@author: ashishbansal
problem:
Given an integer,N, traverse its digits (1,2,...,n) and determine how many
digits evenly divide N (i.e.: count the number of times N divided by
each digit i has a remainder of 0 ). Print the number of evenly
divisible digits.
"""
import sys  # NOTE(review): unused in this script
t = int(input().strip())  # number of test cases
for a0 in range(t):
    n = int(input().strip())
    # Count the digits of n that are nonzero and divide n evenly
    # (zero digits are skipped to avoid division by zero).
    print(len([x for x in list(str(n)) if int(x)!=0 and n%int(x)==0]))
|
[
"noreply@github.com"
] |
ab6995.noreply@github.com
|
23857be0068cb1b58270601d7ea42d5393abbad8
|
2dfbb97b47fd467f29ffb26faf9a9f6f117abeee
|
/leetcode/84.py
|
a8c43f1a9b98fb79e9c45be4a3ddfa8e973b06fe
|
[] |
no_license
|
liuweilin17/algorithm
|
0e04b2d36dfb6b7b1b0e0425daf69b62273c54b5
|
d3e8669f932fc2e22711e8b7590d3365d020e189
|
refs/heads/master
| 2020-12-30T11:03:40.085105
| 2020-04-10T03:46:01
| 2020-04-10T03:46:01
| 98,844,919
| 3
| 1
| null | 2018-10-05T03:01:02
| 2017-07-31T03:35:14
|
C++
|
UTF-8
|
Python
| false
| false
| 4,644
|
py
|
###########################################
# Let's Have Some Fun
# File Name: 84.py
# Author: Weilin Liu
# Mail: liuweilin17@qq.com
# Created Time: Fri Sep 27 11:00:23 2019
###########################################
#coding=utf-8
#!/usr/bin/python
# 84. Largest Rectangle in Histogram
class SegmentNode:
    """Segment-tree node covering the index range [begin, end] (inclusive)."""
    def __init__(self, begin, end):
        self.min_v = -1      # index of the minimum height within [begin, end]; -1 until set
        self.begin = begin   # left bound of the covered range
        self.end = end       # right bound of the covered range
        self.left = None     # child covering the left half
        self.right = None    # child covering the right half
class Solution:
    """LeetCode 84 (Largest Rectangle in Histogram): five approaches, from
    brute force to the O(n) monotonic-stack solution."""
    # O(n^3), time limit exceed
    def largestRectangleArea1(self, heights: List[int]) -> int:
        """Brute force: try every (i, j) span, take min height * width."""
        N = len(heights)
        ret = 0
        for i in range(N):
            for j in range(i, N):
                ret = max(ret, min(heights[i:j+1])*(j-i+1))
        return ret
    # O(n^2), time limit exceed
    def largestRectangleArea2(self, heights: List[int]) -> int:
        """Brute force with a running minimum to avoid re-scanning the span."""
        N = len(heights)
        ret = 0
        for i in range(N):
            min_h = heights[i]
            for j in range(i, N):
                min_h = min(min_h, heights[j])
                ret = max(ret, min_h*(j-i+1))
        return ret
    # divide and conquer
    # the maximum area of rectangle is one of these:
    # 1. minimum height * number of bars
    # 2. maximum area of bars on the left of minimum height
    # 3. maximum area of bars on the right of minimum height
    # average O(nlogn)
    # worst O(n^2) when heights are sorted
    # time limit exceed
    def largestRectangleArea3(self, heights: List[int]) -> int:
        """Divide and conquer around the minimum bar (linear scan per level)."""
        def helper(begin, end):
            # Recursively solve heights[begin..end] (inclusive).
            if begin > end: return 0
            min_ind = begin
            min_height = heights[min_ind]
            for i in range(begin+1, end+1):
                if heights[i] < min_height:
                    min_ind = i
                    min_height = heights[i]
            a1 = min_height * (end - begin + 1)
            a2 = helper(begin, min_ind-1)
            a3 = helper(min_ind+1, end)
            return max([a1, a2, a3])
        N = len(heights)
        return helper(0, N-1)
    # divide and conquer with segment tree
    def largestRectangleArea4(self, heights: List[int]) -> int:
        """Same recursion as #3, but the minimum lookup uses a segment tree
        (O(log n) per query instead of a linear scan)."""
        # build segment tree for find mininum value in heights
        def buildSegmentTree(begin, end):
            if begin > end: return None
            root = SegmentNode(begin, end)
            if begin == end:
                root.min_v = begin
                return root
            else:
                middle = (begin + end) // 2
                root.left = buildSegmentTree(begin, middle)
                root.right = buildSegmentTree(middle+1, end)
                root.min_v = root.left.min_v if heights[root.left.min_v] < heights[root.right.min_v] else root.right.min_v
                return root
        # find the mininum value in segment tree
        def query(nd, begin, end):
            # No overlap between the query range and this node's range.
            if nd == None or begin > nd.end or end < nd.begin:
                return -1
            # Node range fully inside the query range: answer is precomputed.
            # I don't know why, check the review updates below this solution
            if begin <= nd.begin and end >= nd.end:
                return nd.min_v
            left_min = query(nd.left, begin, end)
            right_min = query(nd.right, begin, end)
            if left_min == -1: return right_min
            if right_min == -1: return left_min
            return left_min if heights[left_min] < heights[right_min] else right_min
        def helper(begin, end):
            if begin > end: return 0
            elif begin == end: return heights[begin]
            else: pass
            min_ind = query(root, begin, end)
            # NOTE(review): debug print left in; remove for production use.
            print(begin, end, min_ind)
            min_height = heights[min_ind]
            a1 = min_height * (end - begin + 1)
            a2 = helper(begin, min_ind-1)
            a3 = helper(min_ind+1, end)
            return max([a1, a2, a3])
        N = len(heights)
        root = buildSegmentTree(0, N-1)
        return helper(0, N-1)
    # stack
    # st[-1] is the local maximum heights, we calcuate the area from its left to st[-1], all the heights on the left is in the stack and smaller than it
    def largestRectangleArea5(self, heights: List[int]) -> int:
        """O(n) monotonic stack: each bar is pushed/popped once; on pop, its
        rectangle spans from the new stack top to the current index."""
        st = [-1] # use -1 to calculate the minimum width
        N = len(heights)
        max_area = 0
        for i in range(N):
            while st[-1] != -1 and heights[st[-1]] >= heights[i]:
                max_area = max(max_area, heights[st.pop()] * (i - st[-1] - 1))
            st.append(i)
        # Drain bars still on the stack; their rectangles extend to the end.
        while st[-1] != -1:
            max_area = max(max_area, heights[st.pop()] * (len(heights) - st[-1] -1))
        return max_area
|
[
"liuweilin17@qq.com"
] |
liuweilin17@qq.com
|
829d0ce6be6a5c845838862167b457e1546ae0e6
|
e1e3a1d7fbb3de1e0baf0f9cbd47fa84472bf830
|
/1431 Kids With the Greatest Number of Candies.py
|
c8ea14aec7f07917acae91c7ce0a6ba0d69026ff
|
[] |
no_license
|
ihseo/leetcode
|
dceafca7ea3fdc3fb7e4893a29d9675bfb68c6f5
|
fad74d8040546553743df3b4556b39466516389c
|
refs/heads/master
| 2023-01-06T19:52:26.515176
| 2020-11-08T18:40:36
| 2020-11-08T18:40:36
| 289,858,757
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 370
|
py
|
from typing import List
class Solution:
    def kidsWithCandies(self, candies: List[int], extraCandies: int) -> List[bool]:
        """Return, per kid, whether candies[i] + extraCandies reaches the
        current maximum candy count."""
        richest = max(candies)
        return [count + extraCandies >= richest for count in candies]
|
[
"inhyukseo1@gmail.com"
] |
inhyukseo1@gmail.com
|
9714bf6b0dc7ce455e2b0df2c5143bb88161a1a6
|
bbe684bcef13f561603779c79d77d303cedf3a94
|
/brewmaster/urls.py
|
594bf84e0ce188dce72f55db58ab4a8807335537
|
[] |
no_license
|
mkuch90/BrewDuino
|
464506b0c5acbf6a0b608ceb7f0077db26a35c76
|
0b69cac7b7781b7218d0eeadd45a233a0fb55362
|
refs/heads/master
| 2020-12-25T17:34:09.317599
| 2016-08-22T01:15:28
| 2016-08-22T01:15:28
| 33,043,922
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 370
|
py
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
# Discover each installed app's admin.py so its models register with the
# admin site.
admin.autodiscover()
# NOTE(review): patterns() was deprecated in Django 1.8 and removed in 1.10;
# this urlconf targets an older Django release -- confirm the pinned version.
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'brewmaster.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    url(r'^brewery/', include('brewery.urls', namespace='brewery')),
    url(r'^admin/', include(admin.site.urls)),
)
|
[
"mkuch90@gmail.com"
] |
mkuch90@gmail.com
|
6c00c973d704931def41279998e890951055fc40
|
3b964e140fd2d912353257897031d22771509382
|
/program 8/Wumpus World.py
|
76ca924bfb07861a75416848d45d13f92e1f37f8
|
[] |
no_license
|
adarrah/CS-101
|
72c4528ca0d75405f5274b5e79eaf8aef3d9848c
|
7b1e58328c5bb1a61f2b4412d349380551bfa56b
|
refs/heads/master
| 2021-07-04T16:13:03.627006
| 2017-09-26T21:46:37
| 2017-09-26T21:46:37
| 104,941,203
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,979
|
py
|
import random
## creates an OffMapError that can be thrown
class OffMapError(Exception):
    """Raised when the player attempts to move off the map."""
    pass
## creates cell class for the map in wumpusworld.py
class Cell(object):
    ## class-level defaults; Map.reset() sets instance attributes that
    ## shadow these when a cell gains an obstacle
    has_wumpus = False
    has_gold = False
    has_pit = False
    ## create a cell at grid position (row, col)
    def __init__(self, row, col):
        self.row = row
        self.col = col
    ## just a string representation I made for testing during development, this is ignorable
    """def __str__(self):
        if self.has_wumpus == True and self.has_pit == True and self.has_gold == True:
            return "{:^5}".format("WGP.")
        elif self.has_wumpus == True and self.has_gold == True:
            return "{:^5}".format("WG")
        elif self.has_gold == True and self.has_pit == True:
            return "{:^5}".format("GP")
        if self.has_wumpus == True and self.has_pit == True:
            return "{:^5}".format("WP")
        elif self.has_pit == True:
            return "{:^5}".format("P")
        elif self.has_wumpus ==True:
            return "{:^5}".format("W")
        elif self.has_gold ==True:
            return "{:^5}".format("G")
        else:
            return "({}:{})".format(self.row,self.col)"""
## creates a map class to be used by wumpusworld.py
class Map(object):
    ## initializes a grid list (rebuilt per-instance by reset())
    grid = []
    ## creates an instance of map
    def __init__(self):
        self.reset()
    ## determines if the given coordinates are in the grid list
    # NOTE(review): reset() builds a 5x5 grid (indices 0-4) and places
    # obstacles with randint(0, 4), but these bounds checks use range(0, 4)
    # (i.e. 0-3). Looks like an off-by-one; confirm the intended grid size.
    def onGrid(self,r,c):
        """Determines if the given coordinates are in the grid list if they are return true, otherwise return false"""
        if r in range(0,4) and c in range(0,4):
            return True
        return False
    ## determies if the given coordinates are not in the grid list
    def offGrid(self,r,c):
        """Determines if the given coordinates are not in the grid list if they aren't returns true, otherwise returns false"""
        if r not in range(0,4) or c not in range(0,4):
            return True
        return False
    ## determines if the squares surrounding the given square have the wumpus
    def isSmelly(self,r,c):
        """determines if the squares surrounding the given square are on the grid and have the wumpus, if any of them do it will return true, otherwise it returns false"""
        if self.onGrid(r,c+1) and self.hasWumpus(r,c+1):
            return True
        elif self.onGrid(r,c-1) and self.hasWumpus(r,c-1):
            return True
        elif self.onGrid(r+1,c) and self.hasWumpus(r+1,c):
            return True
        elif self.onGrid(r-1,c) and self.hasWumpus(r-1,c):
            return True
        return False
    ## determines if the squares surrounding the given square have pits
    def isBreezy(self,r,c):
        """determines if the squares surrounding the given square are on the grid and have pits, if any of them do it will return true, otherwise it returns false"""
        if self.onGrid(r,c+1) and self.hasPit(r,c+1):
            return True
        elif self.onGrid(r,c-1) and self.hasPit(r,c-1):
            return True
        elif self.onGrid(r+1,c) and self.hasPit(r+1,c):
            return True
        elif self.onGrid(r-1,c) and self.hasPit(r-1,c):
            return True
        return False
    ## determines if the given square has the gold
    def isGlinty(self,r,c):
        """determines if the given square has the gold if it does returns true, otherwise returns false"""
        if self.grid[r][c].has_gold == True:
            return True
        return False
    ## determines if the given square has the gold
    def grabGold(self,r,c):
        """determines if the given square has the gold if it does then it sets the cells has_gold to false """
        if self.hasGold(r,c):
            self.grid[r][c].has_gold = False
    ## determines if the given square has the wumpus
    def hasWumpus(self,r,c):
        """determines if the given square has the wumpus if it does then returns true, otherwise returns false"""
        if self.grid[r][c].has_wumpus == True:
            return True
        return False
    ## determines if the given square has the gold in it
    def hasGold(self,r,c):
        """determines if the given square has the gold in it if it does returns true, otherwise returns fase"""
        if self.grid[r][c].has_gold == True:
            return True
        return False
    ## determines if the given square has pit
    def hasPit(self,r,c):
        """determines if the given square has pit if it does returns true, otherwise returns false"""
        if self.grid[r][c].has_pit == True:
            return True
        return False
    ## resets the map
    def reset(self):
        """Resets the two dimensional list grid and repopulates it with cells and puts pits, the wumpus and the gold on the map"""
        self.grid = []
        ## creates a two dimensional list of cells by making a temporary list and adding cells to it and appending it to grid
        for row in range(0, 5):
            temp_list = []
            for col in range(0, 5):
                temp_list.append(Cell(row, col))
            self.grid.append(temp_list)
        ## creates the obstacles to on the map by running through the loop until seven obstacles have been place on the grid with no pits on the same square
        obstacle_count = 0
        while obstacle_count < 7:
            ## generates random row and column
            rand_row = random.randint(0, 4)
            rand_col = random.randint(0, 4)
            ## the first five obstacles will be the five pits
            # NOTE(review): `rand_row != 0 and rand_col != 0` keeps obstacles
            # out of ALL of row 0 and column 0, not just the start cell (0,0)
            # -- confirm whether that is intended.
            if obstacle_count < 5:
                if self.hasPit(rand_row,rand_col) == False and (rand_row != 0 and rand_col != 0):
                    self.grid[rand_row][rand_col].has_pit = True
                    obstacle_count += 1
            ## the sixth object is the wumpus
            elif obstacle_count == 5:
                if rand_row != 0 and rand_col != 0:
                    self.grid[rand_row][rand_col].has_wumpus = True
                    obstacle_count += 1
            ## finally the gold is placed
            else:
                if rand_row != 0 and rand_col != 0:
                    self.grid[rand_row][rand_col].has_gold = True
                    obstacle_count += 1
    """def __str__(self):
        grid_str = ""
        for row in range(4,-1,-1):
            for col in range(0,5):
                grid_str += str(self.grid[row][col])
            grid_str += "\n"
        return grid_str"""
class WumpusWorld(object):
## initializes the map and variables
def __init__(self):
self.worldmap = Map()
self.wumpusAlive = True
self.playerAlive = True
self.playerHasGold = False
self.playerHasArrow = True
self.playerMoves = 0
self.playerRow = 0
self.playerCol = 0
## moves the user east or throws an OffMapError
def stepEast(self):
"""Moves the user east if they are on the grid if they aren't it will throw an OffMapError"""
if self.worldmap.offGrid(self.playerRow, self.playerCol + 1):
self.playerMoves += 1
raise map.OffMapError("You're out of bounds dingus!")
else:
self.playerCol += 1
self.playerMoves += 1
## moves the user west or throws an OffMapError
def stepWest(self):
"""Moves the user west if they are on the grid if they aren't it will throw an OffMapError"""
if self.worldmap.offGrid(self.playerRow, self.playerCol - 1):
self.playerMoves += 1
raise map.OffMapError("You're out of bounds dingus!")
else:
self.playerCol -= 1
self.playerMoves += 1
## moves the user north or throws an OffMapError
def stepNorth(self):
"""Moves the user north if they are on the grid if they aren't it will throw an OffMapError"""
if self.worldmap.offGrid(self.playerRow + 1, self.playerCol):
self.playerMoves += 1
raise map.OffMapError("You're out of bounds dingus!")
else:
self.playerRow += 1
self.playerMoves += 1
## moves the user south or throws an OffMapError
def stepSouth(self):
"""Moves the user south if they are on the grid if they aren't it will throw an OffMapError"""
if self.worldmap.offGrid(self.playerRow - 1, self.playerCol):
self.playerMoves += 1
raise map.OffMapError("You're out of bounds dingus!")
else:
self.playerRow -= 1
self.playerMoves += 1
## grabs the gold if it is in the square
def grabGold(self):
"""Grabs the gold if the user and gold are in the same square and returns true, otherwise returns false"""
self.playerMoves += 1
if self.worldmap.grid[self.playerRow][self.playerCol].hasGold():
self.worldmap.grid[self.playerRow][self.playerCol].grabGold()
self.playerHasGold = True
return True
return False
## fires an arrrow in the given direction
def fire(self, direction):
"""Fires an arrow in the given direction and if it hits the wumpus returns true, otherwise it returns false but still fires the arrow"""
self.playerMoves += 1
self.playerHasArrow = False
if direction == "south" or direction[0] == "s":
for row in range(self.playerRow, -1, -1):
if self.worldmap.grid[row][self.playerCol].has_wumpus == True:
self.wumpusAlive = False
return True
if direction == "north" or direction[0] == "n":
for row in range(self.playerRow, 4):
if self.worldmap.grid[row][self.playerCol].has_wumpus == True:
self.wumpusAlive = False
return True
if direction == "west" or direction[0] == "w":
for row in range(self.playerCol, -1, -1):
if self.worldmap.grid[row][self.playerCol].has_wumpus == True:
self.wumpusAlive = False
return True
if direction == "east" or direction[0] == "e":
for row in range(self.playerRow, 4):
if self.worldmap.grid[row][self.playerCol].has_wumpus == True:
self.wumpusAlive = False
return True
return False
## determines if the user can climb from the current square
def canClimb(self):
"""Determines if the user is in square 0,0 and if they are returns true, otherwise returns false"""
self.playerMoves += 1
if self.playerRow == 0 and self.playerCol == 0:
return True
return False
## determinse if the square the user is in has a breeze
def feelBreeze(self):
"""Determines if the user's square has a breeze by calling the map function isBreezy returns true if there is a breeze, otherwise returns false"""
if self.worldmap.isBreezy(self.playerRow, self.playerCol):
return True
return False
## determines if the user's square has a stench
def smellStench(self):
"""Determines if the user's square has a stench by calling the map function isSmelly returns true if there is a stench, otherwise returns false"""
if self.worldmap.isSmelly(self.playerRow, self.playerCol):
return True
return False
## determines if the user's square has a glint
def seeGlint(self):
"""Determines if the user is in the same square as the gold if it does it returns true, otherwise returns false"""
if self.worldmap.grid[self.playerRow][self.playerCol].has_gold == True:
return True
return False
## determines if the user's square has the wumpus
def hasWumpus(self, ):
"""Determines if the user's square has the wumpus if it does returns true, otherwise returns false"""
if self.worldmap.grid[self.playerRow][self.playerCol].has_wumpus == True:
return True
return False
## determines if the user's square has a pit
def hasPit(self):
"""Determines if the user's square has a pit if it does returns true, otherwise returns false"""
if self.worldmap.grid[self.playerRow][self.playerCol].has_pit == True:
return True
return False
## asks the user if they would like to continue playing the game and returns true if they do and false if they don't
def keep_playing():
while True:
"""asks the user if they would like to continue playing the game and returns true if they do and false if they don't"""
user_in = input("Would you like to play again Y/N ==> ")
if user_in.lower() == 'y':
return True
elif user_in.lower() == "n":
return False
else:
print("Enter Y or N")
cont_game = True
## while cont_game variable is true, cont_game is updated at bottom by calling keep_playing()
while cont_game:
## initialize the wumpusworld instance and create a player score to be updated throughout the program
ww = WumpusWorld()
player_score = 0
print("Welcome to Wumpus World!\nYour goal is to find the goal and avoid the stinky Wumpus")
##print(ww.worldmap)
## while the player is still alive it will run the loop again and ask for another command
while ww.playerAlive:
## prints initial conditions each turn ie. if there is a pit in the column to the right you feel a breeze
if ww.feelBreeze():
print("You feel a breeze.")
if ww.smellStench():
print("You smell an awful stench.")
if ww.seeGlint():
print("You see a glint.")
elif ww.seeGlint() == False and ww.smellStench() == False and ww.feelBreeze() == False:
print("It is dark.")
## gets the user input to determine which move to make
command = input("What would you like to do? ")
## based on the command given it runs the method in wumpus world to make that move
try:
## moves east
if command.lower() == 'east':
ww.stepEast()
## moves west
elif command.lower() == 'west':
ww.stepWest()
## moves north
elif command.lower() == 'north':
ww.stepNorth()
## moves south
elif command.lower() == 'south':
ww.stepSouth()
## climbs out of wumpus world
elif command.lower() == 'climb':
if ww.canClimb():
if ww.playerHasGold:
player_score += 1000
else:
player_score += 100
print("You climb up out of Wumpus World.")
ww.playerAlive = False
else:
print("You cannot climb here!")
## grabs the gold
elif command.lower() == 'grab':
if ww.grabGold():
print("You pick up the gold.")
else:
print("There is no gold here.")
## fires the arrow
elif 'fire' in command.lower():
player_score -= 10
## if you don't have an error the program says there is no arrow
if ww.playerHasArrow == False:
print("You try to fire but you don't have an arrow.")
## if you have an arrow you shoot and if you hit the wumpus
elif ww.fire(command.lower().split()[1]):
print("You shoot an arrow")
print("You hear a terrible scream in the darkness.")
## if you have an arrow and it shoots and doesn't hit the wumpus
else:
print("You shoot an arrow.")
elif command.lower() == 'help':
print("Here are a list of commands you can execute.\nNorth, South, East, West: will move you around wumpus world\n"\
"Grab: will grab the gold if it is in the same space as you\nFire [direction]: will fire in arrow in the cardinal direction you give it"\
"\nClimb: will climb out of Wumpus World if you in the starting space")
else:
print("That is not a valid move.")
except OffMapError:
print("You feel a bump as you run into the wall")
except IndexError:
print("You must enter a direction to fire your arrow.")
player_score -= 1
if ww.hasPit():
print("You have fallen into a pit.")
player_score = 0
ww.playerAlive = False
elif ww.hasWumpus():
if ww.wumpusAlive:
print("There is a live Wumpus here!")
print("You were eaten by the Wumpus")
player_score = 0
ww.playerAlive = False
else:
print("There is a dead Wumpus here.")
print("You scored {} points!".format(player_score))
cont_game = keep_playing()
|
[
"a.darrah2016@gmail.com"
] |
a.darrah2016@gmail.com
|
d2430ad0aef9831b0b7820b8fd81409d3462449f
|
9974b280251db24fe418ddcfdc6e6ad56e773274
|
/test2.py
|
a6deb0272431072270e03b3de84a02ecc2a45d55
|
[] |
no_license
|
CFP106022106/10720PHYS290000
|
8b96ca3f3dbe8ef9356f9d66fcb1d9fd35618abe
|
c30426695be85af8f9164b8af99aab2b56b02a69
|
refs/heads/master
| 2020-04-27T11:55:34.393792
| 2019-04-21T08:19:49
| 2019-04-21T08:19:49
| 174,314,564
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 135
|
py
|
def censor(text, word):
return(text.replace(word, '*' * len(word)))
print censor("this hack is wack hack", "hack")
print 'test'
|
[
"noreply@github.com"
] |
CFP106022106.noreply@github.com
|
64d2739c958463fe01893743172e5b7fa905a74f
|
c39bdc5f7c3c59b628b4fb92fcdd95527f562933
|
/cifar10_train.py
|
2d4f28b98c6c921fa5e7c1612395f8b0c8af23e2
|
[] |
no_license
|
weixun2018/cifar10_serving
|
d6e8eddf2843d5cd9e12c91336f30b723e2c13bf
|
1fd639b4fbfb5d35249fbc2045e666d2b1ebdfb2
|
refs/heads/master
| 2020-03-24T02:39:26.092176
| 2018-07-26T03:49:34
| 2018-07-26T03:54:29
| 142,385,392
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,802
|
py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A binary to train CIFAR-10 using a single GPU.
Accuracy:
cifar10_train.py achieves ~86% accuracy after 100K steps (256 epochs of
data) as judged by cifar10_eval.py.
Speed: With batch_size 128.
System | Step Time (sec/batch) | Accuracy
------------------------------------------------------------------
1 Tesla K20m | 0.35-0.60 | ~86% at 60K steps (5 hours)
1 Tesla K40m | 0.25-0.35 | ~86% at 100K steps (4 hours)
Usage:
Please see the tutorial and website for how to download the CIFAR-10
data set, compile the program and train the model.
http://tensorflow.org/tutorials/deep_cnn/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import time
import tensorflow as tf
import cifar10
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', 'result/cifar10_train',
"""Directory where to write event logs """
"""and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 100000,
"""Number of batches to run.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
"""Whether to log device placement.""")
tf.app.flags.DEFINE_integer('log_frequency', 1000,
"""How often to log results to the console.""")
def train():
"""Train CIFAR-10 for a number of steps."""
with tf.Graph().as_default():
global_step = tf.train.get_or_create_global_step()
# Get images and labels for CIFAR-10.
# Force input pipeline to CPU:0 to avoid operations sometimes ending up on
# GPU and resulting in a slow down.
with tf.device('/cpu:0'):
images, labels = cifar10.distorted_inputs()
# Build a Graph that computes the logits predictions from the
# inference model.
logits = cifar10.inference(images)
# Calculate loss.
loss = cifar10.loss(logits, labels)
# Build a Graph that trains the model with one batch of examples and
# updates the model parameters.
train_op = cifar10.train(loss, global_step)
class _LoggerHook(tf.train.SessionRunHook):
"""Logs loss and runtime."""
def begin(self):
self._step = -1
self._start_time = time.time()
def before_run(self, run_context):
self._step += 1
return tf.train.SessionRunArgs(loss) # Asks for loss value.
def after_run(self, run_context, run_values):
if self._step % FLAGS.log_frequency == 0:
current_time = time.time()
duration = current_time - self._start_time
self._start_time = current_time
loss_value = run_values.results
examples_per_sec = FLAGS.log_frequency * FLAGS.batch_size / duration
sec_per_batch = float(duration / FLAGS.log_frequency)
format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
'sec/batch)')
print(format_str % (datetime.now(), self._step, loss_value,
examples_per_sec, sec_per_batch))
with tf.train.MonitoredTrainingSession(
checkpoint_dir=FLAGS.train_dir,
hooks=[tf.train.StopAtStepHook(last_step=FLAGS.max_steps),
tf.train.NanTensorHook(loss),
_LoggerHook()],
config=tf.ConfigProto(
log_device_placement=FLAGS.log_device_placement)) as mon_sess:
while not mon_sess.should_stop():
mon_sess.run(train_op)
def main(argv=None): # pylint: disable=unused-argument
cifar10.maybe_download_and_extract()
if tf.gfile.Exists(FLAGS.train_dir):
tf.gfile.DeleteRecursively(FLAGS.train_dir)
tf.gfile.MakeDirs(FLAGS.train_dir)
train()
if __name__ == '__main__':
tf.app.run()
|
[
"weixun@caicloud.io"
] |
weixun@caicloud.io
|
268fbaffa9b674513dd7b5d44308a1d04c4f2120
|
c760bb1a1cb5d0003ae97d855dd2f295b6c25235
|
/alpha.py
|
042f998dfb4edcc16acf33cff52ffb308e7e24d9
|
[] |
no_license
|
azeem6755/Star-Cast-Recommendataion
|
1b9c2797e30aa3c5a923cecac9451cfe59ff4350
|
e6cae892a4dbbbf90982da20656d3fa6ab60f033
|
refs/heads/master
| 2023-02-18T20:26:25.880008
| 2021-01-21T18:29:02
| 2021-01-21T18:29:02
| 204,743,130
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,589
|
py
|
from PyQt5 import QtWidgets, uic
#import time
import pandas as pd
import numpy as np
from mlxtend.frequent_patterns import association_rules
import random
def eval_func(a):
return eval(str(a))
def mul_func(a):
return int(a)*500
def make_list(a):
return list(a)
def div_func(a):
return a/500
def getItemsUsed(w,c):
# item count
i = c.__len__()-1
# weight
currentW = len(c[0])-1
# set everything to not marked
marked = []
for i in range(i+1):
marked.append(0)
while (i >= 0 and currentW >=0):
# if this weight is different than
# the same weight for the last item
# then we used this item to get this profit
#
# if the number is the same we could not add
# this item because it was too heavy
if (i==0 and c[i,np.int(currentW)] >0 )or c[i,np.int(currentW)] != c[i-1,np.int(currentW)]:
marked[i] =1
currentW = currentW-w[i]
i = i-1
return marked
# v = list of item values or profit
# w = list of item weight or cost
# W = max weight or max cost for the knapsack
def zeroOneKnapsack(profit, weight, total):
n = profit.__len__()
selection = np.zeros((n,total+1))
for i in range(0,n):
for j in range(0,total+1):
if (weight[i] > j):
selection[i,j] = selection[i-1,j]
else:
selection[i,j] = np.maximum(selection[i-1,j],profit[i]+selection[i-1,j-np.int(weight[i])])
return [selection[n-1,np.int(total)],getItemsUsed(weight,selection)]
def recommendation():
df = pd.read_csv('files/newresultapriori.csv')
df['itemsets'] = df['itemsets'].apply(eval_func)
df['support'] = df['support'].apply(eval_func)
df2 = pd.read_csv('files/TopActors.csv')
df2 = df2.drop(columns=['Unnamed: 0'])
df2['Top 10 Actors'] = df2['Top 10 Actors'].apply(eval_func)
req = pd.read_csv('req.csv')
req['normalizedRating'] = req['normalizedRating'].apply(eval_func)
req['googleHits'] = req['googleHits'].apply(eval_func).apply(div_func)
rules = association_rules(df,metric='lift',min_threshold=10)
genre_list = ['Adventure','Action','Comedy','Crime','Drama','Family','Fantasy','Thriller','Romance','Horror','Musical']
print(genre_list)
bud1 = int(dig.budgetBox.toPlainText())
budget = bud1*100000
input_genre = dig.genreBox.currentText()
abascus = list(df2[df2['Genre'] == input_genre.capitalize()]['Top 10 Actors'])
top_actor = abascus[0][random.randint(0,9)][0]
c = []
a = list(rules[rules['antecedents'].apply(lambda x:set([top_actor]).issubset(x))]['consequents'])
for each in a:
if len(eval_func(each))>1:
c.append(each)
supporting_actors = list(frozenset.union(*c))
supporting_actors.append(top_actor)
daa = req.loc[req['actorName'].isin(supporting_actors)]
profit = list(daa['normalizedRating'].values)
cost = list(daa['googleHits'].values)
W = np.int(div_func(budget))
#W = 50
if W<0:
print('Budget too low for the desired genre')
select = zeroOneKnapsack(profit,cost,W)
print(select)
name_index = []
for i in range(0,len(select[1])):
if select[1][i] == 1:
name_index.append(i)
#print(name_index)
bud = []
for each in name_index:
bud.append(cost[int(each)])
name = []
for each in name_index:
name.append(supporting_actors[int(each)])
#print(name)
to = mul_func(sum(bud))
if to/100000>bud1:
recommendation()
else:
res.show()
res.finalBudget.setText(str(to/100000))
res.actorList.clear()
res.actorList.addItems(name)
def actorEvaluation():
budget = float(dig.budgetBox.toPlainText())
genre = dig.genreBox.currentText()
noOfActors = str(dig.noOfActors.value())
res.show()
#time.sleep(5)
res.actorList.clear()
res.actorList.addItems([str(budget),genre,noOfActors])
app = QtWidgets.QApplication([])
dig = uic.loadUi("projectUI.ui")
res = uic.loadUi("result.ui")
genreList = ['Adventure','Action','Comedy','Crime','Drama','Family','Fantasy','Thriller','Romance','Horror','Musical']
dig.genreBox.addItems(genreList)
dig.submit.clicked.connect(recommendation)
res.refreshResult.clicked.connect(recommendation)
dig.setWindowTitle("Star Cast Prediction")
res.setWindowTitle("Optimal star cast")
dig.show()
app.exec()
|
[
"noreply@github.com"
] |
azeem6755.noreply@github.com
|
161c6671e458fed554bf825c179cc4b7abb336c1
|
96aab9f77de8170ae93004d699bd0b11e820b2d4
|
/rest/app/user/urls.py
|
c0b6dccc152b56208b129bc04563ca5b6a09e9fd
|
[] |
no_license
|
JasoSalgado/rest-app
|
8dbc842d6de0ec705fd04bc94e79ee75ad80f2e2
|
3d1662800bd1e98142a0edca244c82498cc4832b
|
refs/heads/master
| 2022-11-15T15:25:44.135084
| 2020-07-16T14:58:59
| 2020-07-16T14:58:59
| 280,182,647
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 218
|
py
|
from django.conf.urls import url
from rest.app.user.views import UserRegistrationView, UserLoginView
urlpatterns = [
url(r'^signup', UserRegistrationView.as_view()),
url(r'^signin', UserLoginView.as_view()),
]
|
[
"jaso_98@hotmail.com"
] |
jaso_98@hotmail.com
|
bb929042988bad316efb3fc70aa8a31053d755a3
|
3964a8103441a7b881c76cd8fcb7658ad2649657
|
/venv/Scripts/vba_extract.py
|
f551ad8f242375a951d1850a3cce4ac5cfb765a3
|
[] |
no_license
|
cjdeguzman95/PythonWeek
|
d780b90701ef75f386c404e821990d835b6ae53e
|
2d49e6132c27b3e2c0d57bb517f34ce2e216a08e
|
refs/heads/master
| 2023-08-16T12:58:19.149820
| 2023-08-05T22:11:16
| 2023-08-05T22:11:16
| 247,747,963
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,862
|
py
|
#!C:\Users\CJ De Guzman\PycharmProjects\cj1\venv\Scripts\python.exe
##############################################################################
#
# vba_extract - A simple utility to extract a vbaProject.bin binary from an
# Excel 2007+ xlsm file for insertion into an XlsxWriter file.
#
# Copyright 2013-2020, John McNamara, jmcnamara@cpan.org
#
import sys
from zipfile import ZipFile
from zipfile import BadZipfile
# The VBA project file we want to extract.
vba_filename = 'vbaProject.bin'
# Get the xlsm file name from the commandline.
if len(sys.argv) > 1:
xlsm_file = sys.argv[1]
else:
print("\nUtility to extract a vbaProject.bin binary from an Excel 2007+ "
"xlsm macro file for insertion into an XlsxWriter file."
"\n"
"See: https://xlsxwriter.readthedocs.io/working_with_macros.html\n"
"\n"
"Usage: vba_extract file.xlsm\n")
exit()
try:
# Open the Excel xlsm file as a zip file.
xlsm_zip = ZipFile(xlsm_file, 'r')
# Read the xl/vbaProject.bin file.
vba_data = xlsm_zip.read('xl/' + vba_filename)
# Write the vba data to a local file.
vba_file = open(vba_filename, "wb")
vba_file.write(vba_data)
vba_file.close()
except IOError as e:
print("File error: %s" % str(e))
exit()
except KeyError as e:
# Usually when there isn't a xl/vbaProject.bin member in the file.
print("File error: %s" % str(e))
print("File may not be an Excel xlsm macro file: '%s'" % xlsm_file)
exit()
except BadZipfile as e:
# Usually if the file is an xls file and not an xlsm file.
print("File error: %s: '%s'" % (str(e), xlsm_file))
print("File may not be an Excel xlsm macro file.")
exit()
except Exception as e:
# Catch any other exceptions.
print("File error: %s" % str(e))
exit()
print("Extracted: %s" % vba_filename)
|
[
"cjdeguzman0@gmail.com"
] |
cjdeguzman0@gmail.com
|
74f36f53030f6a252bb12ac60a94b33ac37cbf15
|
e02d75afc9e98769a4b651d4ad5730e88a585d19
|
/34.py
|
6fec48402ea5e849623843ac01bfe7feedd20b3f
|
[] |
no_license
|
bio33/euler
|
07b718fcc675a33c0377c76c9489b5fe92d4b675
|
36250ff49c0119c4250dbe4b60829fe319c651fd
|
refs/heads/master
| 2020-05-30T13:45:58.244747
| 2019-09-26T15:35:00
| 2019-09-26T15:35:00
| 189,769,872
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 553
|
py
|
# upper bound - > 2540161 (9!*7) reason being thats the maximum sum of factorials can go of each digit as 9999999
def memoize(f):
memory = {}
def helper(x):
if x not in memory:
memory[x] = f(x)
return memory[x]
return helper
@memoize
def fact(n):
if n <= 1:
return 1
else:
return n * fact(n-1)
d = {str(k): fact(k) for k in range(0,10)}
print(d)
s = 0
for i in range(10,2540161):
temp = 0
for c in str(i):
temp += d[c]
if temp > i:
break
if temp == i :
s+=i
print(s)
|
[
"noreply@github.com"
] |
bio33.noreply@github.com
|
658a507fcf159ac4b48d14cc5cca2cfada4e319d
|
3c2a197bf32e72444c5f36559ad0cb9b64035516
|
/codeskeleton/value_generators/random_int.py
|
d9aac5df824e8d25278a8ef6f114953e6b7a0a9f
|
[] |
no_license
|
appressoas/codeskeleton
|
c538bbfccf19735464dc42996b754cf9199a14a3
|
604011cb27c47b02d325379895bc23b543797216
|
refs/heads/master
| 2021-01-19T19:24:21.100020
| 2017-06-26T09:34:45
| 2017-06-26T09:34:45
| 88,417,371
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 228
|
py
|
import random
from .registry import GENERATOR_REGISTRY
def random_int(from_int=0, to_int=999999999):
return random.randint(from_int, to_int)
GENERATOR_REGISTRY.register(generator_name='random_int', function=random_int)
|
[
"post@espenak.net"
] |
post@espenak.net
|
d00b8b7df65a8b3580c1cdb5492a2d1e59ab941e
|
767a7116d4bdc2a8d52cbd74cb677862c6f40ac5
|
/T3/APF.py
|
2fd903ba790926219a9cc03c09ab38b358ed9075
|
[] |
no_license
|
lorenaelias/LFA
|
fc85bbd14f2719d6b26928417803f4dc9772511d
|
d64864cd361dbdb381611948ca2b619c65101bf7
|
refs/heads/master
| 2022-01-30T05:06:46.250637
| 2022-01-15T19:54:48
| 2022-01-15T19:54:48
| 181,799,032
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,388
|
py
|
class APF:
def __init__(self,nomeArq,flg):
if flg:
arq = open(nomeArq, 'r')
self.Q = arq.readline().rstrip().split()
self.S = arq.readline().rstrip().split()
self.T = arq.readline().rstrip().split()
self.q0 = arq.readline().strip()
self.Z = arq.readline().strip()
self.F = arq.readline().rstrip().split()
transicoes = arq.readlines()
self.delta = {}
matriz = []
for i in range(len(transicoes)) :
matriz.append(transicoes[i].strip().split())
for k in range(len(matriz)) :
if matriz[k][2] == '[]' :
self.delta[matriz[k][0], matriz[k][1]] = []
else:
aux = matriz[k]
aux = aux[3 :]
temp = []
if (matriz[k][0], matriz[k][1], matriz[k][2]) in self.delta:
temp = self.delta[matriz[k][0], matriz[k][1], matriz[k][2]]
self.delta[matriz[k][0], matriz[k][1], matriz[k][2]] = [tuple(aux)] + temp
# esse ultimo if serve para nao sobrescrever uma transicao quando existir
# mais de uma com a mesma key
arq.close()
else:
self.Q = []
self.S = []
self.T = []
self.q0 = ""
self.Z = ""
self.F = []
self.delta = {}
def pertence(self, elemento, conjunto):
if elemento in conjunto:
return True
return False
def validacaocadeia(self, sequencia):
for i in sequencia :
if not self.pertence(i, self.S):
return False
return True
def efecho(self, est, pi):
efecho = [est]
resultado = [est]
while(efecho != []):
qq : str
est1 = efecho.pop()
if (est1, '&', pi) in self.delta:
for i in self.delta[(est1, '&', pi)]:
if i not in efecho and i not in resultado:
efecho.append(i[0])
if i not in resultado:
resultado.append(i[0])
else:
if est1 not in resultado:
resultado.append(est1)
return resultado
def alteraPilha(self, a, pilhaAt, d2):
novaPi = pilhaAt
if d2 == '&':
novaPi.pop()
elif len(d2) == 2:
novaPi.append(d2[0])
elif d2 == pilhaAt[-1] or (d2 == '&' and a == '&'):
return pilhaAt
return novaPi
def percorreAPF1(self, sequencia, qAt, pilhaAt):
print("sequencia: ",sequencia)
if sequencia == "":
if qAt in self.F:
print("sinal", qAt, " ", sequencia)
return True
# se chegou ate aqui e ainda nao esta em um est final, pode ser que com uma
# etransicao consiga chegar a um
a = '&'
checaestado = (qAt, a, pilhaAt[-1])
if checaestado in self.delta :
prox = self.delta[checaestado]
for (d1, d2) in prox :
print("to mandando ", (a, pilhaAt, d2))
piAt = self.alteraPilha(a, pilhaAt, d2)
print("\n(d1,d2): ", d1, d2)
print("piAt: ", piAt)
print("pilhaAt: ", pilhaAt)
print(checaestado, ' -> ')
if self.percorreAPF("", d1, piAt):
return True
for i in self.F:
if i in self.efecho(qAt, pilhaAt[-1]):
print(i)
return True
# return False
else:
a = sequencia[0]
checaestado = (qAt, a, pilhaAt[-1])
if checaestado in self.delta:
prox = self.delta[checaestado]
for (d1, d2) in prox:
print("to mandando ", (a, pilhaAt, d2))
piAt = self.alteraPilha(a, pilhaAt, d2)
print("\n(d1,d2): ",d1, d2)
print("piAt: ", piAt)
print("pilhaAt: ", pilhaAt)
print(checaestado, ' -> ')
if self.percorreAPF(sequencia[1 :], d1, piAt) :
return True
a = '&'
checaestado = (qAt, a, pilhaAt[-1])
if checaestado in self.delta:
prox = self.delta[checaestado]
for (d1, d2) in prox :
print("to mandando ", (a, pilhaAt, d2))
piAt = self.alteraPilha(a, pilhaAt, d2)
print("\n(d1,d2): ", d1, d2)
print("piAt: ", piAt)
print("pilhaAt: ", pilhaAt)
print(checaestado, ' -> ')
if self.percorreAPF(sequencia, d1, piAt):
return True
return False
pilhaux = []
def esvaziapilha(self):
global pilhaux
pilhaux = []
# ultimo feito antes de arrumar para o wwRat
# funciona para os outros dois automatos 0n1n e anb2n
def percorreAPF2( self, sequencia, qAt, pilhaAt ) :
print("\npilha ",pilhaAt)
qAt2 = qAt
global pilhaux
if sequencia != "":
a = sequencia[0]
checaestado = (qAt, a, pilhaAt[-1])
if checaestado in self.delta and (checaestado not in pilhaux):
pilhaux.append(checaestado)
prox = self.delta[checaestado]
for (d1, d2) in prox :
qAt2 = d1
piAt = self.alteraPilha(a, pilhaAt, d2)
print(checaestado, "->", (d1, d2))
if self.percorreAPF3(sequencia[1 :], d1, piAt) :
return True
checaestado = (qAt, '&', pilhaAt[-1])
if checaestado in self.delta and checaestado not in pilhaux:
pilhaux.append(checaestado)
prox = self.delta[checaestado]
for (d1, d2) in prox:
qAt2 = d1
piAt = self.alteraPilha('&', pilhaAt, d2)
print(checaestado, "->", (d1, d2))
if self.percorreAPF3(sequencia, d1, piAt) :
return True
elif qAt2 in self.F :
print(qAt)
return True
else:
for i in self.F :
if i in self.efecho(qAt, pilhaAt[-1]) and sequencia == "" :
print(qAt)
return True
return False
# tentativa para aceitar wwRat
def percorreAPF3 ( self, sequencia, qAt, pilhaAt ) :
print("\npilha ", pilhaAt)
qAt2 = qAt
if sequencia == "":
if qAt in self.F:
print(qAt)
return True
else :
checaestado = (qAt, '&', pilhaAt[-1])
if checaestado in self.delta :
pilhaux.append(checaestado)
prox = self.delta[checaestado]
for (d1, d2) in prox :
qAt2 = d1
piAt = self.alteraPilha('&', pilhaAt, d2)
print(checaestado, "->", (d1, d2))
if self.percorreAPF3(sequencia, d1, piAt) :
return True
if sequencia != "" :
a = sequencia[0]
if len(pilhaAt) > 0 :
checaestado = (qAt, a, pilhaAt[-1])
if checaestado in self.delta :
pilhaux.append(checaestado)
prox = self.delta[checaestado]
for (d1, d2) in prox :
qAt2 = d1
if len(pilhaAt) > 0 :
piAt = self.alteraPilha(a, pilhaAt, d2)
else :
return False
print(checaestado, "->", (d1, d2))
if self.percorreAPF3(sequencia[1 :], d1, piAt) :
return True
else :
if d2 == '&' :
if len(pilhaAt) > 0 :
piAt = self.alteraPilha('&', pilhaAt, d2)
else :
return False
print(checaestado, "->", (d1, d2))
if self.percorreAPF3(sequencia, d1, piAt) :
return True
if len(pilhaAt) > 0 :
checaestado = (qAt, '&', pilhaAt[-1])
if checaestado in self.delta :
pilhaux.append(checaestado)
prox = self.delta[checaestado]
for (d1, d2) in prox :
qAt2 = d1
if len(pilhaAt) > 0 :
piAt = self.alteraPilha('&', pilhaAt, d2)
else :
return False
print(checaestado, "->", (d1, d2))
if self.percorreAPF3(sequencia, d1, piAt) :
return True
# TODO: MUDAR O SIMBOLO DE DESEMPILHAR P NAO DAR PROB NO ALTERAPILHA
return False
def printAPF(self):
print('\n\n-------\033[1;34mAUTOMATO DE PILHA POR ESTADO FINAL\033[0;0m-------\n')
print('Estados: ', self.Q)
print('Alfabeto: ', self.S)
print('Alfabeto da pilha: ', self.T)
print('Estado inicial: ', self.q0)
print('Simbolo inicial da pilha: ', self.Z)
print('Estados finais: ', self.F)
print('Transições:')
print('(Estado atual\tSimbolo)->\tEstado resultante\n')
for i in self.delta:
print(i,' -> ',self.delta[i])
print('\n-------------------------------------------------\n')
|
[
"noreply@github.com"
] |
lorenaelias.noreply@github.com
|
4389fe895f258da6df6e190350867e47b798783f
|
ee3b81258db93cad6d155fa30af7dda9aa1410d6
|
/convert_vid_to_images_&audio_&text.py
|
acfc2e8fa4c81e48c0ca8a5055355fd53eec727f
|
[] |
no_license
|
CS-Rorita/speech-to-text
|
c3a50456f03f531057f347e005ad8ed5a2d5b260
|
91e22be60586dc0df66ac325fb58e4cdd028c79e
|
refs/heads/master
| 2022-11-02T07:35:30.228990
| 2020-06-23T13:27:23
| 2020-06-23T13:27:23
| 274,397,026
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,262
|
py
|
# Program To Read video
# and Extract Frames
import cv2
import os
# Python code to convert video to audio
import moviepy.editor as mp
import speech_recognition as sr
# Function to extract frames
def FrameCapture(path):
    """Extract the audio track and periodic frames from the video at *path*,
    then transcribe the extracted audio with Google speech recognition.

    Side effects: creates ./data/, writes ./data/audio.wav and
    ./data/frame<N>.jpg files, and prints the recognized text.
    """
    # Create the output directory FIRST -- the old code wrote the audio
    # file into ./data before the directory existed.
    try:
        if not os.path.exists('data'):
            os.makedirs('data')
    except OSError:
        print('Error: Creating directory of data')
    # ---- Audio extraction ------------------------------------------------
    clip = mp.VideoFileClip(path)
    clip.audio.write_audiofile(r'./data/audio.wav')
    # ---- Frame extraction ------------------------------------------------
    vidcap = cv2.VideoCapture(path)

    def getFrame(sec):
        # Seek to `sec` seconds into the video and save one frame if present.
        vidcap.set(cv2.CAP_PROP_POS_MSEC, sec * 1000)
        hasFrames, image = vidcap.read()
        if hasFrames:
            name = './data/frame' + str(count) + '.jpg'
            cv2.imwrite(name, image)  # save frame as JPG file
        return hasFrames

    sec = 0
    frameRate = 30  # capture one image every 30 seconds of video
    count = 1
    success = getFrame(sec)
    while success:
        count = count + 1
        sec = round(sec + frameRate, 2)
        success = getFrame(sec)
    # ---- Speech to text --------------------------------------------------
    r = sr.Recognizer()
    # BUG FIX: transcribe the audio we just extracted; the old code read an
    # unrelated './stanford.wav' file instead of './data/audio.wav'.
    sound = sr.AudioFile('./data/audio.wav')
    with sound as source:
        r.adjust_for_ambient_noise(source)
        print("Converting Audio To Text ..... ")
        audio = r.listen(source)
    try:
        text = r.recognize_google(audio)
        print("Converted Audio Is : \n" + text)
    except Exception as e:
        print("Error can't recognize the voice {} : ".format(e))
# Script entry point: process the sample lecture video end to end.
if __name__ == '__main__':
    # Extract audio, frames and a transcript from this local video file.
    FrameCapture('Java Programming Tutorial 1 - Introduction to Java.mp4')
|
[
"noreply@github.com"
] |
CS-Rorita.noreply@github.com
|
58c0521573ba490ab6ced88ab69a77ffb44197a3
|
7fd3c972968416c27982194220e8f83f88bda99a
|
/Algorithms_LinkedLists/Code/206_ReverseLinkedList/v1_1.py
|
3ecd4e0aaaae2ea9a80b1e04d4cb30f33f5fff2b
|
[] |
no_license
|
AKHeit/LeetCode
|
a5093271eb1d9e27776e6b491f972be607802a72
|
d91f60431aa7767d1a854e0e27a26023fc8ec45c
|
refs/heads/master
| 2021-09-02T13:08:27.073505
| 2018-01-02T22:49:07
| 2018-01-02T22:49:07
| 113,120,000
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,402
|
py
|
"""
Problem: 206 reverse linked list
Level: easy
Tags: linked list
Technique: iterative, recursion
Status:
Problem Description:
reverse a linked list
To do:
fixed the broken recursive calling
"""
#Definition for singly-linked list.
class ListNode:
    """
    Minimal singly-linked list node for local runs.
    Comment out while running in LeetCode; keep while running locally.
    """
    def __init__(self, x):
        # Node payload and pointer to the successor (None marks the tail).
        self.val = x
        self.next = None
class Solution(object):
    """
    Solution format for LeetCode (problem 206, reverse linked list).
    """
    def reverseList(self, head):
        """
        Return a NEW linked list holding head's values in reverse order.
        :type head: ListNode
        :rtype: ListNode

        Rewritten iteratively: the previous recursive version re-walked the
        partial result to its tail on every level, making it O(n^2). This
        single pass prepends each value to the result (O(n)) and resolves
        the file's 'broken recursive calling' TODO.
        """
        rev = None
        node = head
        while node is not None:
            prepended = ListNode(node.val)
            prepended.next = rev
            rev = prepended
            node = node.next
        return rev
    def reverseList_new(self, head):
        """
        Brute force creation of new reversed list.
        :type head: ListNode
        :rtype: ListNode
        :method calls: addNode_head
        """
        if head == None:
            return head
        ans = ListNode(head.val)
        tail = (head.next == None)
        while not tail:
            head = head.next
            ans = self.addNode_head(ans, head.val)
            tail = (head.next == None)
        return ans
    def addNode_head(self, node_h, val):
        """
        Insert *val* at the head in place: the old head value is shifted into
        a freshly allocated second node, then the head takes *val*.
        :type node_h: ListNode (original head)
        :type val: value for the new head
        :rtype : ListNode (same object as node_h)
        :method calls: NONE
        """
        nn = ListNode(node_h.val)
        nn.next = node_h.next
        node_h.val = val
        node_h.next = nn
        return node_h
if __name__== "__main__":
    """
    test code: exercises addNode_head and reverseList with a fixed list.
    """
    #
    # standardized printing of test results
    #
    def print_test(ans_e,ans_o,name):
        """
        prints tests in standardized format
        :type ans_e: expected answer in printable format
        :type ans_o: observed answer in printable format
        :type name: test label used in the PASS/FAIL line
        :rtype: int (0 on pass, 1 on fail)
        """
        print('~'*40)
        if ans_o != ans_e:
            error = 1
            print("########## FAIL ##########")
            print("TEST: {} :: Status: FAIL".format(name))
        else:
            error = 0
            print("TEST: {} :: Status: PASS".format(name))
        # NOTE(review): these two lines read the module-level `method`
        # variable, not the `name` parameter -- they stay in sync only
        # because callers always pass `method` as `name`.
        print('TEST: {} :: Expected: {}'.format(method, ans_e))
        print('TEST: {} :: Observed: {}'.format(method, ans_o))
        return error
    #
    # testing starts here
    #
    err = 0
    sol = Solution()
    # test addNode_head checking values fields
    method = "addNode_head: checkvals"
    inp_0 = ListNode(1)
    inp_0.next = ListNode(-30)
    inp_1 = 300
    out = sol.addNode_head(inp_0,300)
    expected_answer = [300,1,-30]
    observed_answer = [None,None,None]
    observed_answer[0] = out.val
    observed_answer[1] = out.next.val
    observed_answer[2] = out.next.next.val
    err = err + print_test(expected_answer, observed_answer,method)
    # test addNode_head checking next field (only the tail's next is None)
    method = "addNode_head: isnext none"
    expected_answer = [False, False, True]
    observed_answer = [None, None, None]
    observed_answer[0] = out.next
    observed_answer[1] = out.next.next
    observed_answer[2] = out.next.next.next
    for i in range(len(observed_answer)):
        observed_answer[i] = (observed_answer[i] == None)
    err = err + print_test(expected_answer, observed_answer,method)
    # test full reversal: build 1->2->3->4, expect 4->3->2->1
    method = "reverseList"
    head = ListNode(4)
    head = sol.addNode_head(head, 3)
    head = sol.addNode_head(head, 2)
    head = sol.addNode_head(head, 1)
    expected_answer = [4,3,2,1]
    observed_answer = [None] * len(expected_answer)
    newhead = sol.reverseList(head)
    observed_answer[0] = newhead.val
    observed_answer[1] = newhead.next.val
    observed_answer[2] = newhead.next.next.val
    observed_answer[3] = newhead.next.next.next.val
    err = err + print_test(expected_answer, observed_answer,method)
    #
    # Final pass/fail readout
    #
    print('')
    if err == 0:
        print('PASSED ALL TESTS')
    else:
        print('FAILED A TEST: DEBUG!!!')
|
[
"heitman.alexander@gmail.com"
] |
heitman.alexander@gmail.com
|
df1b483b12a18f285047ae7d1f7b07f90b38e4ab
|
a43504f11666edffa9497630d9fcad31566b4349
|
/app/bot_engine/request_makers.py
|
7c475e770f2ee3cf8dc298e5fc7d6fa180ffd930
|
[] |
no_license
|
korid24/shop_list_webhook_bot
|
4896d0e731679815b043ba4c997651fe4f3682a9
|
efe359b42a8b625ea78b855664358937419a1785
|
refs/heads/master
| 2022-12-29T09:51:16.212730
| 2020-10-22T10:16:20
| 2020-10-22T10:16:20
| 306,046,971
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,933
|
py
|
import json
import requests
from typing import List, NamedTuple, Optional, Union
from config import AUTH_TOKEN, BASE_URL
# from local_utils import write_json
from bot_engine.utils import path_to
# Common headers for every API call: JSON body, token auth, English responses.
HEADERS = {
    'Content-type': 'application/json',
    'Authorization': 'token {}'.format(AUTH_TOKEN),
    'Accept-Language': 'en-US'}
class RequestConstructor(NamedTuple):
    """
    Template holding everything needed to issue one HTTP request.
    """
    # Absolute request URL.
    url: str
    # HTTP verb in lower case ('get', 'post', 'patch', 'delete').
    method: str
    # JSON-serializable request body, or None for body-less requests.
    data: Optional[Union[list, dict]]
def create_user(
        telegram_id: int,
        first_name: Optional[str],
        last_name: Optional[str],
        nickname: Optional[str]) -> RequestConstructor:
    """
    Builds the request info for registering a new user.
    """
    payload = {
        'telegram_id': telegram_id,
        'first_name': first_name,
        'last_name': last_name,
        'nickname': nickname}
    return RequestConstructor(
        url=BASE_URL + path_to('user'), data=payload, method='post')
def add_elements(
        telegram_id: int,
        position: int,
        elements: List[str]) -> RequestConstructor:
    """
    Builds the request info for bulk-adding elements: purchases inside the
    list at *position* when it is non-zero, otherwise new purchase lists.
    """
    payload = [{'title': title} for title in elements]
    base = BASE_URL + path_to('user', telegram_id)
    if position:
        target = (base + path_to('purchaselist', position) +
                  path_to('purchase') + 'bulk_create/')
    else:
        target = base + path_to('purchaselist') + 'bulk_create/'
    return RequestConstructor(url=target, data=payload, method='post')
def replace_element(
        telegram_id: int,
        position: int,
        old_ind: int,
        new_ind: int) -> RequestConstructor:
    """
    Builds the request info for moving an element from *old_ind* to
    *new_ind* (a purchase when *position* is non-zero, else a list).
    """
    base = BASE_URL + path_to('user', telegram_id)
    if position:
        target = (base + path_to('purchaselist', position) +
                  path_to('purchase', old_ind))
    else:
        target = base + path_to('purchaselist', old_ind)
    return RequestConstructor(url=target, data={'ind': new_ind}, method='patch')
def remove_elements(
        telegram_id: int,
        position: int,
        elements: List[int]) -> RequestConstructor:
    """
    Builds the request info for bulk-deleting elements: purchases inside
    the list at *position* when it is non-zero, otherwise whole lists.
    """
    base = BASE_URL + path_to('user', telegram_id)
    if position:
        target = (base + path_to('purchaselist', position) +
                  path_to('purchase') + 'bulk_delete/')
    else:
        target = base + path_to('purchaselist') + 'bulk_delete/'
    return RequestConstructor(url=target, data={'items': elements}, method='delete')
def get_all(telegram_id: int) -> RequestConstructor:
    """
    Builds the request info for fetching the user's complete data.
    """
    target = BASE_URL + path_to('user', telegram_id)
    return RequestConstructor(url=target, data=None, method='get')
def make_request(
        info: RequestConstructor, answer: bool = True) -> Union[dict, int]:
    """
    Performs the request described by *info*. Returns the parsed JSON body
    when *answer* is true, otherwise only the HTTP status code.
    """
    response = requests.request(
        method=info.method,
        url=info.url,
        data=json.dumps(info.data),
        headers=HEADERS)
    return response.json() if answer else response.status_code
|
[
"korid24.dev@gmail.com"
] |
korid24.dev@gmail.com
|
a67206d5ae73c680d0c58a0ec7396e287f72750f
|
d5e3d72f2f21a4a326e953a79aae97ad5109623e
|
/machinelearninginaction/Ch10/kMeans.py
|
b4957253b0939348c82737eea7f6547ef7fd1cb0
|
[] |
no_license
|
thatwaylw/pycl
|
37006e7b4657faa8181476564c19304e7da2c3c6
|
d253da71e9154b592bf58a72f012185ffff2dd85
|
refs/heads/master
| 2020-07-04T13:22:10.002414
| 2019-08-16T10:27:37
| 2019-08-16T10:27:58
| 202,297,713
| 0
| 0
| null | 2019-08-14T08:57:35
| 2019-08-14T07:27:42
|
Python
|
UTF-8
|
Python
| false
| false
| 8,646
|
py
|
#coding:utf-8
'''
Created on Feb 16, 2011
k Means Clustering for Ch10 of Machine Learning in Action
@author: Peter Harrington
'''
from numpy import *
def loadDataSet(fileName):
    """Parse a tab-delimited text file into a list of float rows.

    Fixes: the file handle was never closed (now a context manager), and
    the manual index loop is replaced by a comprehension (the original
    map() version was abandoned because Python 3's map returns an iterator).
    """
    dataMat = []
    with open(fileName) as fr:
        for line in fr:
            curLine = line.strip().split('\t')
            dataMat.append([float(tok) for tok in curLine])
    return dataMat
def plot1(dataMat, centroids):
    """Scatter the 2-D samples and overlay the centroids as red crosses."""
    import matplotlib.pyplot as plt
    figure = plt.figure()
    axes = figure.add_subplot(111)
    axes.scatter(dataMat[:, 0], dataMat[:, 1], s=15)  # raw samples
    axes.scatter(centroids[:, 0], centroids[:, 1], s=45, c='r', marker='x')  # centroids
    plt.show()
def distEclud(vecA, vecB):
    """Euclidean distance between two vectors (numpy star-imported names)."""
    diff = vecA - vecB
    return sqrt(sum(power(diff, 2)))
def randCent(dataSet, k):
    """Return k random centroids drawn uniformly inside the data's
    per-feature bounding box."""
    n = shape(dataSet)[1]  # feature dimension
    centroids = mat(zeros((k, n)))
    for j in range(n):
        # uniform samples in [min_j, min_j + range_j) for this feature
        lo = min(dataSet[:, j])
        span = float(max(dataSet[:, j]) - lo)
        centroids[:, j] = mat(lo + span * random.rand(k, 1))
    return centroids
def kMeans(dataSet, k, distMeas=distEclud, createCent=randCent):
    """Standard k-means.

    Returns (centroids, clusterAssment) where each assignment row holds
    [index of nearest centroid, squared distance to it].
    """
    m = shape(dataSet)[0]  # number of samples
    clusterAssment = mat(zeros((m,2)))  # per sample: nearest centroid index, squared distance
    centroids = createCent(dataSet, k)
    clusterChanged = True
    while clusterChanged:
        clusterChanged = False
        for i in range(m):#for each data point assign it to the closest centroid
            minDist = inf; minIndex = -1
            for j in range(k):
                distJI = distMeas(centroids[j,:],dataSet[i,:])  # distance from centroid j to sample i
                if distJI < minDist:
                    minDist = distJI; minIndex = j  # track the nearest centroid for sample i
            if clusterAssment[i,0] != minIndex: clusterChanged = True  # iterate again if any assignment changed
            clusterAssment[i,:] = minIndex,minDist**2  # store nearest centroid index and squared distance
        #print(centroids)
        for cent in range(k):#recalculate centroids
            ptsInClust = dataSet[nonzero(clusterAssment[:,0].A==cent)[0]]  # samples currently assigned to this centroid
            centroids[cent,:] = mean(ptsInClust, axis=0)  # new centroid = column-wise mean of its members
    return centroids, clusterAssment
def biKmeans(dataSet, k, distMeas=distEclud):
    """Bisecting k-means: repeatedly 2-split the cluster whose split gives
    the lowest total SSE, until k clusters exist."""
    m = shape(dataSet)[0]  # number of samples
    clusterAssment = mat(zeros((m,2)))  # per sample: cluster index (all start in cluster 0), squared distance
    centroid0 = mean(dataSet, axis=0).tolist()[0]  # centroid of all points = the single initial cluster
    centList =[centroid0] #create a list with one centroid
    for j in range(m):#calc initial Error
        clusterAssment[j,1] = distMeas(mat(centroid0), dataSet[j,:])**2  # squared distance of sample j to the initial centroid
    while (len(centList) < k):
        lowestSSE = inf
        for i in range(len(centList)):  # try 2-splitting every current cluster
            ptsInCurrCluster = dataSet[nonzero(clusterAssment[:,0].A==i)[0],:]  # samples assigned to cluster i
            centroidMat, splitClustAss = kMeans(ptsInCurrCluster, 2, distMeas)  # trial 2-means split of cluster i
            sseSplit = sum(splitClustAss[:,1])  # SSE of the split cluster
            sseNotSplit = sum(clusterAssment[nonzero(clusterAssment[:,0].A!=i)[0],1])  # SSE of every other cluster
            print("sseSplit, and notSplit: ",sseSplit,sseNotSplit)  # their sum = total SSE if cluster i were split
            if (sseSplit + sseNotSplit) < lowestSSE:
                bestCentToSplit = i
                bestNewCents = centroidMat  # 2x2 matrix holding the two new centroids
                bestClustAss = splitClustAss.copy()  # local assignments (0/1) for the split cluster's samples
                lowestSSE = sseSplit + sseNotSplit
        bestClustAss[nonzero(bestClustAss[:,0].A == 1)[0],0] = len(centList)  # local cluster 1 takes the appended centroid's index
        bestClustAss[nonzero(bestClustAss[:,0].A == 0)[0],0] = bestCentToSplit  # local cluster 0 keeps the split cluster's index
        print('the bestCentToSplit is: ',bestCentToSplit)
        print('the len of bestClustAss is: ', len(bestClustAss))
        centList[bestCentToSplit] = bestNewCents[0,:].tolist()[0]  # replace the split centroid with the first new one
        centList.append(bestNewCents[1,:].tolist()[0])  # append the second new centroid
        clusterAssment[nonzero(clusterAssment[:,0].A == bestCentToSplit)[0],:]= bestClustAss  # write the updated assignments back
    return mat(centList), clusterAssment
import urllib
import json
def testURLLib():
    """Smoke-test helper: fetch a hard-coded QA endpoint and print the
    raw and parsed JSON response."""
    # BUG FIX: the file only does `import urllib`, which does NOT make
    # urllib.request available in Python 3; import the submodule here.
    import urllib.request
    c = urllib.request.urlopen('http://120.131.82.100:18081/qa_child/child_app/?toyId=gqtest&t=hello')
    body = c.read().decode('utf-8')
    print(body)
    jobj = json.loads(body)
    print(jobj)
def geoGrab(stAddress, city):
    """Geocode an address via the Yahoo geocoder API and return the parsed
    JSON response.

    NOTE(review): this endpoint has been shut down by Yahoo; calls will
    fail at runtime regardless of the fix below.
    """
    # BUG FIX: `import urllib` alone does not provide urllib.parse or
    # urllib.request in Python 3; import the submodules explicitly.
    import urllib.parse
    import urllib.request
    apiStem = 'http://where.yahooapis.com/geocode?'  # create a dict and constants for the geocoder
    params = {}
    params['flags'] = 'J'  # JSON return type
    params['appid'] = 'aaa0VN6k'
    params['location'] = '%s %s' % (stAddress, city)
    url_params = urllib.parse.urlencode(params)
    yahooApi = apiStem + url_params
    print(yahooApi)
    req = urllib.request.Request(yahooApi)
    c = urllib.request.urlopen(req)
    return json.loads(c.read())
from time import sleep
def massPlaceFind(fileName):
    """Geocode every tab-separated address line of *fileName* and append
    'line<TAB>lat<TAB>lng' records to places.txt, skipping failed lookups.

    Fix: both file handles are now managed by a context manager (the input
    file was previously opened and never closed).
    """
    with open('places.txt', 'w') as fw, open(fileName) as fr:
        for line in fr:
            line = line.strip()
            lineArr = line.split('\t')
            retDict = geoGrab(lineArr[1], lineArr[2])
            if retDict['ResultSet']['Error'] == 0:
                lat = float(retDict['ResultSet']['Results'][0]['latitude'])
                lng = float(retDict['ResultSet']['Results'][0]['longitude'])
                print("%s\t%f\t%f" % (lineArr[0], lat, lng))
                fw.write('%s\t%f\t%f\n' % (line, lat, lng))
            else:
                print("error fetching")
            sleep(1)  # rate-limit requests to the geocoding API
def distSLC(vecA, vecB):
    """Great-circle distance in km between two (longitude, latitude) row
    vectors, via the Spherical Law of Cosines (Earth radius 6371 km)."""
    latA = vecA[0, 1] * pi / 180
    latB = vecB[0, 1] * pi / 180
    deltaLng = pi * (vecB[0, 0] - vecA[0, 0]) / 180
    cosAngle = sin(latA) * sin(latB) + cos(latA) * cos(latB) * cos(deltaLng)
    return arccos(cosAngle) * 6371.0  # pi is imported with numpy
import matplotlib
import matplotlib.pyplot as plt
def clusterClubs(numClust=5):
    """Cluster the geo-coordinates listed in places.txt with bisecting
    k-means (great-circle distance) and plot them over Portland.png."""
    datList = []
    for line in open('places.txt').readlines():
        lineArr = line.split('\t')
        datList.append([float(lineArr[4]), float(lineArr[3])])  # columns 4/3 -> (longitude, latitude)
    datMat = mat(datList)
    myCentroids, clustAssing = biKmeans(datMat, numClust, distMeas=distSLC)
    fig = plt.figure()
    rect=[0.1,0.1,0.8,0.8]
    scatterMarkers=['s', 'o', '^', '8', 'p', \
                    'd', 'v', 'h', '>', '<']
    axprops = dict(xticks=[], yticks=[])
    ax0=fig.add_axes(rect, label='ax0', **axprops)  # background axes for the map image
    imgP = plt.imread('Portland.png')
    ax0.imshow(imgP)
    ax1=fig.add_axes(rect, label='ax1', frameon=False)  # frameless overlay for the scatter plots
    for i in range(numClust):
        ptsInCurrCluster = datMat[nonzero(clustAssing[:,0].A==i)[0],:]
        markerStyle = scatterMarkers[i % len(scatterMarkers)]  # cycle marker styles per cluster
        ax1.scatter(ptsInCurrCluster[:,0].flatten().A[0], ptsInCurrCluster[:,1].flatten().A[0], marker=markerStyle, s=90)
    ax1.scatter(myCentroids[:,0].flatten().A[0], myCentroids[:,1].flatten().A[0], marker='+', s=300)  # centroids as big '+'
    plt.show()
|
[
"laiwei.lw@gmail.com"
] |
laiwei.lw@gmail.com
|
0ffac56bcad2e24caca6937b282f7b1ad0456549
|
ad3c845b93724fa37b88a09b0f776e3d5719bca0
|
/mylogs/notes/admin.py
|
a9a714f116f2f9f71f248ea53908575fbf56bcf1
|
[] |
no_license
|
VSpectrum/My_Logs
|
ffc3adfca5bb22b628cfd958badf1a22179faa78
|
9df444ea89c8030ec8638057b661c0a7e2aa1812
|
refs/heads/master
| 2021-01-22T06:23:22.907453
| 2017-06-13T16:26:50
| 2017-06-13T16:26:50
| 92,547,716
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 424
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
# Register your models here.
from notes.models import Log
class LogDisplay(admin.ModelAdmin):
    """Django admin list configuration for Log entries."""
    def time_seconds(self, obj):
        # Render the log's timestamp like "13-Jun-2017 | 16:26:50".
        return obj.date.strftime("%d-%b-%Y | %H:%M:%S")
    time_seconds.short_description = 'Date'  # column header in the admin
    list_display = ('logHours', 'logNote', 'order', 'date')
# Register Log with the customized admin display above.
admin.site.register(Log, LogDisplay)
|
[
"vgooljar@sunnygroup.com"
] |
vgooljar@sunnygroup.com
|
17ff71f9320ed1b5a19d7b730f0302b2113591eb
|
196f7e3238f961fb5eba7a794f0b0c75d7c30ba1
|
/Python自动化运维技术与最佳实践/2业务服务监控/213对比nginx配置文件test.py
|
9cc58a6e05df6dd2f918590e93150457966d7b24
|
[] |
no_license
|
Liaoyingjie/Pythonlearn
|
d0b1b95110017af7e063813660e52c61a6333575
|
8bca069f38a60719acac5aa39bd347f90ab0bfb1
|
refs/heads/master
| 2020-04-08T07:35:07.357487
| 2018-04-12T16:44:43
| 2018-04-12T16:44:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 815
|
py
|
#!/usr/bin/python
import difflib
import sys
# Paths of the two nginx config revisions to compare.
textfile1='nginx.conf.v1'
textfile2='nginx.conf.v2'
def readfile(filename):
    """Return the file's contents as a list of lines (newlines stripped).

    Exits the program with a message on any I/O error.
    """
    try:
        # BUG FIX: the mode was 'a+', which opens for append+read with the
        # file pointer at EOF, so read() always returned '' (and a missing
        # file was silently created). Read-only mode is what we want, and
        # the context manager guarantees the handle is closed.
        with open(filename, 'r') as fileHandle:
            return fileHandle.read().splitlines()
    except IOError as error:
        print('Read file Error:' + str(error))
        sys.exit()
# Guard against unset file names (left over from the CLI-style original).
if textfile1=="" or textfile2=="":
    print("Usage: simple3.py filename1 filename2")
    sys.exit()
text1_lines = readfile(textfile1)
text2_lines = readfile(textfile2)
# Build a side-by-side HTML diff of the two configs and append it to the
# result file.
d = difflib.HtmlDiff()
f=open('213对比Nginx网页结果.html', 'a+')
print((d.make_file(text1_lines,text2_lines)),file=f)
f.close()
|
[
"godzoco@qq.com"
] |
godzoco@qq.com
|
a69b7932bdbaf598e483e8e989db6149b82fd61c
|
b650ecac89df96d01b3371806b931858eaa202cc
|
/io_scene_gltf2/blender/imp/gltf2_blender_skin.py
|
b2b39a2b94764e627b2e5bd93979c919ba73d9a1
|
[] |
no_license
|
yazici/blender-addons
|
a92a87e6f96d130cf32c94227cbd5623fe129ad9
|
85bda1d3f7d20766561ef73c864a4a6872d93a23
|
refs/heads/master
| 2020-04-25T18:26:34.571420
| 2019-02-25T15:48:03
| 2019-02-25T15:48:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,427
|
py
|
# Copyright 2018 The glTF-Blender-IO authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bpy
from mathutils import Vector, Matrix
from ..com.gltf2_blender_conversion import matrix_gltf_to_blender, scale_to_matrix
from ...io.imp.gltf2_io_binary import BinaryData
class BlenderSkin():
    """Blender Skinning / Armature.

    Namespace of static helpers that turn a glTF skin into a Blender
    armature: armature object, bones with bind/pose transforms, vertex
    groups, weights and the final Armature modifier. Not instantiable.
    """
    def __new__(cls, *args, **kwargs):
        raise RuntimeError("%s should not be instantiated" % cls)
    @staticmethod
    def create_armature(gltf, skin_id, parent):
        """Armature creation."""
        pyskin = gltf.data.skins[skin_id]
        if pyskin.name is not None:
            name = pyskin.name
        else:
            name = "Armature_" + str(skin_id)
        armature = bpy.data.armatures.new(name)
        obj = bpy.data.objects.new(name, armature)
        bpy.data.scenes[gltf.blender_scene].collection.objects.link(obj)
        # Remember the Blender object's (possibly deduplicated) name so the
        # other passes can look the armature up again.
        pyskin.blender_armature_name = obj.name
        if parent is not None:
            obj.parent = bpy.data.objects[gltf.data.nodes[parent].blender_object]
    @staticmethod
    def set_bone_transforms(gltf, skin_id, bone, node_id, parent):
        """Set bone transformations."""
        pyskin = gltf.data.skins[skin_id]
        pynode = gltf.data.nodes[node_id]
        obj = bpy.data.objects[pyskin.blender_armature_name]
        # Set bone bind_pose by inverting bindpose matrix
        if node_id in pyskin.joints:
            index_in_skel = pyskin.joints.index(node_id)
            if pyskin.inverse_bind_matrices is not None:
                inverse_bind_matrices = BinaryData.get_data_from_accessor(gltf, pyskin.inverse_bind_matrices)
                # Needed to keep scale in matrix, as bone.matrix seems to drop it
                if index_in_skel < len(inverse_bind_matrices):
                    pynode.blender_bone_matrix = matrix_gltf_to_blender(
                        inverse_bind_matrices[index_in_skel]
                    ).inverted()
                    bone.matrix = pynode.blender_bone_matrix
                else:
                    # NOTE(review): concatenating `pyskin` (an object) to a
                    # str would raise TypeError here -- likely meant
                    # str(skin_id); left as-is in this doc-only pass.
                    gltf.log.error("Error with inverseBindMatrix for skin " + pyskin)
            else:
                pynode.blender_bone_matrix = Matrix() # 4x4 identity matrix
        else:
            print('No invBindMatrix for bone ' + str(node_id))
            pynode.blender_bone_matrix = Matrix()
        # Parent the bone
        if parent is not None and hasattr(gltf.data.nodes[parent], "blender_bone_name"):
            bone.parent = obj.data.edit_bones[gltf.data.nodes[parent].blender_bone_name] # TODO if in another scene
        # Switch to Pose mode
        bpy.ops.object.mode_set(mode="POSE")
        obj.data.pose_position = 'POSE'
        # Set posebone location/rotation/scale (in armature space)
        # location is actual bone location minus it's original (bind) location
        bind_location = Matrix.Translation(pynode.blender_bone_matrix.to_translation())
        bind_rotation = pynode.blender_bone_matrix.to_quaternion()
        bind_scale = scale_to_matrix(pynode.blender_bone_matrix.to_scale())
        location, rotation, scale = matrix_gltf_to_blender(pynode.transform).decompose()
        if parent is not None and hasattr(gltf.data.nodes[parent], "blender_bone_matrix"):
            parent_mat = gltf.data.nodes[parent].blender_bone_matrix
            # Get armature space location (bindpose + pose)
            # Then, remove original bind location from armspace location, and bind rotation
            final_location = (bind_location.inverted() @ parent_mat @ Matrix.Translation(location)).to_translation()
            obj.pose.bones[pynode.blender_bone_name].location = \
                bind_rotation.inverted().to_matrix().to_4x4() @ final_location
            # Do the same for rotation
            obj.pose.bones[pynode.blender_bone_name].rotation_quaternion = \
                (bind_rotation.to_matrix().to_4x4().inverted() @ parent_mat @
                    rotation.to_matrix().to_4x4()).to_quaternion()
            obj.pose.bones[pynode.blender_bone_name].scale = \
                (bind_scale.inverted() @ parent_mat @ scale_to_matrix(scale)).to_scale()
        else:
            # Root bone: no parent matrix to account for.
            obj.pose.bones[pynode.blender_bone_name].location = bind_location.inverted() @ location
            obj.pose.bones[pynode.blender_bone_name].rotation_quaternion = bind_rotation.inverted() @ rotation
            obj.pose.bones[pynode.blender_bone_name].scale = bind_scale.inverted() @ scale
    @staticmethod
    def create_bone(gltf, skin_id, node_id, parent):
        """Bone creation."""
        pyskin = gltf.data.skins[skin_id]
        pynode = gltf.data.nodes[node_id]
        scene = bpy.data.scenes[gltf.blender_scene]
        obj = bpy.data.objects[pyskin.blender_armature_name]
        # Edit bones can only be created in EDIT mode on the active object.
        bpy.context.window.scene = scene
        bpy.context.view_layer.objects.active = obj
        bpy.ops.object.mode_set(mode="EDIT")
        if pynode.name:
            name = pynode.name
        else:
            name = "Bone_" + str(node_id)
        bone = obj.data.edit_bones.new(name)
        pynode.blender_bone_name = bone.name
        pynode.blender_armature_name = pyskin.blender_armature_name
        bone.tail = Vector((0.0, 1.0, 0.0)) # Needed to keep bone alive
        # set bind and pose transforms
        BlenderSkin.set_bone_transforms(gltf, skin_id, bone, node_id, parent)
        bpy.ops.object.mode_set(mode="OBJECT")
    @staticmethod
    def create_vertex_groups(gltf, skin_id):
        """Vertex Group creation."""
        pyskin = gltf.data.skins[skin_id]
        # One vertex group per joint, on every mesh object driven by this skin.
        for node_id in pyskin.node_ids:
            obj = bpy.data.objects[gltf.data.nodes[node_id].blender_object]
            for bone in pyskin.joints:
                obj.vertex_groups.new(name=gltf.data.nodes[bone].blender_bone_name)
    @staticmethod
    def assign_vertex_groups(gltf, skin_id):
        """Assign vertex groups to vertices."""
        pyskin = gltf.data.skins[skin_id]
        for node_id in pyskin.node_ids:
            node = gltf.data.nodes[node_id]
            obj = bpy.data.objects[node.blender_object]
            # `offset` tracks where each primitive's vertices start in the
            # merged mesh so glTF indices can be mapped to Blender indices.
            offset = 0
            for prim in gltf.data.meshes[node.mesh].primitives:
                idx_already_done = {}
                if 'JOINTS_0' in prim.attributes.keys() and 'WEIGHTS_0' in prim.attributes.keys():
                    original_joint_ = BinaryData.get_data_from_accessor(gltf, prim.attributes['JOINTS_0'])
                    original_weight_ = BinaryData.get_data_from_accessor(gltf, prim.attributes['WEIGHTS_0'])
                    # Re-index joints/weights to match the deduplicated
                    # vertex order recorded in prim.tmp_indices.
                    tmp_indices = {}
                    tmp_idx = 0
                    weight_ = []
                    for i in prim.tmp_indices:
                        if i[0] not in tmp_indices.keys():
                            tmp_indices[i[0]] = tmp_idx
                            tmp_idx += 1
                            weight_.append(original_weight_[i[0]])
                    tmp_indices = {}
                    tmp_idx = 0
                    joint_ = []
                    for i in prim.tmp_indices:
                        if i[0] not in tmp_indices.keys():
                            tmp_indices[i[0]] = tmp_idx
                            tmp_idx += 1
                            joint_.append(original_joint_[i[0]])
                    for poly in obj.data.polygons:
                        for loop_idx in range(poly.loop_start, poly.loop_start + poly.loop_total):
                            vert_idx = obj.data.loops[loop_idx].vertex_index
                            if vert_idx in idx_already_done.keys():
                                continue
                            idx_already_done[vert_idx] = True
                            if vert_idx in range(offset, offset + prim.vertices_length):
                                tab_index = vert_idx - offset
                                cpt = 0
                                for joint_idx in joint_[tab_index]:
                                    weight_val = weight_[tab_index][cpt]
                                    if weight_val != 0.0:   # It can be a problem to assign weights of 0
                                                            # for bone index 0, if there is always 4 indices in joint_
                                                            # tuple
                                        group = obj.vertex_groups[gltf.data.nodes[
                                            pyskin.joints[joint_idx]
                                        ].blender_bone_name]
                                        group.add([vert_idx], weight_val, 'REPLACE')
                                    cpt += 1
                else:
                    gltf.log.error("No Skinning ?????") # TODO
                offset = offset + prim.vertices_length
    @staticmethod
    def create_armature_modifiers(gltf, skin_id):
        """Create Armature modifier."""
        pyskin = gltf.data.skins[skin_id]
        if pyskin.blender_armature_name is None:
            # TODO seems something is wrong
            # For example, some joints are in skin 0, and are in another skin too
            # Not sure this is glTF compliant, will check it
            return
        for node_id in pyskin.node_ids:
            node = gltf.data.nodes[node_id]
            obj = bpy.data.objects[node.blender_object]
            # Select only this mesh object and make it active.
            for obj_sel in bpy.context.scene.objects:
                obj_sel.select_set(False)
            obj.select_set(True)
            bpy.context.view_layer.objects.active = obj
            # bpy.ops.object.parent_clear(type='CLEAR_KEEP_TRANSFORM')
            # Reparent skinned mesh to it's armature to avoid breaking
            # skinning with interleaved transforms
            obj.parent = bpy.data.objects[pyskin.blender_armature_name]
            arma = obj.modifiers.new(name="Armature", type="ARMATURE")
            arma.object = bpy.data.objects[pyskin.blender_armature_name]
|
[
"julien.duroure@gmail.com"
] |
julien.duroure@gmail.com
|
7a021ccaf3c670c98dfc5155d1cbd84b76bfd436
|
2caf6885511af24443e22aaa43cd679d694f6f80
|
/note/my_note/first_month/day06/do_it.py
|
ec11f1b1af222ae157ca35960d3fb73a0a203e08
|
[] |
no_license
|
nandadao/Python_note
|
7f9ba54a73af05c935b4f7e24cacb728859a6c69
|
abddfc2e9a1704c88867cff1898c9251f59d4fb5
|
refs/heads/master
| 2020-11-25T18:29:50.607670
| 2019-12-19T01:28:02
| 2019-12-19T01:28:02
| 228,793,207
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 751
|
py
|
# dict_like = {
# "qtx":["编码","看书","跑步"],
# "lzmly":["看电影","编码","美食","唱歌"]
# }
#
# # for i in dict_like["qtx"]:
# # print("qtx",i)
#
# # 所有人的爱好
# for item in dict_like:
# for i in dict_like[item]:
# print(item, i)
# Nested dict: city -> category ("景区" = scenic spots, "美食" = food) -> names.
dict_city = {
    "北京":{
        "景区":["天安门","天坛","故宫"],
        "美食":["驴打滚","豆汁"]
    },
    "四川":{
        "景区":["九寨沟","宽窄巷子"],
        "美食":["火锅","串串香"]
    },
}

# BUG FIX: this loop iterates Beijing's scenic spots ("景区") but the old
# label printed "北京美食" (food); use the matching category label.
for i in dict_city["北京"]["景区"]:
    print("北京景区",i)

# List every city.
for item in dict_city:
    print("城市有:", item)

# List every city's scenic spots.
for item in dict_city:
    for i in dict_city[item]["景区"]:
        print(item, i)
|
[
"1361335953@qq.com"
] |
1361335953@qq.com
|
cc972351c9723f12cac29a568a4b3609c43b5fe0
|
a43763ff378a39f50af967dd90ff898e969e88e7
|
/Lista_Prof_Fabio/Algoritmos_Exercicio-03-REPETICAO-FOR/fb_ex3_q10.py
|
e86692c6afa50f7ffb885fa3171aa20a16feec99
|
[] |
no_license
|
marcosaraujo2020/ifpi-ads-algoritmos2020
|
801ec1858958a0ab51f050c42e4067fefb8cc106
|
608ed133c6e57a4cfe84bf5d301dd44e057ffc47
|
refs/heads/master
| 2023-01-12T04:36:07.524983
| 2020-11-20T21:21:09
| 2020-11-20T21:21:09
| 246,123,984
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 486
|
py
|
def main():
    # Ask the user for the inclusive interval bounds, then report the odds.
    inferior = int(input('Informe um valor para ser o limite inferior: '))
    superior = int(input('Informe um valor para ser o limite superior: '))
    numeros_impares(inferior, superior)
def numeros_impares(inferior, supeior):
    """Print the odd numbers in the closed interval [inferior, supeior].

    NOTE(review): the second parameter is spelled "supeior" (sic) in the
    original; the name is kept so keyword callers keep working.
    """
    print(f'Os números ÍMPARES no intervalo de {inferior} a {supeior} são: ', end=' ')
    for valor in range(inferior, supeior + 1):
        if valor % 2 != 0:
            print(valor, end=' ')
# Script entry point.
main()
|
[
"marcosaraujo2015.1@gmail.com"
] |
marcosaraujo2015.1@gmail.com
|
0600e81657f4533c2756ea97761353143be27172
|
02bc72dd34f1cb9642d1b336a7552e86e08c875f
|
/density_model_demo.py
|
e7d488c99b8d2787f1832c7c410c8db2ad40262e
|
[
"MIT"
] |
permissive
|
JFlommersfeld/Actomyosin-contractions-in-soft-pillar-rings
|
b3061ef0449f7ee11c5cc441b893f70d2dc0fa4e
|
0f8e9375f53da432a9cc54a208e5655bc80f9f45
|
refs/heads/main
| 2023-08-03T11:09:26.000857
| 2021-09-16T15:03:04
| 2021-09-16T15:03:04
| 407,205,702
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,375
|
py
|
import numpy as np
from calculate_contraction_dynamics import calculate_contraction_dynamics
from plotting import plot_tip_displacement_and_velocity, plot_peak_velocities, plot_final_forces
from auxiliaries import discretize_curve, velos_numerical
# first we analyse the contraction dynamics for a single pillar stiffness
pillar_stiffness = 35.
# simulate the model
timepoints, sol, velocities, _, _, _, _ = \
calculate_contraction_dynamics('density model', './parameter_density_model.txt', 210., pillar_stiffness)
# plot the dynamic quantities
plot_tip_displacement_and_velocity(timepoints, sol[0]/pillar_stiffness, velocities, ylim_bottom_velo=-0.5,
ylim_top_velo=5, ylim_bottom_disp=-0.5, ylim_top_disp=4)
# repeat the analysis at a higher pillar stiffness
pillar_stiffness = 100.
# simulate the model
timepoints, sol, velocities, _, _, _, _ = \
calculate_contraction_dynamics('density model', './parameter_density_model.txt', 210., pillar_stiffness)
# plot the dynamic quantities
plot_tip_displacement_and_velocity(timepoints, sol[0]/pillar_stiffness, velocities, ylim_bottom_velo=-0.5,
ylim_top_velo=5, ylim_bottom_disp=-0.5, ylim_top_disp=4)
# now we analyse the dependence on the pillar stiffness
peak_velo_discrete = []
final_forces = []
t_max = 500.
for idx, stiffness in enumerate(range(20, 230, 5)):
pillar_stiffness = stiffness
# integrate ODE
timepoints, sol, velocities, transmitted_powers, dissipated_powers, transmitted_work, dissipated_work = \
calculate_contraction_dynamics('density model', './parameter_density_model.txt', t_max, pillar_stiffness)
# get the final force and discretized peak velocity
final_forces.append([pillar_stiffness, sol[0][-1]])
times_discrete, disp_discrete = discretize_curve(timepoints, sol[0] / pillar_stiffness, 30)
velos_discrete = velos_numerical(np.asarray(times_discrete) / 60, disp_discrete)
peak_velo_discrete.append([pillar_stiffness, np.max(velos_discrete)])
# plot peak velocities
peak_velo_discrete = np.transpose(np.asarray(peak_velo_discrete))
plot_peak_velocities(peak_velo_discrete[0], peak_velo_discrete[1])
# plot final forces
final_forces = np.asarray(final_forces)
final_forces = final_forces.T
plot_final_forces(final_forces[0], final_forces[1], ylim_bottom=-20, ylim_top=420)
|
[
"noreply@github.com"
] |
JFlommersfeld.noreply@github.com
|
edbb85dd0ca6063541942d239e131754ea2518a0
|
13d6d302b111ef375e28a8685c141b0c61a9b4f1
|
/day9/code.py
|
a2dc45636323322b7bb5485f1e76f23d7864670c
|
[] |
no_license
|
eireneapostol/codeadvent
|
7aaa7e90275e95309329f26bcf3323579e7db8e4
|
183a8a9077cca16666ef1205cfd2e3316ccef1b6
|
refs/heads/master
| 2021-04-25T06:04:40.453486
| 2017-12-20T16:03:40
| 2017-12-20T16:03:40
| 113,890,424
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,120
|
py
|
'''
--- Day 9: Stream Processing ---
A large stream blocks your path. According to the locals, it's not safe to cross the stream at the moment because it's full of garbage. You look down at the stream; rather than water, you discover that it's a stream of characters.
You sit for a while and record part of the stream (your puzzle input). The characters represent groups - sequences that begin with { and end with }. Within a group, there are zero or more other things, separated by commas: either another group or garbage. Since groups can contain other groups, a } only closes the most-recently-opened unclosed group - that is, they are nestable. Your puzzle input represents a single, large group which itself contains many smaller ones.
Sometimes, instead of a group, you will find garbage. Garbage begins with < and ends with >. Between those angle brackets, almost any character can appear, including { and }. Within garbage, < has no special meaning.
In a futile attempt to clean up the garbage, some program has canceled some of the characters within it using !: inside garbage, any character that comes after ! should be ignored, including <, >, and even another !.
You don't see any characters that deviate from these rules. Outside garbage, you only find well-formed groups, and garbage always terminates according to the rules above.
Here are some self-contained pieces of garbage:
<>, empty garbage.
<random characters>, garbage containing random characters.
<<<<>, because the extra < are ignored.
<{!>}>, because the first > is canceled.
<!!>, because the second ! is canceled, allowing the > to terminate the garbage.
<!!!>>, because the second ! and the first > are canceled.
<{o"i!a,<{i<a>, which ends at the first >.
Here are some examples of whole streams and the number of groups they contain:
{}, 1 group.
{{{}}}, 3 groups.
{{},{}}, also 3 groups.
{{{},{},{{}}}}, 6 groups.
{<{},{},{{}}>}, 1 group (which itself contains garbage).
{<a>,<a>,<a>,<a>}, 1 group.
{{<a>},{<a>},{<a>},{<a>}}, 5 groups.
{{<!>},{<!>},{<!>},{<a>}}, 2 groups (since all but the last > are canceled).
Your goal is to find the total score for all groups in your input. Each group is assigned a score which is one more than the score of the group that immediately contains it. (The outermost group gets a score of 1.)
{}, score of 1.
{{{}}}, score of 1 + 2 + 3 = 6.
{{},{}}, score of 1 + 2 + 2 = 5.
{{{},{},{{}}}}, score of 1 + 2 + 3 + 3 + 3 + 4 = 16.
{<a>,<a>,<a>,<a>}, score of 1.
{{<ab>},{<ab>},{<ab>},{<ab>}}, score of 1 + 2 + 2 + 2 + 2 = 9.
{{<!!>},{<!!>},{<!!>},{<!!>}}, score of 1 + 2 + 2 + 2 + 2 = 9.
{{<a!>},{<a!>},{<a!>},{<ab>}}, score of 1 + 2 = 3.
What is the total score for all groups in your input?
--- Part Two ---
Now, you're ready to remove the garbage.
To prove you've removed it, you need to count all of the characters within the garbage. The leading and trailing < and > don't count, nor do any canceled characters or the ! doing the canceling.
<>, 0 characters.
<random characters>, 17 characters.
<<<<>, 3 characters.
<{!>}>, 2 characters.
<!!>, 0 characters.
<!!!>>, 0 characters.
<{o"i!a,<{i<a>, 10 characters.
How many non-canceled characters are within the garbage in your puzzle input?
'''
f = open("./inputs/day9","r")
line = f.readline()
line = list(line)
print(line)
i = 0
length = len(line)
count_garbage = 0 # non-canceled garbage - PART TWO
while i < length:
if line[i] == "<":
del(line[i])
end_garbage = False
parent = "<"
while not end_garbage:
if parent != "!" and line[i] != ">" and line[i] != "!":
count_garbage += 1
if line[i] == "!" and parent == "!":
line[i] = "x"
if line[i] == ">" and parent != "!":
end_garbage = True
parent = line[i]
del(line[i])
i += 1
length = len(line)
score = 0
nr_open = 0
for i in line:
if i == "{":
nr_open += 1
score += nr_open
if i == "}":
nr_open -= 1
print(line)
print(score) # solution PART 1
print(count_garbage) # solution PART TWO
|
[
"apostol.irina.mihail@gmail.com"
] |
apostol.irina.mihail@gmail.com
|
ee1f5ea3b1d1fc59071b32fb3800848e91e8dce8
|
597d30748afff316d245af4dd145909c107a6631
|
/Cryptology/generator.py
|
8e45903d8bca3a7c353bfb6c669a8cb1de83a1e7
|
[] |
no_license
|
rp25/oldClassWork
|
7605fbb31a4cebb4b60d5ff5f146f27e0714a0ec
|
ddf3300c49585f4438999a0d422e15c452c97618
|
refs/heads/master
| 2021-01-12T00:03:08.811392
| 2017-01-11T17:14:38
| 2017-01-11T17:14:38
| 78,660,624
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 967
|
py
|
#!/usr/bin/env python
# Original version by Will Morgan
# Modified to produce unique integers by Chris Marron
# (as well as some other modifications)
from random import randint
from sys import argv
def generate_unique_ints(num, lower, upper, file):
""" Generate a file of unique, random integers """
""" num - number of integers to generate """
""" lower - lower bound on integers """
""" upper - upper bound on integers """
""" file - name of output file """
dataFile = open(file, "w")
dataList = {}
while len( dataList.keys() ) < num:
dataList[ str( randint( lower, upper) ) ] = True
intData = sorted( map( int, dataList.keys() ) )
for n in intData:
dataFile.write(str(n) + "\n")
|
[
"rajanpatel625@gmail.com"
] |
rajanpatel625@gmail.com
|
6b85c86bf9fae1f69daae94816bb8f44e26688c3
|
d54e7f950115a00028f23c6b9d71cfcb2431d406
|
/turtle1.py
|
68bfd5d131d3f3eeb56a5f9f5f82d69d5e710d65
|
[] |
no_license
|
candy2882/MindX-D4E12
|
6ded560353af199e067805af1e7963a99cfd52be
|
a6a225b659f54132967cd4ece6dc5b0383486d2a
|
refs/heads/master
| 2022-08-03T08:33:50.724458
| 2020-05-25T11:01:21
| 2020-05-25T11:01:21
| 266,748,623
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 258
|
py
|
from turtle import *
speed(-1)
color('blue')
a = 200
for i in range(20):
for j in range(2):
forward(a/2)
left(90)
forward(a)
left(90)
for j in range(2):
forward(a/2)
left(90)
left(50)
mainloop()
|
[
"noreply@github.com"
] |
candy2882.noreply@github.com
|
1b2fee2d23cfabd911deb5fc04a15adcd4198f21
|
89175668a2d04910a8e75d98a6c9b1a102d7b43a
|
/basedbot/dbot.py
|
6aa8bdf16a39e7b5f7a1aab6babcc9b2a5af473a
|
[] |
no_license
|
Laurox/TUMbot
|
db0a6c5cea15cbbc550555d1c183b3dd71f14a61
|
1c277e9e28ea2466bdb14db16d38882977a05c04
|
refs/heads/master
| 2022-12-29T15:56:01.131294
| 2020-10-09T12:45:19
| 2020-10-09T12:45:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,991
|
py
|
import os
from pathlib import Path
import discord.ext.commands
from .dbmgr import DatabaseManager
from .confmgr import ConfigManager
class DBot(discord.ext.commands.Bot):
def __init__(self, **options):
super().__init__(**options)
self.db = DatabaseManager(os.environ.get('DBOT_DBPATH', "db"))
self.conf = ConfigManager(self.db)
self._cogpaths = ['basedbot/cogs']
async def close(self):
await super().close()
self.db.close()
async def send_table(self, messageable: discord.abc.Messageable, keys, table, maxlen=2000):
key_length = {}
for row in table:
for key in keys:
if key not in key_length:
key_length[key] = len(str(key))
key_length[key] = max(key_length[key], len(str(row[key])))
header = "|"
delimiter = "|"
for i in keys:
header += f" {str(i).ljust(key_length[i])} |"
delimiter += '-' * (key_length[i] + 2) + '|'
text = header + "\n" + delimiter
for row in table:
line = "\n|"
for key in keys:
line += f" {str(row[key]).ljust(key_length[key])} |"
# -6: Account for code block
if len(text) + len(line) >= maxlen - 6:
await messageable.send(f"```{text}```")
text = ""
text += line
await messageable.send(f"```{text}```")
def add_cog_path(self, path):
self._cogpaths.append(path)
def find_cog(self, name):
name = name.lower()
for path in self._cogpaths:
if os.path.isfile(f"{path}/{name}.py"):
return f"{path.replace('/', '.')}.{name}"
return None
def find_all_cogs(self):
cogs = []
for cogpath in self._cogpaths:
for path in Path(cogpath).glob('*.py'):
cogs.append('.'.join(path.parent.parts + (path.stem,)))
return cogs
|
[
"timschumi@gmx.de"
] |
timschumi@gmx.de
|
e38dccdb0d54d9999e361b5feba35a1683826052
|
1da6288279a9702bf86f3ba00c96a29248a370e7
|
/LightChaserAnim/testing/katana3.0_develop/Viewers/ExampleViewer/python/__init__.py
|
503141073666f9167e0f6d013eefcc9ea4350778
|
[] |
no_license
|
lovejunjie1/aWorkingSource
|
b6ca7e8322143dbdeabea9f42c23cce0bcb32070
|
2c82878d2e93900175a96af0d579ef0d2ef1913e
|
refs/heads/master
| 2020-05-20T09:41:03.964408
| 2019-03-29T09:49:29
| 2019-03-29T09:49:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,159
|
py
|
# Copyright (c) 2016 The Foundry Visionmongers Ltd. All Rights Reserved.
"""
Python package implementing a Example Viewer tab with simple drawing of mesh
geometry data as points. This tab uses a C++ library to perform the actual
drawing.
"""
# Append the Example viewer library to the path to allow it to be imported
import os, sys
import ManipulatorMenu
# Scan KATANA_RESOURCES directories for the one that contains 'ExampleViewer.so'
# and then add that to the system path, to allow us to import it below.
resources = os.getenv('KATANA_RESOURCES', '')
resources = resources.split(os.pathsep) if resources != '' else []
libExtension = "pyd" if sys.platform == "win32" else "so"
for resourcePath in resources:
if not resourcePath:
continue
libPath = os.path.abspath(os.path.join(resourcePath, 'Libs'))
if(os.path.isfile(os.path.join(libPath, 'ExampleViewer.%s' % libExtension))
or os.path.isfile(os.path.join(libPath, 'ExampleViewer_d.%s' % libExtension))):
sys.path.append(libPath)
from ExampleViewerTab import ExampleViewerTab
PluginRegistry = [
("KatanaPanel", 2.0, "Example Viewer", ExampleViewerTab),
]
|
[
"xukai@lightchaseranimation.com"
] |
xukai@lightchaseranimation.com
|
16a9ab45a648bf3879f29429dc097c418725dfe2
|
3e9b4f8d1a5a2fec968e69cabddd2c6703bf5b4f
|
/backend/instagram/migrations/0001_initial.py
|
26f693d957167c5dc0d25cf16934197a5c7c213c
|
[] |
no_license
|
Sim-JH/Django-React
|
6c425dfef78d41e558f315d4a38f9c6ee04720d7
|
cfb25040cc46f85821c8cdfc67ecbf0318e0c301
|
refs/heads/master
| 2023-04-22T04:58:01.392028
| 2021-05-15T19:51:49
| 2021-05-15T19:51:49
| 367,717,809
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,357
|
py
|
# Generated by Django 3.0.13 on 2021-04-05 09:39
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, unique=True)),
],
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('photo', models.ImageField(upload_to='instagram/post/%Y/%m/%d')),
('caption', models.TextField(max_length=500)),
('location', models.CharField(max_length=100)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='my_post_set', to=settings.AUTH_USER_MODEL)),
('like_user_set', models.ManyToManyField(blank=True, related_name='like_post_set', to=settings.AUTH_USER_MODEL)),
('tag_set', models.ManyToManyField(blank=True, to='instagram.Tag')),
],
options={
'ordering': ['-id'],
},
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('message', models.TextField()),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='instagram.Post')),
],
options={
'ordering': ['-id'],
},
),
]
|
[
"saide0032@gmail.com"
] |
saide0032@gmail.com
|
f28ffb38ea8f0fe9f3305d4b12da3031869b709a
|
0ef29103334cdb6a7d4a68f03401f1a8bc7e532e
|
/Red Cell Foundation/hospitals/migrations/0004_hospital_user.py
|
b53bd9a4dd755f972a06ba92e5bd6cf339ff8424
|
[] |
no_license
|
dhyey6602/iweb
|
43344538a9a20765dd4c936d3ae1560e2f959b88
|
df4430536630f2fcff118597d3c662e793ee3da2
|
refs/heads/master
| 2020-08-10T23:02:51.442335
| 2019-10-11T13:09:18
| 2019-10-11T13:09:18
| 214,439,162
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 592
|
py
|
# Generated by Django 2.2.4 on 2019-10-05 01:58
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('hospitals', '0003_remove_hospital_user'),
]
operations = [
migrations.AddField(
model_name='hospital',
name='user',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
[
"dhyey6602patel@gmail.com"
] |
dhyey6602patel@gmail.com
|
2d9da259fdc14b8cb5bf137e5ab76ab8e8182a96
|
c019093a2474b92bda1b9fcab0ae750937aedc1c
|
/jaxlie/manifold/_manifold_helpers.py
|
9bfeaae7af619693187e8bbe93f513efab7291ad
|
[
"MIT"
] |
permissive
|
mfkiwl/jaxlie
|
6c8a83d367299592a68bb80c7dc9816e9e006f09
|
4dbe16f3c1d1cfda30e0418ef5d1e1772cf9f537
|
refs/heads/master
| 2023-07-13T17:11:16.693321
| 2021-08-31T18:51:33
| 2021-08-31T18:51:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,652
|
py
|
from typing import TypeVar, cast
import jax
from jax import numpy as jnp
from .. import hints
from .._base import MatrixLieGroup
from .._se2 import SE2
from .._se3 import SE3
from .._so2 import SO2
from .._so3 import SO3
T = TypeVar("T", bound=MatrixLieGroup)
@jax.jit
def rplus(transform: T, delta: hints.TangentVector) -> T:
"""Manifold right plus.
Computes `T_wb = T_wa @ exp(delta)`.
Args:
transform: `T_wa`
delta: `T_ab.log()`
Returns:
T: `T_wb`
"""
return transform @ type(transform).exp(delta)
@jax.jit
def rplus_jacobian_parameters_wrt_delta(transform: MatrixLieGroup) -> hints.MatrixJax:
"""Analytical Jacobians for `jaxlie.manifold.rplus()`, linearized around a zero
local delta.
Useful for on-manifold optimization.
Equivalent to --
```
def rplus_jacobian_parameters_wrt_delta(transform: MatrixLieGroup) -> jnp.ndarray:
# Since transform objects are pytree containers, note that `jacfwd` returns a
# transformation object itself and that the Jacobian terms corresponding to the
# parameters are grabbed explicitly.
return jax.jacfwd(
jaxlie.manifold.rplus, # Args are (transform, delta)
argnums=1, # Jacobian wrt delta
)(transform, onp.zeros(transform.tangent_dim)).parameters()
```
Args:
transform
Returns:
Jacobian. Shape should be `(Group.parameters_dim, Group.tangent_dim)`.
"""
if type(transform) is SO2:
# Jacobian row indices: cos, sin
# Jacobian col indices: theta
transform_so2 = cast(SO2, transform)
J = jnp.zeros((2, 1))
cos, sin = transform_so2.unit_complex
J = J.at[0].set(-sin).at[1].set(cos)
elif type(transform) is SE2:
# Jacobian row indices: cos, sin, x, y
# Jacobian col indices: vx, vy, omega
transform_se2 = cast(SE2, transform)
J = jnp.zeros((4, 3))
# Translation terms
J = J.at[2:, :2].set(transform_se2.rotation().as_matrix())
# Rotation terms
J = J.at[:2, 2:3].set(
rplus_jacobian_parameters_wrt_delta(transform_se2.rotation())
)
elif type(transform) is SO3:
# Jacobian row indices: qw, qx, qy, qz
# Jacobian col indices: omega x, omega y, omega z
transform_so3 = cast(SO3, transform)
w, x, y, z = transform_so3.wxyz
_unused_neg_w, neg_x, neg_y, neg_z = -transform_so3.wxyz
J = (
jnp.array(
[
[neg_x, neg_y, neg_z],
[w, neg_z, y],
[z, w, neg_x],
[neg_y, x, w],
]
)
/ 2.0
)
elif type(transform) is SE3:
# Jacobian row indices: qw, qx, qy, qz, x, y, z
# Jacobian col indices: vx, vy, vz, omega x, omega y, omega z
transform_se3 = cast(SE3, transform)
J = jnp.zeros((7, 6))
# Translation terms
J = J.at[4:, :3].set(transform_se3.rotation().as_matrix())
# Rotation terms
J = J.at[:4, 3:6].set(
rplus_jacobian_parameters_wrt_delta(transform_se3.rotation())
)
else:
assert False, f"Unsupported type: {type(transform)}"
assert J.shape == (transform.parameters_dim, transform.tangent_dim)
return J
@jax.jit
def rminus(a: T, b: T) -> hints.TangentVectorJax:
"""Manifold right minus.
Computes `delta = (T_wa.inverse() @ T_wb).log()`.
Args:
a: `T_wa`
b: `T_wb`
Returns:
`T_ab.log()`
"""
return (a.inverse() @ b).log()
|
[
"yibrenth@gmail.com"
] |
yibrenth@gmail.com
|
8f95d383d94a707bbe4cd0c6783b1979d805c91d
|
53cd3aae3cd5a8cfd373f0cf5c3a3346591e95a6
|
/app.py
|
b6ab8f3a30c6b41a48b76178f98462e72e360579
|
[] |
no_license
|
zqqqqz2000/teacher-management-server
|
bfb958245a6affd04b3d3cf8047dcd9f756db244
|
f1e552db4bbd471ab97987614978af581314891e
|
refs/heads/master
| 2023-01-22T06:14:10.453562
| 2020-11-22T11:20:32
| 2020-11-22T11:20:32
| 313,501,327
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,481
|
py
|
from flask import Flask
from flask import request
from flask_cors import CORS
import datetime
from dao.check import Check
from dao.finance_office import FinanceOffice
from dao.salary import Salary
from dao.submission import Submission
from dao.teacher import Teacher
from global_var import db
from utils.token import with_token, tokenize
from dao.hr_office import HROffice
from dao.department import Department
from utils.enc import md5
import config
from typing import *
app = Flask(__name__)
app.config.from_object(config)
CORS(app, resources=r'/*', supports_credentials=True)
with app.app_context():
db.init_app(app=app)
# db.drop_all()
db.create_all()
@app.route('/hrlogin', methods=['POST'])
def hrlogin():
data = request.get_json(silent=True)
username = data['username']
password = data['password']
pass_hash = md5(password)
query_res = HROffice.query.filter_by(
username=username,
password=pass_hash
).first()
if query_res:
return {'success': True, 'token': tokenize({'username': username})}
else:
return {'success': False, 'info': '请核对用户名和密码'}
@app.route('/add_department', methods=['POST'])
@with_token
def add_department(token: Dict):
data = request.get_json(silent=True)
department_name = data['name']
if token:
d = Department(name=department_name)
db.session.add(d)
db.session.commit()
return {'success': True}
else:
return {'success': False, 'info': '请先登录'}
@app.route('/get_department', methods=['POST'])
@with_token
def get_department(token: Dict):
data = request.get_json(silent=True)
if token:
ds: List[Department] = Department.query.all()
res = [{
'id': d.id,
'name': d.name
} for d in ds]
return {'success': True, 'departments': res}
else:
return {'success': False, 'info': '请先登录'}
@app.route('/del_department', methods=['POST'])
@with_token
def del_department(token: Dict):
data = request.get_json(silent=True)
if token:
id_ = data['id']
d = Department.query.filter_by(id=id_).first()
db.session.delete(d)
db.session.commit()
return {'success': True}
else:
return {'success': False, 'info': '请先登录'}
# 教师
@app.route('/add_teacher', methods=['POST'])
@with_token
def add_teacher(token: Dict):
data = request.get_json(silent=True)
if token:
del data['token']
data['password'] = md5(data['password'])
t = Teacher(
**data
)
db.session.add(t)
db.session.commit()
return {'success': True}
else:
return {'success': False, 'info': '请先登录'}
@app.route('/get_teacher', methods=['POST'])
@with_token
def get_teacher(token: Dict):
data = request.get_json(silent=True)
if token:
ts: List[Tuple[Teacher, str]] = db.session.query(Teacher, Department.name).join(Department).all()
res = [{
'id': t.id,
'name': t.name,
'username': t.username,
'age': t.age,
'department': dname,
'gender': '男' if t.gender else '女',
'education': t.education,
'title': t.title,
'marry': '已婚' if t.marry else '未婚',
'hiredate': str(t.hiredate),
'status': '未在职' if t.status else '在职'
} for t, dname in ts]
return {'success': True, 'teacher': res}
else:
return {'success': False, 'info': '请先登录'}
@app.route('/teacher_check', methods=['POST'])
@with_token
def teacher_check(token: Dict):
data = request.get_json(silent=True)
if token:
teachers = data['teachers']
for t in teachers:
tid = t['id']
date = t['date']
check = t['check']
c = Check(
tid=tid,
date=datetime.datetime.strptime(date, '%Y-%M-%d').date(),
check=check
)
db.session.add(c)
db.session.commit()
return {'success': True}
else:
return {'success': False, 'info': '请先登录'}
@app.route('/submission', methods=['POST'])
@with_token
def submission(token: Dict):
data = request.get_json(silent=True)
if token:
from_, to = data['range']
checks = db.session.query(Teacher.name, Check.tid, db.func.count('*').label('c')).filter(
Check.date >= from_,
Check.date <= to,
Check.check == 0,
Teacher.id == Check.tid
).group_by(Check.tid).all()
basic_salary = data['basic_salary']
bonus = data['bonus']
tax = data['tax']
comment = data['comment']
for _, tid, times in checks:
s = Submission(
tid=tid,
check_fine=50 * int(times),
basic_salary=basic_salary,
bonus=bonus,
tax=tax,
comment=comment,
date=datetime.datetime.now().date()
)
db.session.add(s)
db.session.commit()
check_status = [f'教师: {name},签到漏签{times}次' for name, tid, times in checks]
return {'success': True, 'info': check_status}
else:
return {'success': False, 'info': '请先登录'}
# 财务处
@app.route('/get_submission', methods=['POST'])
@with_token
def get_submission(token: Dict):
data = request.get_json(silent=True)
if token:
subs: List[Tuple[str, Submission]] = db.session.query(Teacher.name, Submission).filter(
Teacher.id == Submission.tid
).all()
res = [{
'id': sub.id,
'name': name,
'check_fine': sub.check_fine,
'basic_salary': sub.basic_salary,
'tax': sub.tax,
'comment': sub.comment,
'date': str(sub.date),
'approve': '已审批' if sub.approve else '未审批'
} for name, sub in subs]
return {'success': True, 'submissions': res}
else:
return {'success': False}
@app.route('/finance_login', methods=['POST'])
def finance_login():
data = request.get_json(silent=True)
username = data['username']
password = data['password']
pass_hash = md5(password)
query_res = FinanceOffice.query.filter_by(
username=username,
password=pass_hash
).first()
if query_res:
return {'success': True, 'token': tokenize({'username': username})}
else:
return {'success': False, 'info': '请核对用户名和密码'}
@app.route('/delete_submission', methods=['POST'])
@with_token
def delete_submission(token: Dict):
data = request.get_json(silent=True)
if token:
id_ = data['id']
s = Submission.query.filter_by(id=id_).first()
db.session.delete(s)
db.session.commit()
return {'success': True}
else:
return {'success': False, 'info': '请核对用户名和密码'}
@app.route('/approve_submission', methods=['POST'])
@with_token
def approve_submission(token: Dict):
data = request.get_json(silent=True)
if token:
id_ = data['id']
s: Submission = Submission.query.filter_by(id=id_).first()
s.approve = 1
salary = Salary(
tid=s.tid,
check_fine=s.check_fine,
basic_salary=s.basic_salary,
bonus=s.bonus,
tax=s.tax,
date=s.date,
comment=s.comment
)
db.session.add(salary)
db.session.commit()
return {'success': True}
else:
return {'success': False, 'info': '请核对用户名和密码'}
# 教师登录
@app.route('/teacher_login', methods=['POST'])
def teacher_login():
data = request.get_json(silent=True)
username = data['username']
password = data['password']
pass_hash = md5(password)
query_res = Teacher.query.filter_by(
username=username,
password=pass_hash
).first()
if query_res:
return {'success': True, 'token': tokenize({'username': username})}
else:
return {'success': False, 'info': '请核对用户名和密码'}
@app.route('/teacher_info', methods=['POST'])
@with_token
def teacher_info(token: Dict):
data = request.get_json(silent=True)
if token:
username = token['username']
checks: List[Check] = db.session.query(Check).filter(
Teacher.username == username,
Check.tid == Teacher.id
).all()
salary: List[Salary] = db.session.query(Salary).filter(
Salary.tid == Teacher.id,
Teacher.username == username
).all()
teachers: List[str, Teacher] = db.session.query(Department.name, Teacher).filter(
Teacher.username == username
)
return {'success': True, 'infos': {
'checks': [{
'id': check.id,
'date': str(check.date),
'check': '已签到' if check.check else '未签到'
} for check in checks],
'salary': [{
'id': s.id,
'check_fine': s.check_fine,
'basic_salary': s.basic_salary,
'bonus': s.bonus,
'tax': s.tax,
'date': str(s.date),
'comment': s.comment
} for s in salary],
'teachers': [{
'id': teacher.id,
'name': teacher.name,
'username': teacher.username,
'education': teacher.education,
'age': teacher.age,
'title': teacher.title,
'marry': '未婚' if teacher.marry else '已婚',
'department': department_name,
'hiredate': str(teacher.hiredate),
'status': '在职' if teacher.status else '离职'
} for department_name, teacher in teachers]
}}
else:
return {'success': False, 'info': '请核对用户名和密码'}
@app.route('/searcher', methods=['POST'])
@with_token
def searcher(token: Dict):
data = request.get_json(silent=True)
if token:
del data['token']
data = {k: v for k, v in data.items() if v}
teachers: List[str, Teacher] = db.session.query(Teacher, Department).filter_by(
**data
).join(Department).all()
print(teachers)
return {'success': True, 'teachers': [{
'id': teacher.id,
'name': teacher.name,
'username': teacher.username,
'education': teacher.education,
'age': teacher.age,
'title': teacher.title,
'marry': '未婚' if teacher.marry else '已婚',
'department': department.name,
'hiredate': str(teacher.hiredate),
'status': '在职' if teacher.status else '离职'
} for teacher, department in teachers]}
else:
return {'success': False, 'info': '请核对用户名和密码'}
@app.route('/dis', methods=['POST'])
@with_token
def dis(token: Dict):
data = request.get_json(silent=True)
if token:
id_ = data['id']
t: Teacher = Teacher.query.filter_by(id=id_).first()
t.status
else:
return {'success': False, 'info': '请核对用户名和密码'}
if __name__ == '__main__':
app.run()
|
[
"zqqqqz2000@sina.cn"
] |
zqqqqz2000@sina.cn
|
3d29d9e0401b41f679f4562eeccb15214d879b8a
|
48965c88ac5820834951452963c6f4fd76f6c7b5
|
/capstone_backenv/bin/django-admin.py
|
a2324b8fdfa0f62e8c53edf0bf9eb90ca66d5e00
|
[] |
no_license
|
krishnaneupaney/capstone_back
|
1ee602a60ecf0626c29c2b6a66743a584342651d
|
da13af60fc05ab042838b1b1c253ca02e1fbc036
|
refs/heads/master
| 2023-05-01T15:37:48.023860
| 2021-05-21T01:51:25
| 2021-05-21T01:51:25
| 367,676,012
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 734
|
py
|
#!/Users/krishnaneupaney/Desktop/SEIR_Erica/capstone/CapstoneBackend/capstone_backenv/bin/python3.9
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
try:
from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
raise ImportError(
'django-admin.py was deprecated in Django 3.1 and removed in Django '
'4.0. Please manually remove this script from your virtual environment '
'and use django-admin instead.'
)
if __name__ == "__main__":
warnings.warn(
'django-admin.py is deprecated in favor of django-admin.',
RemovedInDjango40Warning,
)
management.execute_from_command_line()
|
[
"krishnaneupaney000@gmail.com"
] |
krishnaneupaney000@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.