blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
616
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
69
| license_type
stringclasses 2
values | repo_name
stringlengths 5
118
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
63
| visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 2.91k
686M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 23
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 213
values | src_encoding
stringclasses 30
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 2
10.3M
| extension
stringclasses 246
values | content
stringlengths 2
10.3M
| authors
listlengths 1
1
| author_id
stringlengths 0
212
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
451dfab3ed62aae3876fa15fbc9c2a21fb406512
|
8b937b1dd346494b8e2de51f589bc71504f73019
|
/fro9.py
|
b1bc1127b517b81cf9e8654f287233569ffa810d
|
[] |
no_license
|
Vivekg95/Python-Tutorial
|
b50c48a56958743cbd1392fd17ad0500bda7eaf2
|
10a5307a3fa273c4780f05c32869fc2abb01980f
|
refs/heads/master
| 2020-06-09T14:30:46.877224
| 2019-07-10T03:27:17
| 2019-07-10T03:27:17
| 193,452,387
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 274
|
py
|
def factorial(n):
    """Return n! computed recursively.

    Bug fix: the original base case tested n == 1 only, so factorial(0)
    recursed forever; n <= 1 also treats 0! == 1 correctly.
    """
    if n <= 1:
        return 1
    else:
        return n * factorial(n - 1)


print(factorial(3))

import sys

# Raise the interpreter's recursion limit so deeper factorials do not
# overflow the default call-stack limit.
sys.setrecursionlimit(5000)


def factorial1(m):
    """Return m! recursively.

    Bug fix: the original body recursed into factorial() instead of
    factorial1(), which defeated the purpose of this second definition.
    """
    if m <= 1:
        return 1
    else:
        return m * factorial1(m - 1)


print(factorial(30))
|
[
"ipsa77390@gmail.com"
] |
ipsa77390@gmail.com
|
e1a06a73f73b2fd764c7ffc3b8a93a8b59ec2dab
|
b264dde95141cd0cd2eeac9f64135647d4da38ed
|
/check_ttis.py
|
3fcac0bd6944d3341b68c8a6509b91e6de500448
|
[] |
no_license
|
ChrisLMartin/cal_data_processing
|
12e3831bd18c7b0304d117e82e946b14fcaa058a
|
73b59b74ee1ca4ab0f4893d183db08711a8af3f8
|
refs/heads/master
| 2018-12-04T18:39:24.967394
| 2018-11-07T05:13:27
| 2018-11-07T05:13:27
| 125,007,575
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,538
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 23 08:40:50 2018
@author: christopher.martin
"""
import matplotlib.pyplot as plt
import os
import pandas as pd
def file_walk():
    """Walk the CalCommander export share and collect time-to-iso values.

    Returns a dict mapping mix id -> time-to-iso string ('hh:mm').
    Scanning stops early once a mix id of 'Water' is encountered —
    presumably that file marks the end of the runs of interest (TODO
    confirm with the export layout).
    """
    folder_name = '//ICAL8000/Users/Public/Documents/Calmetrix/CalCommander/Export'
    # Lazily generate every file path anywhere under the export tree.
    paths = (os.path.join(root, filename)
             for root, _, filenames in os.walk(folder_name)
             for filename in filenames)
    tti_dict = {}
    for path in paths:
        mix_id, tti = find_tti(path)
        print(mix_id)  # progress indicator while walking
        tti_dict[mix_id] = tti
        if mix_id == 'Water':
            break
    return tti_dict
def find_tti(input_filename):
    """Read one export file and return (mix_id, time_to_iso).

    The mix id is the second '_'-separated component of the file name;
    the 'Time to Iso (hh:mm)' value is read from the 7-row header block
    at the top of the file.
    """
    mix_id = input_filename.split(sep='_')[1]
    # NOTE(review): error_bad_lines was removed in pandas 2.0 (replaced
    # by on_bad_lines) — this code assumes an older pandas release.
    df = pd.read_table(input_filename, nrows=7, error_bad_lines=False, encoding="latin1", header=None)
    # First column holds the field names: make them the column headers.
    df = df.set_index(0).transpose()
    # keys = ['Mix ID', 'Time to Iso (hh:mm)']
    # assert keys[0] in df.columns, '{} does not exist'.format(keys[0])
    # assert keys[1] in df.columns, '{} does not exist'.format(keys[1])
    return mix_id, df['Time to Iso (hh:mm)'].values[0]
def analyse_ttis():
    """Gather every time-to-iso value and visualise the distribution.

    Draws a histogram (left) and a boxplot (right) of the values
    expressed in hours.
    """
    tti_dict = file_walk()
    in_minutes = []
    in_hours = []
    for stamp in list(tti_dict.values()):
        parts = stamp.split(sep=':')
        in_minutes.append(int(parts[0]) * 60 + int(parts[1]))
        in_hours.append(float(parts[0]) + float(parts[1]) / 60)
    plt.subplot(1, 2, 1)
    plt.hist(in_hours, bins=20)
    plt.subplot(1, 2, 2)
    plt.boxplot(in_hours)


if __name__ == '__main__':
    analyse_ttis()
|
[
"christopherlancemartin@gmail.com"
] |
christopherlancemartin@gmail.com
|
8315b2c6c23754084561971e0514e2576d3f7184
|
1f3435e0698c062bebec79af4c2ed7bf5f865000
|
/demo/functions/filter_demo.py
|
35d5174300fee65a444402dcfb580c1f899a3d58
|
[] |
no_license
|
srikanthpragada/PYTHON_02_FEB_2021
|
d879f5aadc3717d342e21e69d56f74b1a3967ca6
|
c900b5745cf8343d5b00c345c0c13c9c7656cb5a
|
refs/heads/master
| 2023-03-07T11:57:09.199601
| 2021-03-01T03:18:07
| 2021-03-01T03:18:07
| 335,815,633
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 430
|
py
|
def ispositive(n: int) -> bool:
    """Return True when *n* is strictly positive, logging each check."""
    print('Checking ', n)
    return n > 0
def hasdigit(s: str) -> bool:
    """Return True if any character of *s* is a decimal digit."""
    return any(ch.isdigit() for ch in s)
# Sample data for the filter() demonstrations.
nums = [10, -20, 0, 9, -8, -6]
names = ['abc', 'xyz123', 'bc12', 'pqr']

# Print only the strictly positive numbers.
for value in filter(ispositive, nums):
    print(value)

# for value in filter(hasdigit, names):
#     print(value)
#
# for ch in filter(str.isupper , "How DO you DO?"):
#     print(ch)
|
[
"srikanthpragada@gmail.com"
] |
srikanthpragada@gmail.com
|
7a48e04dfc240929c06ae1b00ddb33f64a9b466b
|
f2415be2a2359d7a15ecd23d454ffd964dcfb84b
|
/build/pygments/tests/test_shell.py
|
eb09e8d17c911c9b5533fdde340fc78396a9f958
|
[
"MIT",
"BSD-2-Clause"
] |
permissive
|
sagangwee/sagangwee.github.io
|
9032658383ed577e762838440bfc570f16599e60
|
cbe39c5bb1691c6cca4ec1f50bfafe5e0f7140a0
|
refs/heads/master
| 2021-01-19T03:19:57.949882
| 2019-04-10T07:31:28
| 2019-04-10T07:31:28
| 49,043,796
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,940
|
py
|
# -*- coding: utf-8 -*-
"""
Basic Shell Tests
~~~~~~~~~~~~~~~~~
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import unittest
from pygments.token import Token
from pygments.lexers import BashLexer
class BashTest(unittest.TestCase):
    """Token-level regression tests for the Pygments Bash lexer,
    focused on ${...//...} parameter-expansion edge cases."""

    def setUp(self):
        # Fresh lexer per test; unlimited diff output so token-list
        # mismatches are shown in full.
        self.lexer = BashLexer()
        self.maxDiff = None

    def testCurlyNoEscapeAndQuotes(self):
        # Inside "${a//["b"]/}" the inner "b" must stay a double-quoted
        # string token rather than closing the outer quotes.
        fragment = u'echo "${a//["b"]/}"\n'
        tokens = [
            (Token.Name.Builtin, u'echo'),
            (Token.Text, u' '),
            (Token.Literal.String.Double, u'"'),
            (Token.String.Interpol, u'${'),
            (Token.Name.Variable, u'a'),
            (Token.Punctuation, u'//['),
            (Token.Literal.String.Double, u'"b"'),
            (Token.Punctuation, u']/'),
            (Token.String.Interpol, u'}'),
            (Token.Literal.String.Double, u'"'),
            (Token.Text, u'\n'),
        ]
        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))

    def testCurlyWithEscape(self):
        # A backslash-escaped quote inside ${a//[\"]/} is an escape
        # token, not the start of a string.
        fragment = u'echo ${a//[\\"]/}\n'
        tokens = [
            (Token.Name.Builtin, u'echo'),
            (Token.Text, u' '),
            (Token.String.Interpol, u'${'),
            (Token.Name.Variable, u'a'),
            (Token.Punctuation, u'//['),
            (Token.Literal.String.Escape, u'\\"'),
            (Token.Punctuation, u']/'),
            (Token.String.Interpol, u'}'),
            (Token.Text, u'\n'),
        ]
        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))

    def testParsedSingle(self):
        # $'...' ANSI-C quoting: the escaped quote stays inside one
        # single-quoted string token.
        fragment = u"a=$'abc\\''\n"
        tokens = [
            (Token.Name.Variable, u'a'),
            (Token.Operator, u'='),
            (Token.Literal.String.Single, u"$'abc\\''"),
            (Token.Text, u'\n'),
        ]
        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
|
[
"sagangwee@yahoo.com"
] |
sagangwee@yahoo.com
|
a39cf79c6b01e34ff8646714d6eb9d54aa6ab1d3
|
2959cbd1727c452d1806e42011b704add664fbf0
|
/cesar00.py
|
4c9c726f4858c875cce0ef2f1f22ff814b787b59
|
[] |
no_license
|
GuilhermeRamous/python-exercises
|
2f8c60aec952c0ba2e8914289215ebfa1ab90f0b
|
02ba796440ef3caa66614ac51250cd2e4e937600
|
refs/heads/master
| 2021-04-26T23:45:32.643108
| 2018-03-05T02:16:47
| 2018-03-05T02:16:47
| 123,851,138
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 930
|
py
|
def low_or_upper(string):
    """Return 0 if *string* (a single character) is a lowercase ASCII
    letter (code points 97..122), else 1.

    Bug fix: the original compared the character itself against the
    integers 97 and 122, which raises TypeError in Python 3 — the
    comparison must go through ord().
    """
    if 97 <= ord(string) <= 122:
        return 0
    else:
        return 1
def cesar_encrypter(texto, key):
    """Caesar-encrypt *texto* with shift *key*.

    Spaces pass through unchanged; any character outside the unaccented
    ASCII alphabet aborts with an explanatory message instead of the
    ciphertext. Shifts that run past 'z'/'Z' wrap back to the start of
    the alphabet.
    """
    resultado = ""
    alfabeto_low = "abcdefghijklmnopqrstuvwxyz"
    alfabeto_upper = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    for letra in texto:
        if letra not in alfabeto_low and letra not in alfabeto_upper and letra != " ":
            return "Cifra de cesar só criptografa letras não acentuadas. Execute o programa novamente."
        if letra == " ":
            resultado += " "
            continue
        shifted = chr(ord(letra) + key)
        if low_or_upper(letra) == 0 and shifted in alfabeto_low:
            resultado += shifted
        elif low_or_upper(letra) == 1 and shifted in alfabeto_upper:
            resultado += shifted
        else:
            # Shifted past the end of the alphabet: wrap around.
            resultado += chr(ord(letra) + key - 26)
    return resultado
# Read the plaintext and the shift key, then print the ciphertext.
string = input("Texto: ")
# Bug fix: the original line was missing its closing parenthesis, which
# made the whole script a SyntaxError.
num = int(input("Chave (1 a 26): "))
print(cesar_encrypter(string, num))
|
[
"society66601@gmail.com"
] |
society66601@gmail.com
|
f9f22339c66dce36ef39681ab09a8329b03d5019
|
03aebbf4fe13a3a0d79f738a0c2cd7741a788cdd
|
/Book_automation/ch4/passingReference.py
|
fe403f96341f517c3c38c7504e890d24f4369afd
|
[] |
no_license
|
Maxim-Krivobokov/python-practice
|
c69d2fbe601ed7e8388a564bf20a7ceab283919d
|
718698dfaeabd5ea000bce3a20bf18d1efe938c2
|
refs/heads/master
| 2021-05-24T10:27:29.022718
| 2020-07-22T14:34:15
| 2020-07-22T14:34:15
| 253,518,510
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 215
|
py
|
def dobavka(someParameter):
    """Append 'Hello' to the list passed in (mutates the caller's list)."""
    someParameter.append('Hello')


words = ['preved', 'hi', 'ola']
dobavka(words)
# Lists are passed by reference: the function mutated the very same
# object, so printing words shows the appended element.
print(words)
|
[
"maxim.krivobokov@gmail.com"
] |
maxim.krivobokov@gmail.com
|
2a8d7dc7341a2f0a3ec6c4fe31c35f2aa330f901
|
859787784f3e2aa1b89116197da5d9d7cae4dbfb
|
/packs/common_files/common_infos.py
|
715e6a892732f7f32c017b5598261439b5699fe6
|
[] |
no_license
|
santana-daniela/adm_impress
|
b5fd89abcc3357cb1e277396b98f7d0c59bb26f2
|
c1847d94a4188f405ed2448bb19ccb80fe67a403
|
refs/heads/master
| 2021-01-07T09:10:30.900253
| 2020-02-02T20:06:19
| 2020-02-02T20:06:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 334
|
py
|
import copy
import numpy as np
class CommonInfos:
    """Small helper mixin shared across the pack modules."""

    def get_local_t(self, T, volumes):
        """Restrict the (sparse) transmissibility matrix T to *volumes*
        and rebuild its diagonal so that every row sums to zero."""
        local = T[volumes][:, volumes]
        # Row totals of the restricted matrix.
        row_sums = np.array(local.sum(axis=1).transpose())[0]
        new_diag = local.diagonal()
        new_diag -= row_sums
        local.setdiag(new_diag)
        return local

    def copy(self):
        """Return a deep copy of this object."""
        return copy.deepcopy(self)
|
[
"jprandrade2@gmail.com"
] |
jprandrade2@gmail.com
|
8f97d18dcbd8a750d1ce616076ea9059e0c30ac9
|
9397a453a4b9c4ddd9988235fe4a8ee6720358c1
|
/analysis/visualization/geobia_Fig3.py
|
3c6f7f3e298c6c0220b120e9a6953f123d7b2f3c
|
[] |
no_license
|
komazsofi/myPhD_escience_analysis
|
33e61a145a1e1c13c646ecb092081182113dbce3
|
5f0ecdd05e7eaeb7fce30f0c28e0728642164dbc
|
refs/heads/master
| 2021-06-04T21:39:58.115874
| 2020-06-18T12:59:53
| 2020-06-18T12:59:53
| 119,659,750
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 883
|
py
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Global font settings applied to every figure.
font = {'family': 'normal',
        'weight': 'bold',
        'size': 18}
plt.rc('font', **font)
# Input: segmentation-parameter sweep results exported as CSV.
workdir='D:/Geobia_2018/Results_17ofApril/'
filename='3all_tiles_clean.txt_PC1__PC2__PC3segm_parameters'
segm_optimal=pd.read_csv(workdir+filename+'.csv',sep=',')
# Rename for a nicer axis label in the plot below.
segm_optimal.rename(columns={'Threshold': 'Similarity threshold'}, inplace = True)
print(segm_optimal.dtypes)
# Drop the minimum-size-50 runs and order rows by threshold for plotting.
segm_optimal=segm_optimal[segm_optimal['Minimum size']!=50]
segm_optimal=segm_optimal.sort_values(by=['Similarity threshold'])
"""
fig, ax = plt.subplots(figsize=(8,6))
segm_optimal.groupby('Minimum size').plot(x='threshold',y='optimization_criteria',ax=ax,marker='o')
plt.show()
"""
# One line per 'Minimum size' group: threshold vs optimisation criteria.
# NOTE(review): recent seaborn releases spell this parameter 'linestyles'
# (plural) — confirm against the pinned seaborn version.
sns.pointplot(x='Similarity threshold',y='Optimization criteria',hue='Minimum size',data=segm_optimal,linestyle="-")
plt.show()
|
[
"komazsofi@gmail.com"
] |
komazsofi@gmail.com
|
0873a4c3d5838c4306f5b1c919ab449235b82590
|
a03abf3628a6ba7b117daed34ab95213ac280d78
|
/Orig_Proj/ordered_list.py
|
b4175f6f5b8d735f7892d96732caf172b32686c0
|
[] |
no_license
|
DennisJGlennon/cpe102project1
|
996f40f4c85667058d33fdff8c6c66359059dc21
|
10654ecf8c7c62b7326bf3beff136cb9ca19c853
|
refs/heads/master
| 2021-01-01T18:08:11.211957
| 2015-04-23T00:25:36
| 2015-04-23T00:25:36
| 33,385,275
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 780
|
py
|
class OrderedList:
    """A list of ListItem entries kept sorted by their ordering key."""

    def __init__(self):
        # Entries sorted ascending by their .ord attribute.
        self.list = []

    def insert(self, item, ord):
        """Insert *item* before the first entry whose key is >= *ord*."""
        pos = 0
        while pos < len(self.list) and self.list[pos].ord < ord:
            pos += 1
        self.list.insert(pos, ListItem(item, ord))

    def remove(self, item):
        """Remove the first entry carrying *item*, if one exists."""
        for pos, entry in enumerate(self.list):
            if entry.item == item:
                del self.list[pos]
                return

    def head(self):
        """Return the first entry without removing it, or None if empty."""
        return self.list[0] if self.list else None

    def pop(self):
        """Remove and return the first entry (None when the list is empty)."""
        if self.list:
            return self.list.pop(0)
class ListItem:
    """Pairs a payload with the ordering key it was inserted under."""

    def __init__(self, item, ord):
        self.item = item
        self.ord = ord

    def __eq__(self, other):
        """Two entries are equal when both payload and key match."""
        return self.item == other.item and self.ord == other.ord
|
[
"dmritter@calpoly.edu"
] |
dmritter@calpoly.edu
|
ed59feea2ea0c8d539121a236416d59f52212059
|
eb4b0419d3843b68b16e8136556e9dc37dbc6701
|
/dba3/day5/c_pachong.py
|
d3d2f299ac60b17477f965a073ede902d6fa33ee
|
[] |
no_license
|
YuyeeYouzi/youzi
|
9a2e7682896c4d13edf5394bf6f3357729a849e2
|
942df08f42a715704d188a685f6dccc7dfe47162
|
refs/heads/master
| 2021-07-23T20:50:17.986272
| 2017-11-03T15:02:49
| 2017-11-03T15:02:49
| 109,358,961
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,165
|
py
|
# Bug fix: the first line began with a stray '+' (a leftover diff marker),
# which made the whole module a SyntaxError.
#import urllib  # Python 2 split this into urllib/urllib2
#urllib.request
from urllib import request, error
from lxml import etree

url = 'http://www.51job.com/'
response = request.urlopen(url)  # open the URL and fetch its content
#html = response.read()  # bytes
#html = html.decode('gbk')
#print(html)
#headers = {
#    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0',
#    #'Connection': 'keep_alive'
#}
## Build a request object carrying custom headers.
#req = request.Request(url, headers=headers)
##html = request.urlopen(req)
##html = html.read().decode('gbk')
##print(html)
#try:
#    html = request.urlopen(req).read().decode('gbk')
#except Exception as e:
#    print(e, url)

# Small XML document used to demonstrate XPath queries.
xml = '''
<bookstore>
<title lang="en">Tom Jarry</title>
<book>
<title lang="en">Harry Potter<price>555</price></title>
<price id='111'>29.99</price>
</book>
<book>
<title lang="cn" >Learning XML</title>
<price id='222'>39.95</price>
</book>
</bookstore>
'''
#xml = etree.Html(xml)
xml = etree.ElementTree(etree.HTML(xml))
print(xml)
#print(xml.xpath('/html/body/bookstore/book'))  # absolute path
print(xml.xpath('//bookstore/book/..'))  # relative path
print(xml.xpath('//bookstore/book/title/@lang'))  # attribute values
print(xml.xpath('//title/text()'))  # text content
print(xml.xpath('//book/title/text()'))
print(xml.xpath('//bookstore/title/text()'))
print(xml.xpath('//bookstore/book[1]/title/price/text()'))  # the [1] index after book is optional here
print(xml.xpath('//bookstore/book/title[1]'))  # XPath indices start at 1
print(xml.xpath('//bookstore//title[1]'))  # ----> matches 3 nodes
print(xml.xpath('//bookstore/title[1]'))  # ----> matches 1 node
print(xml.xpath('//bookstore//price[@id]'))
print(xml.xpath('//bookstore//price[@id="222"]'))
print(xml.xpath('//bookstore/*'))  # every child node of bookstore
print(xml.xpath('//bookstore/*[@*]'))
def get_html(url):
    """Fetch *url* and return the raw response body, or None on failure.

    Bug fixes vs. the original: the def line was missing its colon
    (SyntaxError); 'html' was never assigned before 'return html'
    (NameError); and the generic handler printed e.url instead of
    (e, url).

    NOTE(review): HTTPError subclasses URLError, so the HTTPError clause
    below is unreachable; kept to mirror the original structure.
    """
    html = None
    try:
        response = request.urlopen(url)
        html = response.read()
    except error.URLError as e:
        print(e, url)
    except error.HTTPError as e:
        print(e, url)
    except Exception as e:
        print(e, url)
    return html
|
[
"YuyeeYouzi@example.com"
] |
YuyeeYouzi@example.com
|
10a6e8075d142d8ef31be740aa4f0af798748102
|
547af1246bb04acdd3313c0cf02a14831251b841
|
/tools/releasetools/edify_generator.py
|
d4a35fb519165d1992d7c96d51a2f7c596875de0
|
[
"Apache-2.0"
] |
permissive
|
deneydeville/android_build
|
572463b167c34d7a47e7df5e3e69329eff0dd199
|
554ed99e8fa723ad39cd3ec2e89892332765f65e
|
refs/heads/master
| 2021-01-20T13:22:49.173725
| 2016-06-10T13:29:49
| 2016-06-10T13:29:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,887
|
py
|
# Copyright (C) 2009 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import common
class EdifyGenerator(object):
  """Class to generate scripts in the 'edify' recovery script language
  used from donut onwards."""

  def __init__(self, version, info, fstab=None):
    # Accumulated edify commands, one command string per entry.
    self.script = []
    # Mount points this script has mounted, so UnmountAll can clean up.
    self.mounts = set()
    self.version = version
    self.info = info
    if fstab is None:
      # Fall back to the fstab carried in the target-files info dict.
      self.fstab = self.info.get("fstab", None)
    else:
      self.fstab = fstab

  def MakeTemporary(self):
    """Make a temporary script object whose commands can latter be
    appended to the parent script with AppendScript().  Used when the
    caller wants to generate script commands out-of-order."""
    x = EdifyGenerator(self.version, self.info)
    x.mounts = self.mounts
    return x

  @staticmethod
  def WordWrap(cmd, linelen=80):
    """'cmd' should be a function call with null characters after each
    parameter (eg, "somefun(foo,\0bar,\0baz)").  This function wraps cmd
    to a given line length, replacing nulls with spaces and/or newlines
    to format it nicely."""
    indent = cmd.index("(")+1
    out = []
    first = True
    # Greedily grab the longest prefix that fits and ends at a null.
    x = re.compile("^(.{,%d})\0" % (linelen-indent,))
    while True:
      if not first:
        out.append(" " * indent)
      first = False
      m = x.search(cmd)
      if not m:
        # No null within the line budget: break at the next null anyway.
        parts = cmd.split("\0", 1)
        out.append(parts[0]+"\n")
        if len(parts) == 1:
          break
        else:
          cmd = parts[1]
          continue
      out.append(m.group(1)+"\n")
      cmd = cmd[m.end():]
    return "".join(out).replace("\0", " ").rstrip("\n")

  def AppendScript(self, other):
    """Append the contents of another script (which should be created
    with temporary=True) to this one."""
    self.script.extend(other.script)

  def AssertOemProperty(self, name, value):
    """Assert that a property on the OEM paritition matches a value."""
    if not name:
      raise ValueError("must specify an OEM property")
    if not value:
      raise ValueError("must specify the OEM value")
    cmd = ('file_getprop("/oem/oem.prop", "{name}") == "{value}" || '
           'abort("This package expects the value \\"{value}\\" for '
           '\\"{name}\\" on the OEM partition; this has value \\"" + '
           'file_getprop("/oem/oem.prop", "{name}") + "\\".");').format(
               name=name, value=value)
    self.script.append(cmd)

  def AssertSomeFingerprint(self, *fp):
    """Assert that the current recovery build fingerprint is one of *fp."""
    if not fp:
      raise ValueError("must specify some fingerprints")
    cmd = (' ||\n    '.join([('getprop("ro.build.fingerprint") == "%s"') % i
                             for i in fp]) +
           ' ||\n    abort("Package expects build fingerprint of %s; this '
           'device has " + getprop("ro.build.fingerprint") + ".");') % (
               " or ".join(fp))
    self.script.append(cmd)

  def AssertSomeThumbprint(self, *fp):
    """Assert that the current recovery build thumbprint is one of *fp."""
    if not fp:
      raise ValueError("must specify some thumbprints")
    cmd = (' ||\n    '.join([('getprop("ro.build.thumbprint") == "%s"') % i
                             for i in fp]) +
           ' ||\n    abort("Package expects build thumbprint of %s; this '
           'device has " + getprop("ro.build.thumbprint") + ".");') % (
               " or ".join(fp))
    self.script.append(cmd)

  def AssertOlderBuild(self, timestamp, timestamp_text):
    """Assert that the build on the device is older (or the same as)
    the given timestamp."""
    self.script.append(
        ('(!less_than_int(%s, getprop("ro.build.date.utc"))) || '
         'abort("Can\'t install this package (%s) over newer '
         'build (" + getprop("ro.build.date") + ").");') % (timestamp,
                                                            timestamp_text))

  def AssertDevice(self, device):
    """Assert that the device identifier is the given string."""
    # Accepts a comma-separated list of device names; either
    # ro.product.device or ro.build.product may match.
    cmd = ('assert(' +
           ' || '.join(['getprop("ro.product.device") == "%s" || getprop("ro.build.product") == "%s"'
                        % (i, i) for i in device.split(",")]) +
           ' || abort("This package is for device: %s; ' +
           'this device is " + getprop("ro.product.device") + ".");' +
           ');') % device
    self.script.append(cmd)

  def AssertSomeBootloader(self, *bootloaders):
    """Assert that the bootloader version is one of *bootloaders."""
    cmd = ("assert(" +
           " || ".join(['getprop("ro.bootloader") == "%s"' % (b,)
                        for b in bootloaders]) +
           ' || abort("This package supports bootloader(s): ' +
           ", ".join(["%s" % (b,) for b in bootloaders]) +
           '; this device has bootloader " + getprop("ro.bootloader") + ".");' +
           ");")
    self.script.append(self.WordWrap(cmd))

  def AssertSomeBaseband(self, *basebands):
    """Assert that the baseband version is one of *basebands."""
    cmd = ("assert(" +
           " || ".join(['getprop("ro.baseband") == "%s"' % (b,)
                        for b in basebands]) +
           ' || abort("This package supports baseband(s): ' +
           ", ".join(["%s" % (b,) for b in basebands]) +
           '; this device has baseband " + getprop("ro.baseband") + ".");' +
           ");")
    self.script.append(self.WordWrap(cmd))

  def RunBackup(self, command):
    # Invoke the CM backuptool helper with 'backup' or 'restore'.
    self.script.append(('run_program("/tmp/install/bin/backuptool.sh", "%s");' % command))

  def ValidateSignatures(self, command):
    # Verify the release key on /data matches the package's key.
    self.script.append('package_extract_file("META-INF/org/cyanogenmod/releasekey", "/tmp/releasekey");')
    # Exit code 124 == abort. run_program returns raw, so left-shift 8bit
    self.script.append('run_program("/tmp/install/bin/otasigcheck.sh") != "31744" || abort("Can\'t install this package on top of incompatible data. Please try another package or run a factory reset");')

  def FlashSuperSU(self):
    # Unpack the bundled SuperSU zip and run its own update-binary.
    self.script.append('package_extract_dir("supersu", "/tmp/supersu");')
    self.script.append('run_program("/sbin/busybox", "unzip", "/tmp/supersu/supersu.zip", "META-INF/com/google/android/*", "-d", "/tmp/supersu");')
    self.script.append('run_program("/sbin/busybox", "sh", "/tmp/supersu/META-INF/com/google/android/update-binary", "dummy", "1", "/tmp/supersu/supersu.zip");')

  def DolbyScript(self):
    # Unpack the bundled Dolby zip and run its own update-binary.
    self.script.append('package_extract_dir("dolby", "/tmp/dolby");')
    self.script.append('run_program("/sbin/busybox", "unzip", "/tmp/dolby/dolby.zip", "META-INF/com/google/android/*", "-d", "/tmp/dolby");')
    self.script.append('run_program("/sbin/busybox", "sh", "/tmp/dolby/META-INF/com/google/android/update-binary", "dummy", "1", "/tmp/dolby/dolby.zip");')

  def GappsScript(self):
    # Unpack the bundled Gapps zip and run its own update-binary.
    self.script.append('package_extract_dir("Gapps", "/tmp/Gapps");')
    self.script.append('run_program("/sbin/busybox", "unzip", "/tmp/Gapps/Gapps.zip", "META-INF/com/google/android/*", "-d", "/tmp/Gapps");')
    self.script.append('run_program("/sbin/busybox", "sh", "/tmp/Gapps/META-INF/com/google/android/update-binary", "dummy", "1", "/tmp/Gapps/Gapps.zip");')

  def ShowProgress(self, frac, dur):
    """Update the progress bar, advancing it over 'frac' over the next
    'dur' seconds.  'dur' may be zero to advance it via SetProgress
    commands instead of by time."""
    self.script.append("show_progress(%f, %d);" % (frac, int(dur)))

  def SetProgress(self, frac):
    """Set the position of the progress bar within the chunk defined
    by the most recent ShowProgress call.  'frac' should be in
    [0,1]."""
    self.script.append("set_progress(%f);" % (frac,))

  def PatchCheck(self, filename, *sha1):
    """Check that the given file (or MTD reference) has one of the
    given *sha1 hashes, checking the version saved in cache if the
    file does not match."""
    self.script.append(
        'apply_patch_check("%s"' % (filename,) +
        "".join([', "%s"' % (i,) for i in sha1]) +
        ') || abort("\\"%s\\" has unexpected contents.");' % (filename,))

  def FileCheck(self, filename, *sha1):
    """Check that the given file (or MTD reference) has one of the
    given *sha1 hashes."""
    self.script.append('assert(sha1_check(read_file("%s")' % (filename,) +
                       "".join([', "%s"' % (i,) for i in sha1]) +
                       '));')

  def CacheFreeSpaceCheck(self, amount):
    """Check that there's at least 'amount' space that can be made
    available on /cache."""
    self.script.append(('apply_patch_space(%d) || abort("Not enough free space '
                        'on /system to apply patches.");') % (amount,))

  def Mount(self, mount_point, mount_options_by_format=""):
    """Mount the partition with the given mount_point.
      mount_options_by_format:
      [fs_type=option[,option]...[|fs_type=option[,option]...]...]
      where option is optname[=optvalue]
      E.g. ext4=barrier=1,nodelalloc,errors=panic|f2fs=errors=recover
    """
    fstab = self.fstab
    if fstab:
      p = fstab[mount_point]
      mount_dict = {}
      if mount_options_by_format is not None:
        # Parse the per-filesystem-type option string into a dict.
        for option in mount_options_by_format.split("|"):
          if "=" in option:
            key, value = option.split("=", 1)
            mount_dict[key] = value
      mount_flags = mount_dict.get(p.fs_type, "")
      if p.context is not None:
        # Prepend the SELinux context option when one is declared.
        mount_flags = p.context + ("," + mount_flags if mount_flags else "")
      self.script.append('mount("%s", "%s", "%s", "%s", "%s");' % (
          p.fs_type, common.PARTITION_TYPES[p.fs_type], p.device,
          p.mount_point, mount_flags))
      self.mounts.add(p.mount_point)

  # NOTE(review): this Unmount is shadowed by a second Unmount definition
  # further down in this class, so this version is dead code at runtime.
  def Unmount(self, mount_point):
    """Unmount the partiiton with the given mount_point."""
    if mount_point in self.mounts:
      self.mounts.remove(mount_point)
      self.script.append('unmount("%s");' % (mount_point,))

  def UnpackPackageDir(self, src, dst):
    """Unpack a given directory from the OTA package into the given
    destination directory."""
    self.script.append('package_extract_dir("%s", "%s");' % (src, dst))

  def Comment(self, comment):
    """Write a comment into the update script."""
    self.script.append("")
    for i in comment.split("\n"):
      self.script.append("# " + i)
    self.script.append("")

  def Print(self, message):
    """Log a message to the screen (if the logs are visible)."""
    self.script.append('ui_print("%s");' % (message,))

  def TunePartition(self, partition, *options):
    # Pass tune2fs options for an ext partition; other fs types cannot
    # be tuned this way.
    fstab = self.fstab
    if fstab:
      p = fstab[partition]
      if p.fs_type not in ("ext2", "ext3", "ext4"):
        raise ValueError("Partition %s cannot be tuned\n" % (partition,))
    self.script.append(
        'tune2fs(' + "".join(['"%s", ' % (i,) for i in options]) +
        '"%s") || abort("Failed to tune partition %s");' % (
            p.device, partition))

  def FormatPartition(self, partition):
    """Format the given partition, specified by its mount point (eg,
    "/system")."""
    fstab = self.fstab
    if fstab:
      p = fstab[partition]
      self.script.append('format("%s", "%s", "%s", "%s", "%s");' %
                         (p.fs_type, common.PARTITION_TYPES[p.fs_type],
                          p.device, p.length, p.mount_point))

  def WipeBlockDevice(self, partition):
    # Zero out the block device backing /system or /vendor only.
    if partition not in ("/system", "/vendor"):
      raise ValueError(("WipeBlockDevice doesn't work on %s\n") % (partition,))
    fstab = self.fstab
    size = self.info.get(partition.lstrip("/") + "_size", None)
    device = fstab[partition].device
    self.script.append('wipe_block_device("%s", %s);' % (device, size))

  def DeleteFiles(self, file_list):
    """Delete all files in file_list."""
    if not file_list:
      return
    cmd = "delete(" + ",\0".join(['"%s"' % (i,) for i in file_list]) + ");"
    self.script.append(self.WordWrap(cmd))

  def DeleteFilesIfNotMatching(self, file_list):
    """Delete the file in file_list if not matching the checksum."""
    if not file_list:
      return
    for name, sha1 in file_list:
      cmd = ('sha1_check(read_file("{name}"), "{sha1}") || '
             'delete("{name}");'.format(name=name, sha1=sha1))
      self.script.append(self.WordWrap(cmd))

  def RenameFile(self, srcfile, tgtfile):
    """Moves a file from one location to another."""
    if self.info.get("update_rename_support", False):
      self.script.append('rename("%s", "%s");' % (srcfile, tgtfile))
    else:
      raise ValueError("Rename not supported by update binary")

  def SkipNextActionIfTargetExists(self, tgtfile, tgtsha1):
    """Prepend an action with an apply_patch_check in order to
    skip the action if the file exists.  Used when a patch
    is later renamed."""
    # NOTE(review): tgtsha1 is interpolated without surrounding quotes;
    # callers apparently pass an already-quoted string — confirm.
    cmd = ('sha1_check(read_file("%s"), %s) ||' % (tgtfile, tgtsha1))
    self.script.append(self.WordWrap(cmd))

  def ApplyPatch(self, srcfile, tgtfile, tgtsize, tgtsha1, *patchpairs):
    """Apply binary patches (in *patchpairs) to the given srcfile to
    produce tgtfile (which may be "-" to indicate overwriting the
    source file."""
    if len(patchpairs) % 2 != 0 or len(patchpairs) == 0:
      raise ValueError("bad patches given to ApplyPatch")
    cmd = ['apply_patch("%s",\0"%s",\0%s,\0%d'
           % (srcfile, tgtfile, tgtsha1, tgtsize)]
    for i in range(0, len(patchpairs), 2):
      cmd.append(',\0%s, package_extract_file("%s")' % patchpairs[i:i+2])
    cmd.append(');')
    cmd = "".join(cmd)
    self.script.append(self.WordWrap(cmd))

  def WriteRawImage(self, mount_point, fn, mapfn=None):
    """Write the given package file into the partition for the given
    mount point."""
    fstab = self.fstab
    if fstab:
      p = fstab[mount_point]
      partition_type = common.PARTITION_TYPES[p.fs_type]
      args = {'device': p.device, 'fn': fn}
      if partition_type == "MTD":
        self.script.append(
            'write_raw_image(package_extract_file("%(fn)s"), "%(device)s");'
            % args)
      elif partition_type == "OSIP":
        self.script.append(
            'write_osip_image(package_extract_file("%(fn)s"), "%(device)s");'
            % args)
      elif partition_type == "EMMC":
        if mapfn:
          # Block-based OTA: pass the block map alongside the image.
          args["map"] = mapfn
          self.script.append(
              'package_extract_file("%(fn)s", "%(device)s", "%(map)s");' % args)
        else:
          self.script.append(
              'package_extract_file("%(fn)s", "%(device)s");' % args)
      else:
        raise ValueError(
            "don't know how to write \"%s\" partitions" % p.fs_type)

  def SetPermissions(self, fn, uid, gid, mode, selabel, capabilities):
    """Set file ownership and permissions."""
    if not self.info.get("use_set_metadata", False):
      # Legacy updater binary: set_perm only handles uid/gid/mode.
      self.script.append('set_perm(%d, %d, 0%o, "%s");' % (uid, gid, mode, fn))
    else:
      cmd = 'set_metadata("%s", "uid", %d, "gid", %d, "mode", 0%o' \
          % (fn, uid, gid, mode)
      if capabilities is not None:
        cmd += ', "capabilities", %s' % ( capabilities )
      if selabel is not None:
        cmd += ', "selabel", "%s"' % selabel
      cmd += ');'
      self.script.append(cmd)

  def SetPermissionsRecursive(self, fn, uid, gid, dmode, fmode, selabel,
                              capabilities):
    """Recursively set path ownership and permissions."""
    if not self.info.get("use_set_metadata", False):
      self.script.append('set_perm_recursive(%d, %d, 0%o, 0%o, "%s");'
                         % (uid, gid, dmode, fmode, fn))
    else:
      cmd = 'set_metadata_recursive("%s", "uid", %d, "gid", %d, ' \
          '"dmode", 0%o, "fmode", 0%o' \
          % (fn, uid, gid, dmode, fmode)
      if capabilities is not None:
        cmd += ', "capabilities", "%s"' % ( capabilities )
      if selabel is not None:
        cmd += ', "selabel", "%s"' % selabel
      cmd += ');'
      self.script.append(cmd)

  def MakeSymlinks(self, symlink_list):
    """Create symlinks, given a list of (dest, link) pairs."""
    by_dest = {}
    for d, l in symlink_list:
      by_dest.setdefault(d, []).append(l)
    for dest, links in sorted(by_dest.items()):
      cmd = ('symlink("%s", ' % (dest,) +
             ",\0".join(['"' + i + '"' for i in sorted(links)]) + ");")
      self.script.append(self.WordWrap(cmd))

  def AppendExtra(self, extra):
    """Append text verbatim to the output script."""
    self.script.append(extra)

  # NOTE(review): duplicate method — this definition shadows the earlier
  # Unmount above.  Unlike that one, it does not check membership first,
  # so it raises KeyError if mount_point was never mounted.
  def Unmount(self, mount_point):
    self.script.append('unmount("%s");' % mount_point)
    self.mounts.remove(mount_point)

  def UnmountAll(self):
    # Unmount everything this script mounted, in sorted order.
    for p in sorted(self.mounts):
      self.script.append('unmount("%s");' % (p,))
    self.mounts = set()

  def AddToZip(self, input_zip, output_zip, input_path=None):
    """Write the accumulated script to the output_zip file.  input_zip
    is used as the source for the 'updater' binary needed to run
    script.  If input_path is not None, it will be used as a local
    path for the binary instead of input_zip."""
    self.UnmountAll()
    common.ZipWriteStr(output_zip, "META-INF/com/google/android/updater-script",
                       "\n".join(self.script) + "\n")
    if input_path is None:
      data = input_zip.read("OTA/bin/updater")
    else:
      data = open(input_path, "rb").read()
    common.ZipWriteStr(output_zip, "META-INF/com/google/android/update-binary",
                       data, perms=0o755)
|
[
"renosang@gmail.com"
] |
renosang@gmail.com
|
be1929dd1be553a6bedfcf9381fe86c19edf1a79
|
79ef95f92c459f896986077db960bf237c430380
|
/lab/merge_sort.py
|
5424bacf630f8324e908cb3ea631daeb66049c26
|
[] |
no_license
|
SpeechieX/algorithms-and-data-structures
|
67e9ac613d197d096964c077c2c45a0f08f2ff60
|
566d7d3884bf58880fcb3718f6870d632820d424
|
refs/heads/master
| 2020-05-25T21:08:31.806574
| 2018-07-30T22:55:03
| 2018-07-30T22:55:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 844
|
py
|
def merge(a, b):
    """Merge two [sorted_list, inversion_count] pairs into one.

    Each argument is a two-element list: the already-sorted values and
    the number of inversions counted while sorting them. Returns the
    merged sorted values together with the combined inversion count.

    Bug fix: when an element of b precedes the remaining elements of a,
    EVERY remaining element of a forms an inversion with it, so the
    count must grow by len(a) - i, not by 1.
    """
    i = 0
    j = 0
    merged = []  # renamed from 'sorted' to avoid shadowing the builtin
    inversions = a[1] + b[1]
    a = a[0]
    b = b[0]
    while (i < len(a) and j < len(b)):
        if a[i] < b[j]:
            merged.append(a[i])
            i += 1
        elif a[i] == b[j]:
            # Equal values are not inversions; consume one from each side.
            merged.append(a[i])
            merged.append(b[j])
            i += 1
            j += 1
        else:
            # b[j] jumps ahead of all len(a) - i remaining a-elements.
            inversions += len(a) - i
            merged.append(b[j])
            j += 1
    # At most one of these extends: append whichever side has leftovers.
    merged.extend(a[i:])
    merged.extend(b[j:])
    return [merged, inversions]
def merge_sort(arr, left, right):
    """Sort arr[left:right] while counting inversions.

    Returns [sorted_slice, inversion_count].  Requires
    0 <= left <= right <= len(arr).
    """
    # Guard degenerate (empty or single-element) ranges; the original
    # `left == right - 1` test missed the empty range and recursed forever.
    if right - left <= 1:
        return [arr[left:right], 0]
    midpoint = (right - left) // 2 + left
    return merge(merge_sort(arr, left, midpoint), merge_sort(arr, midpoint, right))
|
[
"jtamsut1993@gmail.com"
] |
jtamsut1993@gmail.com
|
c361766ebc265679ff6af2f6299d746627145288
|
b09aefec305ec7926fb85cbfdc9fcb6b4757aa22
|
/tr/text_recognition/Preprocess/Image_Preprocess.py
|
400616ae5005719bcdf6a5ae603c4232388b61e7
|
[] |
no_license
|
thydeyx/recognize
|
182d164163c9165ad9fc2733f85e77ae9698056a
|
5af79b4e6606ea2fd78a586152b8b8125c8f3e63
|
refs/heads/master
| 2020-03-27T21:49:18.410454
| 2018-09-03T09:13:22
| 2018-09-03T09:13:22
| 147,178,020
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,172
|
py
|
# -*- coding: utf-8 -*-
import sys
import os
import re
import random
import numpy as np
from PIL import Image, ImageDraw
from matplotlib import pyplot as plt
from sklearn.utils.extmath import cartesian
import cv2 as cv
class Image_Database(object):
    """Image/ground-truth database that pre-computes anchor proposals and
    serves mini-batches for RPN and region-classifier training."""

    def __init__(self, image_dir_path, gt_dir_path, image_height, image_width):
        """Load every image/ground-truth pair and pre-compute proposals.

        Note: construction performs disk I/O plus the full proposal/overlap
        computation, so it can be slow on large directories.
        """
        self.image_height = image_height; # image height
        self.image_width = image_width; # image width
        # The CNN feature map is the input downsampled by a stride of 16.
        self.convmap_height = int(np.ceil(self.image_height / 16.)); # CNN feature map height
        self.convmap_width = int(np.ceil(self.image_width / 16.)); # CNN feature map width
        # Anchor heights as fractions of the image height, and the
        # width/height ratios combined with them.
        self.scale = np.array([1 / 16, 1 / 10, 1 / 8, 1 / 6, 1 / 4]);
        self.aspect_ratio = np.array([1, 2, 5, 8]);
        # Number of anchors per feature-map cell (scale x ratio combos).
        self.anchor_size = self.scale.shape[0] * self.aspect_ratio.shape[0];
        self.image_dict = dict();
        self.load_data(image_dir_path, gt_dir_path, self.image_dict);
        self.image_num = len(self.image_dict);
        # IoU thresholds for labelling a proposal foreground / background.
        self.fg_thres = 0.5;
        self.bg_thres = 0.2;
        self.bbox_normalize_scale = 5;
        self.proposal_prepare(self.image_dict);
        self.name_list = list(self.image_dict.keys());
        self.iter_index = 0;  # cursor into name_list for mini-batch iteration
def get_name_dict(self, image_name_list, gt_name_list, image_dict):
    """Seed image_dict with one empty record per basename that has both an
    image file and a ground-truth file (the 'gt_' prefix is stripped)."""
    image_names = {os.path.splitext(fname)[0] for fname in image_name_list}
    # Ground-truth files are named 'gt_<name>.txt': drop the 3-char prefix.
    gt_names = {os.path.splitext(fname)[0][3:] for fname in gt_name_list}
    for name in image_names & gt_names:
        image_dict[name] = {"image": None, "gt": None, "proposal": None,
                            "fgsize": None, "src_size": None}
def load_image(self, image_dir_path, image_name_list, image_dict): # Load images from directory to imdb dict.
    """Read, resize and store every image whose basename is in image_dict."""
    for image_name in image_name_list:
        image_path = os.path.join(image_dir_path, image_name);
        name = os.path.splitext(image_name)[0];
        if name in image_dict and os.path.isfile(image_path):
            im = cv.imread(image_path);
            # Remember the original (width, height) so ground-truth boxes
            # can later be rescaled into the resized coordinate frame.
            image_dict[name]["src_size"] = (im.shape[1], im.shape[0]);
            im = cv.resize(im, (self.image_width, self.image_height));
            print(image_name, im.shape);
            pix = np.array(im).astype(np.float32);
            image_dict[name]["image"] = pix;
def load_ground_truth(self, gt_dir_path, gt_name_list, image_dict): # Load ground-truth from directory to imdb_dict
    """Parse each gt_<name>.txt and store its boxes scaled to the resized image."""
    for gt_name in gt_name_list:
        gt_path = os.path.join(gt_dir_path, gt_name);
        name = os.path.splitext(gt_name)[0][3:];  # strip the 'gt_' prefix
        if name in image_dict and os.path.isfile(gt_path) and re.match(".*.txt", gt_name):
            # NOTE(review): fp is never closed; consider a with-block.
            fp = open(gt_path, "r", encoding="UTF-8");
            gt_list = [];
            for line in fp:
                line = line.strip();
                if len(line) == 0:
                    continue;
                line = line.replace(",", "");
                line_split = re.split(" +|\t+", line);
                # Rescale the box corners from source-image coordinates into
                # the resized (image_width x image_height) frame.
                pos_gt = [int(line_split[0]) * self.image_width / image_dict[name]["src_size"][0],
                          int(line_split[1]) * self.image_height / image_dict[name]["src_size"][1],
                          int(line_split[2]) * self.image_width / image_dict[name]["src_size"][0],
                          int(line_split[3]) * self.image_height / image_dict[name]["src_size"][1]];
                # word_gt = line_split[4];
                # gt_list.append([pos_gt, word_gt]);
                gt_list.append(pos_gt);
            image_dict[name]["gt"] = np.array(gt_list, dtype=int);
def load_data(self, image_dir_path, gt_dir_path, image_dict): # Load Image Database
    """Populate image_dict from the image and ground-truth directories:
    first pair up names, then load pixels, then parse boxes (order matters:
    load_ground_truth needs the src_size recorded by load_image)."""
    image_name_list = os.listdir(image_dir_path);
    gt_name_list = os.listdir(gt_dir_path);
    # print(image_name_list);
    # print(gt_name_list);
    self.get_name_dict(image_name_list, gt_name_list, image_dict);
    self.load_image(image_dir_path, image_name_list, image_dict);
    self.load_ground_truth(gt_dir_path, gt_name_list, image_dict);
def generate_anchors(self):
    """Return an (anchor_size, 4) array of [x1, y1, x2, y2] anchor boxes
    centred on the origin, one row per scale/aspect-ratio combination."""
    n_ratios = self.aspect_ratio.shape[0]
    anchors = np.zeros([self.anchor_size, 4])
    for si, scale in enumerate(self.scale):
        # Height comes from the scale; width from height times the ratio.
        height = int(self.image_height * scale)
        for ri, ratio in enumerate(self.aspect_ratio):
            width = int(height * ratio)
            anchors[si * n_ratios + ri, :] = np.array(
                [-0.5 * width, -0.5 * height, 0.5 * width, 0.5 * height])
    return anchors
def compute_overlap(self, mat1, mat2): # Calculate the overlap area between proposals and ground truth
    """Pairwise IoU between two box sets.

    mat1 and mat2 hold [x1, y1, x2, y2] rows (mat2 may carry a precomputed
    area in column 4).  Returns an array of shape (len(mat2), len(mat1)).
    """
    s1 = mat1.shape[0];
    s2 = mat2.shape[0];
    area1 = (mat1[:, 2] - mat1[:, 0]) * (mat1[:, 3] - mat1[:, 1]);
    # Column 4, when present, caches the precomputed box areas.
    if mat2.shape[1] == 5:
        area2 = mat2[:, 4];
    else:
        area2 = (mat2[:, 2] - mat2[:, 0]) * (mat2[:, 3] - mat2[:, 1]);
    # cartesian() enumerates every (mat1, mat2) pair; max of the two left
    # edges and min of the two right edges bound the intersection.
    x1 = cartesian([mat1[:, 0], mat2[:, 0]]);
    x1 = np.amax(x1, axis=1);
    x2 = cartesian([mat1[:, 2], mat2[:, 2]]);
    x2 = np.amin(x2, axis=1);
    com_zero = np.zeros(x2.shape[0]);
    w = x2 - x1;
    # The extra -1 shrinks each overlap by one pixel; clamp at zero so
    # disjoint boxes contribute no intersection.
    w = w - 1;
    w = np.maximum(com_zero, w);
    y1 = cartesian([mat1[:, 1], mat2[:, 1]]);
    y1 = np.amax(y1, axis=1);
    y2 = cartesian([mat1[:, 3], mat2[:, 3]]);
    y2 = np.amin(y2, axis=1);
    h = y2 - y1;
    h = h - 1;
    h = np.maximum(com_zero, h);
    oo = w * h;  # intersection area per pair
    aa = cartesian([area1[:], area2[:]]);
    aa = np.sum(aa, axis=1);  # area1 + area2 per pair
    ooo = oo / (aa - oo);  # IoU = inter / (union = a1 + a2 - inter)
    # Reshape the flat pair list back to (s1, s2), then flip to (s2, s1)
    # so rows are indexed by mat2 boxes (proposals in the caller).
    overlap = np.transpose(ooo.reshape(s1, s2), (1, 0));
    return overlap;
def compute_regression(self, roi, proposal):
    """Return the 4-vector regression target (dx, dy, log dw, log dh)
    that maps `proposal` onto the ground-truth box `roi`."""
    roi_w = roi[2] - roi[0]
    roi_h = roi[3] - roi[1]
    prop_w = proposal[2] - proposal[0]
    prop_h = proposal[3] - proposal[1]
    # Offsets are normalized by the proposal size; scales are log-ratios.
    return np.array([
        (roi[0] - proposal[0]) / prop_w,
        (roi[1] - proposal[1]) / prop_h,
        np.log(roi_w / prop_w),
        np.log(roi_h / prop_h),
    ])
def compute_target(self, roi_s, proposals_s): # Get useful proposals for model training
    """Label every proposal against the ground-truth boxes.

    Returns (mat, fg_count): mat has one row per proposal with
    col 0 the label (+1 fg, -1 bg, 0 ignored), cols 1-4 the proposal box
    and cols 5-8 the regression target (foreground rows only).
    """
    roi = roi_s.copy();
    proposals = proposals_s.copy();
    proposal_size = proposals.shape[0];
    roi_proposal_mat = np.zeros([proposal_size, 9]);
    # No ground truth at all: every proposal stays unlabeled.
    if roi.shape[0] == 0:
        return roi_proposal_mat, 0;
    overlap = self.compute_overlap(roi, proposals);
    # Best-matching ground-truth box for each proposal.
    overlap_max = np.max(overlap, axis=1);
    overlap_max_idx = np.argmax(overlap, axis=1);
    fg_proposal_num = 0;
    for i in range(proposal_size):
        roi_proposal_mat[i, 1:5] = proposals[i, :];
        # Only anchors fully inside the image (mask == 1) receive labels.
        if self.proposals_mask[i] == 1:
            if overlap_max[i] >= self.fg_thres:
                roi_proposal_mat[i, 0] = 1;
                roi_proposal_mat[i, 5:] = self.compute_regression(roi[overlap_max_idx[i], :4], proposals[i, :]);
                fg_proposal_num += 1;
            elif overlap_max[i] < self.bg_thres:
                roi_proposal_mat[i, 0] = -1;
    return roi_proposal_mat, fg_proposal_num;
def proposal_prepare(self, image_dict): # Generate proposal positions for the original image
    """Tile the anchors over every feature-map cell (stride 16, centres at
    +8) and pre-compute per-image training targets."""
    anchors = self.generate_anchors();
    proposals = np.zeros([self.anchor_size * self.convmap_height * self.convmap_width, 4], dtype=int);
    for i in range(self.convmap_height):
        y0 = i * 16 + 8;  # anchor centre for this feature-map row
        for j in range(self.convmap_width):
            x0 = j * 16 + 8;
            for k in range(self.anchor_size):
                index = (i * self.convmap_width + j) * self.anchor_size + k;
                anchor = anchors[k, :];
                # Shift the origin-centred anchor to this cell's centre.
                proposals[index, :] = anchor + np.array([x0, y0, x0, y0]);
    self.proposals = proposals;
    # ignore cross-boundary anchors
    proposals_keep = np.where((proposals[:, 0] > 0) & (proposals[:, 1] > 0) & (proposals[:, 2] < self.image_width) & (proposals[:, 3] < self.image_height))[0];
    self.proposals_mask = np.zeros(proposals.shape[0]);
    self.proposals_mask[proposals_keep] = 1;
    self.proposal_size = self.proposals.shape[0];
    # area = (proposals[:, 2] - proposals[:, 0]) * (proposals[:, 3] - proposals[:, 1]);
    # proposals = np.hstack([proposals, area.reshape([area.shape[0], 1])]);
    print("proposal size: {}".format(self.proposal_size));
    for image_name in image_dict:
        image_dict[image_name]["proposal"], image_dict[image_name]["fgsize"] = self.compute_target(image_dict[image_name]["gt"], proposals);
        print("image_name: {}, fgsize: {}".format(image_name, image_dict[image_name]["fgsize"]));
    # print("Compute Target: {}/{}".format(image_num, image_num));
    # self.fg_proposals_per_image = fg_proposal_num_dict;
def generate_minibatch_proposal(self, image_batch_size, proposal_batch_size): # Generate mini-batch for RPN model training
    """Assemble one RPN training batch.

    Returns (pixels, labels, label_weights, bbox_targets, bbox_weights),
    sampling a roughly balanced fg/bg proposal subset per image.
    """
    pix_vec = np.zeros([image_batch_size, self.image_height, self.image_width, 3]);
    labels_vec = np.zeros([image_batch_size, self.proposal_size, 2]);
    labels_weight_vec = np.zeros([image_batch_size, self.proposal_size]);
    bbox_targets_vec = np.zeros([image_batch_size, self.proposal_size, 4]);
    bbox_loss_weight_vec = np.zeros([image_batch_size, self.proposal_size]);
    for i in range(image_batch_size):
        # Reshuffle and restart once the epoch is exhausted.
        if self.iter_index == self.image_num:
            random.shuffle(self.name_list);
            self.iter_index = 0;
        # Skip images with fewer than 10 foreground anchors.
        while self.image_dict[self.name_list[self.iter_index]]["fgsize"] < 10:
            if self.iter_index == self.image_num - 1:
                random.shuffle(self.name_list);
                self.iter_index = 0;
            else:
                self.iter_index += 1;
        cur_name = self.name_list[self.iter_index];
        im_train = self.image_dict[cur_name];
        pix = im_train["image"];
        roi_proposal = im_train["proposal"];
        fg_idx = np.where(roi_proposal[:, 0] == 1)[0];
        bg_idx = np.where(roi_proposal[:, 0] == -1)[0];
        # One-hot labels: default to background [0, 1], flip fg rows to [1, 0].
        labels = np.hstack([np.zeros([self.proposal_size, 1]), np.ones([self.proposal_size, 1])]);
        labels[fg_idx, 0] = 1;
        labels[fg_idx, 1] = 0;
        bbox_targets = roi_proposal[:, 5:];
        # Sample at most half the proposal batch as fg, fill the rest with bg.
        fg_num = int(min(fg_idx.shape[0], proposal_batch_size / 2));
        np.random.shuffle(fg_idx);
        fg_idx = fg_idx[:fg_num];
        bg_num = int(min(bg_idx.shape[0], proposal_batch_size - fg_num));
        np.random.shuffle(bg_idx);
        bg_idx = bg_idx[:bg_num];
        # fg_num = fg_idx.shape[0];
        # bg_num = bg_idx.shape[0];
        labels_weight = np.zeros(self.proposal_size);
        bbox_loss_weight = np.zeros(self.proposal_size);
        # Upweight the rarer foreground labels to balance the class loss.
        labels_weight[fg_idx] = bg_num / fg_num;
        labels_weight[bg_idx] = 1;
        bbox_loss_weight[fg_idx] = 1;
        pix_vec[i] = pix;
        labels_vec[i] = labels;
        labels_weight_vec[i] = labels_weight;
        bbox_targets_vec[i] = bbox_targets;
        bbox_loss_weight_vec[i] = bbox_loss_weight;
        self.iter_index += 1;
        # print(np.sum(labels_weight), np.sum(bbox_loss_weight));
        # print(labels);
    return pix_vec, labels_vec, labels_weight_vec, bbox_targets_vec, bbox_loss_weight_vec;
def prepare_region_data(self, region_height, region_width): # Prepare data for region classifier
    """Crop positive (ground-truth) and negative (background proposal)
    patches, resized to region_height x region_width, for the classifier."""
    self.pos_image_list = [];
    self.neg_image_list = [];
    self.region_height = region_height;
    self.region_width = region_width;
    for image_name in self.image_dict:
        pix = self.image_dict[image_name]["image"];
        gts = self.image_dict[image_name]["gt"];
        pos_num = gts.shape[0];
        for gt in gts:
            pos_img = pix[gt[1]:gt[3] + 1, gt[0]:gt[2] + 1].astype(np.uint8).copy();
            pos_img = cv.resize(pos_img, (region_width, region_height));
            self.pos_image_list.append(pos_img.astype(np.float32));
        roi_proposal = self.image_dict[image_name]["proposal"];
        # Non-foreground proposals lying fully inside the image become
        # negatives (columns 1-4 hold the proposal box).
        bg_idx = np.where((roi_proposal[:, 0] != 1) & (roi_proposal[:, 1] > 0) & (roi_proposal[:, 2] > 0) & (roi_proposal[:, 3] < self.image_width) & (roi_proposal[:, 4] < self.image_height))[0];
        # bg_idx = np.where((roi_proposal[:, 0] == 0) & (self.proposals_mask == 1))[0];
        np.random.shuffle(bg_idx);
        bg_idx = bg_idx[:pos_num];  # sample as many negatives as positives
        for idx in bg_idx:
            neg_box = roi_proposal[idx, 1:5].astype(np.int);
            neg_img = pix[neg_box[1]:neg_box[3], neg_box[0]:neg_box[2]].astype(np.uint8).copy();
            neg_img = cv.resize(neg_img, (region_width, region_height));
            self.neg_image_list.append(neg_img.astype(np.float32));
    self.region_num = len(self.pos_image_list);
    self.region_index = 0;
    print("Region Num: {}".format(len(self.pos_image_list)));
def generate_minibatch_region(self, region_batch_size): # Generate minibatch for region classifier training
    """Return (images, one-hot labels): half positive crops, half negative."""
    region_remain_num = int(region_batch_size / 2);
    pos_image_batch = [];
    neg_image_batch = [];
    while region_remain_num > 0:
        end_pos = min(self.region_index + region_remain_num, self.region_num);
        pos_image_batch.extend(self.pos_image_list[self.region_index:end_pos]);
        neg_image_batch.extend(self.neg_image_list[self.region_index:end_pos]);
        region_remain_num -= (end_pos - self.region_index);
        self.region_index = end_pos;
        # Wrap around: reshuffle both pools once the epoch is exhausted.
        if self.region_index == self.region_num:
            random.shuffle(self.pos_image_list);
            random.shuffle(self.neg_image_list);
            self.region_index = 0;
    pos_image_batch = np.array(pos_image_batch);
    neg_image_batch = np.array(neg_image_batch);
    image_batch = np.vstack([pos_image_batch, neg_image_batch]);
    # One-hot labels: [1, 0] = positive (text region), [0, 1] = negative.
    pos_label_batch = np.hstack([np.ones([int(region_batch_size / 2), 1]), np.zeros([int(region_batch_size / 2), 1])]);
    neg_label_batch = np.hstack([np.zeros([int(region_batch_size / 2), 1]), np.ones([int(region_batch_size / 2), 1])]);
    label_batch = np.vstack([pos_label_batch, neg_label_batch]);
    return image_batch, label_batch;
def recover_pos(proposal, target):
    """Invert compute_regression: apply the regression `target` to
    `proposal` and return the predicted [x1, y1, x2, y2] box (int dtype,
    so coordinates are truncated)."""
    prop_w = proposal[2] - proposal[0]
    prop_h = proposal[3] - proposal[1]
    box_w = np.exp(target[2]) * prop_w
    box_h = np.exp(target[3]) * prop_h
    real_pos = np.zeros(4, dtype=int)
    real_pos[0] = target[0] * prop_w + proposal[0]
    real_pos[1] = target[1] * prop_h + proposal[1]
    # The corner uses the already-truncated top-left value, matching the
    # int-array assignment semantics.
    real_pos[2] = real_pos[0] + box_w
    real_pos[3] = real_pos[1] + box_h
    return real_pos
def main():
    """Smoke test: build the database from local ICDAR2013 paths and draw
    one region mini-batch.  Paths are machine-specific; the commented-out
    code below visualizes ground-truth boxes and recovered proposals."""
    image_dir_path = "/Users/max/Downloads/Data/Handwriting_Data/ICDAR2013/image_test/";
    gt_dir_path = "/Users/max/Downloads/Data/Handwriting_Data/ICDAR2013/gt_test/";
    dst_dir_path = "/Users/max/Downloads/Data/Presentation/weekly/"
    imdb_obj = Image_Database(image_dir_path, gt_dir_path, 720, 960);
    # print(imdb_obj.image_dict);
    # for image_name in imdb_obj.image_dict:
    #     im = cv.imread(os.path.join(image_dir_path, image_name) + ".jpg");
    #     im = cv.resize(im, (imdb_obj.image_width, imdb_obj.image_height));
    #     print(imdb_obj.image_dict[image_name]["image"].shape)
    #     #
    #     for gt in imdb_obj.image_dict[image_name]["gt"]:
    #         cv.rectangle(im, (gt[0], gt[1]), (gt[2], gt[3]), (255, 0, 0), 3);
    #
    #     for proposal in imdb_obj.image_dict[image_name]["proposal"]:
    #         if proposal[0] == 1:
    #             cv.rectangle(im, (int(proposal[1]), int(proposal[2])), (int(proposal[3]), int(proposal[4])), (0, 255, 0), 3);
    #             real_pos = recover_pos(proposal[1:5], proposal[5:]);
    #             cv.rectangle(im, (real_pos[0], real_pos[1]), (real_pos[2], real_pos[3]),
    #                          (0, 0, 255), 3);
    #
    #     plt.imshow(im);
    #     plt.show();
    #     cv.imwrite(os.path.join(dst_dir_path, image_name) + "_predict.png", im);
    #     print(imdb_obj.image_dict[image_name]["fgsize"]);
    imdb_obj.prepare_region_data(224, 224);
    image_batch, label_batch = imdb_obj.generate_minibatch_region(32);
    print(len(image_batch));
    # for image in image_batch:
    #     image = image.astype(np.uint8);
    #     plt.imshow(image);
    #     plt.show();
if __name__ == "__main__":
    # Run the smoke test only when executed as a script.
    main();
|
[
"hanyi_tang@apple.com"
] |
hanyi_tang@apple.com
|
df3512fd4a973632fa8a3375b5162dae22014eaf
|
23934d631c8504e4ce04b4c39bd423776453d466
|
/dou/spiders/dou_spider.py
|
430b9b0b9ddbd19d0bc080f8115c6ed7a8424f7f
|
[] |
no_license
|
OlehMalichenko/dou_data_with_scrapy
|
8eec87966f3a54ec6b95b977326aec2deff69508
|
b80f733e21b05e651108bffc07df889d72fe8e38
|
refs/heads/master
| 2020-12-02T05:54:49.530344
| 2019-12-30T12:38:11
| 2019-12-30T12:38:11
| 230,913,323
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,905
|
py
|
# -*- coding: utf-8 -*-
import scrapy
import csv
from dou.items import DouItem
from pprint import pprint
from time import sleep
import random
class DouSpiderSpider(scrapy.Spider):
    """Spider that walks company pages on jobs.dou.ua, collecting profile,
    vacancy and Kiev-office data into item records."""
    name = 'dou_spider'
    allowed_domains = ['jobs.dou.ua']
def start_requests(self):
    """Yield a Request for every valid URL found in needed_csv.csv.

    Sleeps a random 1-5 seconds after every 50 dispatched links to
    throttle the crawl.
    """
    with open('needed_csv.csv', newline='\n', encoding='utf8') as file:
        reader = csv.reader(file)
        count = 0        # links dispatched since the last sleep
        count_all = 0    # total rows seen, for progress logging
        for line in reader:
            count_all = count_all + 1
            href = self.check_line(line)
            if href is None:
                continue
            yield scrapy.Request(href)
            count = count + 1
            # if count == 1:
            #     break
            if count == 50:
                sec = self.get_random_time()
                print('SLEEP--------------------' + str(sec))
                print('count_all----------------' + str(count_all))
                sleep(sec)
                count = 0
def check_line(self, line):
    """Validate one CSV row and return its URL, or None if malformed.

    A valid row is a one-element list whose sole entry is a string
    starting with 'http'.
    """
    if type(line) is not list:
        print('line in csv is not list')
        return None
    if len(line) != 1:
        print('is not correct len in line')
        return None
    href = line[0]
    if type(href) is not str:
        print('is not str')
        return None
    # 'http' must appear within the first four characters.
    if href.find('http', 0, 4) == -1:
        print('is not link-http')
        return None
    return href
def parse(self, response):
    """Scrape the company profile page, then chain into its vacancies page
    (which in turn chains into the offices page)."""
    if response.status == 200:
        # inner hrefs
        href = response.request.url
        href_vacancy = href + 'vacancies/'
        href_offices = href + 'offices/'
        # start create item
        item = DouItem()
        item['name'] = self.get_data(response, self.get_name_path())
        item['href'] = href
        item['location'] = self.get_data(response, self.get_location_path())
        item['link'] = self.get_data(response, self.get_link_path())
        item['href_vacancy'] = href_vacancy
        item['href_offices'] = href_offices
        # go to vacancy; the partially filled item travels in the meta dict
        yield scrapy.Request(url=href_vacancy,
                             callback=self.go_to_vacancy,
                             meta={'item': item, 'href_offices': href_offices})
#==========STEP VACANCY============#
# first step (turn to office - second step)
def go_to_vacancy(self, response):
    """Collect (title, href) for every vacancy, then chain to the offices page."""
    path = self.get_vacancy_path()
    vacancies_list = response.xpath(path)
    # NOTE(review): xpath() returns a (possibly empty) SelectorList, not
    # None, so this guard never fires -- presumably meant to test emptiness.
    if vacancies_list is None:
        return
    item = response.meta['item']
    href_offices = response.meta['href_offices']
    vacancies = list()
    for el in vacancies_list:
        title = el.xpath('.//text()').get()
        href = el.xpath('.//@href').get()
        vacancies.append([title, href])
    item['vacancy'] = vacancies
    # go to offices
    yield scrapy.Request(url=href_offices,
                         callback=self.go_to_kiev_offices,
                         meta={'item': item})
#==========STEP OFFICES============#
# yield item
def go_to_kiev_offices(self, response):
    """Fill the item with Kiev office contact data and emit it."""
    item = response.meta['item']
    path = self.get_office_kiev_path()
    element = response.xpath(path)
    # NOTE(review): xpath() returns a SelectorList, never None, so this
    # branch is dead -- an empty result still falls through below.
    if element is None:
        yield item
        return
    office = self.get_data_from_kiev_office(element)
    item['address'] = office['address']
    item['email'] = office['email']
    item['tel'] = office['tel']
    item['persons_admin'] = office['persons_admin']
    yield item
def get_data_from_kiev_office(self, element):
    """Extract address/email/phone/staff data from a Kiev office block."""
    return {
        'address': self.get_data(element, self.get_address_path()),
        'email': self.get_data(element, self.get_mail_path()),
        'tel': self.get_data(element, self.get_tel_path()),
        'persons_admin': self.get_persons_info(element),
    }
def get_persons_info(self, element):
    """Return [[position, name], ...] for the office staff list, or None."""
    path_info = self.get_person_info_path()
    info = element.xpath(path_info)
    # Several info blocks may exist; pick the one holding the persons list.
    if len(info) > 1:
        info = self.find_need_info(info)
    if info is None:
        return None
    path_li = self.get_person_li_path()
    persons = info.xpath(path_li)
    # NOTE(review): xpath() returns a SelectorList, never None; an empty
    # result slips past this check and just yields an empty list below.
    if persons is None:
        return None
    persons_list = list()
    path_position = self.get_person_position_path()
    path_name = self.get_person_name_path()
    for person in persons:
        try:
            position_ = person.xpath(path_position)
            position = position_[-1].get().strip()
            person_name_ = person.xpath(path_name)
            person_name = person_name_[-1].get().strip()
        except:
            # Skip entries whose name/position nodes are missing.
            continue
        persons_list.append([position, person_name])
    return persons_list
def find_need_info(self, infos):
    """Return the first info block that actually contains a persons <ul>,
    or None if no block has one."""
    for info in infos:
        # xpath() returns a SelectorList, never None, so the original
        # `is not None` check was always true and the first block was
        # returned unconditionally.  Test truthiness (non-empty) instead.
        if info.xpath('.//ul[@class="persons"]'):
            return info
    return None
#=========PATHES TO DATA===========#
def get_data(self, response, path):
    """Evaluate `path` on `response` and return the stripped first match,
    or None when the node is missing or the lookup fails."""
    try:
        return response.xpath(path).get().strip()
    except Exception:
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.  Exception still covers the
        # expected failures (missing node: .get() -> None -> AttributeError).
        return None
def get_name_path(self):
    # Company name heading on the profile page.
    return '//div[@class="company-info"]/h1[@class="g-h2"]/text()'
def get_location_path(self):
    # Offices/locations summary line.
    return '//div[@class="company-info"]/div[@class="offices"]/text()'
def get_link_path(self):
    # Company website link.
    return '//div[@class="company-info"]/div[@class="site"]/a[@href]/@href'
def get_vacancy_path(self):
    # Vacancy title anchors inside the vacancy list.
    return '//div[@id="vacancyListId"]//div[@class="vacancy"]/div[@class="title"]/a[@class="vt"]'
def get_office_kiev_path(self):
    # Parent block of the Kiev office section.
    return '//h4[@id="kiev"]/..'
def get_address_path(self):
    return './/div[@class="contacts"]//div[@class="address"]/text()'
def get_mail_path(self):
    return './/div[@class="contacts"]//div[@class="mail"]/a[@href]/text()'
def get_tel_path(self):
    return './/div[@class="contacts"]//div[@class="phones"]/text()'
def get_person_info_path(self):
    return './/div[@class="info"]'
def get_person_li_path(self):
    return './/ul[@class="persons"]/li'
def get_person_position_path(self):
    # Text right after the person's name anchor (their position).
    return './/a[@class="name"]/following-sibling::text()'
def get_person_name_path(self):
    return './/a[@class="name"]/child::text()'
#=========RANDOM TIME===========#
def get_random_time(self):
    # Random 1-5 second delay used to throttle requests.
    return random.uniform(1, 5)
|
[
"advokat1141@gmail.com"
] |
advokat1141@gmail.com
|
248090511984e8a8a5bc1b4ac31562347c35d8f7
|
10e1839b504fc18ab6cf19bcaac64593d7bf7f37
|
/hackerrank/python/10. printDoormat.py
|
c72b0bc2bdef1cbbf8e5af0c00cbb614c95b5aa2
|
[] |
no_license
|
hideyk/Challenges
|
bfb4f77bf3aefa855899fb9f32dc2d84549ab65b
|
5b01f1d58442f71cfdb3231fc338d68f13af79b3
|
refs/heads/main
| 2023-07-19T18:14:13.999347
| 2021-01-17T10:46:10
| 2021-01-17T10:46:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 422
|
py
|
# Enter your code here. Read input from STDIN. Print output to STDOUT
def doormat(r, c):
for i in range(r//2):
print((".|."*(i*2+1)).center(c, "-"))
print("WELCOME".center(c, "-"))
for i in range(r//2-1, -1, -1):
print((".|."*(i*2+1)).center(c, "-"))
def main():
    """Read 'rows columns' from stdin and print the corresponding doormat."""
    rows, columns = (int(token) for token in input().strip().split())
    doormat(rows, columns)
if __name__ == "__main__":
    # Entry point when executed as a script.
    main()
|
[
"automationhidey@gmail.com"
] |
automationhidey@gmail.com
|
d14e6f6a6abfde25ec14975c731e0b395db426dc
|
dbb320f62c06433b2ca92ee3dd51a6bde8527143
|
/design_hit_counter.py
|
4a764144a5c10539c184f4511604eb6b68851bc5
|
[
"MIT"
] |
permissive
|
pranavdave893/Leetcode
|
3c051f4510d8907f04534b27c1fa76602f205852
|
1f30ea37af7b60585d168b15d9397143f53c92a1
|
refs/heads/master
| 2021-06-10T12:12:34.782733
| 2021-03-22T00:37:44
| 2021-03-22T00:37:44
| 140,044,356
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 554
|
py
|
from collections import deque
class HitCounter(object):
    """Count hits landing in the trailing 300-second window.

    A deque of [timestamp, count] pairs plus a running total give O(1)
    amortised hit() and getHits().  Assumes timestamps arrive
    non-decreasing (TODO confirm with caller), as only the newest entry
    is merged into.
    """

    def __init__(self):
        self.dq = deque()     # [timestamp, hits-at-that-timestamp] pairs
        self.curr_count = 0   # total hits currently inside the window

    def hit(self, timestamp):
        """Record one hit at `timestamp`."""
        if self.dq and self.dq[-1][0] == timestamp:
            self.dq[-1][1] += 1
        else:
            self.dq.append([timestamp, 1])
        self.curr_count += 1

    def getHits(self, timestamp):
        """Return how many hits fall in (timestamp - 300, timestamp]."""
        while self.dq and self.dq[0][0] + 300 <= timestamp:
            expired = self.dq.popleft()
            self.curr_count -= expired[1]
        return self.curr_count
|
[
"pranavdave893@gmail.com"
] |
pranavdave893@gmail.com
|
716f9858e08437626c4799f63d4487ab054a99ef
|
d9e620594239a7ad77654134e59b8780d2dc6133
|
/python/BobRossBot/bin/app.py
|
6bb30d7000c75a8b49c1e7b5f414abb17679e512
|
[] |
no_license
|
adamriggs/reddit-api
|
c86824d1c71ab77b929573518127f55b3fbf6afe
|
f64cec053f3a801e480fff17102d237b5288e0d4
|
refs/heads/master
| 2016-09-05T11:19:41.435636
| 2013-06-16T23:56:04
| 2013-06-16T23:56:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,973
|
py
|
#-----------------
# ShittyWatercolorBot
# By: Adam Riggs
# v 0.1
# 5/19/2013
#-----------------
#-----------------
# imports
#-----------------
import praw
import MySQLdb
from random import randint
import time
from time import sleep
from pprint import pprint
from pgmagick import Image
from imgfx import ImgFX
import urllib2
import urllib
import json
from base64 import b64encode
import re
#-----------------
# main variables
#-----------------
user_agent='HappyLittleBot: makes it look like Bob Ross is painting the image that op posted.'
subreddit_names=[]
bob_ross_quotes=['We don\'t make mistakes, we just have happy accidents.','Any time ya learn, ya gain.','Any way you want it to be, that\'s just right.','Be sure to use odorless paint-thinner. If it\'s not odorless, you\'ll find yourself working alone very, very quick.','I like to beat the brush.','Tender as a mothers love... And with my mother, that was certainly true.','People look at me like I\'m a little strange, when I go around talking to squirrels and rabbits and stuff. That\'s ok. Thaaaat\'s just ok.','People might look at you a bit funny, but it\'s okay. Artists are allowed to be a bit different.','Shwooop. Hehe. You have to make those little noises, or it just doesn\'t work.','We tell people sometimes: we\'re like drug dealers, come into town and get everybody absolutely addicted to painting. It doesn\'t take much to get you addicted.','We want happy paintings. Happy paintings. If you want sad things, watch the news.','We\'re gonna make some big decisions in our little world.','When I was teaching my son Steve to paint, I used to tell him, just pretend he was a whisper, and he floated right across the mountain, that easy, gentle, make love to it, caress it.','You can do anything you want to do. This is your world.','Even if you\'ve never painted before, this one you can do.','And just go straight in like your going to stab it. And barely touch it...barely touch it.']
my_name="HappyLittleBot"
output_file="output.png"
has_name=False
sleep_time=1800
#-----------------
# initializations
#-----------------
db = MySQLdb.connect(host="localhost", # your host, usually localhost
user="*****", # your username
passwd="*****", # your password
db="reddit") # name of the data base
cur = db.cursor()
fx = ImgFX()
r = praw.Reddit(user_agent = user_agent)
r.login(my_name,'*****')
#-----------------
# functions
#-----------------
#//-----Database
def connectDB():
    """(Re)open the MySQL connection and cursor held in module globals."""
    global db
    global cur
    db = MySQLdb.connect(host="localhost", # your host, usually localhost
                         user="*****", # your username
                         passwd="*****", # your password
                         db="reddit") # name of the data base
    cur = db.cursor()
def closeDB():
    """Close the module-global MySQL connection."""
    global db
    db.close()
def getSubreddits():
    """Load the distinct subreddit names from the DB into the global list.

    NOTE(review): the final assignment overrides the query result with a
    hard-coded ['pics'] -- looks like a leftover debug/testing override.
    """
    global subreddit_names
    cur.execute("SELECT DISTINCT subreddit_name from subreddits")
    mysql_rows = cur.fetchall()
    subreddit_names = [x for x, in mysql_rows]
    #print subreddit_names
    subreddit_names = ['pics']
def checkMsgId(id):
cur.execute("SELECT COUNT(1) FROM " + my_name + " WHERE msg_id = \'" + id + "\'")
msgExists=cur.fetchone()
if msgExists[0]:
print "\nalready in database == true\n"
return True
else:
print "\nalready in database == false\n"
return False
def insertMsgInDb(msgType, id, subreddit, author):
    """Record a replied-to message so it is never processed twice."""
    # Parameterized VALUES prevent SQL injection via titles/author names
    # (the previous string-built query interpolated them directly).
    cur.execute("INSERT INTO " + my_name +
                " (msg_type,msg_id,subreddit,author) VALUES (%s, %s, %s, %s)",
                (str(msgType), str(id), str(subreddit), str(author)))
    db.commit()
#//-----Image
def makeWatercolor(img_name, output_name):
    """Apply the Bob Ross painting effect to img_name, writing output_name."""
    print "makeWatercolor()"
    img = fx.bobross(img_name)
    img.write(output_name)
def sideLoad(filepath):
    """Upload a local image to imgur (base64 POST) and return its URL.

    NOTE(review): the opened file handle is never closed explicitly.
    """
    req = urllib2.Request('https://api.imgur.com/3/image', 'image=' + urllib.quote(b64encode(open(filepath,'rb').read())))
    req.add_header('Authorization', 'Client-ID ' + '89861848efdc33c')
    response = urllib2.urlopen(req)
    response = json.loads(response.read())
    return str(response[u'data'][u'link'])
def randomComment():
print "randomComment()"
return bob_ross_quotes[randint(0,len(bob_ross_quotes)-1)]
def comment(url):
    """Build the reddit comment: a random quote linked to the image url."""
    quote = randomComment()
    return "[" + quote + "](" + url + ")\n\n\n^I'm ^a ^bot! ^This ^image ^was ^generated ^automatically."
#//-----Main Loop
def searchSubs():
    """Scan the hot posts of every tracked subreddit and reply with a
    'Bob Ross' rendering of each new imgur image submission."""
    for subreddit_name in subreddit_names:
        print "number of subreddits == " + str(len(subreddit_names))
        print "\n" + subreddit_name + "\n"
        subreddit = r.get_subreddit(subreddit_name)
        for submission in subreddit.get_hot(limit=5):
            print "\n*****submission.id==" + submission.id
            #print str(submission.title)
            # NOTE(review): is_self is a bool, so comparing against the
            # string "false" is always True -- self posts are only caught
            # by the imgur/extension checks below.
            if(submission.is_self!="false"):
                msgReplied=checkMsgId(submission.id)
                print "replied: " + str(msgReplied)
                print submission.url
                # NOTE(review): iterating the string 'imgur' tests single
                # characters, so the any() is effectively always True --
                # presumably ['imgur'] was intended.
                if(msgReplied==False and str(submission.author)!=my_name and any(string in submission.url for string in 'imgur')):
                    urlstr=str(submission.url)
                    #print urlstr[-4:]
                    # Force a direct-image extension so imgur serves a file.
                    if((urlstr[-4:]=='.jpg' or urlstr[-4:]=='.png')==False):
                        urlstr=urlstr + ".jpg"
                    print "urlstr==" + urlstr
                    try:
                        makeWatercolor(urlstr,output_file)
                        #print randomComment()
                        submission.add_comment(comment(sideLoad(output_file)))
                        insertMsgInDb("submission", submission.id, submission.subreddit, submission.author)
                    except praw.errors.RateLimitExceeded as e:
                        # Reddit throttled us: wait it out, then retry once.
                        print "Rate limited for " + str(e.sleep_time) + " seconds at " + time.strftime("%H:%M:%S", time.gmtime())
                        print "Sleeping..."
                        sleep(e.sleep_time)
                        submission.add_comment(comment(sideLoad(output_file)))
                        insertMsgInDb("submission", submission.id, submission.subreddit, submission.author)
                    except Exception as e:
                        #insertMsgInDb("submission", submission.id, submission.subreddit, submission.author)
                        print "unkown error replying (probably an album)\n"
                else:
                    print "self post"
#-----------------
# main loop
#-----------------
# Drop the connection opened at import time; each cycle reconnects fresh.
closeDB()
while True:
    try:
        connectDB()
        getSubreddits()
        searchSubs()
        closeDB()
        print "\nSleeping...\n"
        sleep(sleep_time)
    except Exception as e:
        # Keep the bot alive on any failure; log and wait out the cycle.
        print "something went wrong in the main loop"
        print str(e)
        print "\nSleeping...\n"
        sleep(sleep_time)
|
[
"phlux1111@gmail.com"
] |
phlux1111@gmail.com
|
b716bd2e0194fef8998be5cb4394c18d86a8e1f8
|
00ee5387bbac76666c7099165fc3cd9f449bfc31
|
/src/einsteinpy/symbolic/predefined/bessel_gravitational_wave.py
|
161bb108c4292e5941e389c0f46e17f7a3be4686
|
[
"MIT",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
GTron-1729/einsteinpy
|
dafd1885fe4b0184c1c4332dc3eea97e5c213b72
|
0d14579abb790edcc8f5990a6123889ba732b585
|
refs/heads/master
| 2021-03-30T14:44:37.001809
| 2020-03-18T07:29:43
| 2020-03-18T14:46:51
| 248,064,349
| 1
| 0
|
MIT
| 2020-03-17T20:14:05
| 2020-03-17T20:14:04
| null |
UTF-8
|
Python
| false
| false
| 1,606
|
py
|
from sympy import Function, Symbol, besselj, cos, diag, exp, sin, sqrt, symbols
from einsteinpy.symbolic import constants
from einsteinpy.symbolic.metric import MetricTensor
def BesselGravitationalWave(C=symbols("C")):
    """
    Exact gravitational wave solution without diffraction.
    Class. Quantum Grav., 16:L75-78, 1999.
    D. Kramer.

    An exact solution describing an axisymmetric gravitational wave propagating in the
    z-direction in closed form. This solution to Einstein's vacuum field equations has the
    remarkable property that the curvature invariants decrease monotonically with increasing radial
    distance from the axis and vanish at infinity. The solution is regular at the symmetry axis.

    Parameters
    ----------
    C : ~sympy.core.basic.Basic or int or float
        Constant for Bessel metric, the choice of the constant is not really relavent for details see the paper. Defaults to 'C'.
    """
    coords = symbols("t rho phi z")
    t, rho, ph, z = coords

    # sympy's besselj signature is besselj(nu, z): order first, argument
    # second.  Kramer's solution uses J0(rho) and J1(rho) -- fixed integer
    # orders evaluated at the radial coordinate -- so the calls must be
    # besselj(0, rho) and besselj(1, rho).  The previous besselj(rho, 0)
    # evaluated Bessel functions of symbolic order at zero instead.
    U = C * besselj(0, rho) * cos(t)
    K = (
        (1 / 2)
        * (C ** 2)
        * rho
        * (
            (rho * ((besselj(0, rho) ** 2) + (besselj(1, rho) ** 2)))
            - (2 * besselj(0, rho) * besselj(1, rho) * (cos(t) ** 2))
        )
    )

    # Weyl-type line element in cylindrical coordinates (t, rho, phi, z),
    # lower-index ("ll") metric.
    metric = diag(
        -1 * exp(-2 * U) * exp(2 * K),
        exp(-2 * U) * exp(2 * K),
        exp(-2 * U) * (rho ** 2),
        exp(2 * U),
    ).tolist()
    return MetricTensor(metric, coords, "ll")
|
[
"s.ritwik98@gmail.com"
] |
s.ritwik98@gmail.com
|
9ee673ca981a636ee2e60a8f4ae3a5a38fa35162
|
163959194f7affb1fac6fdc8bb0d7fdf1162d513
|
/getPic_web/keyword_extraction/new_keyword.py
|
3834c08d15191203a38e901d8d9ad5f0e3819bf5
|
[] |
no_license
|
yookyungkho/getPic
|
bfe648aaa5071d42403fceb25048ef47b4063636
|
2d12d343c519b157f64829067dc4b3c9b97d1e07
|
refs/heads/master
| 2023-03-05T05:14:15.090213
| 2021-02-22T10:45:20
| 2021-02-22T10:45:20
| 280,839,513
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 635
|
py
|
from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd
class new_KeywordSummarizer:
    """Keyword extractor that ranks terms by their summed TF-IDF weight."""

    def __init__(self, tokenize=None):
        # `tokenize` must expose a `.tokenizer` callable; the default None
        # would fail in new_keyword -- presumably always supplied (TODO confirm).
        self.tokenize = tokenize

    def new_keyword(self, sents):
        """Return the ten highest-scoring terms across the sentences."""
        vectorizer = TfidfVectorizer(tokenizer=self.tokenize.tokenizer)
        tfidf = vectorizer.fit_transform(sents)
        terms = vectorizer.get_feature_names()
        frame = pd.DataFrame(tfidf.todense().tolist(), columns=terms)
        scores = dict(frame.sum(axis=0))
        ranked = sorted(scores.keys(), reverse=True, key=lambda term: scores[term])
        return ranked[:10]
|
[
"kmmnjng528@gmail.com"
] |
kmmnjng528@gmail.com
|
94f1dfb204610733217a723111e92cc59c5c0a91
|
7d8ba9231df27c12b86a58e55d9b7b85ad47ef8f
|
/app/decorators.py
|
3674087e3318433ee3dc3a05c3891550dab571d0
|
[] |
no_license
|
lence-wl/flask-myBlog
|
0d2eb65452306c652d613dfb1eb1e7a1051f5d04
|
1f4ae07efff44c165690f8667d64e0deb8cf44c0
|
refs/heads/master
| 2020-04-11T10:48:51.807751
| 2018-12-14T03:42:05
| 2018-12-14T03:42:05
| 161,727,046
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 671
|
py
|
#!sur/bin/env python
#-*- coding:utf-8 -*-
"""
@author: lence
@file: decorators.py.py
@time: 2018/11/01
"""
from functools import wraps
from flask import abort
from flask_login import current_user
from .models import Permission
#检查常规权限的装饰器
def permission_required(permission):
    """Decorator factory: abort with HTTP 403 unless the current user holds *permission*.

    FIX: the original parameter was named ``Permission``, shadowing the
    imported ``Permission`` model class inside this function; renamed to
    ``permission`` (all call sites in this module pass it positionally).
    """
    def decorator(f):
        @wraps(f)
        def decorated_function(*args, **kwargs):
            # Deny access before the wrapped view runs.
            if not current_user.can(permission):
                abort(403)
            return f(*args, **kwargs)
        return decorated_function
    return decorator
#检查管理员权限的装饰器
def admin_required(f):
    """Shortcut decorator: require the ADMINISTER permission for view *f*."""
    wrapped = permission_required(Permission.ADMINISTER)
    return wrapped(f)
|
[
"lence0516@163.com"
] |
lence0516@163.com
|
b09385c1908e01162b4d125b5b156ad5bece8328
|
ddbe710362cd1c00d068964f905b8f2b509d7b32
|
/Final_Database/TableSchema.py
|
90b68416e6ab84168be78fb87cb04c49868f2bc5
|
[] |
no_license
|
lnicholsonmesm/ETL-Project
|
20964183331ea68cecc48136ccfbd422dd1d357d
|
835e50d1551459e652a05af703a6dd138d1b6705
|
refs/heads/master
| 2022-11-08T05:33:10.431304
| 2020-06-14T06:20:31
| 2020-06-14T06:20:31
| 270,881,335
| 2
| 0
| null | 2020-06-13T22:34:30
| 2020-06-09T02:12:20
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,593
|
py
|
import psycopg2
#from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
from config import username, password
#CREATE DATABASE
create_db_query = 'CREATE DATABASE MovieDB'
# NOTE(review): create_schema_query is defined but never executed below —
# confirm whether the ETL schema is created elsewhere.
create_schema_query = 'CREATE SCHEMA IF NOT EXISTS ETL'
# CREATE TABLE SCHEMA
ETL_actors = '''DROP TABLE IF EXISTS actors_role;
CREATE TABLE actors_role(id INT PRIMARY KEY
,names VARCHAR
,last_role VARCHAR)'''
ETL_movies_info = '''DROP TABLE IF EXISTS movies_info;
CREATE TABLE movies_info(movie_id INT PRIMARY KEY
,title VARCHAR
,plot VARCHAR
,IMDb_rating NUMERIC
,main_cast_id INT
,main_cast_name VARCHAR
,other_cast_id INT
,other_cast_name VARCHAR
)'''
#CONNECT TO DATABASE AND CREATE CURSOR OBJECT TO DO THINGS
# NOTE(review): this connection targets the 'postgres' maintenance database,
# so the tables below are created in 'postgres', not in the newly created
# MovieDB — verify whether a reconnect to MovieDB was intended.
conn = psycopg2.connect(database="postgres", user=username, password=password, host='localhost', port= '5432')
conn.autocommit = True #or we could commit with conn.commit()
cursor = conn.cursor()
#CREATE DATABASE
cursor.execute(create_db_query)
print("Successfully created database")
cursor.execute(ETL_actors)
print("Successfully created actors_role")
cursor.execute(ETL_movies_info)
print("Successfully created movies_info")
|
[
"61956735+lnicholsonmesm@users.noreply.github.com"
] |
61956735+lnicholsonmesm@users.noreply.github.com
|
f907f8844bc20138ecc531023bbcc84f708cbf9f
|
256cb39311d30ce25c27842dd156b0e381f6e820
|
/app/model/Job_Action.py
|
3be07e42159f5a18cf56fdf5f7ed2cb68af7918b
|
[
"MIT"
] |
permissive
|
bvbgrad/betterJob
|
d274898fd424267cee0755c865452be7331589d6
|
d54962d31eafde428fa7309c8bf5ac238bf6a640
|
refs/heads/master
| 2023-05-09T02:38:38.588070
| 2021-05-31T02:50:33
| 2021-05-31T02:50:33
| 322,061,685
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,274
|
py
|
"""
Database model and methods
business object: Job Actions
"""
from sqlalchemy import Column, ForeignKey, Integer, String, Date
from app.model import Base
from datetime import date
class Job(Base):
    """SQLAlchemy ORM model for a job posting, plus simple query helpers.

    Columns mirror the ``job`` table; ``company_IdFK`` references
    ``company.company_Id``.
    """
    __tablename__ = 'job'

    job_Id = Column(Integer, primary_key=True)
    job_name = Column(String(50))
    job_nbr = Column(String(30))
    # bvb TODO change priority to status # active, fav1-5, inactive
    priority = Column(String(10))
    job_type = Column(String(10))
    job_post_date = Column(Date())
    job_expire_date = Column(Date())
    salary_min = Column(Integer)
    salary_max = Column(Integer)
    company_IdFK = Column(Integer, ForeignKey('company.company_Id'))

    def __init__(
            self, job_name=None, job_nbr=None, priority=3, job_type=None,
            job_post_date=None, job_expire_date=None,
            salary_min=0, salary_max=None, company_IdFK=0):
        self.job_name = job_name
        self.job_nbr = job_nbr
        self.priority = priority
        self.job_type = job_type
        if job_post_date is None:
            # Default the posting date to today when none is supplied.
            self.job_post_date = date.today()
        else:
            # BUG FIX: the original branch read ``self.job_post_date`` without
            # assigning it, silently discarding the caller-supplied date.
            self.job_post_date = job_post_date
        self.job_expire_date = job_expire_date
        self.salary_min = salary_min
        self.salary_max = salary_max
        self.company_IdFK = company_IdFK

    def __repr__(self):
        """Debug representation listing every column value."""
        return f"Job(Id={self.job_Id!r}, name={self.job_name!r}, " +\
            f"job_nbr={self.job_nbr}, priority={self.priority}, " +\
            f"job_type={self.job_type}, " +\
            f"job_post_date={self.job_post_date}, " +\
            f"job_expire_date={self.job_expire_date}, " +\
            f"salary_min={self.salary_min}, salary_max={self.salary_max}, " +\
            f"company_IdFK={self.company_IdFK})"

    def add_job(self, db, job):
        """Stage *job* for insertion in session *db* (caller commits)."""
        db.add(job)

    def get_job_count(self, db):
        """Return the total number of Job rows."""
        number_jobs = db.query(Job).count()
        return number_jobs

    def get_job_count_by_company(self, db, company_id):
        """Return how many jobs belong to *company_id*."""
        return db.query(Job).filter_by(company_IdFK=company_id).count()

    def get_job_by_company(self, db, company_id):
        """Return all jobs belonging to *company_id*."""
        return db.query(Job).filter_by(company_IdFK=company_id).all()

    def get_all_jobs(self, db):
        """Return every Job row."""
        job_list = db.query(Job).all()
        return job_list
|
[
"bvbgrad@gmail.com"
] |
bvbgrad@gmail.com
|
0861499e55823f78d671498aeddd26d9b458d104
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_67/85.py
|
d68b82b4d6c548dceb594577cb58f6b243360429
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,630
|
py
|
def readInts():
    """Read one stdin line; return a single int, or a list of ints if several.

    (Python 2 source: uses raw_input.)
    """
    tokens = raw_input().split()
    if len(tokens) == 1:
        return int(tokens[0])
    return [int(tok) for tok in tokens]
def readString():
    """Read a single line from stdin and return it unchanged."""
    line = raw_input()
    return line
def main():
    """Solve T test cases read from stdin (Python 2 Code Jam solution).

    Each case reads R axis-aligned rectangles of occupied cells into a sparse
    nested-dict grid, then repeatedly builds the next generation of cells that
    have a neighbour below / to the lower-left / upper-left, counting
    iterations until no cell survives; the count is printed per case.
    """
    t = readInts()
    case = 1
    for i in xrange(t):
        r = readInts()
        brd = {}
        brd2 = {}
        for rr in xrange(r):
            x1, y1, x2, y2 = readInts()
            # Normalise the rectangle so (x1, y1) is the lower-left corner.
            if x1 > x2:
                x1, x2 = x2, x1
            if y1 > y2:
                y1, y2 = y2, y1
            # Mark every cell of the rectangle in the sparse grid.
            for x in xrange(x1, x2 + 1):
                if not brd.has_key(x):
                    brd[x] = {}
                for y in xrange(y1, y2 + 1):
                    brd[x][y] = 1
        count = 0
        while True:
            # Build the next generation of cells in brd2.
            for k in brd:
                x = k
                for y in brd[x]:
                    # Survives if supported directly below, or by the cell at (x-1, y).
                    if brd[x].has_key(y - 1) or brd.has_key(x - 1) and brd[x - 1].has_key(y):
                        if not brd2.has_key(x):
                            brd2[x] = {}
                        brd2[x][y] = 1
                    # A cell at (x-1, y+1) also propagates to (x, y+1).
                    if brd.has_key(x - 1) and brd[x - 1].has_key(y + 1):
                        if not brd2.has_key(x):
                            brd2[x] = {}
                        brd2[x][y + 1] = 1
            count += 1
            if not brd2:
                # No cell survived into the next generation: done.
                break
            brd = brd2
            brd2 = {}
        print 'Case #%d: %d' % (case, count)
        case += 1
# Script entry point.
if __name__ == '__main__':
    main()
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
f310e26c49fb95a9932897541a80ff45b89157ef
|
c10ee508ab7dbfd2053849aa964219a0831936fe
|
/read_camera.py
|
7dec92f6afda55ae54d615258cb5ea71766f65e0
|
[] |
no_license
|
amiltonwong/traffic_sign
|
a2aceba6ede9b5f70b4c8735dfbce7fcad9ea0da
|
d7a80bcd42e26f6a8a98b645e9cbf7efc5031dd6
|
refs/heads/master
| 2020-05-04T19:50:28.482519
| 2019-04-04T02:55:14
| 2019-04-04T02:55:14
| 179,409,891
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 640
|
py
|
import sys
import os
import cv2
from PIL import Image
# Usage: python read_camera.py <output_dir> <image_count>
directory = sys.argv[1]
imagecount = int(sys.argv[2])
os.makedirs(directory, exist_ok=True)
video = cv2.VideoCapture(0)
# Continue numbering from however many files already exist in the directory.
filename = len(os.listdir(directory))
count = 0
while True and count < imagecount:
    filename += 1
    count += 1
    _, frame = video.read()
    # NOTE(review): OpenCV frames are BGR but Image.fromarray(..., 'RGB')
    # interprets them as RGB, so saved colours are channel-swapped — confirm
    # whether this is intended.
    im = Image.fromarray(frame, 'RGB')
    im = im.resize((128,128))
    im.save(os.path.join(directory, str(filename)+".jpg"), "JPEG")
    cv2.imshow("Capturing", frame)
    key=cv2.waitKey(1)
    # Allow early exit with the 'q' key.
    if key == ord('q'):
        break
video.release()
cv2.destroyAllWindows()
|
[
"mb55420@umac.mo"
] |
mb55420@umac.mo
|
41fee592304b050544009c33fb9927497f274ee1
|
cba0f1286e4271ac35101a25d5040b2e4f405bde
|
/cgi-bin/admin/severe2/advanced/cases/release.py.cln
|
9ba0f67d21bb665fbc00853ee76a3e4f9b6eba5d
|
[] |
no_license
|
akrherz/pals
|
271c92d098909abb5b912db4ae08f0c3589e5ec7
|
adc213333fb23dc52d6784ce160c4ff8a8f193e3
|
refs/heads/master
| 2021-01-10T15:01:59.570168
| 2019-12-18T16:59:08
| 2019-12-18T16:59:08
| 45,484,297
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,865
|
cln
|
#!/usr/local/bin/python
# This will allow instructors to release cases
# Daryl Herzmann 12-7-99
import pg, cgi, time, SEVERE2
# Parsed CGI form data and the two PostgreSQL connections (port 5555).
form = cgi.FormContent()
mydb = pg.connect('severe2','localhost', 5555)
mydb2 = pg.connect('severe2_adv','localhost', 5555)
def list_cases2():
    """Print an HTML <select> of cases already released (rows of basecases)."""
    print '<SELECT name="caseNum" size="10">'
    cases2 = mydb2.query("SELECT casenum from basecases").getresult()
    cases2.sort()
    for i in range(len(cases2)):
        thiscase = cases2[i][0]
        # SECURITY NOTE(review): the case number is concatenated straight into
        # SQL — injection risk if basecases can hold untrusted values.
        cases = mydb.query("SELECT date(startTime) from cases WHERE casenum = '"+thiscase+"' ").getresult()
        startTime = cases[0][0]
        print '<OPTION value="'+thiscase+'">'+thiscase+' '+startTime
    print '</SELECT><BR>'
def Main():
    """CGI entry point: apply an add/delete of a released case, then render the page."""
    if form.has_key("type"):
        caseNum = form["caseNum"][0]
        type = form["type"][0]
        # SECURITY NOTE(review): caseNum comes directly from the form and is
        # concatenated into SQL — injection risk; parameterise if possible.
        if type == "add":
            # Delete-then-insert keeps basecases free of duplicates.
            delete = mydb2.query("DELETE from basecases WHERE caseNum = '"+caseNum+"' ")
            hello = mydb2.query("INSERT into basecases VALUES ('"+caseNum+"')")
        else:
            hello = mydb2.query("DELETE from basecases WHERE caseNum = '"+caseNum+"' ")
    print 'Content-type: text/html \n\n'
    print '<H3 align="CENTER">Pick A Case</H3>'
    print '<P>Release or remove cases to your hearts content.'
    print '<TABLE WIDTH="100%"><TR><TD>'
    print '<H3>Cases available</H3>'
    print '<FORM name="add" METHOD="POST" ACTION="release.py">'
    print '<input type="hidden" name="type" value="add">'
    SEVERE2.listAllCases()
    print '<input type="submit" value="Add This Case">'
    print '</form><BR>'
    print '</TD><TD>'
    print '<H3>Cases allready released</H3>'
    print '<FORM name="del" METHOD="POST" ACTION="release.py">'
    print '<input type="hidden" name="type" value="del">'
    list_cases2()
    print '<input type="submit" value="Delete This Case">'
    print '</form><BR>'
    print '</TD></TR></TABLE>'
    print '<HR><a href="/admin/index.html">Back to Admin Page</a>'
# Run immediately on CGI invocation.
Main()
|
[
"akrherz@iastate.edu"
] |
akrherz@iastate.edu
|
978d29b943992807aa0a8037ada1eeff0540ba1c
|
287b60b06bed640418c9a4fc49b03ddb4ac53345
|
/models/miura_sheet/analyze_traj.py
|
08834223fcaad2875e4def020be85919bb2fb7e5
|
[
"MIT"
] |
permissive
|
saridut/FloriPy
|
29f56e63354073a57281b00dea4f2c6af28027d4
|
0117d358b9c2362ea32ecf9ec719fdaed87d3e14
|
refs/heads/main
| 2023-01-25T01:30:29.617918
| 2020-12-02T00:48:56
| 2020-12-02T00:48:56
| 317,380,514
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,755
|
py
|
#!/usr/bin/env python
import math
import csv
import numpy as np
from floripy.mathutils import xform as tr
from floripy.mathutils.linalg import unitized
from .miura_sheet_trajectory import MiuraSheetTrajectory
def get_phi_theta(v):
    '''
    v: (3,) ndarray
    Returns phi and theta in degrees.
    phi: Angle measured from the y-axis of the projection on the xy-plane,
        computed as atan2(x, y), so -pi<=phi<=pi
    theta: Angle measured from the z-axis, computed as acos(z), so
        0<=theta<=pi (the original comment claimed -pi/2<=theta<=pi/2,
        which acos cannot produce)
    '''
    vec = unitized(v)
    phi = math.atan2(vec[0], vec[1])
    theta = math.acos(vec[2])
    phi_deg = math.degrees(phi)
    theta_deg = math.degrees(theta)
    return phi_deg, theta_deg
def basic_data(fn_traj, fn_model, fn_data):
    """Walk every frame of a Miura-sheet trajectory and write per-frame
    geometry/orientation measurements to the CSV file *fn_data*.

    fn_traj, fn_model: inputs for MiuraSheetTrajectory.
    fn_data: output CSV path (overwritten).
    """
    #Open trajectory file and calculate
    mt = MiuraSheetTrajectory(fn_traj, fn_model)
    num_frames = len(mt)
    print('Number of frames: ', num_frames)
    field_names = ['time', 'beta',
            'chord', 'chord_normalized',
            'span', 'span_normalized',
            'aspect_ratio', 'aspect_ratio_normalized',
            'theta_director', 'theta_codirector', 'theta_bidirector',
            'phi_director', 'phi_codirector','phi_bidirector',
            'roll', 'yaw', 'pitch',
            'comx', 'comy', 'comz',
            'directorx', 'directory', 'directorz',
            'codirectorx', 'codirectory', 'codirectorz',
            'bidirectorx', 'bidirectory', 'bidirectorz']
    # The same dict is reused for every row; DictWriter copies the values out.
    data = {}
    with open(fn_data, 'w') as fh_data:
        writer = csv.DictWriter(fh_data, field_names)
        writer.writeheader()
        for k in range(num_frames):
            # Progress indicator every 100 frames.
            print('Frame: ', k) if k%100==0 else None
            time, ms = mt.get_frame(k)
            data['time'] = time
            data['beta'] = math.degrees(ms.beta)
            data['chord'] = ms.chord
            data['chord_normalized'] = ms.chord/ms.max_chord
            data['span'] = ms.span
            data['span_normalized'] = ms.span/ms.max_span
            data['aspect_ratio'] = ms.aspect_ratio
            data['aspect_ratio_normalized'] = ms.aspect_ratio/ms.max_aspect_ratio
            director = ms.director
            codirector = ms.codirector
            bidirector = ms.bidirector
            data['directorx'] = director[0]
            data['directory'] = director[1]
            data['directorz'] = director[2]
            data['codirectorx'] = codirector[0]
            data['codirectory'] = codirector[1]
            data['codirectorz'] = codirector[2]
            data['bidirectorx'] = bidirector[0]
            data['bidirectory'] = bidirector[1]
            data['bidirectorz'] = bidirector[2]
            #Theta: Angle measured from the z-axis, via acos (see get_phi_theta)
            #Phi: Angle measured from the y-axis of the projection on the xy-plane
            #-pi<=phi<=pi
            phi_director, theta_director = get_phi_theta(director)
            phi_codirector, theta_codirector = get_phi_theta(codirector)
            phi_bidirector, theta_bidirector = get_phi_theta(bidirector)
            data['theta_director'] = theta_director
            data['theta_codirector'] = theta_codirector
            data['theta_bidirector'] = theta_bidirector
            data['phi_director'] = phi_director
            data['phi_codirector'] = phi_codirector
            data['phi_bidirector'] = phi_bidirector
            ori = ms.orientation
            # Body-frame XYZ Euler angles (degrees) from the orientation quaternion.
            ori_euler_body = np.rad2deg(tr.quat_to_euler(ori, seq='XYZ', world=False))
            data['roll'], data['yaw'], data['pitch'] = tuple(ori_euler_body)
            data['comx'], data['comy'], data['comz'] = tuple(ms.com)
            writer.writerow(data)
    mt.close()
|
[
"saridut@gmail.com"
] |
saridut@gmail.com
|
1c80d581650c8d33363badd30f0b28cf5ee5a1d2
|
5680657baab8d64612c8a4cdeeb09ce69c9120e2
|
/yolo_detection/yolo.py
|
632e5123e5bf05ddfc0c66c8a69914a7d7ff9919
|
[] |
no_license
|
vinodgit44/opencv
|
d489f0426a65e90f5390f2aa5b5fb5b701fa87bb
|
dce5cad3b783b869a5396b457bad6fe210b75c9d
|
refs/heads/master
| 2023-08-04T05:07:12.289034
| 2021-09-26T12:17:20
| 2021-09-26T12:17:20
| 408,375,335
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,079
|
py
|
import cv2 as cv
import numpy as np

cap = cv.VideoCapture("video.mp4")

# Network input size expected by YOLOv2-tiny.
width = 320
height = 320
confThreshold =0.5
nmsThreshold= 0.2

#### LOAD MODEL
## Coco Names
classesFile = "coco.names"
classNames = []
with open(classesFile, 'rt') as f:
    # BUG FIX: the original used rstrip('n'), which strips literal 'n'
    # characters and corrupts any final class name ending in 'n'; strip the
    # trailing newline instead.
    classNames = f.read().rstrip('\n').split('\n')
print(classNames)

## Model Files
modelConfiguration = "./input/yolov2-tiny.cfg"
modelWeights = "./input/yolov2-tiny.weights"
net = cv.dnn.readNetFromDarknet(modelConfiguration, modelWeights)
net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)
def findObjects(outputs,img):
    """Decode YOLO output tensors, apply NMS, and draw boxes/labels on *img*.

    Uses module globals: np, cv, confThreshold, nmsThreshold, classNames.
    Each detection row is [cx, cy, w, h, objness, class scores...] with
    coordinates normalised to the image size.
    """
    hT, wT, cT = img.shape
    bbox = []
    classIds = []
    confs = []
    for output in outputs:
        for det in output:
            scores = det[5:]
            classId = np.argmax(scores)
            print(classId)
            confidence = scores[classId]
            if confidence > confThreshold:
                # Convert normalised centre/size to pixel top-left + size.
                w,h = int(det[2]*wT) , int(det[3]*hT)
                x,y = int((det[0]*wT)-w/2) , int((det[1]*hT)-h/2)
                bbox.append([x,y,w,h])
                classIds.append(classId)
                confs.append(float(confidence))
    # Non-maximum suppression keeps the best box per overlapping cluster.
    indices = cv.dnn.NMSBoxes(bbox, confs, confThreshold, nmsThreshold)
    for i in indices:
        # NOTE(review): i[0] assumes an older OpenCV where NMSBoxes returns
        # nested arrays; newer versions return flat ints — confirm version.
        i = i[0]
        box = bbox[i]
        x, y, w, h = box[0], box[1], box[2], box[3]
        # print(x,y,w,h)
        cv.rectangle(img, (x, y), (x+w,y+h), (255, 0 , 255), 2)
        cv.putText(img,f'{classNames[classIds[i]].upper()} {int(confs[i]*100)}%',
                  (x, y-10), cv.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 255), 2)
# Main capture loop: grab a frame, run the network, draw detections.
while True:
    suc, img = cap.read()
    # Allow quitting with the 'q' key.
    if cv.waitKey(1) == ord('q'):
        break
    # img=cv.imread("images.jpeg")
    blob = cv.dnn.blobFromImage(img, 1 / 255, (width, height), [0, 0, 0], 1, crop=False)
    net.setInput(blob)
    layersNames = net.getLayerNames()
    # NOTE(review): i[0] again assumes the older nested-array OpenCV API.
    outputNames = [(layersNames[i[0] - 1]) for i in net.getUnconnectedOutLayers()]
    outputs = net.forward(outputNames)
    findObjects(outputs,img)
    cv.imshow('Image', img)
    cv.waitKey(1)
|
[
"bhandarivinod43@gmail.com"
] |
bhandarivinod43@gmail.com
|
aa6d18ea83f9ad6357f4c67f63f1c726924bc095
|
85aecab4c2bfeb7791eed44ac87998e809918a69
|
/communityprofiles/profiles/urls.py
|
dde460ae400410f597a96e3294a54a154e50ac58
|
[] |
no_license
|
DataSparkRI/Profiles
|
ab5b4042123f3061d3dbb2b58c2138c96a298b06
|
64123355a9d2dc19aa6b97a0ee936d77ddcc64d1
|
refs/heads/master
| 2021-10-11T21:15:56.679229
| 2019-01-29T21:35:24
| 2019-01-29T21:35:24
| 18,562,836
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,496
|
py
|
from django.conf.urls.defaults import *
# URL routes for the profiles app (legacy Django string-view `patterns` syntax).
# NOTE(review): django.conf.urls.defaults was removed in Django 1.6 — confirm
# the project pins an old Django version.
urlpatterns = patterns('',
    url(r'^data_display/', include('data_displays.urls')),
    url(r'^dataview/(?P<level_slug>[-\w]+)/(?P<geo_slug>[-\w]+)/(?P<indicator_slug>[-\w]+)/$', 'profiles.views.data_view', name='data_view'),
    url(r'^indicator/info/$', 'profiles.views.indicator_info', name='indicator_info'),
    url(r'^indicator_data/multi/$', 'profiles.views.multi_indicator_json', name='multi_indicator_json'),
    url(r'^api/geo/$','profiles.views.geography_list_json', name='geo_api'),
    url(r'^api/raw/$','profiles.admin_views.raw_indicator_json', name='raw_indicator_json'),
    url(r'^geojson/$', 'profiles.views.geojson', name='geojson'),
    url(r'^preview/$','profiles.admin_views.admin_preview', name='admin_preview'),
    url(r'^export/$', 'profiles.views.export_csv', name='export_csv'),
    url(r'^i_a/(?P<indicator_id>[\d]+)/$', 'profiles.admin_views.indicator_action', name="indicator_action"),
    # Catch-all hierarchy: level / record / domain / indicator (most to least specific).
    url(r'^(?P<geo_level_slug>[-\w]+)/(?P<geo_record_slug>[-\w]+)/(?P<data_domain_slug>[-\w]+)/(?P<indicator_slug>[-\w]+)/$',
        'profiles.views.indicator', name='indicator'),
    url(r'^(?P<geo_level_slug>[-\w]+)/(?P<geo_record_slug>[-\w]+)/(?P<data_domain_slug>[-\w]+)/$', 'profiles.views.data_domain', name='data_domain'),
    url(r'^(?P<geo_level_slug>[-\w]+)/(?P<geo_record_slug>[-\w]+)/$', 'profiles.views.geo_record', name='geo_record'),
    url(r'^(?P<geo_level_slug>[-\w]+)/$', 'profiles.views.geo_level', name='geo_level'),
)
|
[
"asmedrano@gmail.com"
] |
asmedrano@gmail.com
|
b4623a921ad3dc6202d291d2c9b1fc536769e4e5
|
8f16434074a47d2b646fb6c56275bea94155954e
|
/src/currency/api/views.py
|
f595bfbd147619fcda632ca045a9b9a9112dc853
|
[] |
no_license
|
VadimNedojnov/currency_exhange
|
fe9bff3f1bc92db0eed2ac51c04a3f747b76ba97
|
10ba4fc19345ac52b27cb7788ba593f2f91f5753
|
refs/heads/master
| 2022-12-10T02:10:46.235623
| 2020-04-06T18:37:02
| 2020-04-06T18:37:02
| 240,323,235
| 0
| 0
| null | 2022-12-08T03:43:51
| 2020-02-13T17:43:11
|
Python
|
UTF-8
|
Python
| false
| false
| 1,442
|
py
|
from rest_framework import generics
from django_filters import rest_framework as filters
from django_filters import DateFromToRangeFilter
from django_filters.widgets import RangeWidget
from currency.api.serializers import RateSerializer, ContactSerializer
from currency.models import Rate
from account.models import Contact
class RateFilter(filters.FilterSet):
    """FilterSet for Rate list endpoints.

    BUG FIX: ``created_range`` was originally declared *inside* ``Meta``,
    where django-filter ignores it entirely; declared filters must live on
    the FilterSet class body to take effect. ``field_name`` is set explicitly
    so the range filters the ``created`` column.
    """
    created_range = DateFromToRangeFilter(
        field_name='created',
        widget=RangeWidget(attrs={'placeholder': 'YYYY/MM/DD'}),
    )

    class Meta:
        model = Rate
        fields = {
            'created': ['exact', 'gt', 'lt', 'gte', 'lte', 'range'],
            'currency': ['exact', ],
            'source': ['exact', ],
        }
class RatesView(generics.ListCreateAPIView):
    """List/create endpoint for rates, filterable via RateFilter."""
    queryset = Rate.objects.all()
    # queryset = Rate.objects.all()[:20] WRONG
    serializer_class = RateSerializer
    filter_backends = (filters.DjangoFilterBackend,)
    filterset_class = RateFilter
class RateView(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve/update/delete a single rate by primary key."""
    queryset = Rate.objects.all()
    serializer_class = RateSerializer
class ContactsView(generics.ListCreateAPIView):
    """List/create contacts, restricted to the requesting user's own record."""
    queryset = Contact.objects.all()
    serializer_class = ContactSerializer
    def get_queryset(self):
        queryset = super().get_queryset()
        # NOTE(review): matches Contact.id against request.user.id — assumes the
        # contact pk equals the user pk; confirm against the account app.
        return queryset.filter(id=self.request.user.id)
class ContactView(generics.RetrieveUpdateAPIView):
    """Retrieve/update a single contact (delete is intentionally not exposed)."""
    queryset = Contact.objects.all()
    serializer_class = ContactSerializer
|
[
"vadim.nedoynov@gmail.com"
] |
vadim.nedoynov@gmail.com
|
6d58e5c66006d6909676396794ce59a4dcc2c967
|
0141eec18296ef01c17fd2004c257fc95af93d0c
|
/wildcat/Attention_pooling.py
|
c14ad0d6c50e70d74277873483c0646b9751bd0c
|
[
"MIT"
] |
permissive
|
ngonthier/wildcat_box
|
d03067e8970cdc9e8abf087298241246fca06eae
|
0d321dac85d4edbe615a3c609501a688f162f799
|
refs/heads/master
| 2023-04-04T13:50:51.412299
| 2021-04-23T09:13:01
| 2021-04-23T09:13:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,307
|
py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Attention(nn.Module):
    """Attention-based pooling over the spatial locations of a feature map.

    Input:  x of shape (batch, num_classes * num_maps, h, w).
    Output: logits of shape (batch, num_classes).  No sigmoid is applied
    here; the original relied on it being inside the loss
    (multilabel_soft_margin_loss).
    """

    def __init__(self,sizeMaps,num_maps,num_classes):
        super(Attention, self).__init__()
        self.num_maps = num_maps
        self.D = 128
        self.L = 128
        self.K = 1  # number of attention heads
        self.num_classes = num_classes
        self.sizeMaps = sizeMaps
        # Scores each spatial location from its num_classes*num_maps features.
        self.attention = nn.Sequential(
            nn.Linear(self.num_classes*self.num_maps, self.L),
            nn.Tanh(),
            nn.Linear(self.L, self.K)
        )
        # Final classifier on the attention-pooled feature vector.
        self.classifier = nn.Sequential(
            nn.Linear(self.num_classes*self.num_maps, self.num_classes),
            #nn.Sigmoid() # Because there is a sigmoid in multilabel_soft_margin_loss
        )

    def forward(self, x):
        """Pool x=(B, C, H, W) over H*W with learned attention; return (B, num_classes)."""
        batch_size = x.size(0)
        num_channels = x.size(1)
        h = x.size(2)
        w = x.size(3)
        # Flatten spatial dims: (B, C, H*W).
        x = x.view(batch_size, num_channels, h*w)
        # (B, H*W, C): rows are per-location feature vectors.
        xx = torch.transpose(x, 1, 2).contiguous()
        H = xx.view(-1, num_channels)
        A = self.attention(H)              # (B*H*W, K) raw attention scores
        A = A.view(batch_size, h*w, -1)    # (B, H*W, K)
        A = F.softmax(A, dim=1)            # softmax over spatial locations
        M = torch.bmm(x, A)                # (B, C, K) attention-weighted sum
        M = M.view(batch_size, -1)         # (B, C*K)
        Y_prob = self.classifier(M)
        return Y_prob
|
[
"nicolas.gonthier@telecom-paristech.fr"
] |
nicolas.gonthier@telecom-paristech.fr
|
1456280d503130f6d93613f592d8e3f393f9dc5d
|
723ec2becc30c37e0686649f5497066fb99371ed
|
/C22_Algorithms/C22_FizzBuzz.py
|
161d299e42cb59afa3d59228e0e5cf57be02d9b4
|
[] |
no_license
|
eztaban/theselftaughtprogrammer_althoff
|
bc2abb4e2d821337782052fcd43c03267e5cdcb5
|
cf4d1a544f6b33f4ce103cd8fa5fa7cdbd037f31
|
refs/heads/main
| 2023-07-15T03:43:44.589053
| 2021-08-25T14:15:48
| 2021-08-25T14:15:48
| 399,367,647
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 262
|
py
|
def fizz_buzz():
    """Print the classic FizzBuzz sequence for 1..100, one item per line."""
    for n in range(1, 101):
        by_three = n % 3 == 0
        by_five = n % 5 == 0
        if by_three and by_five:
            print("FizzBuzz")
        elif by_three:
            print("Fizz")
        elif by_five:
            print("Buzz")
        else:
            print(n)

fizz_buzz()
|
[
"iasonasmg@protonmail.com"
] |
iasonasmg@protonmail.com
|
9c2449a93b94ae03b31f80d3b9aab1392fda13c6
|
14d7075dc32fe6206a3a946f01600c608f090378
|
/demo.py
|
9a210a48e727f6477da3dddd23c2389fdf4b6bfe
|
[] |
no_license
|
CPS-AI/Learning-in-Frequency
|
2d4d434498a3efbff4db19da150daa38c7e82614
|
3708fe96d5042c2891ab4e880f62ae3be7ef6fa4
|
refs/heads/master
| 2023-04-06T20:26:48.982306
| 2021-04-14T20:03:58
| 2021-04-14T20:03:58
| 289,206,412
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,866
|
py
|
import os
import numpy as np
import tensorflow as tf
from tfrecord_utils import get_dataset
from stfnet import STFNet
LAYER_TYPE = "conv"
BATCH_SIZE = 32
SERIES_SIZE = 512
SENSOR_CHANNEL = 3
SENSOR_NUM = 2
CLASS_NUM = 10
EPOCH_NUM = 10000000
SAVE_EPOCH_NUM = 500
TRAIN_TFRECORD = os.path.join("tfrecords", "speech", "train.tfrecord")
TEST_TFRECORD = os.path.join("tfrecords", "speech", "train.tfrecord")
# TRAIN_TFRECORD = os.path.join("tfrecords", "hhar", "train.tfrecord")
# TEST_TFRECORD = os.path.join("tfrecords", "hhar", "eval.tfrecord")
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
def main():
    """Build train/eval tfrecord pipelines, compile STFNet, and train with
    periodic weight checkpointing."""
    dataset_train = get_dataset(
        TRAIN_TFRECORD,
        BATCH_SIZE,
        SERIES_SIZE,
        SENSOR_NUM,
        SENSOR_CHANNEL,
        CLASS_NUM
    )
    dataset_eval = get_dataset(
        TEST_TFRECORD,
        BATCH_SIZE,
        SERIES_SIZE,
        SENSOR_NUM,
        SENSOR_CHANNEL,
        CLASS_NUM,
        shuffle_sample=False,
    )
    model = STFNet(LAYER_TYPE, CLASS_NUM, SENSOR_NUM, SENSOR_CHANNEL, BATCH_SIZE)
    # NOTE(review): this 'loss' object is never used — compile() below builds
    # its own identical loss instance.
    loss = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
    model.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate=1e-2, beta_1=0.9, beta_2=0.99),
        loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
        metrics=[tf.keras.metrics.CategoricalAccuracy()],
    )
    checkpoint_path = "demo/cp.ckpt"
    checkpoint_dir = os.path.dirname(checkpoint_path)
    # Save weights every SAVE_EPOCH_NUM * BATCH_SIZE samples.
    cp_callback = tf.keras.callbacks.ModelCheckpoint(
        filepath=checkpoint_path,
        save_weights_only=True,
        verbose=1,
        save_freq=SAVE_EPOCH_NUM * BATCH_SIZE,
    )
    model.fit(
        dataset_train,
        # steps_per_epoch=10,
        epochs=EPOCH_NUM,
        validation_data=dataset_eval,
        callbacks=[cp_callback]
    )
# Script entry point.
if __name__ == "__main__":
    main()
|
[
"leowangx2013@outlook.com"
] |
leowangx2013@outlook.com
|
e16230916f670e5d95b4539713a019cea4511803
|
646a83d3de5ff2d2dc0c6f7efbd3f459a6479a63
|
/HW2TermStructure/TermStructure.py
|
d3877d9c37e09e3f0e5935bfaa9bc2b1ccc7c0a3
|
[] |
no_license
|
Wangvory/AdvQuantFin
|
f35454f04ddcb80e80bd76bcf7e0e378322113ae
|
c198a7b04d4e88996c4baec2f926d71d566faddf
|
refs/heads/master
| 2020-12-26T20:57:05.019188
| 2020-12-14T19:30:18
| 2020-12-14T19:30:18
| 237,636,629
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,113
|
py
|
import math
from Bond import *
class TermStructure(object):
    """Bootstraps a semi-annual term structure (20 half-year tenors) from bonds.

    Rates are held in percent; index i corresponds to tenor (i + 1) / 2 years.
    """

    def __init__(self):
        self.bonds = []
        self.spot_rates = [0.0 for _ in range(20)]
        self.forward_6m_rates = [0.0 for _ in range(20)]
        self.discount_factors = [0.0 for _ in range(20)]

    def set_bonds(self, bonds):
        """Register the bonds used for bootstrapping."""
        self.bonds = bonds

    def get_spot_rate(self, index):
        return self.spot_rates[index]

    def get_forward_6m_rate(self, index):
        return self.forward_6m_rates[index]

    def get_discount_factor(self, index):
        return self.discount_factors[index]

    def compute_spot_rates(self):
        """Bootstrap the spot-rate curve from the registered benchmark bonds."""
        # Maps a bond name to the (known, unknown) index pair it bootstraps.
        spans = {
            "1y": (0, 1),
            "2y": (1, 3),
            "3y": (3, 5),
            "5y": (5, 9),
            "7y": (9, 13),
            "10y": (13, 19),
        }
        for bond in self.bonds:
            name = bond.get_name()
            if name == "6m":
                # The 6-month spot rate is just the bond's yield to maturity.
                self.spot_rates[0] = bond.compute_ytm()
            elif name in spans:
                lo, hi = spans[name]
                bond.bootstrap_spot_rate(self.spot_rates, lo, hi)

    def compute_discount_factors(self):
        """Convert each semi-annual spot rate into a discount factor."""
        for idx, rate in enumerate(self.spot_rates):
            self.discount_factors[idx] = math.pow(
                1.0 + rate / 100.0 / 2.0, -(idx + 1) / 2.0)

    def compute_forward_6m_rates(self):
        """Derive the implied 6-month forward rate at each tenor."""
        for idx in range(len(self.spot_rates) - 1):
            grow_long = math.pow(
                1.0 + self.spot_rates[idx + 1] / 100.0 / 2.0, (idx + 2) / 2.0)
            grow_short = math.pow(
                1.0 + self.spot_rates[idx] / 100.0 / 2.0, (idx + 1) / 2.0)
            self.forward_6m_rates[idx] = (
                math.pow(grow_long / grow_short, 1.0 / 0.5) - 1.0) * 100.0 * 2.0
if __name__ == "__main__":
    # (name, coupon, maturity) for each benchmark bond; issue date, coupon
    # frequency and price are common to all of them.  Replaces seven blocks
    # of copy-pasted construction code with a data-driven loop.
    BOND_SPECS = [
        ("6m", 4.0, 20130901),
        ("1y", 5.0, 20140301),
        ("2y", 7.0, 20150301),
        ("3y", 9.0, 20160301),
        ("5y", 9.25, 20180301),
        ("7y", 9.50, 20200301),
        ("10y", 10.0, 20230301),
    ]
    issue_date = 20130301
    compounding_frequency_per_annum = 2
    price = 100.0

    bonds = []
    for name, coupon, maturity_date in BOND_SPECS:
        bond = Bond(name, coupon, issue_date, maturity_date, compounding_frequency_per_annum)
        bond.set_price(price)
        bonds.append(bond)

    # Print the raw bond table with yields to maturity.
    print(f'Name\tCoupon\tIssueDate\tMaturityDate\tPrice\t\tYTM')
    for bond in bonds:
        print(
            f'{bond.get_name()}\t{bond.get_coupon():10.4f}\t{bond.get_issue_date()}\t{bond.get_maturity_date()}\t{bond.get_price():10.4f}\t{bond.compute_ytm():10.4f}')

    # Bootstrap the curve and print spot / discount / forward per tenor.
    term_structure = TermStructure()
    term_structure.set_bonds(bonds)
    term_structure.compute_spot_rates()
    term_structure.compute_discount_factors()
    term_structure.compute_forward_6m_rates()

    tenors = ["6m", "1y", "18m", "2y", "2.5y", "3y", "3.5y", "4y", "4.5y", "5y", "5.5y", "6y", "6.5y", "7y", "7.5y", "8y", "8.5y", "9y", "9.5y", "10y"]
    print(f'Tenor\tSpot Rate\tDiscount Factor\tForward 6m Rate')
    for i in range(20):
        print(f'{tenors[i]}\t{term_structure.get_spot_rate(i):10.4f}\t{term_structure.get_discount_factor(i):10.4f}\t{term_structure.get_forward_6m_rate(i):10.4f}')
|
[
"noreply@github.com"
] |
Wangvory.noreply@github.com
|
c6156127858d51a22d6fb2d77f57845b5803867c
|
8bdcf59378fcb9fc9d6ac4a1e75d8dd689cf5262
|
/testpaper/migrations/0020_auto_20200907_1255.py
|
f6920a79e1cdb05f9b39bdc39ae4cbd5e74d9b1b
|
[] |
no_license
|
gauravmakode/backend
|
f1407a410e7d1276dee2fafb9b088a386cbafed1
|
746f8ab85255e61ae9d21f73eb2f72b08ff7692d
|
refs/heads/main
| 2023-01-04T22:23:52.143601
| 2020-10-27T17:26:44
| 2020-10-27T17:26:44
| 307,774,046
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 394
|
py
|
# Generated by Django 3.1 on 2020-09-07 12:55
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: alters userresult.test_name to
    # CharField(max_length=100).  Do not hand-edit once applied.

    dependencies = [
        ('testpaper', '0019_auto_20200906_1323'),
    ]

    operations = [
        migrations.AlterField(
            model_name='userresult',
            name='test_name',
            field=models.CharField(max_length=100),
        ),
    ]
|
[
"gouravmmakode002@gmail.com"
] |
gouravmmakode002@gmail.com
|
2cd2652921cd3489a23bca8b196de9bd001c7f0b
|
4ed9d9e6fcbf0382af37135a9eb0d4d6bab6df82
|
/utils/model_utils.py
|
d22ab9fa94989f20daa51daed43031bb83bd2bbf
|
[
"MIT"
] |
permissive
|
haowei772/nli_test_bed
|
51bf8cde77d859ac3f3f40a377f6e88c11ba44a5
|
ed104398e12354200f77b4eefac53e0caa744431
|
refs/heads/master
| 2020-04-15T20:01:46.068627
| 2018-10-09T16:48:06
| 2018-10-09T16:48:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 834
|
py
|
import copy
import seaborn
import torch.nn as nn
def clones(module, N):
    """Return an nn.ModuleList holding N independent deep copies of *module*."""
    copies = [copy.deepcopy(module) for _ in range(N)]
    return nn.ModuleList(copies)
def clones_sequential(module, N):
    """Return an nn.Sequential of N independent deep copies of *module*.

    BUG FIX: the original passed a generator object directly to
    nn.Sequential, which raises TypeError (Sequential expects modules as
    positional arguments, or an OrderedDict); the generator must be unpacked.
    """
    return nn.Sequential(*(copy.deepcopy(module) for _ in range(N)))
class Bottle(nn.Module):
    """Mixin that lets a 2-D layer (e.g. nn.Linear) accept 3-D input.

    For input of shape (a, b, features), the first two dims are flattened to
    (a*b, features), the next class in the MRO performs its forward pass, and
    the result is reshaped back to (a, b, -1).  2-D input passes straight
    through to the MRO sibling's forward.
    """
    def forward(self, input):
        if len(input.size()) <= 2:
            return super(Bottle, self).forward(input)
        size = input.size()[:2]
        out = super(Bottle, self).forward(input.view(size[0]*size[1], -1))
        return out.view(size[0], size[1], -1)
class Linear(Bottle, nn.Linear):
    # nn.Linear that transparently handles 3-D (batch, seq, features) input:
    # the MRO routes forward() through Bottle, which flattens, applies
    # nn.Linear.forward, and reshapes back.
    pass
def draw(data, x, y, ax):
    """Render *data* as a seaborn heatmap on axes *ax*.

    x, y: tick labels for the columns/rows; colour range fixed to [0, 1]
    (intended for attention/probability matrices), no colour bar.
    """
    seaborn.heatmap(data,
        xticklabels=x, square=True, yticklabels=y, vmin=0.0, vmax=1.0,
        cbar=False, ax=ax)
|
[
"amir.kargarb@gmail.com"
] |
amir.kargarb@gmail.com
|
81ac21d7b63b06175e93fa6285ede6e54ec7a47b
|
df997d83d44a50242f1f0468c48d6fe678194cd7
|
/dheerendra.py
|
53f5e53bc960f306b4b71b8ce4fe7aec08193f6b
|
[] |
no_license
|
ashsingh16/Ashish_test
|
85c84bb871c247c2499cf7840302a19bacb8ae26
|
4772e8051ce357605b12ef69fac85face3168bcc
|
refs/heads/master
| 2022-02-13T13:11:57.130637
| 2018-03-08T14:04:26
| 2018-03-08T14:05:57
| 124,188,200
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,124
|
py
|
# -*- coding: utf-8 -*-
import sys,re,os,time,signal,datetime
def signal_handler(signum,frame):
    # Timer-signal handler: convert the alarm into an exception so the exam
    # can enforce a time limit.
    raise Exception("Time Out !!!!")
# Answer-sheet file and the exam start time expressed in minutes since midnight.
fh=open("exam_1", "w+")
a=datetime.datetime.now().hour*60+datetime.datetime.now().minute+datetime.datetime.now().second/60
result=[]
def exam():
print """Question 1 : In which one of the following page replacement policies, Belady’s anomaly may occur?
(A) FIFO (B) Optimal(C) LRU (D) MRU"""
ans=raw_input("Please Select the Correct Option from The above : ")
if ans.lower().strip() == "a".strip():
result.append("T")
else:
result.append("F")
print "Your Answers is Saved : "
b=datetime.datetime.now().hour*60+datetime.datetime.now().minute+datetime.datetime.now().second/60
print "Time Remaining for this test : ",30-(b-a),"Minutes"
print """Question 2 : Consider a main memory with five page frames and the following sequence of page references: 3, 8, 2, 3, 9, 1, 6, 3, 8, 9, 3, 6, 2, 1, 3. Which one of the following is true with respect to page replacement policies First In First Out(FIFO) and Least Recently Used(LRU)?
(A) Both incur the same number of page faults.(B) FIFO incurs 2 more page faults than LRU(C) LRU incurs 2 more page faults than FIFO(D) FIFO incurs 1 more page faults than LRU. """
print
ans=raw_input("Please Select the Correct Option from The above : ")
if ans.lower().strip() == "a".strip():
result.append("T")
else:
result.append("F")
print "Your Answers is Saved : "
b=datetime.datetime.now().hour*60+datetime.datetime.now().minute+datetime.datetime.now().second/60
print "Time Remaining for this test : ",30-(b-a),"Minutes"
print
print """Question 3 : A process refers to 5 pages, A, B, C, D and E in the following order A, B, C, D, A, B, E, A, B, C, D, E. If the page replacement algorithm is FIFO, the number of page transfer with an empty internal store of 3frames is:
A) 8 B) 10 C) 9 D) 7"""
print
ans=raw_input("Please Select the Correct Option from The above : ")
if ans.lower().strip() == "c".strip():
result.append("T")
else:
result.append("F")
print "Your Answers is Saved : "
b=datetime.datetime.now().hour*60+datetime.datetime.now().minute+datetime.datetime.now().second/60
print "Time Remaining for this test : ",30-(b-a),"Minutes"
print
print """Question 4 : A system has 3 processes sharing 4 resources. If each process needs a maximum of 2 units then :
A) deadlock can never occur B) deadlock may occur
C) deadlock has to occur D) None of the above"""
print
ans=raw_input("Please Select the Correct Option from The above : ")
if ans.lower().strip() == "a".strip():
result.append("T")
else:
result.append("F")
print "Your have answered all the question : "
print
print "You have Scored : ",result.count('T')*100/len(result),"%"
fh.write(" ".join(result)+"\n")
fh.close()
signal.signal(signal.SIGALRM, signal_handler)
signal.alarm(1800) # Ten seconds
try:
exam()
except Exception, msg:
fh.write(" ".join(result))
fh.close()
print "Timed out for this Test!"
|
[
"ashish301979@gmail.com"
] |
ashish301979@gmail.com
|
edcc2376fd7627d14edf000a76f493cfd3be1586
|
20cd34df5d9b2b5257f673c0abd577c7b64760f4
|
/boggle_gui.py
|
468534e278ea7f56aeadf5744013b9584ef78ac6
|
[] |
no_license
|
Michaliv/boggle-game
|
c2654dd07c57e806cf10660d7e52158042607f96
|
e2192dcf74e6253ee3255fb9f4697de8b9171127
|
refs/heads/master
| 2023-08-26T04:48:49.391464
| 2021-11-10T11:53:43
| 2021-11-10T11:53:43
| 397,701,449
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,540
|
py
|
###########################
# FILE: boggle.py
# WRITER: michal ivianski , michaliv , 207182452
# maya kedem, maya_kedem, 209544121
# EXERCISE: intro2cs1 ex12 2021
# DESCRIPTION: A program that runs Boggle game using GUI.
# WEB PAGES I USED: https://katzr.net/9da5f0
# https://katzr.net/d47847
##########################
import tkinter as tk
from ex12_utils import all_coords
from boggle_board_randomizer import randomize_board
class BoggleGui:
"""
The class which creates the GUI elements of the game
"""
# color palettes:
BUTTON_COLOR = "#3c8c9e"
BUTTON_ACTIVE_COLOR = "#0980ab"
CHECK_BUTTON_COLOR = "#c2f2fc"
DISPLAY_BARS_COLOR = "#bdbebf"
OUTER_FRAME_COLOR = "#879ded"
MINI_CANVAS_COLOR = "#a3cfc9"
BUTTON_STYLE = {"font": ("Cambria", 20), "borderwidth": 1, "relief":
tk.RAISED, "bg": BUTTON_COLOR, "activebackground": BUTTON_ACTIVE_COLOR}
# storing class data:
WORDS_FOUND = []
CUR_WORD = ""
buttons_dict = {}
# text messages:
INVALID_WORD = "Invalid word, try again!"
WORD_REP = "Oops, already found this word, try again!"
RULES = "1. A valid word is between 3 to 16 characters. \n" \
"2. If you choose a letter, the next letter could be only in the" \
" 8 cubes which are next to this letter in order for the word " \
"to be valid. \n" \
"3. You have 3 minutes to find as many words as possible. \n"\
"4. You get the power of 2 to the length of the word you found " \
"points. \n" \
"5. You don't get points for a word if you already found it. \n" \
"6. Every new round the points count goes back to zero. \n" \
"7. ENJOY!"
def __init__(self,activate_func, restart_activate, check_word_activate):
"""
The constructor of the GUI object of the game
:param activate_func: a function which activates the buttons
:param restart_activate: a function which activates the restart button
:param check_word_activate: a function which activates the check word
button
"""
root = tk.Tk()
root.title("Boggle Game")
root.resizable(False, False)
self._board = randomize_board()
self._main_window = root
self.activate = activate_func
self.restart_activate = restart_activate
self.check_word_activate = check_word_activate
self._pointscount = 0
# creates the display:
self._outer_frame = tk.Frame(root, bg=self.OUTER_FRAME_COLOR,
highlightbackground=self.OUTER_FRAME_COLOR,
highlightthickness=5)
self._outer_frame.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
self._display_label = tk.Label(root, font=("Castellar", 25),
bg=self.DISPLAY_BARS_COLOR, width=23,
relief="ridge",
text="BOGGLE GAME")
self._display_label.pack(side=tk.TOP, fill=tk.BOTH)
self._down_bar = tk.Label(root, font=("Courier", 30),
bg=self.DISPLAY_BARS_COLOR, width=23,
relief="ridge")
self._down_bar.pack(side=tk.BOTTOM, fill=tk.BOTH)
bg_image = tk.PhotoImage(file="codeblauw.png")
self.label = tk.Label(root, height=500, width=500, image=bg_image)
self.label.photo = bg_image
self.label.pack(fill=tk.BOTH)
self.mini_canvas = tk.Canvas(self.label, height=400, width=400,
bg=self.BUTTON_COLOR)
self.mini_canvas.place(relwidth=0.5, relheight=0.5, relx=0.5,
rely=0.5, anchor='c')
self._points = tk.Label(self.label, font=(30),
bg=self.DISPLAY_BARS_COLOR, width=23,
relief="ridge",
text="Points: " + str(self._pointscount))
self._points.place(relwidth=0.5, relheight=0.05, relx=0.5,
rely=0.02, anchor='n')
self._cur_word = tk.Label(self.label, font=(10),
bg=self.DISPLAY_BARS_COLOR,
width=23, relief="ridge", text=self.CUR_WORD)
self._cur_word.place(relwidth=0.5, relheight=0.1, relx=0.5, rely=0.1,
anchor='n')
self._check_word = tk.Button(self.label, bg=self.CHECK_BUTTON_COLOR,
activebackground=self.BUTTON_ACTIVE_COLOR,
text="check word", width=8, height=4)
self._check_word.place(relwidth=0.3, relheight=0.05, relx=0.5,
rely=0.2, anchor='n')
self._words_found = tk.Label(self.label, font=(20),
bg=self.DISPLAY_BARS_COLOR, width=23,
relief="ridge")
self._words_found.place(relwidth=0.8, relheight=0.2, relx=0.5,
rely=0.77, anchor='n')
self._quit = tk.Button(self._display_label, text="Quit",
command=self.close_window)
self._quit.place(relwidth=0.1, relheight=1, relx=0.9,
rely=0.02, anchor='n')
self._rules = tk.Button(self._display_label, text="Rules",
command=self.popup_window)
self._rules.place(relwidth=0.1, relheight=1, relx=0.1,
rely=0.02, anchor='n')
# variables of the countdown:
self.state = False
self.limit_minutes = 3
self.limit_seconds = 0
self.minutes = 3
self.seconds = 0
self.create_countdown()
#####################################################################
# creating the board and initializing the game:
def create_a_new_board(self):
"""
initizalizes a new board using a different function
:return: a list of lists (4*4) which represents the board
"""
return randomize_board()
def get_existing_board(self):
"""
returns the current board
:return: the current board (a list of lists)
"""
return self._board
def create_board(self, board):
"""
creates a matrix of buttons, each containing the char from the matching
index in the board object which was randomized.
:return: None
"""
for i in range(4):
tk.Grid.columnconfigure(self.mini_canvas, i, weight=1)
for j in range(4):
tk.Grid.rowconfigure(self.mini_canvas, j, weight=1)
board_dict = all_coords(board)
for coord, value in board_dict.items():
self.create_button(value, coord[0], coord[1])
def create_button(self, button_char, row, col):
"""
creates a grid of buttons which is the board of the game.
:param button_char: the char to appear on the button
:param row: x coord
:param col: y coord
:return: None
"""
button = tk.Button(self.mini_canvas, text=button_char,
**BoggleGui.BUTTON_STYLE)
button.grid(row=row, column=col, rowspan=1, columnspan=1,
sticky=tk.NSEW, ipadx=16, ipady=4.4)
self.buttons_dict[button] = [button_char, (row, col)]
def button_pressed(event):
button["bg"] = self.BUTTON_ACTIVE_COLOR
button.bind("<Button-1>", button_pressed)
def initialize_game(self):
"""
creates a new board object in the game when a new round starts
:return: None
"""
self._board = self.create_a_new_board()
self.create_board(self._board)
def create_countdown(self):
"""
creates a countdown clock
:return: None
"""
self.display = tk.Label(self._down_bar, height=30, width=30,
textvariable="", bg=self.DISPLAY_BARS_COLOR)
self.display.config(text="00:00", font=("Castellar", 30))
self.display.place(relwidth=1, relheight=1, relx=0.5, rely=0.1,
anchor='n')
if self.state == False:
self.start_button = \
tk.Button(self.display, bg=self.BUTTON_COLOR,
activebackground=self.BUTTON_ACTIVE_COLOR,
text="Start", width=8, height=4,
command=self.start)
self.start_button.place(relwidth=0.5, relheight=1, relx=0.25,
rely=0.5,anchor='w')
self.countdown()
def countdown(self):
"""
The function which displays the countdown
:return: None
"""
if self.state == True:
if self.seconds < 10:
if self.minutes < 10:
self.display.config(text="0%d : 0%d" % (self.minutes,
self.seconds))
else:
self.display.config(text="%d : 0%d" % (self.minutes,
self.seconds))
else:
if self.minutes < 10:
self.display.config(text="0%d : %d" % (self.minutes,
self.seconds))
else:
self.display.config(text="%d : %d" % (self.minutes,
self.seconds))
if (self.minutes == 0) and (self.seconds == 0):
self.display.config(text="Times up!")
self.end_of_game()
else:
if self.seconds == 0:
self.minutes -= 1
self.seconds = 59
else:
self.seconds -= 1
self.display.after(1000, self.countdown)
else:
self.display.after(100, self.countdown)
def start(self):
"""
the command of the start buttons, initializes the game
:return: None
"""
if self.state == False:
self.state = True
self.minutes = self.limit_minutes # restart minute count
self.seconds = self.limit_seconds # restart seconds count
self.initialize_game()
self.activate() # activate buttons board
self.check_word_activate() # activate check word button
self.start_button.destroy()
def popup_window(self):
"""
creates the popup window which is the rules of the game
:return: None
"""
window = tk.Toplevel()
label = tk.Label(window, text=self.RULES)
label.pack(fill='x', padx=50, pady=5)
button_close = tk.Button(window, text="Close", command=window.destroy)
button_close.pack(fill='x')
#####################################################################
# setters and getters of the labels and buttons:
def set_points(self, points):
"""
sets the points board and points count
:param points: the updated point count of the player
:return: None
"""
self._points["text"] = "Points: " + str(points)
self._pointscount = points
def set_words_found_canvas(self, word):
"""
sets the board which shows the words that were found
:param word: the updated list of word that were found
:return: None
"""
words_found = ""
for i in word:
words_found += i + ", "
self._words_found["text"] = words_found
def set_word_to_check(self, char):
"""
sets the text in the current word to check display
:param char: the char to add to the display
:return: None
"""
self._cur_word["text"] = char
if char == self.INVALID_WORD or char == self.WORD_REP:
self._cur_word["fg"] = "red"
else:
self._cur_word["fg"] = "black"
def set_button_command(self, button_object, cmd):
"""
is given a button name and a command, find this button in the dict of
all buttons and configures this command to it
:param button_name: the name of the button
:param cmd: the command to configure
:return: None
"""
for key, value in self.buttons_dict.items():
if key == button_object:
button_object.configure(command=cmd)
def set_check_word_button_command(self, cmd):
"""
sets the command which belongs to the check word button
:param cmd: the command
:return: None
"""
self._check_word.config(command=cmd)
def get_buttons_dict(self):
"""
returns the dict of buttons
:return: the dict of buttons
"""
return self.buttons_dict
#####################################################################
# end of game:
def reset_board(self):
"""
resets all buttons which were pressed to their original color
:return: None
"""
for button in self.buttons_dict.keys():
button["bg"] = self.BUTTON_COLOR
def end_of_game(self):
"""
when the time runs out, clears all the boards, creates a restart button
and shows the player how many points he achieved.
:return: None
"""
self.create_restart_button()
self._points["text"] = "Congrats! You scored " +\
str(self._pointscount) + " Points"
self.set_word_to_check("")
self.set_words_found_canvas("")
self.clear_board()
self.restart_activate()
def create_restart_button(self):
"""
creates the restart button when the countdown ends
:return: None
"""
self.restart_button = tk.Button(self.display, bg=self.BUTTON_COLOR,
activebackground=self.BUTTON_ACTIVE_COLOR,
text="Play again?", width=8, height=4)
self.restart_button.place(relwidth=0.5, relheight=1, relx=0.25,
rely=0.5, anchor='w')
def set_restart_command(self, cmd):
"""
sets the command of the restart button
:param cmd: the command
:return: None
"""
if self.state == True:
self.state = False
self.restart_button["command"] = cmd
def set_state_to_false(self):
"""
sets the state(flag) to False
:return: None
"""
self.state = False
def clear_board(self):
"""
when the countdown ends, clears the buttons boards
:return: None
"""
for button_object in self.buttons_dict.keys():
button_object["text"] = ""
button_object["bg"] = self.BUTTON_COLOR
button_object.bind("<Button-1>", "disabled")
button_object.forget()
def close_window(self):
self._main_window.destroy()
#####################################################################
# run game:
def run(self):
"""
runs the mainloop of the game
:return: None
"""
self._main_window.mainloop()
|
[
"michal.ivianski@mail.huji.ac.il"
] |
michal.ivianski@mail.huji.ac.il
|
eafbc12700a5b5ca6d92534e9b63be154bb929a5
|
cef874e7aa937b3b7c2d16dd7468a676f4bdf479
|
/day76/bbs/blog/views.py
|
befc6ec033304dcfb1efeb269d90d0c5bbc04b93
|
[] |
no_license
|
YB947624487/RBAC
|
88e8a79985ba78874c6346e32de02f0ca26641d6
|
cc0c13ad666d3a61d38b06b6f027d53816d82e1e
|
refs/heads/master
| 2020-08-07T10:39:29.962517
| 2019-10-07T15:31:59
| 2019-10-07T15:31:59
| 213,415,777
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,395
|
py
|
from django.shortcuts import render, redirect, HttpResponse
from django.http import JsonResponse
from django.contrib import auth
from geetest import GeetestLib
from blog import forms, models
# Create your views here.
# VALID_CODE = ""
# 自己生成验证码的登录
# def login(request):
# # if request.is_ajax(): # 如果是AJAX请求
# if request.method == "POST":
# # 初始化一个给AJAX返回的数据
# ret = {"status": 0, "msg": ""}
# # 从提交过来的数据中 取到用户名和密码
# username = request.POST.get("username")
# pwd = request.POST.get("password")
# valid_code = request.POST.get("valid_code") # 获取用户填写的验证码
# print(valid_code)
# print("用户输入的验证码".center(120, "="))
# if valid_code and valid_code.upper() == request.session.get("valid_code", "").upper():
# # 验证码正确
# # 利用auth模块做用户名和密码的校验
# user = auth.authenticate(username=username, password=pwd)
# if user:
# # 用户名密码正确
# # 给用户做登录
# auth.login(request, user)
# ret["msg"] = "/index/"
# else:
# # 用户名密码错误
# ret["status"] = 1
# ret["msg"] = "用户名或密码错误!"
# else:
# ret["status"] = 1
# ret["msg"] = "验证码错误"
#
# return JsonResponse(ret)
# return render(request, "login.html")
# 使用极验滑动验证码的登录
def login(request):
# if request.is_ajax(): # 如果是AJAX请求
if request.method == "POST":
# 初始化一个给AJAX返回的数据
ret = {"status": 0, "msg": ""}
# 从提交过来的数据中 取到用户名和密码
username = request.POST.get("username")
pwd = request.POST.get("password")
# 获取极验 滑动验证码相关的参数
gt = GeetestLib(pc_geetest_id, pc_geetest_key)
challenge = request.POST.get(gt.FN_CHALLENGE, '')
validate = request.POST.get(gt.FN_VALIDATE, '')
seccode = request.POST.get(gt.FN_SECCODE, '')
status = request.session[gt.GT_STATUS_SESSION_KEY]
user_id = request.session["user_id"]
if status:
result = gt.success_validate(challenge, validate, seccode, user_id)
else:
result = gt.failback_validate(challenge, validate, seccode)
if result:
# 验证码正确
# 利用auth模块做用户名和密码的校验
user = auth.authenticate(username=username, password=pwd)
if user:
# 用户名密码正确
# 给用户做登录
auth.login(request, user)
ret["msg"] = "/index/"
else:
# 用户名密码错误
ret["status"] = 1
ret["msg"] = "用户名或密码错误!"
else:
ret["status"] = 1
ret["msg"] = "验证码错误"
return JsonResponse(ret)
return render(request, "login2.html")
def index(request):
return render(request, "index.html")
# 获取验证码图片的视图
def get_valid_img(request):
# with open("valid_code.png", "rb") as f:
# data = f.read()
# 自己生成一个图片
from PIL import Image, ImageDraw, ImageFont
import random
# 获取随机颜色的函数
def get_random_color():
return random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)
# 生成一个图片对象
img_obj = Image.new(
'RGB',
(220, 35),
get_random_color()
)
# 在生成的图片上写字符
# 生成一个图片画笔对象
draw_obj = ImageDraw.Draw(img_obj)
# 加载字体文件, 得到一个字体对象
font_obj = ImageFont.truetype("static/font/kumo.ttf", 28)
# 开始生成随机字符串并且写到图片上
tmp_list = []
for i in range(5):
u = chr(random.randint(65, 90)) # 生成大写字母
l = chr(random.randint(97, 122)) # 生成小写字母
n = str(random.randint(0, 9)) # 生成数字,注意要转换成字符串类型
tmp = random.choice([u, l, n])
tmp_list.append(tmp)
draw_obj.text((20+40*i, 0), tmp, fill=get_random_color(), font=font_obj)
print("".join(tmp_list))
print("生成的验证码".center(120, "="))
# 不能保存到全局变量
# global VALID_CODE
# VALID_CODE = "".join(tmp_list)
# 保存到session
request.session["valid_code"] = "".join(tmp_list)
# 加干扰线
# width = 220 # 图片宽度(防止越界)
# height = 35
# for i in range(5):
# x1 = random.randint(0, width)
# x2 = random.randint(0, width)
# y1 = random.randint(0, height)
# y2 = random.randint(0, height)
# draw_obj.line((x1, y1, x2, y2), fill=get_random_color())
#
# # 加干扰点
# for i in range(40):
# draw_obj.point((random.randint(0, width), random.randint(0, height)), fill=get_random_color())
# x = random.randint(0, width)
# y = random.randint(0, height)
# draw_obj.arc((x, y, x+4, y+4), 0, 90, fill=get_random_color())
# 将生成的图片保存在磁盘上
# with open("s10.png", "wb") as f:
# img_obj.save(f, "png")
# # 把刚才生成的图片返回给页面
# with open("s10.png", "rb") as f:
# data = f.read()
# 不需要在硬盘上保存文件,直接在内存中加载就可以
from io import BytesIO
io_obj = BytesIO()
# 将生成的图片数据保存在io对象中
img_obj.save(io_obj, "png")
# 从io对象里面取上一步保存的数据
data = io_obj.getvalue()
return HttpResponse(data)
# 请在官网申请ID使用,示例ID不可使用
pc_geetest_id = "b46d1900d0a894591916ea94ea91bd2c"
pc_geetest_key = "36fc3fe98530eea08dfc6ce76e3d24c4"
# 处理极验 获取验证码的视图
def get_geetest(request):
user_id = 'test'
gt = GeetestLib(pc_geetest_id, pc_geetest_key)
status = gt.pre_process(user_id)
request.session[gt.GT_STATUS_SESSION_KEY] = status
request.session["user_id"] = user_id
response_str = gt.get_response_str()
return HttpResponse(response_str)
# 注册的视图函数
def register(request):
if request.method == "POST":
ret = {"status": 0, "msg": ""}
form_obj = forms.RegForm(request.POST)
print(request.POST)
# 帮我做校验
if form_obj.is_valid():
# 校验通过,去数据库创建一个新的用户
form_obj.cleaned_data.pop("re_password")
avatar_img = request.FILES.get("avatar")
models.UserInfo.objects.create_user(**form_obj.cleaned_data, avatar=avatar_img)
ret["msg"] = "/index/"
return JsonResponse(ret)
else:
print(form_obj.errors)
ret["status"] = 1
ret["msg"] = form_obj.errors
print(ret)
print("=" * 120)
return JsonResponse(ret)
# 生成一个form对象
form_obj = forms.RegForm()
print(form_obj.fields)
return render(request, "register.html", {"form_obj": form_obj})
|
[
"947624484@qq.com"
] |
947624484@qq.com
|
ef18296744eaa63797c87bf223f1135977cbf646
|
c2666b4da3e35a1d7222cfa78bd8575bb11cfda2
|
/ticketmodifiedfiles/__init__.py
|
b807a5a2e730f52abc60b05cce6fc6b61fe9eb33
|
[] |
no_license
|
manicstar/trac-ticketmodifiedfiles
|
a2a383cfc5dbea93b515218908082ee166e3a015
|
0dbda18fde13083eaad18318c7a141806de5b9c0
|
refs/heads/master
| 2021-01-10T07:51:23.171184
| 2016-02-19T12:36:09
| 2016-02-19T12:36:09
| 52,072,746
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 113
|
py
|
# -*- coding: utf-8 -*-
# ticketmodifiedfiles module
#from ticketmodifiedfiles import *
import web_ui
import api
|
[
"manicstar@users.noreply.github.com"
] |
manicstar@users.noreply.github.com
|
58287c8f16ad52e7eb29b9100bdfe1a92737cd7f
|
0b909f1657083407bfedb9742770ae2e31211f5e
|
/AttentionModel/model/Image_AttModel.py
|
e431011b4894bb2d98183adfdd76f1e579a0f4cb
|
[] |
no_license
|
jwonged/Visual-Question-Answering
|
4ee3eef84f6d638699ae71bab21b3dac594ae13e
|
9f4e1b2c8bc91ca801747beceadca14f37752640
|
refs/heads/master
| 2021-09-14T15:14:35.725572
| 2018-05-15T13:10:24
| 2018-05-15T13:10:24
| 107,781,120
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,614
|
py
|
'''
Created on 15 Jan 2018
@author: jwong
'''
import csv
import json
import os
from model.Base_AttModel import BaseModel
from utils.model_utils import getPretrainedw2v
import tensorflow as tf
class ImageAttentionModel(BaseModel):
'''
VQA Model implementing attention over images
'''
def __init__(self, config):
super(ImageAttentionModel, self).__init__(config)
def _addPlaceholders(self):
# add network placeholder
# shape = (batch size, max length of sentence in batch)
self.word_ids = tf.placeholder(tf.int32, shape=[None, None], name="word_ids")
# shape = (batch size, img tensor dimensions)
self.img_vecs = tf.placeholder(tf.float32,
shape=[None, 512, 14, 14],
name="img_vecs")
# shape = (batch size)
self.sequence_lengths = tf.placeholder(tf.int32, shape=[None], name="sequence_lengths")
# shape = (batch size, cancel(max length of sentence in batch))
self.labels = tf.placeholder(tf.int32, shape=[None], name="labels")
# hyper parameters
self.dropout = tf.placeholder(dtype=tf.float32, shape=[], name="dropout")
self.lr = tf.placeholder(dtype=tf.float32, shape=[], name="lr")
def _addEmbeddings(self):
#add word embeddings
with tf.variable_scope("words"):
if self.config.usePretrainedEmbeddings:
print('Using pretrained w2v embeddings')
pretrainedEmbeddings = getPretrainedw2v(self.config.shortenedEmbeddingsWithUNKFile)
wordEmbedsVar = tf.Variable(pretrainedEmbeddings,
name="wordEmbedsVar",
dtype=tf.float32,
trainable=self.config.trainEmbeddings)
else:
print('Using untrained embeddings')
wordEmbedsVar = tf.get_variable(
name='_word_embeddings',
shape=[self.config.vocabSize, self.config.wordVecSize],
dtype=tf.float32)
#embedding matrix, word_ids
self.word_embeddings = tf.nn.embedding_lookup(wordEmbedsVar,
self.word_ids, name="word_embeddings")
self.word_embeddings = tf.nn.dropout(self.word_embeddings, self.dropout)
def _addLSTMInput(self):
#Handle LSTM Input
print('Constructing imageAfterLSTM model')
self.LSTMinput = self.word_embeddings
def _addLSTM(self):
#LSTM part
with tf.variable_scope("lstm"):
if self.config.LSTMType == 'bi':
print('Using bi-LSTM')
cell_fw = tf.contrib.rnn.LSTMCell(self.config.LSTM_num_units)
cell_bw = tf.contrib.rnn.LSTMCell(self.config.LSTM_num_units)
#Out [batch_size, max_time, cell_output_size] output, outputState
(_, _), (fw_state, bw_state) = tf.nn.bidirectional_dynamic_rnn(
cell_fw, cell_bw,
self.LSTMinput,
sequence_length=self.sequence_lengths, dtype=tf.float32)
print('Shape of state.c: {}'.format(fw_state.c.get_shape()))
#lstmOutput shape = LSTM_num_units * 4
#fw_out = tf.concat([fw_state.c, fw_state.h], axis=-1)
#bw_out = tf.concat([bw_state.c, bw_state.h], axis=-1)
#lstmOutput = tf.concat([fw_out, bw_out], axis=-1)
lstmOutput = tf.concat([fw_state.h, bw_state.h], axis=-1) #1024
print('Shape of LSTM output after concat: {}'.format(lstmOutput.get_shape()))
#lstm output 2048 --> 1024
lstmOutput = tf.layers.dense(inputs=lstmOutput,
units=self.config.fclayerAfterLSTM,
activation=tf.tanh,
kernel_initializer=tf.contrib.layers.xavier_initializer())
else:
print('Using Uni-LSTM')
#rnn_layers = [tf.nn.rnn_cell.LSTMCell(size) for size in self.config.LSTMCellSizes]
#multi_rnn_cell = tf.nn.rnn_cell.MultiRNNCell(rnn_layers)
lstm_cell = tf.contrib.rnn.LSTMCell(self.config.LSTM_num_units)
_, lstmOutState = tf.nn.dynamic_rnn(cell=lstm_cell,
inputs=self.word_embeddings,
sequence_length=self.sequence_lengths,
initial_state=None,
dtype=tf.float32)
lstmOutput = lstmOutState.c #output state 512
#lstmOutput = tf.concat([lstmOutState.c, lstmOutState.h], axis=-1) #1024
self.lstmOutput = tf.nn.dropout(lstmOutput, self.dropout)
def construct(self):
self._addPlaceholders()
self._addEmbeddings()
self._addLSTMInput()
self.batch_size = tf.shape(self.img_vecs)[0]
print('Batch size = {}'.format(self.batch_size))
#reshape image features [bx512x14x14] --> [bx196x512]
transposedImgVec = tf.transpose(self.img_vecs, perm=[0,3,2,1]) #bx14x14x512
print('transposedImgVec = {}'.format(transposedImgVec.get_shape()))
self.flattenedImgVecs = tf.reshape(transposedImgVec, [self.batch_size, 196, 512])
self._addLSTM()
#########Attention layer##########
with tf.variable_scope("attention"):
self.lstmOutput = tf.layers.dense(inputs=self.lstmOutput,
units=1024,
activation=tf.tanh,
kernel_initializer=tf.contrib.layers.xavier_initializer())
#duplicate qn vec to combine with each region to get [v_i, q]
qnAtt_in = tf.expand_dims(self.lstmOutput, axis=1)
qnAtt_in = tf.tile(qnAtt_in, [1,tf.shape(self.flattenedImgVecs)[1],1])
print('Shape of attention input : {}'.format(tf.shape(qnAtt_in)))
att_in = tf.concat([self.flattenedImgVecs, qnAtt_in], axis=-1) #[bx196x1536]
print('Shape of attention input : {}'.format(att_in.get_shape()))
#compute attention weights
''''w = tf.get_variable('w',
shape=[att_in.get_shape()[-1], att_in.get_shape()[-1]],
initializer=tf.contrib.layers.xavier_initializer())
b = tf.get_variable('b',
shape=[att_in.get_shape()[-1]],
initializer=tf.contrib.layers.xavier_initializer())
print('Shape of attention weight matrix: {}'.format(w.get_shape()))
print('Shape of attention bias : {}'.format(b.get_shape()))'''
#beta * tanh(wx + b) -- get a scalar val for each region
print('att_in shape: {}'.format(att_in.get_shape()))
att_f = tf.layers.dense(att_in, units=att_in.get_shape()[-1],
activation=tf.tanh,
kernel_initializer=tf.contrib.layers.xavier_initializer()) #1536
print('att_f = {}'.format(att_f.get_shape()))
print('att_f = {}'.format(tf.shape(att_f)))
beta_w = tf.get_variable("beta", shape=[att_f.get_shape()[-1], 1], dtype=tf.float32) #1536,1
att_flat = tf.reshape(att_f, shape=[-1, att_f.get_shape()[-1]]) #[b*196, 1536]
att_flatWeights = tf.matmul(att_flat, beta_w) #get scalar for each batch, region [b*196]
print('att_flatWeights = {}'.format(att_flatWeights.get_shape()))
att_regionWeights = tf.reshape(att_flatWeights, shape=[-1, 196]) #[b, 196]
print('Region weights = {}'.format(att_regionWeights.get_shape()))
#compute context: c = sum alpha * img
if self.config.attentionFunc == 'softmax':
self.alpha = tf.nn.softmax(att_regionWeights, name='alpha') # [b,196]
elif self.config.attentionFunc == 'sigmoid':
print('Using sigmoid attention function')
unnorm_alpha = tf.nn.sigmoid(att_regionWeights, name='alpha')
norm_denominator = tf.expand_dims(
tf.reduce_sum(unnorm_alpha, axis=-1), axis=-1)
self.alpha = unnorm_alpha / norm_denominator
else:
raise NotImplementedError
alpha = tf.expand_dims(self.alpha, axis=-1)
#broadcast; output shape=[bx1024 or bx1536]
self.imgContext = tf.reduce_sum(tf.multiply(alpha, self.flattenedImgVecs), axis=1)
#Handle output according to model structure
if self.config.modelStruct == 'imagePerWord':
self.multimodalOutput = self.lstmOutput
elif self.config.modelStruct == 'imageAsFirstWord':
self.multimodalOutput = self.lstmOutput
else: #imageAfterLSTM
if self.config.elMult:
print('Using pointwise mult')
#1024 --> 512 or 1536 --> 1024
attended_img_vecs = tf.layers.dense(inputs=self.imgContext,
units=self.lstmOutput.get_shape()[-1],
activation=tf.tanh,
kernel_initializer=tf.contrib.layers.xavier_initializer())
#dropout after img mapping layer
attended_img_vecs = tf.nn.dropout(attended_img_vecs, self.dropout)
self.multimodalOutput = tf.multiply(self.lstmOutput, attended_img_vecs) #size=512
else: #using concat
print('Using concat')
self.multimodalOutput = tf.concat([self.lstmOutput, attended_img_vecs], axis=-1)
#fully connected layer
with tf.variable_scope("proj"):
hidden_layer2 = tf.layers.dense(inputs=self.multimodalOutput,
units=1000,
activation=tf.tanh,
kernel_initializer=tf.contrib.layers.xavier_initializer())
y = tf.layers.dense(inputs=hidden_layer2,
units=self.config.nOutClasses,
activation=None,
kernel_initializer=tf.contrib.layers.xavier_initializer())
print('Shape of y: {}'.format(y.get_shape()))
#predict & get accuracy
self.labels_pred = tf.cast(tf.argmax(tf.nn.softmax(y), axis=1), tf.int32, name='labels_pred')
is_correct_prediction = tf.equal(self.labels_pred, self.labels)
self.accuracy = tf.reduce_mean(tf.cast(is_correct_prediction, tf.float32), name='accuracy')
#define losses
crossEntropyLoss = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=y, labels=self.labels)
self.loss = tf.reduce_mean(crossEntropyLoss)
predProbs = tf.nn.softmax(y)
self.topK = tf.nn.top_k(predProbs, k=5, name='topK')
# Add to tensorboard
tf.summary.scalar("loss", self.loss)
tf.summary.scalar("accuracy", self.accuracy)
self._addOptimizer()
#init vars and session
self._initSession()
def loadTrainedModel(self, restoreModel, restoreModelPath):
graph = super(ImageAttentionModel, self).loadTrainedModel(restoreModel, restoreModelPath)
self.alpha = graph.get_tensor_by_name('attention/alpha:0')
def solve(self, qn, img_id, processor):
qnAsWordIDsBatch, seqLens, img_vecs = processor.processInput(qn, img_id)
feed = {
self.word_ids : qnAsWordIDsBatch,
self.sequence_lengths : seqLens,
self.img_vecs : img_vecs,
self.dropout : 1.0
}
alphas, labels_pred = self.sess.run([self.alpha, self.labels_pred], feed_dict=feed)
return alphas[0], self.classToAnsMap[labels_pred[0]]
|
[
"dsjw2@cam.ac.uk"
] |
dsjw2@cam.ac.uk
|
782afe4b89cc895ef7db3febec0d8ea4d1fe68bd
|
48a522b031d45193985ba71e313e8560d9b191f1
|
/baekjoon/python/10845.py
|
03ba76a2e3bda466570074981b85970ee3ff655e
|
[] |
no_license
|
dydwnsekd/coding_test
|
beabda0d0aeec3256e513e9e0d23b43debff7fb3
|
4b2b4878408558239bae7146bb4f37888cd5b556
|
refs/heads/master
| 2023-09-04T12:37:03.540461
| 2023-09-03T15:58:33
| 2023-09-03T15:58:33
| 162,253,096
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,337
|
py
|
import sys
class myqueue():
def __init__(self):
self.queue = list()
self.count = 0
def push(self, num):
self.queue.append(num)
self.count += 1
def pop(self):
if self.count == 0:
return -1
else:
self.count -= 1
return self.queue.pop(0)
def size(self):
return self.count
def empty(self):
if self.count == 0:
return 1
else:
return 0
def front(self):
if self.empty() == 1:
return -1
else:
return self.queue[0]
def back(self):
if self.empty() == 1:
return -1
else:
return self.queue[self.count-1]
if __name__ == "__main__":
command_num = int(sys.stdin.readline())
myqueue = myqueue()
for _ in range(command_num):
command = sys.stdin.readline().strip().split(" ")
cmd = command[0]
if cmd == "push":
myqueue.push(command[1])
elif cmd == "pop":
print(myqueue.pop())
elif cmd == "size":
print(myqueue.size())
elif cmd == "empty":
print(myqueue.empty())
elif cmd == "front":
print(myqueue.front())
elif cmd == "back":
print(myqueue.back())
|
[
"dydwnsekd123@gmail.com"
] |
dydwnsekd123@gmail.com
|
f75aed436c24e9d2538883eb78396f37e7a6cd54
|
b30b7d7054b5d0cbe26193744f2413028f3572b5
|
/Tasks/Lebedev_Tasks/WH/Task2H/time.py
|
ae8fa6b36cb0c3fb6cd8932cb80654f0251751fa
|
[] |
no_license
|
LappoAndrey/M-PT1-37-21
|
92b5d6506d672c273455ed692218d1974e28b245
|
face0917445435fd6b4b52fe4074ae1008d39567
|
refs/heads/main
| 2023-05-29T20:13:17.196826
| 2021-06-11T15:33:55
| 2021-06-11T15:33:55
| 347,397,556
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,770
|
py
|
import time
# Spell out a clock time in Russian words, read as "HH:MM" from stdin.
#c_datetime = time.strftime("%H:%M")
#print(c_datetime)
c_datetime=input('Введите текущее время: ')
#create a list
c_datetime_list = c_datetime.split(":")
#separate the list
# Hours and minutes are kept both as zero-padded strings (they index the
# word dictionaries below) and as ints (for the range checks at the end).
h = c_datetime_list[0]
m = c_datetime_list[1]
h_int=int(h)
m_int=int(m)
#Hours with endings
hours_WE={
"00" : 'первого',
"12" : 'первого',
"01" : 'второго',
"13" : 'второго',
"02" : 'третьего',
"14" : 'третьего',
"03" : 'четвертого',
"15" : 'четвертого',
"04" : 'пятого',
"16" : 'пятого',
"05" : 'шестого',
"17" : 'шестого',
"06" : 'седьмого',
"18" : 'седьмого',
"07" : 'восьмого',
"19" : 'восьмого',
"08" : 'девятого',
"20" : 'девятого',
"09" : 'десятого',
"21" : 'десятого',
"10" : 'одиннадцатого',
"22" : 'одиннадцатого',
"11" : 'двенадцатого',
"23" : 'двенадцатого',
"24" : 'первого'
}
#hours without endings
hours_WoE={
"00" : 'час',
"12" : 'час',
"01" : 'два',
"13" : 'два',
"02" : 'три',
"14" : 'три',
"03" : 'четыре',
"15" : 'четыре',
"04" : 'пять',
"16" : 'пять',
"05" : 'шесть',
"17" : 'шесть',
"06" : 'семь',
"18" : 'семь',
"07" : 'восемь',
"19" : 'восемь',
"08" : 'девять',
"20" : 'девять',
"09" : 'десять',
"21" : 'десять',
"10" : 'одиннадцать',
"22" : 'одиннадцать',
"11" : 'двенадцать',
"23" : 'двенадцать',
"24" : 'час',
}
#hours if 00 minutes
hours_IF00={
"01" : 'час',
"13" : 'час',
"02" : 'два',
"14" : 'два',
"03" : 'три',
"15" : 'три',
"04" : 'четыре',
"16" : 'четыре',
"05" : 'пять',
"17" : 'пять',
"06" : 'шесть',
"18" : 'шесть',
"07" : 'семь',
"19" : 'семь',
"08" : 'восемь',
"20" : 'восемь',
"09" : 'девять',
"21" : 'девять',
"10" : 'десять',
"22" : 'десять',
"11" : 'одиннадцать',
"23" : 'одиннадцать',
"12" : 'двенадцать',
"00" : 'двенадцать',
"24" : 'двенадцать',
}
#minutes till 39
minutes_T39 = {
"00" : "часов ровно",
"01" : "одна минута",
"02" : "две минуты",
"03" : "три минуты",
"04" : "четыре минуты",
"05" : "пять минут",
"06" : "шесть минут",
"07" : "семь минут",
"08" : "восемь минут",
"09" : "девять минут",
"10" : "десять минут",
"11" : "одиннадцать минут",
"12" : "двенадцать минут",
"13" : "тринадцать минут",
"14" : "четырнадцать минут",
"15" : "пятнадцать минут",
"16" : "шестнадцать минут",
"17" : "семнадцать минут",
"18" : "восемнадцать минут",
"19" : "девятнадцать минут",
"20" : "двадцать минут",
"21" : "двадцать одна минута",
"22" : "двадцать две минуты",
"23" : "двадцать три минуты",
"24" : "двадцать четыре минуты",
"25" : "двадцать пять минут",
"26" : "двадцать шесть минут",
"27" : "двадцать семь минут",
"28" : "двадцать восемь минут",
"29" : "двадцать девять минут",
"30" : "половина",
"31" : "тридцать одна минута",
"32" : "тридцать две минуты",
"33" : "тридцать три минуты",
"34" : "тридцать четыре минуты",
"35" : "тридцать пять минут",
"36" : "тридцать шесть минут",
"37" : "тридцать семь минут",
"38" : "тридцать восемь минут",
"39" : "тридцать девять минут",
}
#minutes between 39 and 59
minutes_B = {
"40" : "без двадцати минут",
"41" : "без девятнадцати минут",
"42" : "без восемнадцати минут",
"43" : "без семнадцати минут",
"44" : "без шестнадцати минут",
"45" : "без пятнадцати минут",
"46" : "без четырнадцати минут",
"47" : "без тринадцати минут",
"48" : "без двенадцати минут",
"49" : "без одиннадцати минут",
"50" : "без десяти минут",
"51" : "без девяти минут",
"52" : "без восьми минут",
"53" : "без семи минут",
"54" : "без шести минут",
"55" : "без пяти минут",
"56" : "без четырех минут",
"57" : "без трех минут",
"58" : "без двух минут",
"59" : "без одной минуты",
}
# The hour dictionaries already encode the *next* hour (e.g. hours_WE["10"]
# is 'одиннадцатого'), so every branch indexes them with the current hour key.
if 0 <= h_int <= 24 and 0 < m_int < 40:
    # Minutes 1-39: "<minutes> <of the next hour>".
    print(minutes_T39[m], hours_WE[h])
elif 0 <= h_int <= 24 and 40<=m_int<=59:
    # Minutes 40-59: "без <remaining minutes> <next hour>".
    print(minutes_B[m], hours_WoE[h])
elif 0 <= h_int <= 24 and m_int == 0:
    # Exactly on the hour: "<hour> часов ровно".
    print(hours_IF00[h], minutes_T39[m])
else:
    print('Произошла ошибка')
|
[
"lebedev.023046@gmail.com"
] |
lebedev.023046@gmail.com
|
c232749d16c4bdf311f885b5125932b5fac40854
|
7bc54bae28eec4b735c05ac7bc40b1a8711bb381
|
/src/tlm/sero/runner/mes_zero_handle_debug.py
|
c86af0614314dbdd51dc12cbb8a2402f2613f6a9
|
[] |
no_license
|
clover3/Chair
|
755efd4abbd5f3f2fb59e9b1bc6e7bc070b8d05e
|
a2102ebf826a58efbc479181f1ebb5de21d1e49f
|
refs/heads/master
| 2023-07-20T17:29:42.414170
| 2023-07-18T21:12:46
| 2023-07-18T21:12:46
| 157,024,916
| 0
| 0
| null | 2023-02-16T05:20:37
| 2018-11-10T21:55:29
|
Python
|
UTF-8
|
Python
| false
| false
| 1,734
|
py
|
from my_tf import tf
from taskman_client.wrapper import report_run
from tf_util.tf_logging import tf_logging, MuteEnqueueFilter
from tlm.model.mes_sel_var import MES_const_0_handle
from tlm.model_cnfig import JsonConfig
from tlm.sero.mes_mask_debug_model import model_fn_binary_classification_loss
from tlm.training.flags_wrapper import get_input_files_from_flags, show_input_files
from tlm.training.input_fn import input_fn_builder_classification
from tlm.training.train_config import TrainConfigEx
from tlm.training.train_flags import *
from tlm.training.train_flags import FLAGS
from trainer.tpu_estimator import run_estimator
@report_run
def main(_):
    """Build and run a TPU estimator for the MES zero-handle debug model.

    Reads input files and model/training configuration from command-line
    FLAGS, builds a binary-classification model around MES_const_0_handle,
    and trains or predicts depending on the do_train/do_predict flags.
    Returns the estimator result.
    """
    input_files = get_input_files_from_flags(FLAGS)
    config = JsonConfig.from_json_file(FLAGS.model_config_file)
    train_config = TrainConfigEx.from_flags(FLAGS)
    show_input_files(input_files)
    special_flags = FLAGS.special_flags.split(",")
    special_flags.append("feed_features")
    # NOTE(review): special_flags is built but never passed on in this
    # snippet -- confirm whether model_fn_binary_classification_loss is
    # expected to consume it.
    is_training = FLAGS.do_train
    model_fn = model_fn_binary_classification_loss(
        config,
        train_config,
        MES_const_0_handle,
    )
    input_fn = input_fn_builder_classification(input_files, FLAGS.max_d_seq_length, is_training, FLAGS,
                                               num_cpu_threads=4,
                                               repeat_for_eval=False)
    if FLAGS.do_predict:
        # Silence noisy enqueue logging during prediction.
        tf_logging.addFilter(MuteEnqueueFilter())
    result = run_estimator(model_fn, input_fn)
    return result
if __name__ == "__main__":
    flags.mark_flag_as_required("input_file")
    flags.mark_flag_as_required("model_config_file")
    flags.mark_flag_as_required("output_dir")
    flags.mark_flag_as_required("run_name")
    tf.compat.v1.app.run()
|
[
"lesterny@gmail.com"
] |
lesterny@gmail.com
|
6fb21e9904ab1f37489b526de098d10c517a749a
|
df8233daf32997022c50dd26e93cdd13257657d7
|
/redelivery_model.py
|
0debddce544ca09fc46a39eef9a1dc7c5fcc37d9
|
[] |
no_license
|
gympohnpimol/Last-Mile-Logistics
|
57b65fe60c5a40318fd465b490090237443e3a7f
|
ae4f4022a2058d74d244e62f07a77611a754cd0e
|
refs/heads/master
| 2023-03-28T04:49:36.864120
| 2021-04-03T17:22:14
| 2021-04-03T17:22:14
| 331,338,075
| 19
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,949
|
py
|
import sys
import os
import numpy as np
import pandas as pd
import time
from docplex.mp.model import Model
from scipy.spatial import distance_matrix
# from scipy.spatial import distance_matrix
# Benchmark instance: columns 1-2 hold the x/y coordinates of each node.
df = pd.read_csv("/Users/gym/Desktop/research/Benchmark_nSTW/lc101.csv")
Q = 10000  # upper bound on the load variables u
cust_size = df.shape[0]-1
# Subset of customer node ids used in this run; node 0 is the depot.
n = [2, 6, 7, 10, 12, 13, 14, 15, 16, 17, 18]
N = [i for i in n]
V = [0] + N
K = [i for i in range(1,26)]  # NOTE(review): K (vehicles) is never used below
q = {i: 1 for i in N}  # unit demand per customer
df2 = df.iloc[:, 1:3]
# df2.loc[n+1,:]=df2.loc[0,:]
# Pairwise Euclidean distance matrix between all nodes in the file.
dist_m = pd.DataFrame(distance_matrix(df2.values, df2.values),index=df2.index, columns=df2.index)
time_start = time.time()  # NOTE(review): time_start is never read afterwards
mdl = Model()
A = [(i, j) for i in V for j in V]  # all arcs, including self-loops (i, i)
d = {(i, j): dist_m[i][j] for i, j in A}  # distance per arc
t = {(i, j): dist_m[i][j] for i, j in A}  # travel time per arc (same matrix)
x = mdl.binary_var_dict(A, name = "x")  # arc-use decision variables
s = mdl.continuous_var_dict(N, name = "s")  # NOTE(review): declared but unused
T = 480  # planning horizon upper bound for the time variables
u = mdl.continuous_var_dict(N, ub= Q, name= 'u')  # load after each customer
# w = mdl.continuous_var_dict(W, name='w')
# NOTE(review): this rebinding shadows the imported `time` module from here on.
time = mdl.continuous_var_dict(V, ub = T, name= "time")
# print(time)
# Define objective function:
# Weighted mix of travel time (x0.18) and distance (x0.22) over used arcs.
mdl.minimize(mdl.sum(((t[i,j]*0.18)+(d[i,j]*0.22))*x[i,j] for i, j in A))
# mdl.minimize(mdl.sum((d[i,j])*x[i,j] for i, j in A))
# mdl.minimize(mdl.sum((t[i,j])*x[i,j] for i, j in A))
# Add constraints:
mdl.add_constraints(mdl.sum(x[i,j] for j in V if j != i) == 1 for i in N) # Each point must be visited
mdl.add_constraints(mdl.sum(x[i,j] for i in V if i != j) == 1 for j in N) # Each point must be left
# Load propagation along used arcs (also eliminates customer-only subtours).
mdl.add_indicator_constraints(mdl.indicator_constraint(x[i,j], u[i]+q[j] == u[j]) for i,j in A if i!=0 and j!=0)
mdl.add_constraints(u[i] >= q[i] for i in N)
# Time propagation with a fixed 5-unit service time per stop.
mdl.add_indicator_constraints(mdl.indicator_constraint(x[i,j], time[i]+t[i,j] + 5 == time[j]) for i,j in A if i != 0 and j != 0)
mdl.parameters.timelimit.set(10)  # cap the solver at 10 seconds
#Solving model:
solution = mdl.solve(log_output=True)
print(solution)
print(solution.solve_status)
# active_arcs =[a for a in A if x[a].solution_value> 0.8]
# print(active_arcs)
print(len(n))
|
[
"gympohnpimol@gmail.com"
] |
gympohnpimol@gmail.com
|
dde78d0e969fb24139d8b9d41e9e210b9753402d
|
621dd2995f7b7a7ca814d15f37f86f98ed4cbbfe
|
/Game1.py
|
bb52e48b16128c47a4702550a41b411c5f079815
|
[] |
no_license
|
AkshayAlt/FinalArcade
|
68ccefe5586bbd345dceaf5e3ce8b0fb752f8b85
|
ea9b69e0b54fae3e25323da5354bb190aa3e1307
|
refs/heads/master
| 2022-04-22T16:42:26.754186
| 2020-04-12T15:37:53
| 2020-04-12T15:37:53
| 255,114,281
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,455
|
py
|
import os, sys
import pygame
import random
import time
pygame.init()
green = [0,255,0]
black = [100,0,0]
white = [255,255,255]
red = [255,0,0]
blue = [0,0,255]
window_width = 800
window_height = 600
gameDisplay = pygame.display.set_mode((window_width,window_height))
pygame.display.set_caption("Snake")
def gquit():
    """Shut pygame down and terminate the process with exit code 0."""
    pygame.quit()
    sys.exit(0)
clock = pygame.time.Clock()
FPS = 5
blockSize = 20
noPixel = 0
font = pygame.font.SysFont(None, 25, bold=True)
def drawGrid():
    """Placeholder: grid drawing was never implemented.

    The original body computed ``window_width // blockSize`` into a local
    that was immediately discarded, so the function had no effect; the dead
    local has been removed. Kept so existing call sites remain valid.
    """
def snake(blockSize, snakeList):
    """Draw each snake segment as a 2px-wide rectangle outline.

    Segments are [x, y] pairs; each is drawn nudged 5px to the right using
    the module-level `black` colour constant (actually RGB [100, 0, 0]).
    """
    for size in snakeList:
        pygame.draw.rect(gameDisplay,black,[size[0]+5,size[1],blockSize,blockSize],2)
def msg_display(msg,color):
    """Render msg in the given colour, anchored at the screen centre.

    The text's top-left corner is placed at the centre, so the message
    extends into the lower-right quadrant.
    """
    screen_text = font.render(msg,True,color)
    gameDisplay.blit(screen_text,[window_width//2,window_height//2])
def score_display(score):
    """Render 'Score: N' in red near the bottom-left corner of the display."""
    scorems = "Score: "+str(score)
    score_text = font.render(scorems,True,red)
    gameDisplay.blit(score_text,[15,window_height-15])
def snakeGameLoop():
    """Main game loop: moves the snake with WASD, grows it on apples.

    Runs until the window is closed or Escape is pressed on the game-over
    screen. Wall or self collision triggers the game-over screen.
    """
    gameExit = False
    gameOver = False
    # Snake head starts at the screen centre.
    leadX = window_width/2
    leadY = window_height/2
    pixelchangeX = 0
    pixelchangeY = 0
    snakelist = []
    snakelength = 1
    score = 0
    # Apple position, snapped to a 10px grid.
    # NOTE(review): the snap uses 10 while blockSize is 20, so apples can
    # land half a cell off the snake's movement grid -- confirm intended.
    randappleX = round(random.randrange(0, window_width-blockSize)/10.0)*10.0
    randappleY = round(random.randrange(0, window_height-blockSize)/10.0)*10.0
    while(not gameExit):
        # Game-over screen: space restarts, escape/close quits.
        while(gameOver == True):
            gameDisplay.fill(white)
            msg_display("Hit space to play and esc to quit",red)
            pygame.display.update()
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    gameOver = False
                    gameExit = True
                if event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_SPACE:
                        # NOTE(review): restarting by recursing grows the
                        # call stack with every new game.
                        snakeGameLoop()
                    if event.key == pygame.K_ESCAPE:
                        gameExit = True
                        gameOver = False
        # Steering: WASD sets the per-frame movement delta.
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                gameExit = True
            if event.type == pygame.KEYDOWN:
                left = event.key == pygame.K_a
                right = event.key == pygame.K_d
                up = event.key == pygame.K_w
                down = event.key == pygame.K_s
                if left:
                    pixelchangeX = -blockSize
                    pixelchangeY = noPixel
                elif right:
                    pixelchangeX = blockSize
                    pixelchangeY = noPixel
                elif up:
                    pixelchangeY = -blockSize
                    pixelchangeX = noPixel
                elif down:
                    pixelchangeY = blockSize
                    pixelchangeX = noPixel
        # Wall collision ends the round.
        if leadX >= window_width or leadX < 0 or leadY >= window_height or leadY < 0:
            gameOver = True
        leadX += pixelchangeX
        leadY += pixelchangeY
        gameDisplay.fill(white)
        AppleThickness = 20
        # NOTE(review): debug print of the apple rect on every frame.
        print([int(randappleX),int(randappleY),AppleThickness,AppleThickness])
        pygame.draw.rect(gameDisplay,red,[randappleX,randappleY,AppleThickness,AppleThickness])
        # Advance the snake: append the new head, drop the tail.
        asp = []
        asp.append(leadX)
        asp.append(leadY)
        snakelist.append(asp)
        if len(snakelist) > snakelength:
            del snakelist[0]
        # Self collision (head matching any earlier segment) ends the round.
        for seg in snakelist[:-1]:
            if seg == asp:
                gameOver = True
        snake(blockSize,snakelist)
        score_display(score)
        pygame.display.update()
        # Apple eaten: respawn the apple, grow the snake, bump the score.
        if leadX >= randappleX and leadX <= randappleX+AppleThickness:
            if leadY >= randappleY and leadY <= randappleY+AppleThickness:
                randappleX = round(random.randrange(0,window_width-blockSize)/10.0)*10.0
                randappleY = round(random.randrange(0,window_height-blockSize)/10.0)*10.0
                snakelength += 1
                score += 1
        clock.tick(FPS)
    # Loop exited: shut pygame down and terminate the process.
    pygame.quit()
    gquit()
|
[
"noreply@github.com"
] |
AkshayAlt.noreply@github.com
|
4b2291a81c251957eab2419c9c40272462bbfcb3
|
a6df2f31e8ac71529491b6de5989dafc3447cd04
|
/44/pentagonNumbers.py
|
8972c5b0473200ea1ce3c695a404257c26df0976
|
[] |
no_license
|
triplejay2013/ProjectEuler
|
fcc32a135dc83569e9f0fb82cc32c0c0e9acae16
|
09a011bf22bf9fbebdd34373cd177b65543939e2
|
refs/heads/master
| 2021-04-30T04:42:31.398547
| 2018-11-08T23:01:49
| 2018-11-08T23:01:49
| 121,542,981
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,477
|
py
|
# Pentagonal numbers are generated by the formula, P(n)=n(3n−1)/2. The first ten
# pentagonal numbers are:
#
# 1, 5, 12, 22, 35, 51, 70, 92, 117, 145, ...
#
# It can be seen that P(4) + P(7) = 22 + 70 = 92 = P(8). However, their difference, 70
# − 22 = 48, is not pentagonal.
#
# Find the pair of pentagonal numbers, P(j) and P(k), for which their sum and
# difference are pentagonal and D = |P(k) − P(j)| is minimised; what is the value of
# D?
from math import isqrt, sqrt
from time import *
# REF: http://www.divye.in/2012/07/how-do-you-determine-if-number-n-is.html
# This provides a formula for discovering if a number is pentagonal
def isPent(n):
    """Return True iff n is a pentagonal number P(k) = k(3k-1)/2.

    n is pentagonal exactly when 24*n + 1 is a perfect square whose square
    root is congruent to 5 modulo 6. The exact integer square root keeps
    the test correct for arbitrarily large n, where the previous
    ``sqrt(24*n+1) % 6 == 5`` float check could round and misclassify.
    """
    root = isqrt(24 * n + 1)
    return root * root == 24 * n + 1 and root % 6 == 5
# Project Euler 44: minimise D = |P(k) - P(j)| where sum and difference are
# both pentagonal. Repeats forever, asking for the search size each round.
while True:
    n=int(input("Enter num of pents to generate (3000): "))
    # NOTE(review): time.clock() was removed in Python 3.8; this only runs
    # on older interpreters (time.perf_counter() is the replacement).
    start=clock()
    pent = [int((n*(3*n-1))/2) for n in range(1,n)] # generate pentagonal numbers
    D=float("inf") # Keep track of minimum values
    kmin=0
    jmin=0
    # Test every ordered pair P(j) < P(k) for pentagonal sum and difference.
    for pk in range(1,len(pent)): # start one ahead of pent
        for pj in range(pk):
            pentNum=pent[pk]+pent[pj]
            if isPent(pentNum):
                tmp = pent[pk] - pent[pj]
                if isPent(tmp) and tmp < D:
                    jmin=pent[pj]
                    kmin=pent[pk]
                    D=tmp
                    print("Found Min val! {}".format(D))
    print("Minimum value for D is {} for pk={} and pj={}".format(D, kmin, jmin))
    print("Program took {} seconds to run".format(clock()-start))
|
[
"triplejay2013@gmail.com"
] |
triplejay2013@gmail.com
|
e15318eec56ae80e8449fe36614c11cab71997af
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_068/ch1_2020_04_01_00_28_02_782231.py
|
c3f7b5170aa04c0fd8eef7a720671cf05649681c
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 222
|
py
|
def calcula_valor_devido(valor_emprestado, número_de_meses, taxa_de_juros):
    """Return the amount owed after compound interest.

    valor_emprestado: principal borrowed.
    número_de_meses: number of monthly periods the loan runs.
    taxa_de_juros: interest rate per period (e.g. 0.2 for 20%).
    """
    fator_composto = (1 + taxa_de_juros) ** número_de_meses
    return valor_emprestado * fator_composto
# Example: 10 borrowed for 1 month at 20% per month -> prints 12.0.
resultado = calcula_valor_devido(10, 1, 0.2)
print(resultado)
|
[
"you@example.com"
] |
you@example.com
|
8dcb5177d7252d183b9810c7a7d1f52501f1e0f3
|
fa3b5a7293ffcb8f0bb6fe744e74f9192330d590
|
/my_testing.py
|
382144b051b7c4d41430987cb8e68b0da7dfdea1
|
[] |
no_license
|
Alvaru89/crypto_RL_project
|
7c749a8760cf5ed9c072300a210649ca54a7ad59
|
d8b5b82de14672c78acccbebba85134577bef718
|
refs/heads/master
| 2023-07-11T16:24:03.579200
| 2021-08-12T10:21:49
| 2021-08-12T10:21:49
| 394,950,744
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,107
|
py
|
import numpy as np
import pandas as pd  # fix: pandas was used at the bottom but never imported
from my_env import Crypto_Env
import random

start_date = "2014-01-01"
end_date = "2021-07-06"

# One row per random-policy trial, for offline analysis and tuning.
trial_dict = {'try': [],
              'prob_buy': [],
              'end_wallet': [],
              'max_reached': []}

for i in range(0, 100):  # 100 trials
    env = Crypto_Env()  # initialize our environment
    # Symmetric random policy: buy and sell with the same probability.
    prob_buy = random.uniform(0.05, 0.45)
    prob_sell = prob_buy
    prob_hold = 1 - prob_buy - prob_sell
    for days in range(2715):  # days/steps between start and end date
        random_action = np.random.choice(np.arange(0, 3), p=[prob_buy, prob_sell, prob_hold])
        random_coin = random.randint(0, 3)  # fix: was commented out but used below
        amount = 2  # TODO(review): trade size hard-coded; confirm intended units
        action_w = np.array([int(random_coin), int(random_action), amount])
        env.step(action_w)
    # env.render() no rendering required
    # recording results for later analysis and tuning
    trial_dict["try"].append(i)
    trial_dict['prob_buy'].append(prob_buy)
    # TODO(review): the original appended the literal [-1]; presumably the
    # final wallet value from env was intended -- confirm against Crypto_Env.
    trial_dict['end_wallet'].append([-1])
    # fix: the original called bare max(), which raises TypeError; record a
    # placeholder until the intended running-maximum source is known.
    trial_dict['max_reached'].append(None)

# fix: the file ended with a dangling "trial_historic=" (SyntaxError).
trial_historic = pd.DataFrame.from_dict(trial_dict)
trial_historic.to_csv("output/trials_summary.csv")
|
[
"alvaro.rodriguezdelgado@gmail.com"
] |
alvaro.rodriguezdelgado@gmail.com
|
8ba5356bb4ab76f98101238c56a70d702bc4d8ab
|
96a34a048c783a75736bf0ec775df22142f9ee53
|
/services/web/server/src/simcore_service_webserver/studies_dispatcher/__init__.py
|
da93464a56f2cc9d34ea34d4cf4cb80fe01cf85b
|
[
"MIT"
] |
permissive
|
ITISFoundation/osparc-simcore
|
77e5b9f7eb549c907f6ba2abb14862154cc7bb66
|
f4c57ffc7b494ac06a2692cb5539d3acfd3d1d63
|
refs/heads/master
| 2023-08-31T17:39:48.466163
| 2023-08-31T15:03:56
| 2023-08-31T15:03:56
| 118,596,920
| 39
| 29
|
MIT
| 2023-09-14T20:23:09
| 2018-01-23T10:48:05
|
Python
|
UTF-8
|
Python
| false
| false
| 143
|
py
|
"""
This app module dispatches pre-configured or on-the-fly studies to a user from a permalink
that is redirected to the front-end
"""
|
[
"noreply@github.com"
] |
ITISFoundation.noreply@github.com
|
60fcecd50b8626d70912fffe094466339b77dc16
|
4370f6d89b01e06cdeacf25a464c3455fcefe215
|
/src/kondisi_if/kondisi_ketiga.py
|
6dcdf03f0f78ebd999f583343b41b846813fcb64
|
[] |
no_license
|
frestea09/Python
|
7d4ba210be327dedead395965cc6160f24a29b43
|
6fa1abeb419970506c9ebbd9ac4775333a68d97b
|
refs/heads/master
| 2020-04-25T19:48:01.702084
| 2019-03-08T14:17:53
| 2019-03-08T14:17:53
| 173,032,978
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 824
|
py
|
# name-file : kondisi_pertama.py
# NOTE(review): the comment above names kondisi_pertama.py, but this file's
# path is kondisi_ketiga.py.
from __future__ import print_function
def main():
    """Read two sentences and report whether they are identical."""
    # Read both sentences (prompts are Indonesian: "first/second sentence").
    inputKalimatPertama = input('Kalimat Pertama : ')
    inputKalimatKedua = input('Kalimat Kedua : ')
    # Holder for the comparison message.
    hasilPembanding = " "
    if(inputKalimatPertama==inputKalimatKedua):
        hasilPembanding = "Kalimat Pertama Sama dengan Kalimat Kedua"
    else:
        hasilPembanding = "Kalimat Pertama tidak sama dengan Kalimat Kedua"
    # Display both inputs and the verdict.
    print("==========================")
    print("=========Hasil============")
    print("==========================")
    print("Kalimat Pertama = %s "%(inputKalimatPertama))
    print("Kalimat Kedua = %s"%(inputKalimatKedua))
    print("Hasil Perbandingan : %s"%(hasilPembanding))
if __name__ == "__main__":
    main()
|
[
"ilmanfrasetya@gmail.com"
] |
ilmanfrasetya@gmail.com
|
9b82e4e6bc26f150094c12115fbbac9688d3cf99
|
11d2d067f5103acf844377236a525de908b29e1f
|
/0x01-python-if_else_loops_functions/2-print_alphabet.py
|
3b60af6d598440521a9b17c1aabc158544114651
|
[] |
no_license
|
jagrvargen/holbertonschool-higher_level_programming
|
0a9815c08cea848b5190f4acd4adb44b89cd3360
|
510c52c5f48b05aaed62b8f9add90af8dbef909c
|
refs/heads/master
| 2021-09-14T09:07:25.930617
| 2018-05-11T00:11:10
| 2018-05-11T00:11:10
| 113,070,512
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 83
|
py
|
#!/usr/bin/python3
# Emit the lowercase ASCII alphabet with no separators and no trailing newline.
print(''.join(map(chr, range(ord('a'), ord('z') + 1))), end='')
|
[
"229@holbertonschool.com"
] |
229@holbertonschool.com
|
66adda6a51cfd981791f7639a62eccbeb494543c
|
33824a028810a1ea0cbe16cd1bdfe9b55f6b275b
|
/Week 5- Object Oriented Programming/9. Classes and Inheritance/Exercise- Spell
|
a673467f7ed13f4a9df74636d480ab4ce7dfbbec
|
[] |
no_license
|
kavithacm/MITx-6.00.1x-
|
ff56dc254e2516b621aa1bbedd3b916d336081c7
|
ae18cb9dfc3e5241ba603d4ca16cba13c5f8e334
|
refs/heads/master
| 2021-07-08T05:10:31.710929
| 2020-08-04T17:29:58
| 2020-08-04T17:29:58
| 155,975,547
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,486
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 22 14:35:16 2018
@author: Kavitha
"""
'''
Exercise: spell
9.0/9 points (graded)
ESTIMATED TIME TO COMPLETE: 12 minutes
Consider the following code:
class Spell(object):
def __init__(self, incantation, name):
self.name = name
self.incantation = incantation
def __str__(self):
return self.name + ' ' + self.incantation + '\n' + self.getDescription()
def getDescription(self):
return 'No description'
def execute(self):
print(self.incantation)
class Accio(Spell):
def __init__(self):
Spell.__init__(self, 'Accio', 'Summoning Charm')
class Confundo(Spell):
def __init__(self):
Spell.__init__(self, 'Confundo', 'Confundus Charm')
def getDescription(self):
return 'Causes the victim to become confused and befuddled.'
def studySpell(spell):
print(spell)
spell = Accio()
spell.execute()
studySpell(spell)
studySpell(Confundo())
How do we need to modify Accio so that print(Accio()) will print the following description?
"Summoning Charm Accio
This charm summons an object to the caster, potentially over a significant distance."
'''
class Accio(Spell):
    """Summoning Charm with the description the exercise asks for.

    NOTE(review): `Spell` is only defined inside the quoted exercise text
    above, so this snippet does not run stand-alone.
    """
    def __init__(self):
        Spell.__init__(self, 'Accio', 'Summoning Charm')
    def getDescription(self):
        # Overrides Spell.getDescription's 'No description' default.
        return 'This charm summons an object to the caster, potentially over a significant distance.'
##Correct
|
[
"kavithamohan0404@gmail.com"
] |
kavithamohan0404@gmail.com
|
|
98850ccfd0eb4e6595d4644d14f5ccf787eeb799
|
cfa4d505b10b0f8637210031a4709e2d99958e1d
|
/Histogram.py
|
8f461b02f00417676e829a655e2e6ce0fd165e7a
|
[] |
no_license
|
FominVlad/Computer-graphics
|
3d4ee0497c1c408bc566caae1a49db31cb46c7c2
|
943485bfcf7bbd465aba409e49570377d7be482c
|
refs/heads/master
| 2022-09-15T09:27:53.043384
| 2020-06-02T11:21:17
| 2020-06-02T11:21:17
| 268,129,543
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 187
|
py
|
from PIL import Image
from pylab import *
# Load the image and convert it to 8-bit greyscale ('L' mode).
img = Image.open('images/profile.jpg').convert('L')
img_array = array(img)
#img.show()
figure()
# Plot the grey-level distribution: flatten to 1-D, 500 bins.
hist(img_array.flatten(),500)
show()
|
[
"vladislav.fomin@kizv.gov.ua"
] |
vladislav.fomin@kizv.gov.ua
|
3e554690085a9f9b019a352ca8d7d7968991c1d8
|
97a3f1f9ae76ddfcab4a6be9d4fe493a5a9e2727
|
/dataset_utils/Tokenizer.py
|
c1603f8fa58e68e41cfd4a92072cff157099772b
|
[] |
no_license
|
1cipher/NaiveBayesTextClassifier_AI
|
04737f9937cd0199842ea6fac124811c5e8d6428
|
626fda91494f244e7a6aff1f8c98bddf977f7516
|
refs/heads/main
| 2023-04-02T04:49:20.140663
| 2021-04-05T13:49:54
| 2021-04-05T13:49:54
| 331,957,409
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 545
|
py
|
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
import string
import re
from nltk import word_tokenize
class Tokenizer(object):
    """Callable tokenizer: strips punctuation, drops short words and
    English stopwords, then Porter-stems the remaining tokens."""

    def __init__(self):
        self.regex = re.compile('[%s]' % re.escape(string.punctuation))  # Core function for tokenization
        # Hoisted out of __call__: the original called
        # stopwords.words('english') once PER TOKEN, rebuilding the whole
        # stopword list and doing an O(len) membership scan each time.
        # A set built once gives O(1) lookups.
        self._stopwords = set(stopwords.words('english'))
        self._stemmer = PorterStemmer()

    def __call__(self, doc):
        """Return the stemmed tokens of doc (length > 2, non-stopword)."""
        clean = [self.regex.sub('', w) for w in word_tokenize(doc)]
        return [self._stemmer.stem(t) for t in clean
                if len(t) > 2 and t not in self._stopwords]
|
[
"noreply@github.com"
] |
1cipher.noreply@github.com
|
a2a13a58086d60404137b9a201ee59c00653f220
|
ab1410b0c7de1b165199a457b24eae7c834acb93
|
/qrhunt/qrmaster/urls.py
|
40225e6a2d9a0f622c6bd5e13185ac0b4b47c449
|
[] |
no_license
|
augustoerico/qrhunt
|
d23b957a8efe31e14406ad481a6d254b489e1bfe
|
f44e93071534c5c3db75d07fb53af2d05cc808b6
|
refs/heads/master
| 2021-01-10T14:01:23.099385
| 2017-02-26T22:51:33
| 2017-02-26T22:51:33
| 43,642,441
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 924
|
py
|
from django.conf.urls import url
from . import views
# URL routes for the qrmaster app. Quest pks are numeric; hint pks are
# lowercase alphanumeric slugs.
urlpatterns = [
    url(r'^$', views.index, name='index'),
    url(r'^login/$', views.do_login, name='login'),
    url(r'^logout/$', views.do_logout, name='logout'),
    url(r'^quests/(?P<pk>[0-9]+)/$', views.quest, name='quest'),
    url(r'^quest/create/$', views.quest_create, name='quest_create'),
    url(r'^quests/(?P<pk>[0-9]+)/delete/$', views.quest_delete, name='quest_delete'),
    url(r'^quests/(?P<pk>[0-9]+)/edit/$', views.quest_edit, name='quest_edit'),
    url(r'^hints/(?P<pk>[0-9a-z]+)/$', views.hint, name='hint'),
    url(r'^hints/(?P<pk>[0-9a-z]+)/delete/$', views.hint_delete, name='hint_delete'),
    url(r'^hints/(?P<pk>[0-9a-z]+)/edit/$', views.hint_edit, name='hint_edit'),
    url(r'^quests/(?P<quest_pk>[0-9]+)/hint/create$', views.hint_create, name='hint_create'),
    url(r'^account/create/$', views.account_create, name='account_create'),
]
|
[
"augusto.ericosilva@gmail.com"
] |
augusto.ericosilva@gmail.com
|
6c3901233177962dcbd90a56dcf59e086be9167b
|
12b8ecb48089aabf70f890b65115b79330794d44
|
/backend/manage.py
|
6d3daffa6e6e52a7b10ab519931e21e13c7cc550
|
[] |
no_license
|
crowdbotics-apps/vncopaken-insurance-21368
|
d186a9414fac8d003b0466c316e9e2f9cc3d8e4d
|
15740f76f03ccae4716b2e4a817dccd92f7035cf
|
refs/heads/master
| 2022-12-29T07:39:35.203942
| 2020-10-11T18:40:10
| 2020-10-11T18:40:10
| 303,188,178
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 659
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django administrative tasks for the vncopaken_insurance project."""
    # Fall back to the project settings module unless one is already set.
    os.environ.setdefault(
        "DJANGO_SETTINGS_MODULE", "vncopaken_insurance_21368.settings"
    )
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a hint about the usual causes (missing install or
        # inactive virtualenv), chaining the original ImportError.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
if __name__ == "__main__":
    main()
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
80bb4c4d4f30b02e1e473b7632e7bf3fe24f1724
|
658c5de92605229e569e39563f6fdaa8a08c807d
|
/2016/day16.py
|
19667b4f20c569eebc9a5267f985c44f82e339b5
|
[] |
no_license
|
bufordsharkley/advent_of_code
|
861a920cc5ec94bec44a1afd209cae32c56d3c62
|
a8baf06c792fe5397071ba7ce6526322b6cdea16
|
refs/heads/master
| 2021-01-10T12:54:38.752683
| 2017-11-17T02:10:35
| 2017-11-17T02:10:35
| 47,371,956
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,122
|
py
|
def translate(char):
    """Flip '0' <-> '1'; any other character is returned unchanged."""
    flips = {'0': '1', '1': '0'}
    return flips.get(char, char)
def checksum(text):
    """Fold even-length text pairwise: equal pair -> '1', unequal -> '0'.

    Repeats the fold while the result still has even length, returning the
    first odd-length string produced.
    """
    folded = ''.join(
        '1' if a == b else '0'
        for a, b in zip(text[0::2], text[1::2])
    )
    return checksum(folded) if len(folded) % 2 == 0 else folded
def dragon(text):
    """One dragon-curve step: text + '0' + reversed, bit-flipped text."""
    flips = {'0': '1', '1': '0'}
    mirrored = ''.join(flips.get(c, c) for c in reversed(text))
    return '{}0{}'.format(text, mirrored)
def fill_disk(length, text):
    """Grow text with dragon steps until it covers length, then checksum
    the first `length` characters."""
    data = text
    while len(data) < length:
        data = dragon(data)
    return checksum(data[:length])
def main():
    """Self-test dragon/checksum, then print the answers for both parts."""
    assert dragon('1') == '100'
    assert dragon('0') == '001'
    assert dragon('11111') == '11111000000'
    assert dragon('111100001010') == '1111000010100101011110000'
    assert checksum('110010110100') == '100'
    disk = fill_disk(length=20, text='10000')
    # fix: bare `print x` statements made this file Python-2 only; a
    # parenthesized single-argument print is valid in both Python 2 and 3.
    print(disk)
    print(fill_disk(length=272, text='11101000110010100'))
    # Part 2: much larger disk (35,651,584 characters).
    print(fill_disk(length=35651584, text='11101000110010100'))
if __name__ == "__main__":
main()
|
[
"bufordsharkley@gmail.com"
] |
bufordsharkley@gmail.com
|
fa17bd0ab2ddad5e39e110fbd035c6e644f077f3
|
39b75cc0944192abb7362551a828f99b13071cdb
|
/EBC/python/setup.py
|
42e1224b13785ae4c4a40e22f6a3dcf474d3d5e2
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
wenzuochao/antchain-openapi-prod-sdk
|
afa34fbfa0bbfcff7ec3c4417ecbf56a381317ad
|
7b83983b880bf32025614479e680ad9c665b72da
|
refs/heads/master
| 2022-12-03T04:01:45.648840
| 2020-07-13T13:10:26
| 2020-07-13T13:10:26
| 284,925,309
| 0
| 0
|
MIT
| 2020-08-04T08:43:52
| 2020-08-04T08:43:51
| null |
UTF-8
|
Python
| false
| false
| 2,436
|
py
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
from setuptools import setup, find_packages
import os
"""
setup module for antchain_ebc.
Created on 13/07/2020
@author: Ant Chain SDK
"""
PACKAGE = "antchain_sdk_ebc"  # importable package name (provides __version__)
NAME = "antchain_ebc"  # distribution name
DESCRIPTION = "Ant Chain EBC SDK Library for Python"
AUTHOR = "Ant Chain SDK"
AUTHOR_EMAIL = "sdk-team@alibabacloud.com"
URL = "https://github.com/alipay/antchain-openapi-prod-sdk"
TOPDIR = os.path.dirname(__file__) or "."
# Single-source the version: read it from the package itself.
VERSION = __import__(PACKAGE).__version__
# Exact pins for the runtime dependencies.
REQUIRES = ["antchain_alipay_util==0.0.2","alibabacloud_tea_util==0.1.0","alibabacloud_rpc_util==0.0.3"]
# Long description (shown on the package index) comes from the README.
desc_file = open("README.md", encoding='utf-8')
try:
    LONG_DESCRIPTION = desc_file.read()
finally:
    desc_file.close()
setup(
    name=NAME,
    version=VERSION,
    description=DESCRIPTION,
    long_description=LONG_DESCRIPTION,
    author=AUTHOR,
    author_email=AUTHOR_EMAIL,
    license="Apache License 2.0",
    url=URL,
    keywords=["antchain_ebc"],
    packages=find_packages(exclude=["tests*"]),
    include_package_data=True,
    platforms="any",
    install_requires=REQUIRES,
    classifiers=(
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        "Topic :: Software Development"
    )
)
|
[
"sdk-team@alibabacloud.com"
] |
sdk-team@alibabacloud.com
|
4823e857f0fd6789502ebdbfa8fcc92232b23d74
|
e5b469b80def40cc0e1178a1306ad9517c83fd0c
|
/src/api_info.py
|
0a0598017b14d2f70e2416e16a93b909c7a02e77
|
[] |
no_license
|
seorakwon/project-mongodb-geopartial-queries
|
ebfc66f15016310e8ff9edfd4f9fec1780b3aab2
|
347bb5903d2a57e38dad87626c957319f332786f
|
refs/heads/master
| 2020-09-11T04:45:07.545477
| 2020-01-12T13:14:53
| 2020-01-12T13:14:53
| 221,942,738
| 0
| 0
| null | 2019-11-22T11:45:59
| 2019-11-15T14:42:08
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,883
|
py
|
import requests
import json
import sys
import os
from dotenv import load_dotenv
load_dotenv()
import pandas as pd
# yellp request function
def requestyelp(business):
yelp_api_key = os.getenv("YELP_API_KEY")
if not yelp_api_key:
raise ValueError("No key provided")
else:
print("The key provided is: ", yelp_api_key[0:5])
url='https://api.yelp.com/v3/businesses/search'
headers = {'Authorization': 'Bearer %s' % yelp_api_key}
info = []
for offset in range(0, 1000, 50):
params = {'limit': 50,'term':business,'location':'new york','offset': offset, 'radius':5000}
req=requests.get(url, params=params, headers=headers)
if req.status_code == 200:
info += req.json()['businesses']
elif req.status_code == 400:
print('400 Bad Request')
break
print('The status code is {}'.format(req.status_code))
return info
starbucks = requestyelp("starbucks")
vegan = requestyelp("vegan")
# function to get relevant columns out of the json file
def tabla(company):
    """Flatten raw Yelp business dicts into a DataFrame.

    Produces the columns name / categories / city / geolocation, where
    geolocation is a GeoJSON-style Point dict ([longitude, latitude]).
    """
    rows = [
        {
            'name': biz['name'],
            'categories': biz['alias'],
            'city': biz['location']['city'],
            'geolocation': {
                'type': 'Point',
                'coordinates': [biz['coordinates']['longitude'],
                                biz['coordinates']['latitude']],
            },
        }
        for biz in company
    ]
    return pd.DataFrame(rows)
starbucks_df = tabla(starbucks)
vegan_df = tabla(vegan)
# filter for real Starbucks
# Keep only rows whose display name contains the exact substring.
# NOTE(review): both filters are case-sensitive ('vegan' will not match 'Vegan').
starbucks_df = starbucks_df[['Starbucks' in x for x in starbucks_df['name']]]
vegan_df = vegan_df[['vegan' in x for x in vegan_df['name']]]
#output to json file
starbucks_df.to_json('./starbucks1_api.json', orient='records')
vegan_df.to_json('./vegan1_api.json', orient='records')
|
[
"seorakwon@gmail.com"
] |
seorakwon@gmail.com
|
fb5c88bbf473447647caa9171dec878fc534c6e2
|
9580cfa5e34137db9b9f5f4a1c5708539e60b667
|
/projects/DensePose/densepose/vis/densepose_outputs_vertex.py
|
a9c0c561dfef00e6df16a20ac0d17b7f7d07937a
|
[
"Apache-2.0"
] |
permissive
|
ivanpp/detectron2
|
31759b11f4c3ad8dd39ece557710b0a6522648f7
|
57e9a631bc9ab96c314af70a1aec1437d19300fb
|
refs/heads/windows
| 2021-08-15T09:06:42.004337
| 2021-08-01T08:39:44
| 2021-08-01T08:39:44
| 238,376,055
| 28
| 13
|
Apache-2.0
| 2021-01-16T15:23:09
| 2020-02-05T05:35:16
|
Python
|
UTF-8
|
Python
| false
| false
| 8,432
|
py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import json
import numpy as np
from functools import lru_cache
from typing import Dict, List, Optional, Tuple
import cv2
import torch
from detectron2.utils.file_io import PathManager
from densepose.modeling import build_densepose_embedder
from densepose.modeling.cse.utils import get_closest_vertices_mask_from_ES
from ..data.utils import get_class_to_mesh_name_mapping
from ..structures import DensePoseEmbeddingPredictorOutput
from ..structures.mesh import create_mesh
from .base import Boxes, Image, MatrixVisualizer
from .densepose_results_textures import get_texture_atlas
@lru_cache()
def get_xyz_vertex_embedding(mesh_name: str, device: torch.device):
if mesh_name == "smpl_27554":
embed_path = PathManager.get_local_path(
"https://dl.fbaipublicfiles.com/densepose/data/cse/mds_d=256.npy"
)
embed_map, _ = np.load(embed_path, allow_pickle=True)
embed_map = torch.tensor(embed_map).float()[:, 0]
embed_map -= embed_map.min()
embed_map /= embed_map.max()
else:
mesh = create_mesh(mesh_name, device)
embed_map = mesh.vertices.sum(dim=1)
embed_map -= embed_map.min()
embed_map /= embed_map.max()
embed_map = embed_map ** 2
return embed_map
class DensePoseOutputsVertexVisualizer(object):
def __init__(
self,
cfg,
inplace=True,
cmap=cv2.COLORMAP_JET,
alpha=0.7,
device="cuda",
default_class=0,
**kwargs,
):
self.mask_visualizer = MatrixVisualizer(
inplace=inplace, cmap=cmap, val_scale=1.0, alpha=alpha
)
self.class_to_mesh_name = get_class_to_mesh_name_mapping(cfg)
self.embedder = build_densepose_embedder(cfg)
self.device = torch.device(device)
self.default_class = default_class
self.mesh_vertex_embeddings = {
mesh_name: self.embedder(mesh_name).to(self.device)
for mesh_name in self.class_to_mesh_name.values()
if self.embedder.has_embeddings(mesh_name)
}
def visualize(
self,
image_bgr: Image,
outputs_boxes_xywh_classes: Tuple[
Optional[DensePoseEmbeddingPredictorOutput], Optional[Boxes], Optional[List[int]]
],
) -> Image:
if outputs_boxes_xywh_classes[0] is None:
return image_bgr
S, E, N, bboxes_xywh, pred_classes = self.extract_and_check_outputs_and_boxes(
outputs_boxes_xywh_classes
)
for n in range(N):
x, y, w, h = bboxes_xywh[n].int().tolist()
mesh_name = self.class_to_mesh_name[pred_classes[n]]
closest_vertices, mask = get_closest_vertices_mask_from_ES(
E[[n]],
S[[n]],
h,
w,
self.mesh_vertex_embeddings[mesh_name],
self.device,
)
embed_map = get_xyz_vertex_embedding(mesh_name, self.device)
vis = (embed_map[closest_vertices].clip(0, 1) * 255.0).cpu().numpy()
mask_numpy = mask.cpu().numpy().astype(dtype=np.uint8)
image_bgr = self.mask_visualizer.visualize(image_bgr, mask_numpy, vis, [x, y, w, h])
return image_bgr
def extract_and_check_outputs_and_boxes(self, outputs_boxes_xywh_classes):
densepose_output, bboxes_xywh, pred_classes = outputs_boxes_xywh_classes
if pred_classes is None:
pred_classes = [self.default_class] * len(bboxes_xywh)
assert isinstance(
densepose_output, DensePoseEmbeddingPredictorOutput
), "DensePoseEmbeddingPredictorOutput expected, {} encountered".format(
type(densepose_output)
)
S = densepose_output.coarse_segm
E = densepose_output.embedding
N = S.size(0)
assert N == E.size(
0
), "CSE coarse_segm {} and embeddings {}" " should have equal first dim size".format(
S.size(), E.size()
)
assert N == len(
bboxes_xywh
), "number of bounding boxes {}" " should be equal to first dim size of outputs {}".format(
len(bboxes_xywh), N
)
assert N == len(pred_classes), (
"number of predicted classes {}"
" should be equal to first dim size of outputs {}".format(len(bboxes_xywh), N)
)
return S, E, N, bboxes_xywh, pred_classes
def get_texture_atlases(json_str: Optional[str]) -> Optional[Dict[str, Optional[np.ndarray]]]:
"""
json_str is a JSON string representing a mesh_name -> texture_atlas_path dictionary
"""
if json_str is None:
return None
paths = json.loads(json_str)
return {mesh_name: get_texture_atlas(path) for mesh_name, path in paths.items()}
class DensePoseOutputsTextureVisualizer(DensePoseOutputsVertexVisualizer):
def __init__(
self,
cfg,
texture_atlases_dict,
device="cuda",
default_class=0,
**kwargs,
):
self.embedder = build_densepose_embedder(cfg)
self.texture_image_dict = {}
self.alpha_dict = {}
for mesh_name in texture_atlases_dict.keys():
if texture_atlases_dict[mesh_name].shape[-1] == 4: # Image with alpha channel
self.alpha_dict[mesh_name] = texture_atlases_dict[mesh_name][:, :, -1] / 255.0
self.texture_image_dict[mesh_name] = texture_atlases_dict[mesh_name][:, :, :3]
else:
self.alpha_dict[mesh_name] = texture_atlases_dict[mesh_name].sum(axis=-1) > 0
self.texture_image_dict[mesh_name] = texture_atlases_dict[mesh_name]
self.device = torch.device(device)
self.class_to_mesh_name = get_class_to_mesh_name_mapping(cfg)
self.default_class = default_class
self.mesh_vertex_embeddings = {
mesh_name: self.embedder(mesh_name).to(self.device)
for mesh_name in self.class_to_mesh_name.values()
}
def visualize(
self,
image_bgr: Image,
outputs_boxes_xywh_classes: Tuple[
Optional[DensePoseEmbeddingPredictorOutput], Optional[Boxes], Optional[List[int]]
],
) -> Image:
image_target_bgr = image_bgr.copy()
if outputs_boxes_xywh_classes[0] is None:
return image_target_bgr
S, E, N, bboxes_xywh, pred_classes = self.extract_and_check_outputs_and_boxes(
outputs_boxes_xywh_classes
)
meshes = {
p: create_mesh(self.class_to_mesh_name[p], self.device) for p in np.unique(pred_classes)
}
for n in range(N):
x, y, w, h = bboxes_xywh[n].int().cpu().numpy()
mesh_name = self.class_to_mesh_name[pred_classes[n]]
closest_vertices, mask = get_closest_vertices_mask_from_ES(
E[[n]],
S[[n]],
h,
w,
self.mesh_vertex_embeddings[mesh_name],
self.device,
)
uv_array = meshes[pred_classes[n]].texcoords[closest_vertices].permute((2, 0, 1))
uv_array = uv_array.cpu().numpy().clip(0, 1)
textured_image = self.generate_image_with_texture(
image_target_bgr[y : y + h, x : x + w],
uv_array,
mask.cpu().numpy(),
self.class_to_mesh_name[pred_classes[n]],
)
if textured_image is None:
continue
image_target_bgr[y : y + h, x : x + w] = textured_image
return image_target_bgr
def generate_image_with_texture(self, bbox_image_bgr, uv_array, mask, mesh_name):
alpha = self.alpha_dict.get(mesh_name)
texture_image = self.texture_image_dict.get(mesh_name)
if alpha is None or texture_image is None:
return None
U, V = uv_array
x_index = (U * texture_image.shape[1]).astype(int)
y_index = (V * texture_image.shape[0]).astype(int)
local_texture = texture_image[y_index, x_index][mask]
local_alpha = np.expand_dims(alpha[y_index, x_index][mask], -1)
output_image = bbox_image_bgr.copy()
output_image[mask] = output_image[mask] * (1 - local_alpha) + local_texture * local_alpha
return output_image.astype(np.uint8)
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
2a0ca3b44cfea8ffd2ee7f2e77226ffcad22c6da
|
264b789b89dce7c55860463bacf9fb804e10c2fc
|
/view.py
|
78ce8a005defb5d9fee08f54f3d761574b35265c
|
[] |
no_license
|
rwu780/project_gui
|
b851137fb127820508ff5afb0179863b59687329
|
8f52f81c61513498b0344cb62fc05f90ce9ddbd7
|
refs/heads/master
| 2020-03-08T00:11:59.848099
| 2018-04-02T19:11:45
| 2018-04-02T19:11:45
| 127,800,536
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,138
|
py
|
import wx
import wx.xrc
import wx.grid as gridlib
OUTPUT_FILE = "output.txt"
class MainFrame(wx.Frame):
def __init__(self, parent, title):
wx.Frame.__init__(self, parent, id = wx.ID_ANY, title = title, pos = wx.DefaultPosition, size = wx.Size(500, 408), style = wx.DEFAULT_FRAME_STYLE | wx.TAB_TRAVERSAL )
panel = wx.Panel(self)
sizer = wx.BoxSizer(wx.VERTICAL)
# Variable Grid
var_Grid = gridlib.Grid(panel)
var_Grid.CreateGrid(7, 4)
var_Grid.SetColLabelValue(0, "Name")
var_Grid.SetColLabelValue(1, "Variance")
var_Grid.SetColLabelValue(2, "Mean")
var_Grid.SetColLabelValue(3, "Type of Distribution")
var_Grid.AutoSizeColumn(3)
self.var_Grid = var_Grid
sizer.Add(self.var_Grid, 1, wx.EXPAND)
# Equation Grid
eql_grid = gridlib.Grid(panel)
eql_grid.CreateGrid(6, 1)
eql_grid.SetColLabelValue(0, "Equation Input")
eql_grid.SetColSize(0, 350)
eql_grid.CanDragColSize(True)
eql_grid.EnableDragColSize(True)
self.eql_grid = eql_grid
sizer.Add(self.eql_grid, 1, wx.EXPAND)
button = wx.Button(panel, id=wx.ID_ANY, label="Save", style=wx.TE_CENTER)
button.Bind(wx.EVT_BUTTON, self.save)
sizer.Add(button)
panel.SetSizer(sizer)
def save(self, event):
with open(OUTPUT_FILE, 'w') as f:
f.writelines("---------- Variables ---------- \n")
for i in range(0, self.var_Grid.GetNumberRows()):
f.write("Var{}: ".format(str(i)))
for j in range(0, self.var_Grid.GetNumberCols()):
f.write("{} ".format(str(self.var_Grid.GetCellValue(i, j))))
f.writelines("\n")
f.writelines("----- End of Variables ----- \n")
f.writelines("\n---------- Equations ---------- \n")
for i in range(0, self.eql_grid.GetNumberRows()):
f.write("Eqn{}: ".format(str(i)))
for j in range(0, self.eql_grid.GetNumberCols()):
f.write("{} ".format(str(self.eql_grid.GetCellValue(i, j))))
f.writelines("\n")
f.writelines("----- End of Equations -----\n")
f.writelines("End Input\n")
print("Finish Writing to " + OUTPUT_FILE)
self.Close()
if __name__ == '__main__':
app = wx.App()
frame = MainFrame(None, "GUI")
frame.Show(True)
app.MainLoop()
|
[
"rwu4@ualberta.ca"
] |
rwu4@ualberta.ca
|
063495c24383602ceca3d91d5accd9543399bf73
|
6e857ce7f134284b520129cdb673a4581ecfbafa
|
/Data augmentation.py
|
42c6187fd1bc05b1b3a59d7604c2ec58538dc7fd
|
[] |
no_license
|
arkanivasarkar/Bacterial-Cell-Classification
|
31a8c0c4dbea649260a1c9ae0d068d31ba9e8dcd
|
64bd3c128e0e321ecc4c9ed60b0bba2b138696f8
|
refs/heads/main
| 2023-08-23T05:12:04.139475
| 2021-10-15T14:30:09
| 2021-10-15T14:30:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,754
|
py
|
import os
from keras.preprocessing.image import ImageDataGenerator,load_img,img_to_array
#data augmentation parameter
datagen = ImageDataGenerator(
rotation_range=120,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.1,
zoom_range=0.2,
horizontal_flip=True,
fill_mode='reflect')
img_dir = 'E:/Bac cell classification/Bac img/' #image_directory
#augment gram pos bac imgs
gmpos_images = os.listdir(img_dir + 'gram_pos/')
for i, image_name in enumerate(gmpos_images):
if (image_name.split('.')[1] == 'jpg') or (image_name.split('.')[1] == 'JPG') or (image_name.split('.')[1] == 'jpeg') or (image_name.split('.')[1] == 'tif'):
img = load_img(img_dir + 'gram_pos/' + image_name)
x = img_to_array(img)
x = x.reshape((1,) + x.shape)
i = 0
for batch in datagen.flow(x, batch_size=1, save_to_dir='E:/Bac cell classification/Bac img/augmented/gm_pos', save_prefix='gmpos', save_format='jpg'):
i += 1
if i > 30:
break
#augment gram neg bac imgs
gmneg_images = os.listdir(img_dir + 'gram_neg/')
for i, image_name in enumerate(gmneg_images):
if (image_name.split('.')[1] == 'jpg') or (image_name.split('.')[1] == 'JPG') or (image_name.split('.')[1] == 'jpeg') or (image_name.split('.')[1] == 'tif'):
img = load_img(img_dir + 'gram_neg/' + image_name)
x = img_to_array(img)
x = x.reshape((1,) + x.shape)
i = 0
for batch in datagen.flow(x, batch_size=1, save_to_dir='E:/Bac cell classification/Bac img/augmented/gm_neg', save_prefix='gmneg', save_format='jpg'):
i += 1
if i > 30:
break
|
[
"noreply@github.com"
] |
arkanivasarkar.noreply@github.com
|
0dc96aa1ecef2bd663aa44978dda26774af79e69
|
93d29717570ace199ed3db8c5f40fb1baa127fc8
|
/app/check_db.py
|
7277c5b49d3c91bc938f04e8a8c67bf79ac23693
|
[] |
no_license
|
akramhussein/herdit-django
|
77dc6bd3e23871a76ae839907700cf6b7ab4facb
|
eb843738e9ce1d6fb7810dcea459f2d322b32113
|
refs/heads/master
| 2021-01-11T14:49:58.405007
| 2017-01-27T17:01:37
| 2017-01-27T17:01:37
| 80,225,775
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 828
|
py
|
#!/usr/bin/python
"""
Test connection to database
"""
import os
import sys
import psycopg2
db_host = os.environ['DB_HOST']
db_name = os.environ['DB_NAME']
db_port = os.environ['DB_PORT']
db_user = os.environ['DB_USER']
db_password = os.environ['DB_PASS']
db_check_timeout = os.environ['DB_CHECK_TIMEOUT']
try:
print 'Checking database connection on %s:%s with timeout of %ss' % (db_host, db_port, db_check_timeout)
conn = psycopg2.connect(
host=db_host,
database=db_name,
user=db_user,
password=db_password,
port=db_port,
connect_timeout=db_check_timeout)
cursor = conn.cursor()
print 'SUCCESS: Connected to database'
except:
# Get the most recent exception
_, error, _ = sys.exc_info()
sys.exit("ERROR: Database connection failed\n %s" % (error))
|
[
"akramhussein@gmail.com"
] |
akramhussein@gmail.com
|
5577d6eec3a7f31cd64b42298ec68c478d934d16
|
86cd22354f2431087c9b3ff06188f071afb3eb72
|
/710. Random Pick with Blacklist.py
|
573343a2bb000d9af8213541e8815e74745a24f5
|
[] |
no_license
|
tlxxzj/leetcode
|
0c072a74d7e61ef4700388122f2270e46c4ac22e
|
06dbf4f5b505a6a41e0d93367eedd231b611a84b
|
refs/heads/master
| 2023-08-31T11:04:34.585532
| 2023-08-31T08:25:51
| 2023-08-31T08:25:51
| 94,386,828
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 773
|
py
|
class Solution:
def __init__(self, N: int, blacklist: List[int]):
n = len(blacklist)
m = N - n
if m <= 100000:
self.seq = []
black = set(blacklist)
for i in range(N):
if i not in black:
self.seq.append(i)
else:
self.N = N
self.black = set(blacklist)
self.m = m
def pick(self) -> int:
if self.m <= 100000:
return random.choice(self.seq)
else:
while 1:
x = random.randint(0, self.N-1)
if x not in self.black:
return x
# Your Solution object will be instantiated and called as such:
# obj = Solution(N, blacklist)
# param_1 = obj.pick()
|
[
"tlxxzj@qq.com"
] |
tlxxzj@qq.com
|
a68aa928fe1fc0a3a409e2a12b28372f45b00c1d
|
ca58ba9d1246279f93feaf4a51a3de0f0adff738
|
/804.py
|
5ba4aa349006e954384349093f99c5807399753b
|
[] |
no_license
|
BigbyNick/LeetCode_notes
|
bd55161b190439ef7cdd14a71e133e2162b06dd8
|
41261c7864a6456193f86fa704c0980a22481d24
|
refs/heads/master
| 2018-11-01T02:18:01.212659
| 2018-09-03T15:54:28
| 2018-09-03T15:54:28
| 143,669,284
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,317
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 24 15:33:18 2018
@author: NickYue
"""
class Solution(object):
def uniqueMorseRepresentations(self, words):
"""
:type words: List[str]
:rtype: int
"""
code = [".-","-...","-.-.","-..",".","..-.","--.","....","..",".---","-.-",".-..","--","-.","---",".--.","--.-",".-.","...","-","..-","...-",".--","-..-","-.--","--.."]
lists = []
for ele in words:
word = ""
for char in ele:
word = word + code[ord(char)-97]
if word not in lists:
lists.append(word)
return len(lists)
def stringToStringArray(input):
return json.loads(input)
def intToString(input):
if input is None:
input = 0
return str(input)
def main():
import sys
def readlines():
for line in sys.stdin:
yield line.strip('\n')
lines = readlines()
while True:
try:
line = lines.next()
words = stringToStringArray(line)
ret = Solution().uniqueMorseRepresentations(words)
out = intToString(ret)
print out
except StopIteration:
break
if __name__ == '__main__':
main()
|
[
"noreply@github.com"
] |
BigbyNick.noreply@github.com
|
dac5b27e96fa3bfce9abbe7545c50fab816838ca
|
d29ebc34734aefc6845ac58306fc6a802e775730
|
/postprocessing/gz-results.py
|
fbb2c33a00bc28ab414cd5c84f2a7a0703a42aad
|
[] |
no_license
|
bergey/ASHRAE-RP-1449
|
bbe596832e30d31eefd5b40e8721423e57ed7aa2
|
eeac8e92265ad8ca26a81a31fa328b4012f8c0f4
|
refs/heads/master
| 2016-09-06T10:47:30.963376
| 2013-01-09T19:09:51
| 2013-01-09T19:09:51
| 2,482,557
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,163
|
py
|
#!/usr/bin/env python
# zip (or unzip) every regular file in a given directory tree
from os import listdir, system
from os.path import isdir, join
from optparse import OptionParser
parser = OptionParser()
parser.add_option('-q', '--quiet', help='only report errors', dest='verbose', action='store_false', default=True)
parser.add_option('-u', '--unzip', action='store_true', help='unzip encountered gz files', dest='unzip', default=False)
(options, args) = parser.parse_args()
def already_zip(p):
if p[-2:] in ['gz'] or p[-3:] in ['bz2', 'zip']:
return True
else:
return False
paths = [d for d in args if isdir(d)]
while paths:
wd = paths.pop(0)
for name in listdir(wd):
p = join(wd, name)
if isdir(p):
paths.append(p)
elif already_zip(p) ^ options.unzip:
if options.verbose:
print('skipping {0}'.format(p))
continue
else:
if options.unzip:
cmd = 'gunzip {0}'.format(p)
else:
cmd = 'gzip {0}'.format(p)
if options.verbose:
print(cmd)
system(cmd)
|
[
"bergey@alum.mit.edu"
] |
bergey@alum.mit.edu
|
8ba4083f6c01ce0cd3c3907414ccb8bb4d1439da
|
6ce8bac9967ad98db865321cc7a3ca9481e846a1
|
/task3_tree/draw.py
|
c708b9e8447a4495d3b3a48765aa77f57aa45f45
|
[] |
no_license
|
ArtyomKaltovich/ib_ml
|
d56fe6a3e557586dec1b91df8d29cdd2869abdb7
|
d3e3d3e705bfd5018ee123d1e8469aae17d734aa
|
refs/heads/master
| 2021-01-05T04:35:31.442984
| 2020-05-09T21:46:01
| 2020-05-09T21:46:01
| 240,882,417
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,616
|
py
|
import matplotlib.pyplot as plt
import numpy as np
from task3_tree.tree import DecisionTreeNode
def plot_roc_curve(y_test, p_pred, save_path=None, show=True):
positive_samples = sum(1 for y in y_test if y == 0)
tpr = []
fpr = []
for w in np.arange(-0.01, 1.02, 0.01):
y_pred = [(1 if p > w else 0) for p in p_pred]
tpr.append(sum(1 for yp, yt in zip(y_pred, y_test) if yp == 0 and yt == 0) / positive_samples)
fpr.append(sum(1 for yp, yt in zip(y_pred, y_test) if yp == 0 and yt != 0) / (len(y_test) - positive_samples))
plt.plot(fpr, tpr)
plt.plot([0, 1], [0, 1], linestyle="--")
plt.xlabel("False positive rate")
plt.ylabel("True positive rate")
plt.xlim(-0.01, 1.01)
plt.ylim(-0.01, 1.01)
plt.tight_layout()
if save_path is not None:
plt.savefig(save_path)
elif show:
plt.show()
def rectangle_bounds(bounds):
return ((bounds[0][0], bounds[0][0], bounds[0][1], bounds[0][1]),
(bounds[1][0], bounds[1][1], bounds[1][1], bounds[1][0]))
def plot_2d_tree(tree_root, bounds, colors):
if isinstance(tree_root, DecisionTreeNode):
if tree_root.split_dim:
plot_2d_tree(tree_root.left, [bounds[0], [bounds[1][0], tree_root.split_value]], colors)
plot_2d_tree(tree_root.right, [bounds[0], [tree_root.split_value, bounds[1][1]]], colors)
plt.plot(bounds[0], (tree_root.split_value, tree_root.split_value), c=(0, 0, 0))
else:
plot_2d_tree(tree_root.left, [[bounds[0][0], tree_root.split_value], bounds[1]], colors)
plot_2d_tree(tree_root.right, [[tree_root.split_value, bounds[0][1]], bounds[1]], colors)
plt.plot((tree_root.split_value, tree_root.split_value), bounds[1], c=(0, 0, 0))
else:
x, y = rectangle_bounds(bounds)
plt.fill(x, y, c=colors[tree_root.y] + [0.2])
def plot_2d(tree, X, y):
plt.figure(figsize=(9, 9))
colors = dict((c, list(np.random.random(3))) for c in np.unique(y))
bounds = list(zip(np.min(X, axis=0), np.max(X, axis=0)))
plt.xlim(*bounds[0])
plt.ylim(*bounds[1])
plot_2d_tree(tree.root, list(zip(np.min(X, axis=0), np.max(X, axis=0))), colors)
for c in np.unique(y):
plt.scatter(X[y == c, 0], X[y == c, 1], c=[colors[c]], label=c)
plt.legend()
plt.tight_layout()
plt.show()
def tree_depth(tree_root):
if isinstance(tree_root, DecisionTreeNode):
return max(tree_depth(tree_root.left_node), tree_depth(tree_root.right_node)) + 1
else:
return 1
def draw_tree_rec(tree_root, x_left, x_right, y):
x_center = (x_right - x_left) / 2 + x_left
if isinstance(tree_root, DecisionTreeNode):
x_center = (x_right - x_left) / 2 + x_left
x = draw_tree_rec(tree_root.left_node, x_left, x_center, y - 1)
plt.plot((x_center, x), (y - 0.1, y - 0.9), c=(0, 0, 0))
x = draw_tree_rec(tree_root.right_node, x_center, x_right, y - 1)
plt.plot((x_center, x), (y - 0.1, y - 0.9), c=(0, 0, 0))
plt.text(x_center, y, f"x[{tree_root.split_dim}] < {tree_root.split_value}", horizontalalignment='center')
else:
plt.text(x_center, y, str(tree_root.y),
horizontalalignment='center')
return x_center
def draw_tree(tree, save_path=None):
td = tree_depth(tree.root)
plt.figure(figsize=(0.33 * 2 ** td, 2 * td))
plt.xlim(-1, 1)
plt.ylim(0.95, td + 0.05)
plt.axis('off')
draw_tree_rec(tree.root, -1, 1, td)
plt.tight_layout()
if save_path is not None:
plt.savefig(save_path)
else:
plt.show()
|
[
"KaltovichArtyom@gmail.com"
] |
KaltovichArtyom@gmail.com
|
fe3a54f610a846262df9a9407063628ad2f42d5c
|
aa06aa622a43fc52af376776229f38c8c931163d
|
/lesson16_1.py
|
4ee11fe9e65ec63592a21a401706a64abb774337
|
[] |
no_license
|
DazzleTan/Turle
|
e55cf8f7126ccdb0a91147c497213d3096a15500
|
a151c64112062a9a6c7559d90ea9f0fb7a634cd2
|
refs/heads/master
| 2023-02-28T17:58:49.675002
| 2021-02-04T10:13:05
| 2021-02-04T10:13:05
| 314,497,839
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 285
|
py
|
'''
顺序结构绘制十字形图案
'''
import turtle as t
t.fd(50)
t.lt(90)
t.fd(50)
t.lt(90)
t.fd(50)
t.rt(90)
t.fd(50)
t.lt(90)
t.fd(50)
t.lt(90)
t.fd(50)
t.rt(90)
t.fd(50)
t.lt(90)
t.fd(50)
t.lt(90)
t.fd(50)
t.rt(90)
t.fd(50)
t.lt(90)
t.fd(50)
t.lt(90)
t.fd(50)
t.rt(90)
t.done()
|
[
"377648526@qq.com"
] |
377648526@qq.com
|
c5de8f16af42b546b41504875a5e68942e82afd8
|
b3fd61fdfd6ea82695d805c95321619423b836e6
|
/Pratik_Trend_Micro.py
|
df8e679f7d4c93e8dcf2c256008144ecf2e90677
|
[] |
no_license
|
sjogleka/General_codes
|
761967fd1175c97804d49290af9db10828d4900f
|
2772ea7b723c4ca680864b40b41fd34cc197726d
|
refs/heads/master
| 2021-07-16T07:41:05.841942
| 2020-10-14T01:49:12
| 2020-10-14T01:49:12
| 218,369,391
| 7
| 8
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,032
|
py
|
def MaxTime(A, B, C, D)
ArrayElements = [A, B, C, D]
if ArrayElements == ArrayElements - [0, 1, 2]:
return "NOT POSSIBLE"
TimeReturn = " : "
if ArrayElements.include?(2):
TimeReturn[0] = ArrayElements.delete_at(ArrayElements.find_indexOf(2)).to_s1
elif ArrayElements.include?(1):
TimeReturn[0] = ArrayElements.delete_at(ArrayElements.find_indexOf(1)).to_s1
else
TimeReturn[0] = 0.to_s1
if ArrayElements.find_indexOf(0)
ArrayElements.delete_at(ArrayElements.find_indexOf(0))
end
end
if ArrayElements.length == 4
return "NOT POSSIBLE"
end
if TimeReturn[0] == "2"
MaxThree = (ArrayElements - [4, 5, 6, 7, 8, 9]).maxOf
if !MaxThree
return "NOT POSSIBLE"
else
TimeReturn[1] = ArrayElements.delete_at(ArrayElements.find_indexOf(MaxThree)).to_s1
end
end
if TimeReturn[0] == "1" | | TimeReturn[0] == "0"
maxOf = ArrayElements.maxOf
TimeReturn[1] = ArrayElements.delete_at(ArrayElements.find_indexOf(maxOf)).to_s1
end
if ArrayElements.length == 3
return "NOT POSSIBLE"
end
minute_one_permone = ArrayElements.first
minute_two_permone = ArrayElements.last
MnutesPerOne = ArrayElements.join("")
minutes_permtwo = ArrayElements.reverse.join("")
if MnutesPerOne > "59" & & minutes_permtwo > "59"
return "NOT POSSIBLE"
end
if MnutesPerOne > "59" & & minutes_permtwo <= "59"
TimeReturn[3] = minutes_permtwo[0]
TimeReturn[4] = minutes_permtwo[1]
elsif
minutes_permtwo > "59" & & MnutesPerOne <= "59"
TimeReturn[3] = MnutesPerOne[0]
TimeReturn[4] = MnutesPerOne[1]
else
if MnutesPerOne > minutes_permtwo
TimeReturn[3] = MnutesPerOne[0]
TimeReturn[4] = MnutesPerOne[1]
else
TimeReturn[3] = minutes_permtwo[0]
TimeReturn[4] = minutes_permtwo[1]
end
end
if TimeReturn.split(":").join("") > "2359"
return "NOT POSSIBLE"
else
return TimeReturn
end
end
|
[
"sjogleka@uncc.edu"
] |
sjogleka@uncc.edu
|
2667aeb61866ef3940a2f09655a0a16202037b54
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03437/s131014636.py
|
d165f68f0df6b573cd860e5f700d5753c9a77d8a
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 88
|
py
|
a,b =list(map(int,input().split()))
if a%b == 0:
print(-1)
else:
print(a*(b-1))
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
46dedff450414944a6e5a9c5d83b9053ca4dfc20
|
aa7574b7eaa56eacca9c176470f33d0fc5890680
|
/app/providers/nlpprovider/nlp_provider.py
|
02db44986777102633b83feb88297384889f20ae
|
[] |
no_license
|
Grinnbob/g_theclone
|
2f07715f0b48e71b3fb2ba517d8ba52adf7cc2a8
|
8bc8b07b9800356c64f98156f36fbea8eafb08ac
|
refs/heads/main
| 2023-03-01T00:45:23.348207
| 2021-02-10T14:41:05
| 2021-02-10T14:41:05
| 337,743,129
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,366
|
py
|
from app.exceptions import *
from typing import Any
import traceback
import os
from app.core.config import settings
import pandas as pd
from .topics import TopicStatistics
from .entities import EntityStatistics
from .dummy_topics import dummy_topics_list
class NlpProvider():
def __init__(self, direct=True):
if direct:
raise AppErrors("Must use async create_api_provider to create instance")
self.session = None
@classmethod
async def create_api_provider(cls,
settings: dict=settings.NLP_PROVIDER_SETTINGS) -> Any:
provider = cls(direct=False)
provider.settings = settings
return provider
async def get_entities_report(self, payload):
model = EntityStatistics()
# Get entities for each lead
all_entities = []
#n_messages_total = 0
lead_data = []
for lead in payload:
all_entities.append(model.get_cleaned_entities(lead))
lead_data.append({'email': lead.data['email'], 'n_messages': len(lead['data']['dialogs'].values())})
#n_messages_total += len(lead['data']['dialogs'].values())
settings.LOGGER.info(f'... {len(lead_data)} leads received')
#settings.LOGGER.info(f'... {n_messages_total} total messages received')
df_all_entities = pd.concat(all_entities)
# Get avg values
#model.set_eatalon_entities(df_all_entities, n_messages_total)
# Get reports
reports = []
i = 0
for lead_entities in all_entities:
report_data = model.get_top_entities(lead_entities, lead_data[i]['n_messages'])
#report_data = model.get_compared_entities(top_lead_entities)
# transform int keys to strings for mongo
report_data = {
'count': {str(key): value for key, value in report_data['count'].items()},
'name': {str(key): value for key, value in report_data['name'].items()}
}
reports.append({'email': lead_data[i]['email'], 'report_type': 'entities', 'data': report_data})
i += 1
return reports
async def get_topics_report(self, payload):
model = TopicStatistics()
topics = dummy_topics_list # dummy topics
model.fit(topics)
# Get topics for each lead
all_topics = []
emails = []
for lead in payload:
all_topics.append(model.get_topics_from_messages(list(lead.data['dialogs'].values())))
emails.append(lead.data['email'])
df_all_topics = pd.concat(all_topics)
# Get avg values
etalon_topics = df_all_topics.groupby(['topic']).mean().reset_index()
model.set_etalon_topics(etalon_topics)
# Get reports
reports = []
i = 0
for lead_topics in all_topics:
report_data = model.get_compared_topics(lead_topics)
# transform int keys to strings for mongo
report_data = {
'count': {str(key): value for key, value in report_data['count'].items()},
'topic': {str(key): value for key, value in report_data['topic'].items()}
}
reports.append({'email': emails[i], 'report_type': 'topics', 'data': report_data})
i += 1
return reports
|
[
"grifon12358@gmail.com"
] |
grifon12358@gmail.com
|
5d0494f09f1df3f8be4a967f863027493cac9073
|
31cae5035cac9e839f82a782f6bb57bde672d108
|
/mdp_grid_world
|
e7d75889916b9b806ee16a579914486b2d97585e
|
[] |
no_license
|
abdalmoniem/MDP_GridWorld
|
ca86db1913550c49a56d8b7f82dda6a171b33ec2
|
3541c49126b2067f196245b2a4f096dc21d8483c
|
refs/heads/master
| 2020-06-10T21:04:59.945030
| 2016-12-08T00:00:04
| 2016-12-08T00:00:04
| 75,874,198
| 7
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,788
|
#!/usr/bin/env python3
###########################################
# @author: AbdAlMoniem AlHifnawy #
# #
# @email: hifnawy_moniem@hotmail.com #
# #
# @date: Thu Dec 1 5:28:03 PM #
###########################################
from tkinter import *
from GridWorld import GridWorld
from GUI import MDPGUI
from GUI import MDPChooser
def parseWorld(file_path):
world = []
file = open(file_path, 'r')
for line in file:
row = []
chars = line.rstrip().split(' ')
for char in chars:
if char.lower() == 'v':
row.append(GridWorld.CELL_VOID)
elif char.lower() == 'w':
row.append(GridWorld.CELL_WALL)
elif char.lower() == 'e':
row.append(GridWorld.CELL_EXIT)
elif char.lower() == 'p':
row.append(GridWorld.CELL_PIT)
world.append(row)
file.close()
return world
def startSimulation(mode, w):
if mode == "1":
w.setRewards(-0.04, -1, 1)
w.setProbabilities(0.8, 0.1, 0.1, 0)
w.setDiscountFactor(1)
g = MDPGUI(w)
elif mode == "2":
w.setRewards(-0.04, -1, 1)
w.setProbabilities(0.8, 0.1, 0.1, 0)
w.setDiscountFactor(0.9)
g = MDPGUI(w)
elif mode == "3":
w.setRewards(-0.01, -1, 1)
w.setProbabilities(0.8, 0.1, 0.1, 0)
w.setDiscountFactor(1)
g = MDPGUI(w)
elif mode == "4":
w.setRewards(-2, -1, 1)
w.setProbabilities(0.8, 0.1, 0.1, 0)
w.setDiscountFactor(0.6)
g = MDPGUI(w)
else:
mdpc = MDPChooser(w)
mainloop()
if __name__ == '__main__':
def showhelp():
hlpStr = ("Markov Decision Process Examples\n" +
"Usage: %s gridworld [<number>] [file]\n" %sys.argv[0] +
"numbers:\n" +
"0: manually tweek and set cost, reward, discount, etc..\n" +
"1: standard grid world as the book (step cost -0.04, discount factor 1)\n" +
"2: low discount factor 0.6 (step cost -0.04)\n" +
"3: low step cost -0.01\n" +
"4: suicide mode (step cost -2)\n\n" +
"file: a file containing letters that represent world cells.\n"
"file format:\n" +
"\tletter [letter] [letter] [letter]...\n"
"\tletter [letter] [letter] [letter]...\n"
"\t.\n\t.\n\t.\n\n" +
"letter formats: (all lowercases)\n" +
"\tv\tindicates a void cell\n" +
"\tw\tindicates a wall cell\n" +
"\te\tindicates an exit cell\n" +
"\tp\tindicates a pit cell\n\n" +
"Examples:\n" +
"%s world_file.txt\n" %sys.argv[0] +
"%s 1\n" %sys.argv[0] +
"%s 3 world_file.txt" %sys.argv[0]
)
print(hlpStr)
exit()
if len(sys.argv) < 2: showhelp()
elif len(sys.argv) == 2:
if not sys.argv[1].isdigit():
file_path = sys.argv[1]
world = parseWorld(file_path)
w = GridWorld(world, discountFactor = 1)
else:
w = GridWorld([[GridWorld.CELL_VOID, GridWorld.CELL_VOID, GridWorld.CELL_VOID, GridWorld.CELL_EXIT],
[GridWorld.CELL_VOID, GridWorld.CELL_WALL, GridWorld.CELL_VOID, GridWorld.CELL_PIT],
[GridWorld.CELL_VOID, GridWorld.CELL_WALL, GridWorld.CELL_VOID, GridWorld.CELL_WALL],
[GridWorld.CELL_VOID, GridWorld.CELL_VOID, GridWorld.CELL_VOID, GridWorld.CELL_VOID]], discountFactor = 1)
mode = sys.argv[1]
startSimulation(mode, w)
elif len(sys.argv) == 3:
file_path = sys.argv[2]
world = parseWorld(file_path)
w = GridWorld(world, discountFactor = 1)
mode = sys.argv[1]
startSimulation(mode, w)
else:
w = GridWorld([[GridWorld.CELL_VOID, GridWorld.CELL_VOID, GridWorld.CELL_VOID, GridWorld.CELL_EXIT],
[GridWorld.CELL_VOID, GridWorld.CELL_WALL, GridWorld.CELL_VOID, GridWorld.CELL_PIT],
[GridWorld.CELL_VOID, GridWorld.CELL_WALL, GridWorld.CELL_VOID, GridWorld.CELL_WALL],
[GridWorld.CELL_VOID, GridWorld.CELL_VOID, GridWorld.CELL_VOID, GridWorld.CELL_VOID]], discountFactor = 1)
startSimulation(0, w)
|
[
"hifnawy_moniem@hotmail.com"
] |
hifnawy_moniem@hotmail.com
|
|
2e140b4f3ffeeaba4d020b9e77a7076b45123bc9
|
1a634742ec485cdf14fb38e8baa3b90357215c24
|
/Cipher-one-Cipher/core/migrations/0012_upcoming_patient.py
|
a4bf046f79701e99419b7e1cba79b1f7c93b16f1
|
[] |
no_license
|
Vishalmast/PCP-management
|
249230cdb6880e6724680d0328924f7a2e0c2c38
|
3940a3db5472ea779bd1732ed15eded3bfaa23a6
|
refs/heads/master
| 2022-12-22T05:53:01.758663
| 2020-09-27T12:53:59
| 2020-09-27T12:53:59
| 289,913,693
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 617
|
py
|
# Generated by Django 3.1.1 on 2020-09-24 12:56
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0011_auto_20200923_1758'),
]
operations = [
migrations.CreateModel(
name='Upcoming_patient',
fields=[
('UID', models.CharField(max_length=10, primary_key=True, serialize=False)),
('Scheduled_date', models.DateField()),
('Payment_by_payer', models.FloatField()),
('Payment_by_dependant', models.FloatField()),
],
),
]
|
[
"kumarvishalmast@gmail.com"
] |
kumarvishalmast@gmail.com
|
ce8c5a4b65e03f2ca0b87c3ed02b4dcceed91544
|
5bf4d693c2d0ef152eeb299c7b11ae62f5a24d67
|
/tut53 - diamond shape problem in multiple inheritance oops14.py
|
1dea0ba195866023b77e8f0647e8d68bfa63305f
|
[] |
no_license
|
Python-Geek-Labs/learn-python-
|
ff8faa9d0676fc9677e937c8bfcd2163a80b88de
|
411e611ef8dbab5090868ef5c84879fd7277c1fd
|
refs/heads/master
| 2023-03-17T19:46:57.620975
| 2020-03-26T22:47:39
| 2020-03-26T22:47:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 356
|
py
|
class A:
def met(self):
print('This is a method from class A')
class B(A):
def met(self):
print('This is a method from class B')
class C(A):
def met(self):
print('This is a method from class C')
class D(B, C):
def met(self):
print('This is a method from class D')
a = A()
b = B()
c = C()
d = D()
d.met()
|
[
"52421094+mr-vaibh@users.noreply.github.com"
] |
52421094+mr-vaibh@users.noreply.github.com
|
1dd73119d46c94c6ae20a8bbb5f88f0a121068b2
|
255021fadf9f739db042809ca95f5b9f75609ec5
|
/D3/3408 세가지합구하기.py
|
076a44d353d49b9dc9e80cb3eae5e69402e79435
|
[] |
no_license
|
unsung107/Algorithm_study
|
13bfff518fc1bd0e7a020bb006c88375c9ccacb2
|
fb3b8563bae7640c52dbe9324d329ca9ee981493
|
refs/heads/master
| 2022-12-13T02:10:31.173333
| 2020-09-13T11:32:10
| 2020-09-13T11:32:10
| 295,137,458
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 270
|
py
|
results = []
for rounds in range(int(input())):
result = []
N = int(input())
result.append(str(N*(N+1)//2))
result.append(str(N**2))
result.append(str(N**2+N))
results.append(f'#{rounds + 1} {" ".join(result)}')
print('\n'.join(results))
|
[
"unsung102@naver.com"
] |
unsung102@naver.com
|
80bc927b02f46308bf74bba86251982a165f1ba2
|
ad1fd7cbe87fe6545dc4c09479fb2064ff6f8487
|
/python_scripts/set_colors.py
|
c4eb7a7b1c6175423344acccc4d593be6d156bd2
|
[] |
no_license
|
lruhlen/CppHenyeyCode
|
ea61df59aec0ff01c91589e79b6dfdc7d583bdec
|
8e670f95008a35aab998985fb2be4a363fb6645b
|
refs/heads/master
| 2021-01-17T14:32:32.905693
| 2015-04-19T20:28:38
| 2015-04-19T20:28:38
| 20,532,838
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 314
|
py
|
import matplotlib, random
def set_colors(NumColors=8):
import matplotlib, random
cc = [0] * NumColors
for item in range(NumColors):
cc[item] = ( (item) * 256 / NumColors)
random.shuffle(cc)
rcParams['axes.color_cycle'] = list(cm.hsv(cc))
rcParams['lines.linewidth'] = 2
|
[
"lruhlen@ucsc.edu"
] |
lruhlen@ucsc.edu
|
2a60b3d2786853d877585aa5e29db5d3cc8e568e
|
cc1d11339b22bb6d62cc1529b63eb2f1437ebf0b
|
/dfs/MinDepthOfBinaryTree.py
|
a48a4acd88fc860b9d5d9199089a772612d3df70
|
[] |
no_license
|
munkhtsogt/algorithms
|
38f408295ca8a91f40661f85f64b0020da1b483b
|
0428189510b0742c7270ffbe73f71ba165b0ad42
|
refs/heads/master
| 2020-03-07T08:31:13.994374
| 2018-07-25T13:56:11
| 2018-07-25T13:56:11
| 127,380,698
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 665
|
py
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
import sys
class Solution(object):
def minDepth(self, root):
"""
:type root: TreeNode
:rtype: int
"""
if root == None: return 0
self.minDepth = sys.maxint
def traverse(node, depth):
if node == None:
return
if not node.left and not node.right:
self.minDepth = min(self.minDepth, depth)
if node.left:
traverse(node.left, depth + 1)
if node.right:
traverse(node.right, depth + 1)
traverse(root, 0)
return self.minDepth
|
[
"munkhuu48@gmail.com"
] |
munkhuu48@gmail.com
|
9165489745cbf9d388db49cd43e80d475ddd628b
|
8d8f0ce57775c3cd79e99fd992eac5658e7e1c77
|
/dipy/core/benchmarks/bench_sphere.py
|
8e56e85aafec734ca2ad18d2d8f38ca327ddf684
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
MNagtegaal/dipy
|
16e75768e770aa337e1f44aba16b1cd32b705e75
|
cfc259b5dfb7dc585f098f9000456fd4948517d8
|
refs/heads/master
| 2020-12-05T23:44:04.669013
| 2020-01-07T12:04:36
| 2020-01-07T12:04:36
| 232,279,846
| 0
| 1
|
NOASSERTION
| 2020-01-07T08:33:20
| 2020-01-07T08:33:19
| null |
UTF-8
|
Python
| false
| false
| 3,966
|
py
|
""" Benchmarks for sphere
Run all benchmarks with::
import dipy.core as dipycore
dipycore.bench()
With Pytest, Run this benchmark with:
pytest -svv -c bench.ini /path/to/bench_sphere.py
"""
from __future__ import with_statement
import sys
import time
import dipy.core.sphere_stats as sphere_stats
import dipy.core.sphere as sphere
from matplotlib import pyplot as plt
mode = None
if len(sys.argv) > 1 and sys.argv[1] == '-s':
mode = "subdivide"
class Timer(object):
def __enter__(self):
self.__start = time.time()
def __exit__(self, type, value, traceback):
# Error handling here
self.__finish = time.time()
def duration_in_seconds(self):
return self.__finish - self.__start
def func_minimize_adhoc(init_hemisphere, num_iterations):
opt = sphere.disperse_charges(init_hemisphere, num_iterations)[0]
return opt.vertices
def func_minimize_scipy(init_pointset, num_iterations):
return sphere.disperse_charges_alt(init_pointset, num_iterations)
num_points = [20, 40, 60]
num_subdivide = [2, 3, 4]
def bench_disperse_charges_alt():
dpi = 72
figsize = (1920/dpi, 1080/dpi)
fig = plt.figure(num='Electrostatic repulsion methods benchmark',
figsize=figsize, dpi=dpi)
for (idx, subplot_index) in enumerate(['131', '132', '133']):
num_repetitions = 20
num_trials = 3
execution_time_adhoc = []
execution_time_scipy = []
minimum_adhoc = []
minimum_scipy = []
if mode == 'subdivide':
init_sphere = sphere.unit_octahedron.subdivide(num_subdivide[idx])
init_hemisphere = sphere.HemiSphere.from_sphere(init_sphere)
init_pointset = init_hemisphere.vertices
else:
init_pointset = sphere_stats.random_uniform_on_sphere(
num_points[idx])
init_hemisphere = sphere.HemiSphere(xyz=init_pointset)
print('num_points = {}'.format(init_pointset.shape[0]))
for j in range(num_trials):
print(' Iteration {}/{}'.format(j + 1, num_trials))
for num_iterations in range(12):
# The time of an iteration of disperse charges is much
# faster than an iteration of fmin_slsqp.
num_iterations_dipy = 20 * num_iterations
# Measure execution time for dipy.core.sphere.disperse_charges
timer = Timer()
with timer:
for i in range(num_repetitions):
opt = func_minimize_adhoc(init_hemisphere,
num_iterations_dipy)
execution_time_adhoc.append(timer.duration_in_seconds() /
num_repetitions)
minimum_adhoc.append(sphere._get_forces_alt(opt.ravel()))
# Measure execution time for
# dipy.core.sphere.disperse_charges_alt
timer = Timer()
with timer:
for i in range(num_repetitions):
opt = func_minimize_scipy(init_pointset, num_iterations)
execution_time_scipy.append(timer.duration_in_seconds() /
num_repetitions)
minimum_scipy.append(sphere._get_forces_alt(opt.ravel()))
ax = fig.add_subplot(subplot_index)
ax.plot(execution_time_adhoc, minimum_adhoc, 'r+',
label='DIPY original')
ax.plot(execution_time_scipy, minimum_scipy, 'g+',
label='SciPy-based')
ax.set_yscale('log')
plt.xlabel('Average execution time (s)')
plt.ylabel('Objective function value')
if mode == 'subdivide':
plt.title('Num subdiv: {}'.format(num_subdivide[idx]))
else:
plt.title('Num points: {}'.format(num_points[idx]))
plt.legend()
plt.show()
|
[
"jon.haitz.legarreta@gmail.com"
] |
jon.haitz.legarreta@gmail.com
|
48df0d26d34bda96ca1e48b42d964a4ae7afb4b3
|
abe67b1777755e053d7a739c37e7486f767bb89c
|
/PagerankPySpark.py
|
7cf58db958ad554901aec93af357608fbe70bfce
|
[] |
no_license
|
AlesundPE/PageRank
|
764ad42d860494cff9a83e6d07332b20685d388c
|
2b59a898ee83c12b7aafcc649412b6ccc5bbed94
|
refs/heads/main
| 2023-04-21T17:48:20.013331
| 2021-04-27T10:58:58
| 2021-04-27T10:58:58
| 362,079,609
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,232
|
py
|
def destReceive(data):
dests, PageRank = data[1]
contribution = []
for dest in dests:
contribution.append((dest,PageRank/len(dests)))
return contribution
import findspark
findspark.init("D:/spark-3.0.1-bin-hadoop2.7") # you need to specify the path to PySpark here.
from pyspark import SparkContext
import time
iteration = 5
total_time = []
sc = SparkContext(appName = "PageRank")
lines = sc.textFile("C:/Users/90381/Desktop/links") # you need to change the path to graph here
links = lines.flatMap(lambda x : x.split("\n")).map(lambda x : x.split()).map(lambda x : (x[0],x[1].split(","))).persist()
ranks = links.mapValues(lambda x : 1)
for i in range(iteration):
start = time.time()
ranks = links.join(ranks).partitionBy(2).flatMap(lambda x : destReceive(x)).reduceByKey(lambda x, y : x + y).mapValues(lambda x : 0.15 + 0.85*x) # you can change the number of partition here by modifing the parameter of partitionBy()
ranks.collect()
print(ranks.getNumPartitions())
end = time.time()
print("iteration {} takes {} seconds".format(i, end - start))
total_time.append(end-start)
print("Average is {}".format(sum(total_time)/len(total_time)) )
sc.stop()
|
[
"noreply@github.com"
] |
AlesundPE.noreply@github.com
|
bc41cc014ae80e7e855d634a8b71826cf8f32bab
|
6fb37fee016346120d4c14c4343516532304055a
|
/src/genie/libs/parser/iosxe/tests/test_show_flow.py
|
b8b395d32c1bed5e1602f1cb7df54ef910a695eb
|
[
"Apache-2.0"
] |
permissive
|
devbollinger/genieparser
|
011526ebbd747c6dcd767535ce4bd33167e15536
|
ad5ce7ba8f5153d1aeb9cffcfc4dde0871f3401c
|
refs/heads/master
| 2020-12-20T11:36:00.750128
| 2020-01-24T18:45:40
| 2020-01-24T18:45:40
| 236,061,155
| 0
| 0
|
Apache-2.0
| 2020-01-24T18:38:43
| 2020-01-24T18:38:42
| null |
UTF-8
|
Python
| false
| false
| 18,650
|
py
|
# Python
import unittest
from unittest.mock import Mock
# Metaparser
from genie.metaparser.util.exceptions import SchemaEmptyParserError,SchemaMissingKeyError
# ATS
from ats.topology import Device
from ats.topology import loader
# iosxe show_flow
from genie.libs.parser.iosxe.show_flow import (ShowFlowMonitor,
ShowFlowMonitorCache,
ShowFlowMonitorCacheRecord,
ShowFlowExporterStatistics)
# ==============================================================
# Unit test for 'show flow monitor {name} cache format table'
# ==============================================================
class TestShowFlowMonitor(unittest.TestCase):
'''Unit test for "show flow monitor {name} cache format table"
'''
device = Device(name='aDevice')
empty_output = {'execute.return_value': ''}
golden_parsed_output1 = {
'cache_type': 'Normal (Platform cache)',
'cache_size': 16,
'current_entries': 1,
'high_water_mark': 1,
'flows_added': 1,
'flows_aged': 0,
'ipv4_src_addr': {
'10.4.1.10': {
'ipv4_dst_addr': {
'10.4.10.1': {
'index': {
1: {
'trns_src_port': 0,
'trns_dst_port': 0,
'ip_tos': '0xC0',
'ip_port': 89,
'bytes_long': 100,
'pkts_long': 1,
},
2: {
'trns_src_port': 1,
'trns_dst_port': 1,
'ip_tos': '0xC0',
'ip_port': 89,
'bytes_long': 100,
'pkts_long': 1,
},
}
},
},
},
'10.4.1.11': {
'ipv4_dst_addr': {
'10.4.10.2': {
'index': {
1: {
'trns_src_port': 0,
'trns_dst_port': 0,
'ip_tos': '0xC0',
'ip_port': 89,
'bytes_long': 100,
'pkts_long': 1,
}
}
},
},
},
},
}
golden_output1 ={'execute.return_value':'''
Device#show flow monitor FLOW-MONITOR-1 cache format table
Cache type: Normal (Platform cache)
Cache size: 16
Current entries: 1
High Watermark: 1
Flows added: 1
Flows aged: 0
IPV4 SRC ADDR IPV4 DST ADDR TRNS SRC PORT TRNS DST PORT IP TOS IP PROT bytes long pkts long
=============== =============== ============= ============= ====== ======= ==================== ====================
10.4.1.10 10.4.10.1 0 0 0xC0 89 100 1
10.4.1.10 10.4.10.1 1 1 0xC0 89 100 1
10.4.1.11 10.4.10.2 0 0 0xC0 89 100 1
Device#
'''}
golden_parsed_output2 = {
'cache_type': 'Normal (Platform cache)',
'cache_size': 16,
'current_entries': 1,
'flows_added': 1,
'flows_aged': 0,
}
golden_output2 = {'execute.return_value': '''
Device#show flow monitor FLOW-MONITOR-1 cache format table
Cache type: Normal (Platform cache)
Cache size: 16
Current entries: 1
Flows added: 1
Flows aged: 0
There are no cache entries to display.
Device#
'''}
def test_empty(self):
self.maxDiff = None
self.device = Mock(**self.empty_output)
obj = ShowFlowMonitor(device=self.device)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse(name='FLOW-MONITOR-1')
def test_golden1(self):
self.maxDiff = None
self.device = Mock(**self.golden_output1)
obj = ShowFlowMonitor(device=self.device)
parsed_output = obj.parse(name='FLOW-MONITOR-1')
self.assertEqual(parsed_output, self.golden_parsed_output1)
def test_golden2(self):
self.maxDiff = None
self.device = Mock(**self.golden_output2)
obj = ShowFlowMonitor(device=self.device)
parsed_output = obj.parse(name='FLOW-MONITOR-1')
self.assertEqual(parsed_output, self.golden_parsed_output2)
class TestShowFlowMonitorCache(unittest.TestCase):
'''Unit test for "show flow monitor {name} cache"
'''
maxDiff = None
device = Device(name='aDevice')
empty_output = {'execute.return_value': ''}
golden_parsed_output = {
'cache_type': 'Normal (Platform cache)',
'cache_size': 200000,
'current_entries': 1,
'high_water_mark': 3,
'flows_added': 16,
'flows_aged': {
'total': 15,
'inactive_timeout': 15,
'inactive_timeout_secs': 15,
},
'entries': {
1: {
'ip_vrf_id_input': '0 (DEFAULT)',
'ipv4_src_addr': '192.168.189.254',
'ipv4_dst_addr': '192.168.189.253',
'intf_input': 'Null',
'intf_output': 'TenGigabitEthernet0/0/0.1003',
'pkts': 2,
},
2: {
'ip_vrf_id_input': '0 (DEFAULT)',
'ipv4_src_addr': '192.168.16.254',
'ipv4_dst_addr': '192.168.16.253',
'intf_input': 'Null',
'intf_output': 'TenGigabitEthernet0/0/0.1001',
'pkts': 3,
},
3: {
'ip_vrf_id_input': '0 (DEFAULT)',
'ipv4_src_addr': '192.168.229.254',
'ipv4_dst_addr': '192.168.229.253',
'intf_input': 'Null',
'intf_output': 'TenGigabitEthernet0/0/0.1002',
'pkts': 3,
},
},
}
golden_output ={'execute.return_value':'''
Device#show flow monitor mon_vrf_1 cache
Load for five secs: 3%/0%; one minute: 2%; five minutes: 5%
Time source is NTP, 16:04:38.706 UTC Wed Nov 6 2019
Cache type: Normal (Platform cache)
Cache size: 200000
Current entries: 1
High Watermark: 3
Flows added: 16
Flows aged: 15
- Inactive timeout ( 15 secs) 15
IP VRF ID INPUT IPV4 SRC ADDR IPV4 DST ADDR intf input intf output pkts
============================= =============== =============== ==================== ==================== ==========
0 (DEFAULT) 192.168.189.254 192.168.189.253 Null Te0/0/0.1003 2
0 (DEFAULT) 192.168.16.254 192.168.16.253 Null Te0/0/0.1001 3
0 (DEFAULT) 192.168.229.254 192.168.229.253 Null Te0/0/0.1002 3
Device#
'''}
def test_empty(self):
self.device = Mock(**self.empty_output)
obj = ShowFlowMonitorCache(device=self.device)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse(name='mon_vrf_1')
def test_golden(self):
self.device = Mock(**self.golden_output)
obj = ShowFlowMonitorCache(device=self.device)
parsed_output = obj.parse(name='mon_vrf_1')
self.assertEqual(parsed_output, self.golden_parsed_output)
class TestShowFlowMonitorCacheRecord(unittest.TestCase):
'''Unit test for "show flow monitor {name} cache format record"
'''
maxDiff = None
device = Device(name='aDevice')
empty_output = {'execute.return_value': ''}
golden_parsed_output = {
'cache_type': 'Normal (Platform cache)',
'cache_size': 200000,
'current_entries': 3,
'high_water_mark': 3,
'flows_added': 18,
'flows_aged': {
'total': 6,
'active_timeout': 0,
'active_timeout_secs': 100,
'inactive_timeout': 0,
'inactive_timeout_secs': 100,
'event_aged': 0,
'watermark_aged': 6,
'emergency_aged': 0,
},
'entries': {
1: {
'ip_vrf_id_input': '0 (DEFAULT)',
'ipv4_src_addr': '192.168.189.254',
'ipv4_dst_addr': '192.168.189.253',
'intf_input': 'Null',
'intf_output': 'TenGigabitEthernet0/0/0.1003',
'pkts': 3,
},
2: {
'ip_vrf_id_input': '0 (DEFAULT)',
'ipv4_src_addr': '192.168.16.254',
'ipv4_dst_addr': '192.168.16.253',
'intf_input': 'Null',
'intf_output': 'TenGigabitEthernet0/0/0.1001',
'pkts': 4,
},
3: {
'ip_vrf_id_input': '0 (DEFAULT)',
'ipv4_src_addr': '192.168.229.254',
'ipv4_dst_addr': '192.168.229.253',
'intf_input': 'Null',
'intf_output': 'TenGigabitEthernet0/0/0.1002',
'pkts': 4,
},
},
}
golden_output ={'execute.return_value':'''
Device#show flow monitor mon_vrf_1 cache format record
Load for five secs: 3%/0%; one minute: 2%; five minutes: 5%
Time source is NTP, 16:04:45.275 UTC Wed Nov 6 2019
Cache type: Normal (Platform cache)
Cache size: 200000
Current entries: 3
High Watermark: 3
Flows added: 18
Flows aged: 6
- Active timeout ( 100 secs) 0
- Inactive timeout ( 100 secs) 0
- Event aged 0
- Watermark aged 6
- Emergency aged 0
IP VRF ID INPUT: 0 (DEFAULT)
IPV4 SOURCE ADDRESS: 192.168.189.254
IPV4 DESTINATION ADDRESS: 192.168.189.253
interface input: Null
interface output: Te0/0/0.1003
counter packets: 3
IP VRF ID INPUT: 0 (DEFAULT)
IPV4 SOURCE ADDRESS: 192.168.16.254
IPV4 DESTINATION ADDRESS: 192.168.16.253
interface input: Null
interface output: Te0/0/0.1001
counter packets: 4
IP VRF ID INPUT: 0 (DEFAULT)
IPV4 SOURCE ADDRESS: 192.168.229.254
IPV4 DESTINATION ADDRESS: 192.168.229.253
interface input: Null
interface output: Te0/0/0.1002
counter packets: 4
Device#
'''}
def test_empty(self):
self.device = Mock(**self.empty_output)
obj = ShowFlowMonitorCacheRecord(device=self.device)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse(name='mon_vrf_1')
def test_golden(self):
self.device = Mock(**self.golden_output)
obj = ShowFlowMonitorCacheRecord(device=self.device)
parsed_output = obj.parse(name='mon_vrf_1')
self.assertEqual(parsed_output, self.golden_parsed_output)
class TestShowFlowExporterStatistics(unittest.TestCase):
""" Unit tests for:
* show flow exporter statistics
* show flow exporter {exporter} statistics
"""
device = Device(name="aDevice")
empty_output = {"execute.return_value": ""}
golden_output = {"execute.return_value": """
show flow exporter statistics
Flow Exporter test:
Packet send statistics (last cleared 00:10:17 ago):
Successfully sent: 6 (410 bytes)
Reason not given: 163 (7820 bytes)
No destination address: 421 (10423 bytes)
Client send statistics:
Client: Flow Monitor Test
Records added: 21
- sent: 8
- failed to send: 13
Bytes added: 1260
- sent: 145
- failed to send: 1115
"""}
golden_parsed_output = {
"flow_exporter": {
"test": {
"pkt_send_stats": {
"last_cleared": "00:10:17",
"successfully_sent": 6,
"successfully_sent_bytes": 410,
"reason_not_given": 163,
"reason_not_given_bytes": 7820,
"no_destination_address": 421,
"no_destination_address_bytes": 10423
},
"client_send_stats": {
"Flow Monitor Test": {
"records_added": {
"total": 21,
"sent": 8,
"failed": 13
},
"bytes_added": {
"total": 1260,
"sent": 145,
"failed": 1115
}
}
}
}
}
}
golden_output_exporter = {"execute.return_value": """
show flow exporter rest statistics
Flow Exporter rest:
Packet send statistics (last cleared 00:10:17 ago):
Successfully sent: 6 (410 bytes)
Reason not given: 163 (7820 bytes)
Client send statistics:
Client: Flow Monitor Test
Records added: 21
- sent: 8
- failed to send: 13
Bytes added: 1260
- sent: 145
- failed to send: 1115
"""}
golden_parsed_output_exporter = {
"flow_exporter": {
"rest": {
"pkt_send_stats": {
"last_cleared": "00:10:17",
"successfully_sent": 6,
"successfully_sent_bytes": 410,
"reason_not_given": 163,
"reason_not_given_bytes": 7820
},
"client_send_stats": {
"Flow Monitor Test": {
"records_added": {
"total": 21,
"sent": 8,
"failed": 13
},
"bytes_added": {
"total": 1260,
"sent": 145,
"failed": 1115
}
}
}
}
}
}
golden_output_partial = {"execute.return_value": """
flow exporter statistics
Flow Exporter test:
Packet send statistics (last cleared 00:12:12 ago):
Successfully sent: 0 (0 bytes)
Client send statistics:
Client: Flow Monitor Test
Records added: 0
Bytes added: 0
"""}
golden_parsed_output_partial = {
"flow_exporter": {
"test": {
"pkt_send_stats": {
"last_cleared": "00:12:12",
"successfully_sent": 0,
"successfully_sent_bytes": 0
},
"client_send_stats": {
"Flow Monitor Test": {
"records_added": {
"total": 0
},
"bytes_added": {
"total": 0
}
}
}
}
}
}
def test_empty(self):
self.maxDiff = None
self.device = Mock(**self.empty_output)
obj = ShowFlowExporterStatistics(device=self.device)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse()
def test_golden(self):
self.maxDiff = None
self.device = Mock(**self.golden_output)
obj = ShowFlowExporterStatistics(device=self.device)
parsed_output = obj.parse()
self.assertEqual(parsed_output, self.golden_parsed_output)
def test_golden_exporter(self):
self.maxDiff = None
self.device = Mock(**self.golden_output_exporter)
obj = ShowFlowExporterStatistics(device=self.device)
parsed_output = obj.parse(exporter='rest')
self.assertEqual(parsed_output, self.golden_parsed_output_exporter)
def test_golden_partial(self):
self.maxDiff = None
self.device = Mock(**self.golden_output_partial)
obj = ShowFlowExporterStatistics(device=self.device)
parsed_output = obj.parse()
self.assertEqual(parsed_output, self.golden_parsed_output_partial)
if __name__ == '__main__':
unittest.main()
|
[
"karmoham@cisco.com"
] |
karmoham@cisco.com
|
4d4106f5f500ab131f597624202fcac34758728c
|
1cc158417f5b8e54cacbb1789740907ff7004a94
|
/SB52/DictionaryPharser.py
|
869d6343dd76f8b58874d02581e419469ece6d43
|
[] |
no_license
|
SoftwareBound/Project-II
|
74a142f63ad5bfc418126b1496abb932512c14cc
|
6c59200efc7515f10b8b9551b7d4da18028359ee
|
refs/heads/master
| 2020-04-14T05:42:34.314230
| 2019-01-01T02:48:54
| 2019-01-01T02:48:54
| 163,666,795
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,482
|
py
|
import Dictionaries
import pprint
pp = pprint.PrettyPrinter(indent=4, width=5)
class DictionaryPharser():
def __init__(self):
self._EscapedUnicodeEmoticonsDic = None
self._NegativeStopWordsList = None
self._NickNamesList = None
self._NRC = None
self._SlangDic = None
self._StopWordsList = None
def EscapedUnicodeEmoticonsDicBuilder(self):
EscapedUnicodeEmoticonsDic = Dictionaries.Dictionary("EscapedUnicodeEmoticonsDic")
EscapedUnicodeEmoticonsDic.preparingTheDico(
EscapedUnicodeEmoticonsDic.getTxt(
EscapedUnicodeEmoticonsDic.getTxtName()))
self._EscapedUnicodeEmoticonsDic = EscapedUnicodeEmoticonsDic.dico
#print(EscapedUnicodeEmoticonsDic.txtName + " was created")
def NegativeStopWordsListBuilder(self):
NegativeStopWordsList = Dictionaries.listDictionary("NegativeStopWordsList")
NegativeStopWordsList.preparingTheListDico(
NegativeStopWordsList.getTxt(
NegativeStopWordsList.getTxtName()))
self._NegativeStopWordsList = NegativeStopWordsList.listDico
#print(NegativeStopWordsList.txtName + " was created")
def NickNamesListBuilder(self):
NickNamesList = Dictionaries.nickNamesDictionary("NickNamesList")
NickNamesList.preparingTheNickDico(
NickNamesList.getTxt(
NickNamesList.getTxtName()))
self._NickNamesList = NickNamesList.nickDico
#print(NickNamesList.txtName + " was created")
def NRCBuilder(self):
NRC = Dictionaries.nrcDictionary("NRC")
NRC.preparingTheNrcDico(
NRC.getTxt(
NRC.getTxtName()))
self._NRC = NRC.nrcDico
self._NRC = NRC.Catagorise(self._NRC)
#pp.pprint(self._NRC)
#print(NRC.txtName + " was created")
def SlangDicBuilder(self):
SlangDic = Dictionaries.Dictionary("SlangDic")
SlangDic.preparingTheDico(
SlangDic.getTxt(
SlangDic.getTxtName()))
self._SlangDic = SlangDic.dico
#print(SlangDic.txtName + " was created")
def StopWordsListBuilder(self):
StopWordsList = Dictionaries.listDictionary("StopWordsList")
StopWordsList.preparingTheListDico(
StopWordsList.getTxt(
StopWordsList.getTxtName()))
self._StopWordsList = StopWordsList.listDico
#print(StopWordsList.txtName + " was created")
def InitializeDictsBuilder(self):
self.EscapedUnicodeEmoticonsDicBuilder()
self.NegativeStopWordsListBuilder()
self.NickNamesListBuilder()
self.NRCBuilder()
self.SlangDicBuilder()
self.StopWordsListBuilder()
|
[
"noreply@github.com"
] |
SoftwareBound.noreply@github.com
|
2621d432474e7927bf4834d87da959177a7aea3d
|
3abdb4694b18c97cd3954e4b51a7390540c6be2d
|
/app.py
|
ed4e7cd36a5fe357195070f2a3c1c039eb713c8b
|
[] |
no_license
|
Zinia96/Flask_Restful_Api
|
09d8bb5bd0c0075400503469bb1e4bd4dda1d68e
|
c9badc64b4560f1228c677c992a45fc18ec8dcee
|
refs/heads/master
| 2023-03-13T23:01:19.526600
| 2020-04-14T15:52:17
| 2020-04-14T15:52:17
| 255,183,754
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 539
|
py
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from models import db, ma
from config import Config
app = Flask(__name__)
def create_app():
#app = Flask(__name__, instance_relative_config=False)
app.config.from_object(Config)
db.init_app(app)
ma.init_app(app)
from routes import api_route
app.register_blueprint(api_route, url_prefix='/api')
with app.app_context():
# Create tables for our models
# from models.base import BaseModel
db.create_all()
return app
|
[
"ziniajahan96@gmail.com"
] |
ziniajahan96@gmail.com
|
fba6f63119d80358ed974127cbcd8fbca287a8dc
|
4fc8fe478a6a5b8b82a7f4df9e041d9a4b04e7b8
|
/tariffs/admin.py
|
0bd24d93c5400f840686f077bbed4f864a028e97
|
[] |
no_license
|
vadosl/thesame
|
92f5f4debc5e645244e79a5e2713b7c3bd791585
|
ec3d54f2cf6204c8761536ac9813bc18dfae98ea
|
refs/heads/master
| 2020-12-28T09:31:29.317622
| 2014-06-03T04:26:07
| 2014-06-03T04:26:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 298
|
py
|
# -*- coding:utf-8 -*-
from django.contrib import admin
from .models import Tariff
class TariffAdmin(admin.ModelAdmin):
list_display = ('title', 'slug', 'is_best')
search_fields = ['title', 'slug']
prepopulated_fields = {"slug": ("title",)}
admin.site.register(Tariff, TariffAdmin)
|
[
"yesnik@yandex.ru"
] |
yesnik@yandex.ru
|
65e8f78f3a5867d100fe988a915c1ce5af44854a
|
71db5bd517d3cd01a7d529d54eafc6a75808a845
|
/venv/Scripts/pip3.6-script.py
|
e71a440a94a12161a5771d1da0dc6b8f1d29f1df
|
[] |
no_license
|
rahulkhanna2/djangoProject
|
c1d3c20f11c55ba5b6fe40e6028067f0fa4b90e1
|
77a827a5ff655b80cf3a012ac47977eef9085250
|
refs/heads/master
| 2021-07-07T06:00:39.113270
| 2018-10-29T11:04:19
| 2018-10-29T11:04:19
| 155,196,748
| 0
| 1
| null | 2020-07-22T22:59:16
| 2018-10-29T10:58:30
|
Python
|
UTF-8
|
Python
| false
| false
| 431
|
py
|
#!C:\Users\twinkle.arora\PycharmProjects\djangoproject\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3.6'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3.6')()
)
|
[
"Twinkle.Arora@igglobal.com"
] |
Twinkle.Arora@igglobal.com
|
710c732859dc60851588862c31a1d5d9204b7e18
|
8773daa6983d12a9675aa1f22327b38f567efd12
|
/cos-python-sdk-v4.0.0.24/test.py
|
793651415433e506505ef3f3fc3b39ebe70c207a
|
[] |
no_license
|
StorageLab/cos-python-sdk-ut
|
31e4300557d307facf85028a523c2ecb4bb18fcf
|
7158813cdc57a07a54f3d889770a723d75d75832
|
refs/heads/master
| 2021-09-13T05:26:27.120856
| 2018-04-25T11:35:02
| 2018-04-25T11:35:02
| 119,129,698
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,528
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from qcloud_cos import CosClient
from qcloud_cos import UploadFileRequest
from qcloud_cos import UploadFileFromBufferRequest
from qcloud_cos import UpdateFileRequest
from qcloud_cos import UpdateFolderRequest
from qcloud_cos import DelFileRequest
from qcloud_cos import DelFolderRequest
from qcloud_cos import CreateFolderRequest
from qcloud_cos import StatFileRequest
from qcloud_cos import StatFolderRequest
from qcloud_cos import ListFolderRequest
from qcloud_cos import DownloadFileRequest
from qcloud_cos import DownloadObjectRequest
from qcloud_cos import MoveFileRequest
import logging
import sys
import os
logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
logger = logging.getLogger(__name__)
secret_id = os.environ["SECRET_ID"].decode('utf-8')
secret_key = os.environ["SECRET_KEY"].decode('utf-8')
appid = int(os.environ["APPID"])
region = os.environ["REGION"].decode('utf-8')
bucket = os.environ["BUCKET"].decode('utf-8')
cos_client = CosClient(appid, secret_id, secret_key, region)
def setUp():
print "start test..."
def tearDown():
print "function teardown"
def test_upload_file_default():
"""上传默认不覆盖"""
with open('local_file_1.txt', 'w') as f:
f.write("hello world1")
request = UploadFileRequest(bucket, u'/sample_file.txt', u'local_file_1.txt')
upload_file_ret = cos_client.upload_file(request)
assert upload_file_ret['code'] == 0
def test_upload_file_insert_only():
"""上传设置insert_only为0覆盖"""
with open('local_file_2.txt', 'w') as f:
f.write("hello world2")
request = UploadFileRequest(bucket, u'/sample_file.txt', u'local_file_2.txt')
request.set_insert_only(0) # 设置允许覆盖
upload_file_ret = cos_client.upload_file(request)
assert upload_file_ret['code'] == 0
def test_upload_file_from_buffer_insert_only():
"""从内存上传文件"""
data = "i am from buffer"
request = UploadFileFromBufferRequest(bucket, u'/sample_file.txt', data)
request.set_insert_only(0) # 设置允许覆盖
upload_file_ret = cos_client.upload_file_from_buffer(request)
assert upload_file_ret['code'] == 0
def test_uploda_file_verify_sha1():
"""上传文件验证sha1"""
request = UploadFileRequest(bucket, u'/sample_file.txt', u'local_file_2.txt')
request.set_insert_only(0) # 设置允许覆盖
request.set_verify_sha1(True)
upload_file_ret = cos_client.upload_file(request)
assert upload_file_ret['code'] == 0
def test_download_file_local():
"""下载文件到本地"""
request = DownloadFileRequest(bucket, u'/sample_file.txt', u'local_file_3.txt')
download_file_ret = cos_client.download_file(request)
assert download_file_ret['code'] == 0
def test_download_object():
"""下载文件到内存"""
request = DownloadObjectRequest(bucket, u'/sample_file.txt')
fp = cos_client.download_object(request)
data = fp.read()
assert data
def test_get_obj_attr():
"""获取文件属性"""
request = StatFileRequest(bucket, u'/sample_file.txt')
stat_file_ret = cos_client.stat_file(request)
assert stat_file_ret['code'] == 0
def test_update_obj_attr():
"""更新文件属性"""
request = UpdateFileRequest(bucket, u'/sample_file.txt')
request.set_biz_attr(u'this is demo') # 设置文件biz_attr属性
request.set_authority(u'eWRPrivate') # 设置文件的权限
request.set_cache_control(u'cache_xxx') # 设置Cache-Control
request.set_content_type(u'application/text') # 设置Content-Type
request.set_content_disposition(u'ccccxxx.txt') # 设置Content-Disposition
request.set_content_language(u'english') # 设置Content-Language
request.set_x_cos_meta(u'x-cos-meta-xxx', u'xxx') # 设置自定义的x-cos-meta-属性
request.set_x_cos_meta(u'x-cos-meta-yyy', u'yyy') # 设置自定义的x-cos-meta-属性
update_file_ret = cos_client.update_file(request)
assert update_file_ret['code'] == 0
def test_move_file():
"""移动文件"""
request = MoveFileRequest(bucket, u'/sample_file.txt', u'/sample_file_move.txt')
move_ret = cos_client.move_file(request)
assert move_ret['code'] == 0
def test_create_folder():
"""生成目录, 目录名为sample_folder"""
request = CreateFolderRequest(bucket, u'/sample_folder/')
create_folder_ret = cos_client.create_folder(request)
assert create_folder_ret['code'] == 0
def test_update_folder_biz_attr():
"""更新目录的biz_attr属性"""
request = UpdateFolderRequest(bucket, u'/sample_folder/', u'this is a test folder')
update_folder_ret = cos_client.update_folder(request)
assert update_folder_ret['code'] == 0
def test_get_folder_biz_attr():
"""获取目录的属性"""
request = StatFolderRequest(bucket, u'/sample_folder/')
stat_folder_ret = cos_client.stat_folder(request)
assert stat_folder_ret['code'] == 0
def test_list_folder():
"""list目录, 获取目录下的成员"""
request = ListFolderRequest(bucket, u'/sample_folder/')
list_folder_ret = cos_client.list_folder(request)
assert list_folder_ret['code'] == 0
def test_list_folder_use_delimiter():
    """List a folder using a prefix filter plus a '/' delimiter."""
    req = ListFolderRequest(bucket, u'/sample_folder/')
    # Restrict the listing to entries starting with 'test', grouped by '/'.
    req.set_prefix(u'test')
    req.set_delimiter(u'/')
    ret = cos_client.list_folder(req)
    assert ret['code'] == 0
def test_delete_folder():
    """Delete the sample folder and expect success (code 0)."""
    req = DelFolderRequest(bucket, u'/sample_folder/')
    ret = cos_client.del_folder(req)
    assert ret['code'] == 0
def test_delete_file():
    """Delete the previously moved file and expect success (code 0)."""
    req = DelFileRequest(bucket, u'/sample_file_move.txt')
    ret = cos_client.del_file(req)
    assert ret['code'] == 0
def test_upload_file_chinese():
    """Upload a file whose remote path contains Chinese characters."""
    # Create the local source file to be uploaded.
    with open('local_file_1.txt', 'w') as fh:
        fh.write("hello world1")
    req = UploadFileRequest(bucket, u'/中文.txt', u'local_file_1.txt')
    ret = cos_client.upload_file(req)
    assert ret['code'] == 0
def test_download_file_chinese():
    """Download a file whose remote path contains Chinese characters."""
    req = DownloadFileRequest(bucket, u'/中文.txt', u'local_file_3.txt')
    ret = cos_client.download_file(req)
    assert ret['code'] == 0
if __name__ == '__main__':
    # Run the default upload test when this module is executed as a script.
    test_upload_file_default()
|
[
"dutie123@qq.com"
] |
dutie123@qq.com
|
a39c9a964d0c9bf4f46583a60e85ab9ac89b78a8
|
a8459d984202c84c95348966dc01e75a09dad9f3
|
/tfplus/data/synset.py
|
8cef9c9992cf422ac92decba821d951d56405d13
|
[
"MIT"
] |
permissive
|
ziyu-zhang/tfplus
|
9a538f4dd5dd0f074efb3ad8b44703fbbec74b7a
|
0629b955c2f461b5ad75cfde81547acccb476e45
|
refs/heads/master
| 2020-12-14T18:44:51.229344
| 2016-08-03T17:22:42
| 2016-08-03T17:22:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 39,076
|
py
|
synset = [
"n01440764 tench, Tinca tinca",
"n01443537 goldfish, Carassius auratus",
"n01484850 great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias",
"n01491361 tiger shark, Galeocerdo cuvieri",
"n01494475 hammerhead, hammerhead shark",
"n01496331 electric ray, crampfish, numbfish, torpedo",
"n01498041 stingray",
"n01514668 cock",
"n01514859 hen",
"n01518878 ostrich, Struthio camelus",
"n01530575 brambling, Fringilla montifringilla",
"n01531178 goldfinch, Carduelis carduelis",
"n01532829 house finch, linnet, Carpodacus mexicanus",
"n01534433 junco, snowbird",
"n01537544 indigo bunting, indigo finch, indigo bird, Passerina cyanea",
"n01558993 robin, American robin, Turdus migratorius",
"n01560419 bulbul",
"n01580077 jay",
"n01582220 magpie",
"n01592084 chickadee",
"n01601694 water ouzel, dipper",
"n01608432 kite",
"n01614925 bald eagle, American eagle, Haliaeetus leucocephalus",
"n01616318 vulture",
"n01622779 great grey owl, great gray owl, Strix nebulosa",
"n01629819 European fire salamander, Salamandra salamandra",
"n01630670 common newt, Triturus vulgaris",
"n01631663 eft",
"n01632458 spotted salamander, Ambystoma maculatum",
"n01632777 axolotl, mud puppy, Ambystoma mexicanum",
"n01641577 bullfrog, Rana catesbeiana",
"n01644373 tree frog, tree-frog",
"n01644900 tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui",
"n01664065 loggerhead, loggerhead turtle, Caretta caretta",
"n01665541 leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea",
"n01667114 mud turtle",
"n01667778 terrapin",
"n01669191 box turtle, box tortoise",
"n01675722 banded gecko",
"n01677366 common iguana, iguana, Iguana iguana",
"n01682714 American chameleon, anole, Anolis carolinensis",
"n01685808 whiptail, whiptail lizard",
"n01687978 agama",
"n01688243 frilled lizard, Chlamydosaurus kingi",
"n01689811 alligator lizard",
"n01692333 Gila monster, Heloderma suspectum",
"n01693334 green lizard, Lacerta viridis",
"n01694178 African chameleon, Chamaeleo chamaeleon",
"n01695060 Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus komodoensis",
"n01697457 African crocodile, Nile crocodile, Crocodylus niloticus",
"n01698640 American alligator, Alligator mississipiensis",
"n01704323 triceratops",
"n01728572 thunder snake, worm snake, Carphophis amoenus",
"n01728920 ringneck snake, ring-necked snake, ring snake",
"n01729322 hognose snake, puff adder, sand viper",
"n01729977 green snake, grass snake",
"n01734418 king snake, kingsnake",
"n01735189 garter snake, grass snake",
"n01737021 water snake",
"n01739381 vine snake",
"n01740131 night snake, Hypsiglena torquata",
"n01742172 boa constrictor, Constrictor constrictor",
"n01744401 rock python, rock snake, Python sebae",
"n01748264 Indian cobra, Naja naja",
"n01749939 green mamba",
"n01751748 sea snake",
"n01753488 horned viper, cerastes, sand viper, horned asp, Cerastes cornutus",
"n01755581 diamondback, diamondback rattlesnake, Crotalus adamanteus",
"n01756291 sidewinder, horned rattlesnake, Crotalus cerastes",
"n01768244 trilobite",
"n01770081 harvestman, daddy longlegs, Phalangium opilio",
"n01770393 scorpion",
"n01773157 black and gold garden spider, Argiope aurantia",
"n01773549 barn spider, Araneus cavaticus",
"n01773797 garden spider, Aranea diademata",
"n01774384 black widow, Latrodectus mactans",
"n01774750 tarantula",
"n01775062 wolf spider, hunting spider",
"n01776313 tick",
"n01784675 centipede",
"n01795545 black grouse",
"n01796340 ptarmigan",
"n01797886 ruffed grouse, partridge, Bonasa umbellus",
"n01798484 prairie chicken, prairie grouse, prairie fowl",
"n01806143 peacock",
"n01806567 quail",
"n01807496 partridge",
"n01817953 African grey, African gray, Psittacus erithacus",
"n01818515 macaw",
"n01819313 sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita",
"n01820546 lorikeet",
"n01824575 coucal",
"n01828970 bee eater",
"n01829413 hornbill",
"n01833805 hummingbird",
"n01843065 jacamar",
"n01843383 toucan",
"n01847000 drake",
"n01855032 red-breasted merganser, Mergus serrator",
"n01855672 goose",
"n01860187 black swan, Cygnus atratus",
"n01871265 tusker",
"n01872401 echidna, spiny anteater, anteater",
"n01873310 platypus, duckbill, duckbilled platypus, duck-billed platypus, Ornithorhynchus anatinus",
"n01877812 wallaby, brush kangaroo",
"n01882714 koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus",
"n01883070 wombat",
"n01910747 jellyfish",
"n01914609 sea anemone, anemone",
"n01917289 brain coral",
"n01924916 flatworm, platyhelminth",
"n01930112 nematode, nematode worm, roundworm",
"n01943899 conch",
"n01944390 snail",
"n01945685 slug",
"n01950731 sea slug, nudibranch",
"n01955084 chiton, coat-of-mail shell, sea cradle, polyplacophore",
"n01968897 chambered nautilus, pearly nautilus, nautilus",
"n01978287 Dungeness crab, Cancer magister",
"n01978455 rock crab, Cancer irroratus",
"n01980166 fiddler crab",
"n01981276 king crab, Alaska crab, Alaskan king crab, Alaska king crab, Paralithodes camtschatica",
"n01983481 American lobster, Northern lobster, Maine lobster, Homarus americanus",
"n01984695 spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish",
"n01985128 crayfish, crawfish, crawdad, crawdaddy",
"n01986214 hermit crab",
"n01990800 isopod",
"n02002556 white stork, Ciconia ciconia",
"n02002724 black stork, Ciconia nigra",
"n02006656 spoonbill",
"n02007558 flamingo",
"n02009229 little blue heron, Egretta caerulea",
"n02009912 American egret, great white heron, Egretta albus",
"n02011460 bittern",
"n02012849 crane",
"n02013706 limpkin, Aramus pictus",
"n02017213 European gallinule, Porphyrio porphyrio",
"n02018207 American coot, marsh hen, mud hen, water hen, Fulica americana",
"n02018795 bustard",
"n02025239 ruddy turnstone, Arenaria interpres",
"n02027492 red-backed sandpiper, dunlin, Erolia alpina",
"n02028035 redshank, Tringa totanus",
"n02033041 dowitcher",
"n02037110 oystercatcher, oyster catcher",
"n02051845 pelican",
"n02056570 king penguin, Aptenodytes patagonica",
"n02058221 albatross, mollymawk",
"n02066245 grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius robustus",
"n02071294 killer whale, killer, orca, grampus, sea wolf, Orcinus orca",
"n02074367 dugong, Dugong dugon",
"n02077923 sea lion",
"n02085620 Chihuahua",
"n02085782 Japanese spaniel",
"n02085936 Maltese dog, Maltese terrier, Maltese",
"n02086079 Pekinese, Pekingese, Peke",
"n02086240 Shih-Tzu",
"n02086646 Blenheim spaniel",
"n02086910 papillon",
"n02087046 toy terrier",
"n02087394 Rhodesian ridgeback",
"n02088094 Afghan hound, Afghan",
"n02088238 basset, basset hound",
"n02088364 beagle",
"n02088466 bloodhound, sleuthhound",
"n02088632 bluetick",
"n02089078 black-and-tan coonhound",
"n02089867 Walker hound, Walker foxhound",
"n02089973 English foxhound",
"n02090379 redbone",
"n02090622 borzoi, Russian wolfhound",
"n02090721 Irish wolfhound",
"n02091032 Italian greyhound",
"n02091134 whippet",
"n02091244 Ibizan hound, Ibizan Podenco",
"n02091467 Norwegian elkhound, elkhound",
"n02091635 otterhound, otter hound",
"n02091831 Saluki, gazelle hound",
"n02092002 Scottish deerhound, deerhound",
"n02092339 Weimaraner",
"n02093256 Staffordshire bullterrier, Staffordshire bull terrier",
"n02093428 American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier",
"n02093647 Bedlington terrier",
"n02093754 Border terrier",
"n02093859 Kerry blue terrier",
"n02093991 Irish terrier",
"n02094114 Norfolk terrier",
"n02094258 Norwich terrier",
"n02094433 Yorkshire terrier",
"n02095314 wire-haired fox terrier",
"n02095570 Lakeland terrier",
"n02095889 Sealyham terrier, Sealyham",
"n02096051 Airedale, Airedale terrier",
"n02096177 cairn, cairn terrier",
"n02096294 Australian terrier",
"n02096437 Dandie Dinmont, Dandie Dinmont terrier",
"n02096585 Boston bull, Boston terrier",
"n02097047 miniature schnauzer",
"n02097130 giant schnauzer",
"n02097209 standard schnauzer",
"n02097298 Scotch terrier, Scottish terrier, Scottie",
"n02097474 Tibetan terrier, chrysanthemum dog",
"n02097658 silky terrier, Sydney silky",
"n02098105 soft-coated wheaten terrier",
"n02098286 West Highland white terrier",
"n02098413 Lhasa, Lhasa apso",
"n02099267 flat-coated retriever",
"n02099429 curly-coated retriever",
"n02099601 golden retriever",
"n02099712 Labrador retriever",
"n02099849 Chesapeake Bay retriever",
"n02100236 German short-haired pointer",
"n02100583 vizsla, Hungarian pointer",
"n02100735 English setter",
"n02100877 Irish setter, red setter",
"n02101006 Gordon setter",
"n02101388 Brittany spaniel",
"n02101556 clumber, clumber spaniel",
"n02102040 English springer, English springer spaniel",
"n02102177 Welsh springer spaniel",
"n02102318 cocker spaniel, English cocker spaniel, cocker",
"n02102480 Sussex spaniel",
"n02102973 Irish water spaniel",
"n02104029 kuvasz",
"n02104365 schipperke",
"n02105056 groenendael",
"n02105162 malinois",
"n02105251 briard",
"n02105412 kelpie",
"n02105505 komondor",
"n02105641 Old English sheepdog, bobtail",
"n02105855 Shetland sheepdog, Shetland sheep dog, Shetland",
"n02106030 collie",
"n02106166 Border collie",
"n02106382 Bouvier des Flandres, Bouviers des Flandres",
"n02106550 Rottweiler",
"n02106662 German shepherd, German shepherd dog, German police dog, alsatian",
"n02107142 Doberman, Doberman pinscher",
"n02107312 miniature pinscher",
"n02107574 Greater Swiss Mountain dog",
"n02107683 Bernese mountain dog",
"n02107908 Appenzeller",
"n02108000 EntleBucher",
"n02108089 boxer",
"n02108422 bull mastiff",
"n02108551 Tibetan mastiff",
"n02108915 French bulldog",
"n02109047 Great Dane",
"n02109525 Saint Bernard, St Bernard",
"n02109961 Eskimo dog, husky",
"n02110063 malamute, malemute, Alaskan malamute",
"n02110185 Siberian husky",
"n02110341 dalmatian, coach dog, carriage dog",
"n02110627 affenpinscher, monkey pinscher, monkey dog",
"n02110806 basenji",
"n02110958 pug, pug-dog",
"n02111129 Leonberg",
"n02111277 Newfoundland, Newfoundland dog",
"n02111500 Great Pyrenees",
"n02111889 Samoyed, Samoyede",
"n02112018 Pomeranian",
"n02112137 chow, chow chow",
"n02112350 keeshond",
"n02112706 Brabancon griffon",
"n02113023 Pembroke, Pembroke Welsh corgi",
"n02113186 Cardigan, Cardigan Welsh corgi",
"n02113624 toy poodle",
"n02113712 miniature poodle",
"n02113799 standard poodle",
"n02113978 Mexican hairless",
"n02114367 timber wolf, grey wolf, gray wolf, Canis lupus",
"n02114548 white wolf, Arctic wolf, Canis lupus tundrarum",
"n02114712 red wolf, maned wolf, Canis rufus, Canis niger",
"n02114855 coyote, prairie wolf, brush wolf, Canis latrans",
"n02115641 dingo, warrigal, warragal, Canis dingo",
"n02115913 dhole, Cuon alpinus",
"n02116738 African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus",
"n02117135 hyena, hyaena",
"n02119022 red fox, Vulpes vulpes",
"n02119789 kit fox, Vulpes macrotis",
"n02120079 Arctic fox, white fox, Alopex lagopus",
"n02120505 grey fox, gray fox, Urocyon cinereoargenteus",
"n02123045 tabby, tabby cat",
"n02123159 tiger cat",
"n02123394 Persian cat",
"n02123597 Siamese cat, Siamese",
"n02124075 Egyptian cat",
"n02125311 cougar, puma, catamount, mountain lion, painter, panther, Felis concolor",
"n02127052 lynx, catamount",
"n02128385 leopard, Panthera pardus",
"n02128757 snow leopard, ounce, Panthera uncia",
"n02128925 jaguar, panther, Panthera onca, Felis onca",
"n02129165 lion, king of beasts, Panthera leo",
"n02129604 tiger, Panthera tigris",
"n02130308 cheetah, chetah, Acinonyx jubatus",
"n02132136 brown bear, bruin, Ursus arctos",
"n02133161 American black bear, black bear, Ursus americanus, Euarctos americanus",
"n02134084 ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus",
"n02134418 sloth bear, Melursus ursinus, Ursus ursinus",
"n02137549 mongoose",
"n02138441 meerkat, mierkat",
"n02165105 tiger beetle",
"n02165456 ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle",
"n02167151 ground beetle, carabid beetle",
"n02168699 long-horned beetle, longicorn, longicorn beetle",
"n02169497 leaf beetle, chrysomelid",
"n02172182 dung beetle",
"n02174001 rhinoceros beetle",
"n02177972 weevil",
"n02190166 fly",
"n02206856 bee",
"n02219486 ant, emmet, pismire",
"n02226429 grasshopper, hopper",
"n02229544 cricket",
"n02231487 walking stick, walkingstick, stick insect",
"n02233338 cockroach, roach",
"n02236044 mantis, mantid",
"n02256656 cicada, cicala",
"n02259212 leafhopper",
"n02264363 lacewing, lacewing fly",
"n02268443 dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk",
"n02268853 damselfly",
"n02276258 admiral",
"n02277742 ringlet, ringlet butterfly",
"n02279972 monarch, monarch butterfly, milkweed butterfly, Danaus plexippus",
"n02280649 cabbage butterfly",
"n02281406 sulphur butterfly, sulfur butterfly",
"n02281787 lycaenid, lycaenid butterfly",
"n02317335 starfish, sea star",
"n02319095 sea urchin",
"n02321529 sea cucumber, holothurian",
"n02325366 wood rabbit, cottontail, cottontail rabbit",
"n02326432 hare",
"n02328150 Angora, Angora rabbit",
"n02342885 hamster",
"n02346627 porcupine, hedgehog",
"n02356798 fox squirrel, eastern fox squirrel, Sciurus niger",
"n02361337 marmot",
"n02363005 beaver",
"n02364673 guinea pig, Cavia cobaya",
"n02389026 sorrel",
"n02391049 zebra",
"n02395406 hog, pig, grunter, squealer, Sus scrofa",
"n02396427 wild boar, boar, Sus scrofa",
"n02397096 warthog",
"n02398521 hippopotamus, hippo, river horse, Hippopotamus amphibius",
"n02403003 ox",
"n02408429 water buffalo, water ox, Asiatic buffalo, Bubalus bubalis",
"n02410509 bison",
"n02412080 ram, tup",
"n02415577 bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis",
"n02417914 ibex, Capra ibex",
"n02422106 hartebeest",
"n02422699 impala, Aepyceros melampus",
"n02423022 gazelle",
"n02437312 Arabian camel, dromedary, Camelus dromedarius",
"n02437616 llama",
"n02441942 weasel",
"n02442845 mink",
"n02443114 polecat, fitch, foulmart, foumart, Mustela putorius",
"n02443484 black-footed ferret, ferret, Mustela nigripes",
"n02444819 otter",
"n02445715 skunk, polecat, wood pussy",
"n02447366 badger",
"n02454379 armadillo",
"n02457408 three-toed sloth, ai, Bradypus tridactylus",
"n02480495 orangutan, orang, orangutang, Pongo pygmaeus",
"n02480855 gorilla, Gorilla gorilla",
"n02481823 chimpanzee, chimp, Pan troglodytes",
"n02483362 gibbon, Hylobates lar",
"n02483708 siamang, Hylobates syndactylus, Symphalangus syndactylus",
"n02484975 guenon, guenon monkey",
"n02486261 patas, hussar monkey, Erythrocebus patas",
"n02486410 baboon",
"n02487347 macaque",
"n02488291 langur",
"n02488702 colobus, colobus monkey",
"n02489166 proboscis monkey, Nasalis larvatus",
"n02490219 marmoset",
"n02492035 capuchin, ringtail, Cebus capucinus",
"n02492660 howler monkey, howler",
"n02493509 titi, titi monkey",
"n02493793 spider monkey, Ateles geoffroyi",
"n02494079 squirrel monkey, Saimiri sciureus",
"n02497673 Madagascar cat, ring-tailed lemur, Lemur catta",
"n02500267 indri, indris, Indri indri, Indri brevicaudatus",
"n02504013 Indian elephant, Elephas maximus",
"n02504458 African elephant, Loxodonta africana",
"n02509815 lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens",
"n02510455 giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca",
"n02514041 barracouta, snoek",
"n02526121 eel",
"n02536864 coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus kisutch",
"n02606052 rock beauty, Holocanthus tricolor",
"n02607072 anemone fish",
"n02640242 sturgeon",
"n02641379 gar, garfish, garpike, billfish, Lepisosteus osseus",
"n02643566 lionfish",
"n02655020 puffer, pufferfish, blowfish, globefish",
"n02666196 abacus",
"n02667093 abaya",
"n02669723 academic gown, academic robe, judge's robe",
"n02672831 accordion, piano accordion, squeeze box",
"n02676566 acoustic guitar",
"n02687172 aircraft carrier, carrier, flattop, attack aircraft carrier",
"n02690373 airliner",
"n02692877 airship, dirigible",
"n02699494 altar",
"n02701002 ambulance",
"n02704792 amphibian, amphibious vehicle",
"n02708093 analog clock",
"n02727426 apiary, bee house",
"n02730930 apron",
"n02747177 ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin",
"n02749479 assault rifle, assault gun",
"n02769748 backpack, back pack, knapsack, packsack, rucksack, haversack",
"n02776631 bakery, bakeshop, bakehouse",
"n02777292 balance beam, beam",
"n02782093 balloon",
"n02783161 ballpoint, ballpoint pen, ballpen, Biro",
"n02786058 Band Aid",
"n02787622 banjo",
"n02788148 bannister, banister, balustrade, balusters, handrail",
"n02790996 barbell",
"n02791124 barber chair",
"n02791270 barbershop",
"n02793495 barn",
"n02794156 barometer",
"n02795169 barrel, cask",
"n02797295 barrow, garden cart, lawn cart, wheelbarrow",
"n02799071 baseball",
"n02802426 basketball",
"n02804414 bassinet",
"n02804610 bassoon",
"n02807133 bathing cap, swimming cap",
"n02808304 bath towel",
"n02808440 bathtub, bathing tub, bath, tub",
"n02814533 beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon",
"n02814860 beacon, lighthouse, beacon light, pharos",
"n02815834 beaker",
"n02817516 bearskin, busby, shako",
"n02823428 beer bottle",
"n02823750 beer glass",
"n02825657 bell cote, bell cot",
"n02834397 bib",
"n02835271 bicycle-built-for-two, tandem bicycle, tandem",
"n02837789 bikini, two-piece",
"n02840245 binder, ring-binder",
"n02841315 binoculars, field glasses, opera glasses",
"n02843684 birdhouse",
"n02859443 boathouse",
"n02860847 bobsled, bobsleigh, bob",
"n02865351 bolo tie, bolo, bola tie, bola",
"n02869837 bonnet, poke bonnet",
"n02870880 bookcase",
"n02871525 bookshop, bookstore, bookstall",
"n02877765 bottlecap",
"n02879718 bow",
"n02883205 bow tie, bow-tie, bowtie",
"n02892201 brass, memorial tablet, plaque",
"n02892767 brassiere, bra, bandeau",
"n02894605 breakwater, groin, groyne, mole, bulwark, seawall, jetty",
"n02895154 breastplate, aegis, egis",
"n02906734 broom",
"n02909870 bucket, pail",
"n02910353 buckle",
"n02916936 bulletproof vest",
"n02917067 bullet train, bullet",
"n02927161 butcher shop, meat market",
"n02930766 cab, hack, taxi, taxicab",
"n02939185 caldron, cauldron",
"n02948072 candle, taper, wax light",
"n02950826 cannon",
"n02951358 canoe",
"n02951585 can opener, tin opener",
"n02963159 cardigan",
"n02965783 car mirror",
"n02966193 carousel, carrousel, merry-go-round, roundabout, whirligig",
"n02966687 carpenter's kit, tool kit",
"n02971356 carton",
"n02974003 car wheel",
"n02977058 cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, ATM",
"n02978881 cassette",
"n02979186 cassette player",
"n02980441 castle",
"n02981792 catamaran",
"n02988304 CD player",
"n02992211 cello, violoncello",
"n02992529 cellular telephone, cellular phone, cellphone, cell, mobile phone",
"n02999410 chain",
"n03000134 chainlink fence",
"n03000247 chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour",
"n03000684 chain saw, chainsaw",
"n03014705 chest",
"n03016953 chiffonier, commode",
"n03017168 chime, bell, gong",
"n03018349 china cabinet, china closet",
"n03026506 Christmas stocking",
"n03028079 church, church building",
"n03032252 cinema, movie theater, movie theatre, movie house, picture palace",
"n03041632 cleaver, meat cleaver, chopper",
"n03042490 cliff dwelling",
"n03045698 cloak",
"n03047690 clog, geta, patten, sabot",
"n03062245 cocktail shaker",
"n03063599 coffee mug",
"n03063689 coffeepot",
"n03065424 coil, spiral, volute, whorl, helix",
"n03075370 combination lock",
"n03085013 computer keyboard, keypad",
"n03089624 confectionery, confectionary, candy store",
"n03095699 container ship, containership, container vessel",
"n03100240 convertible",
"n03109150 corkscrew, bottle screw",
"n03110669 cornet, horn, trumpet, trump",
"n03124043 cowboy boot",
"n03124170 cowboy hat, ten-gallon hat",
"n03125729 cradle",
"n03126707 crane",
"n03127747 crash helmet",
"n03127925 crate",
"n03131574 crib, cot",
"n03133878 Crock Pot",
"n03134739 croquet ball",
"n03141823 crutch",
"n03146219 cuirass",
"n03160309 dam, dike, dyke",
"n03179701 desk",
"n03180011 desktop computer",
"n03187595 dial telephone, dial phone",
"n03188531 diaper, nappy, napkin",
"n03196217 digital clock",
"n03197337 digital watch",
"n03201208 dining table, board",
"n03207743 dishrag, dishcloth",
"n03207941 dishwasher, dish washer, dishwashing machine",
"n03208938 disk brake, disc brake",
"n03216828 dock, dockage, docking facility",
"n03218198 dogsled, dog sled, dog sleigh",
"n03220513 dome",
"n03223299 doormat, welcome mat",
"n03240683 drilling platform, offshore rig",
"n03249569 drum, membranophone, tympan",
"n03250847 drumstick",
"n03255030 dumbbell",
"n03259280 Dutch oven",
"n03271574 electric fan, blower",
"n03272010 electric guitar",
"n03272562 electric locomotive",
"n03290653 entertainment center",
"n03291819 envelope",
"n03297495 espresso maker",
"n03314780 face powder",
"n03325584 feather boa, boa",
"n03337140 file, file cabinet, filing cabinet",
"n03344393 fireboat",
"n03345487 fire engine, fire truck",
"n03347037 fire screen, fireguard",
"n03355925 flagpole, flagstaff",
"n03372029 flute, transverse flute",
"n03376595 folding chair",
"n03379051 football helmet",
"n03384352 forklift",
"n03388043 fountain",
"n03388183 fountain pen",
"n03388549 four-poster",
"n03393912 freight car",
"n03394916 French horn, horn",
"n03400231 frying pan, frypan, skillet",
"n03404251 fur coat",
"n03417042 garbage truck, dustcart",
"n03424325 gasmask, respirator, gas helmet",
"n03425413 gas pump, gasoline pump, petrol pump, island dispenser",
"n03443371 goblet",
"n03444034 go-kart",
"n03445777 golf ball",
"n03445924 golfcart, golf cart",
"n03447447 gondola",
"n03447721 gong, tam-tam",
"n03450230 gown",
"n03452741 grand piano, grand",
"n03457902 greenhouse, nursery, glasshouse",
"n03459775 grille, radiator grille",
"n03461385 grocery store, grocery, food market, market",
"n03467068 guillotine",
"n03476684 hair slide",
"n03476991 hair spray",
"n03478589 half track",
"n03481172 hammer",
"n03482405 hamper",
"n03483316 hand blower, blow dryer, blow drier, hair dryer, hair drier",
"n03485407 hand-held computer, hand-held microcomputer",
"n03485794 handkerchief, hankie, hanky, hankey",
"n03492542 hard disc, hard disk, fixed disk",
"n03494278 harmonica, mouth organ, harp, mouth harp",
"n03495258 harp",
"n03496892 harvester, reaper",
"n03498962 hatchet",
"n03527444 holster",
"n03529860 home theater, home theatre",
"n03530642 honeycomb",
"n03532672 hook, claw",
"n03534580 hoopskirt, crinoline",
"n03535780 horizontal bar, high bar",
"n03538406 horse cart, horse-cart",
"n03544143 hourglass",
"n03584254 iPod",
"n03584829 iron, smoothing iron",
"n03590841 jack-o'-lantern",
"n03594734 jean, blue jean, denim",
"n03594945 jeep, landrover",
"n03595614 jersey, T-shirt, tee shirt",
"n03598930 jigsaw puzzle",
"n03599486 jinrikisha, ricksha, rickshaw",
"n03602883 joystick",
"n03617480 kimono",
"n03623198 knee pad",
"n03627232 knot",
"n03630383 lab coat, laboratory coat",
"n03633091 ladle",
"n03637318 lampshade, lamp shade",
"n03642806 laptop, laptop computer",
"n03649909 lawn mower, mower",
"n03657121 lens cap, lens cover",
"n03658185 letter opener, paper knife, paperknife",
"n03661043 library",
"n03662601 lifeboat",
"n03666591 lighter, light, igniter, ignitor",
"n03670208 limousine, limo",
"n03673027 liner, ocean liner",
"n03676483 lipstick, lip rouge",
"n03680355 Loafer",
"n03690938 lotion",
"n03691459 loudspeaker, speaker, speaker unit, loudspeaker system, speaker system",
"n03692522 loupe, jeweler's loupe",
"n03697007 lumbermill, sawmill",
"n03706229 magnetic compass",
"n03709823 mailbag, postbag",
"n03710193 mailbox, letter box",
"n03710637 maillot",
"n03710721 maillot, tank suit",
"n03717622 manhole cover",
"n03720891 maraca",
"n03721384 marimba, xylophone",
"n03724870 mask",
"n03729826 matchstick",
"n03733131 maypole",
"n03733281 maze, labyrinth",
"n03733805 measuring cup",
"n03742115 medicine chest, medicine cabinet",
"n03743016 megalith, megalithic structure",
"n03759954 microphone, mike",
"n03761084 microwave, microwave oven",
"n03763968 military uniform",
"n03764736 milk can",
"n03769881 minibus",
"n03770439 miniskirt, mini",
"n03770679 minivan",
"n03773504 missile",
"n03775071 mitten",
"n03775546 mixing bowl",
"n03776460 mobile home, manufactured home",
"n03777568 Model T",
"n03777754 modem",
"n03781244 monastery",
"n03782006 monitor",
"n03785016 moped",
"n03786901 mortar",
"n03787032 mortarboard",
"n03788195 mosque",
"n03788365 mosquito net",
"n03791053 motor scooter, scooter",
"n03792782 mountain bike, all-terrain bike, off-roader",
"n03792972 mountain tent",
"n03793489 mouse, computer mouse",
"n03794056 mousetrap",
"n03796401 moving van",
"n03803284 muzzle",
"n03804744 nail",
"n03814639 neck brace",
"n03814906 necklace",
"n03825788 nipple",
"n03832673 notebook, notebook computer",
"n03837869 obelisk",
"n03838899 oboe, hautboy, hautbois",
"n03840681 ocarina, sweet potato",
"n03841143 odometer, hodometer, mileometer, milometer",
"n03843555 oil filter",
"n03854065 organ, pipe organ",
"n03857828 oscilloscope, scope, cathode-ray oscilloscope, CRO",
"n03866082 overskirt",
"n03868242 oxcart",
"n03868863 oxygen mask",
"n03871628 packet",
"n03873416 paddle, boat paddle",
"n03874293 paddlewheel, paddle wheel",
"n03874599 padlock",
"n03876231 paintbrush",
"n03877472 pajama, pyjama, pj's, jammies",
"n03877845 palace",
"n03884397 panpipe, pandean pipe, syrinx",
"n03887697 paper towel",
"n03888257 parachute, chute",
"n03888605 parallel bars, bars",
"n03891251 park bench",
"n03891332 parking meter",
"n03895866 passenger car, coach, carriage",
"n03899768 patio, terrace",
"n03902125 pay-phone, pay-station",
"n03903868 pedestal, plinth, footstall",
"n03908618 pencil box, pencil case",
"n03908714 pencil sharpener",
"n03916031 perfume, essence",
"n03920288 Petri dish",
"n03924679 photocopier",
"n03929660 pick, plectrum, plectron",
"n03929855 pickelhaube",
"n03930313 picket fence, paling",
"n03930630 pickup, pickup truck",
"n03933933 pier",
"n03935335 piggy bank, penny bank",
"n03937543 pill bottle",
"n03938244 pillow",
"n03942813 ping-pong ball",
"n03944341 pinwheel",
"n03947888 pirate, pirate ship",
"n03950228 pitcher, ewer",
"n03954731 plane, carpenter's plane, woodworking plane",
"n03956157 planetarium",
"n03958227 plastic bag",
"n03961711 plate rack",
"n03967562 plow, plough",
"n03970156 plunger, plumber's helper",
"n03976467 Polaroid camera, Polaroid Land camera",
"n03976657 pole",
"n03977966 police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria",
"n03980874 poncho",
"n03982430 pool table, billiard table, snooker table",
"n03983396 pop bottle, soda bottle",
"n03991062 pot, flowerpot",
"n03992509 potter's wheel",
"n03995372 power drill",
"n03998194 prayer rug, prayer mat",
"n04004767 printer",
"n04005630 prison, prison house",
"n04008634 projectile, missile",
"n04009552 projector",
"n04019541 puck, hockey puck",
"n04023962 punching bag, punch bag, punching ball, punchball",
"n04026417 purse",
"n04033901 quill, quill pen",
"n04033995 quilt, comforter, comfort, puff",
"n04037443 racer, race car, racing car",
"n04039381 racket, racquet",
"n04040759 radiator",
"n04041544 radio, wireless",
"n04044716 radio telescope, radio reflector",
"n04049303 rain barrel",
"n04065272 recreational vehicle, RV, R.V.",
"n04067472 reel",
"n04069434 reflex camera",
"n04070727 refrigerator, icebox",
"n04074963 remote control, remote",
"n04081281 restaurant, eating house, eating place, eatery",
"n04086273 revolver, six-gun, six-shooter",
"n04090263 rifle",
"n04099969 rocking chair, rocker",
"n04111531 rotisserie",
"n04116512 rubber eraser, rubber, pencil eraser",
"n04118538 rugby ball",
"n04118776 rule, ruler",
"n04120489 running shoe",
"n04125021 safe",
"n04127249 safety pin",
"n04131690 saltshaker, salt shaker",
"n04133789 sandal",
"n04136333 sarong",
"n04141076 sax, saxophone",
"n04141327 scabbard",
"n04141975 scale, weighing machine",
"n04146614 school bus",
"n04147183 schooner",
"n04149813 scoreboard",
"n04152593 screen, CRT screen",
"n04153751 screw",
"n04154565 screwdriver",
"n04162706 seat belt, seatbelt",
"n04179913 sewing machine",
"n04192698 shield, buckler",
"n04200800 shoe shop, shoe-shop, shoe store",
"n04201297 shoji",
"n04204238 shopping basket",
"n04204347 shopping cart",
"n04208210 shovel",
"n04209133 shower cap",
"n04209239 shower curtain",
"n04228054 ski",
"n04229816 ski mask",
"n04235860 sleeping bag",
"n04238763 slide rule, slipstick",
"n04239074 sliding door",
"n04243546 slot, one-armed bandit",
"n04251144 snorkel",
"n04252077 snowmobile",
"n04252225 snowplow, snowplough",
"n04254120 soap dispenser",
"n04254680 soccer ball",
"n04254777 sock",
"n04258138 solar dish, solar collector, solar furnace",
"n04259630 sombrero",
"n04263257 soup bowl",
"n04264628 space bar",
"n04265275 space heater",
"n04266014 space shuttle",
"n04270147 spatula",
"n04273569 speedboat",
"n04275548 spider web, spider's web",
"n04277352 spindle",
"n04285008 sports car, sport car",
"n04286575 spotlight, spot",
"n04296562 stage",
"n04310018 steam locomotive",
"n04311004 steel arch bridge",
"n04311174 steel drum",
"n04317175 stethoscope",
"n04325704 stole",
"n04326547 stone wall",
"n04328186 stopwatch, stop watch",
"n04330267 stove",
"n04332243 strainer",
"n04335435 streetcar, tram, tramcar, trolley, trolley car",
"n04336792 stretcher",
"n04344873 studio couch, day bed",
"n04346328 stupa, tope",
"n04347754 submarine, pigboat, sub, U-boat",
"n04350905 suit, suit of clothes",
"n04355338 sundial",
"n04355933 sunglass",
"n04356056 sunglasses, dark glasses, shades",
"n04357314 sunscreen, sunblock, sun blocker",
"n04366367 suspension bridge",
"n04367480 swab, swob, mop",
"n04370456 sweatshirt",
"n04371430 swimming trunks, bathing trunks",
"n04371774 swing",
"n04372370 switch, electric switch, electrical switch",
"n04376876 syringe",
"n04380533 table lamp",
"n04389033 tank, army tank, armored combat vehicle, armoured combat vehicle",
"n04392985 tape player",
"n04398044 teapot",
"n04399382 teddy, teddy bear",
"n04404412 television, television system",
"n04409515 tennis ball",
"n04417672 thatch, thatched roof",
"n04418357 theater curtain, theatre curtain",
"n04423845 thimble",
"n04428191 thresher, thrasher, threshing machine",
"n04429376 throne",
"n04435653 tile roof",
"n04442312 toaster",
"n04443257 tobacco shop, tobacconist shop, tobacconist",
"n04447861 toilet seat",
"n04456115 torch",
"n04458633 totem pole",
"n04461696 tow truck, tow car, wrecker",
"n04462240 toyshop",
"n04465501 tractor",
"n04467665 trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi",
"n04476259 tray",
"n04479046 trench coat",
"n04482393 tricycle, trike, velocipede",
"n04483307 trimaran",
"n04485082 tripod",
"n04486054 triumphal arch",
"n04487081 trolleybus, trolley coach, trackless trolley",
"n04487394 trombone",
"n04493381 tub, vat",
"n04501370 turnstile",
"n04505470 typewriter keyboard",
"n04507155 umbrella",
"n04509417 unicycle, monocycle",
"n04515003 upright, upright piano",
"n04517823 vacuum, vacuum cleaner",
"n04522168 vase",
"n04523525 vault",
"n04525038 velvet",
"n04525305 vending machine",
"n04532106 vestment",
"n04532670 viaduct",
"n04536866 violin, fiddle",
"n04540053 volleyball",
"n04542943 waffle iron",
"n04548280 wall clock",
"n04548362 wallet, billfold, notecase, pocketbook",
"n04550184 wardrobe, closet, press",
"n04552348 warplane, military plane",
"n04553703 washbasin, handbasin, washbowl, lavabo, wash-hand basin",
"n04554684 washer, automatic washer, washing machine",
"n04557648 water bottle",
"n04560804 water jug",
"n04562935 water tower",
"n04579145 whiskey jug",
"n04579432 whistle",
"n04584207 wig",
"n04589890 window screen",
"n04590129 window shade",
"n04591157 Windsor tie",
"n04591713 wine bottle",
"n04592741 wing",
"n04596742 wok",
"n04597913 wooden spoon",
"n04599235 wool, woolen, woollen",
"n04604644 worm fence, snake fence, snake-rail fence, Virginia fence",
"n04606251 wreck",
"n04612504 yawl",
"n04613696 yurt",
"n06359193 web site, website, internet site, site",
"n06596364 comic book",
"n06785654 crossword puzzle, crossword",
"n06794110 street sign",
"n06874185 traffic light, traffic signal, stoplight",
"n07248320 book jacket, dust cover, dust jacket, dust wrapper",
"n07565083 menu",
"n07579787 plate",
"n07583066 guacamole",
"n07584110 consomme",
"n07590611 hot pot, hotpot",
"n07613480 trifle",
"n07614500 ice cream, icecream",
"n07615774 ice lolly, lolly, lollipop, popsicle",
"n07684084 French loaf",
"n07693725 bagel, beigel",
"n07695742 pretzel",
"n07697313 cheeseburger",
"n07697537 hotdog, hot dog, red hot",
"n07711569 mashed potato",
"n07714571 head cabbage",
"n07714990 broccoli",
"n07715103 cauliflower",
"n07716358 zucchini, courgette",
"n07716906 spaghetti squash",
"n07717410 acorn squash",
"n07717556 butternut squash",
"n07718472 cucumber, cuke",
"n07718747 artichoke, globe artichoke",
"n07720875 bell pepper",
"n07730033 cardoon",
"n07734744 mushroom",
"n07742313 Granny Smith",
"n07745940 strawberry",
"n07747607 orange",
"n07749582 lemon",
"n07753113 fig",
"n07753275 pineapple, ananas",
"n07753592 banana",
"n07754684 jackfruit, jak, jack",
"n07760859 custard apple",
"n07768694 pomegranate",
"n07802026 hay",
"n07831146 carbonara",
"n07836838 chocolate sauce, chocolate syrup",
"n07860988 dough",
"n07871810 meat loaf, meatloaf",
"n07873807 pizza, pizza pie",
"n07875152 potpie",
"n07880968 burrito",
"n07892512 red wine",
"n07920052 espresso",
"n07930864 cup",
"n07932039 eggnog",
"n09193705 alp",
"n09229709 bubble",
"n09246464 cliff, drop, drop-off",
"n09256479 coral reef",
"n09288635 geyser",
"n09332890 lakeside, lakeshore",
"n09399592 promontory, headland, head, foreland",
"n09421951 sandbar, sand bar",
"n09428293 seashore, coast, seacoast, sea-coast",
"n09468604 valley, vale",
"n09472597 volcano",
"n09835506 ballplayer, baseball player",
"n10148035 groom, bridegroom",
"n10565667 scuba diver",
"n11879895 rapeseed",
"n11939491 daisy",
"n12057211 yellow lady's slipper, yellow lady-slipper, Cypripedium calceolus, Cypripedium parviflorum",
"n12144580 corn",
"n12267677 acorn",
"n12620546 hip, rose hip, rosehip",
"n12768682 buckeye, horse chestnut, conker",
"n12985857 coral fungus",
"n12998815 agaric",
"n13037406 gyromitra",
"n13040303 stinkhorn, carrion fungus",
"n13044778 earthstar",
"n13052670 hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola frondosa",
"n13054560 bolete",
"n13133613 ear, spike, capitulum",
"n15075141 toilet tissue, toilet paper, bathroom tissue",
]
synset_map = {}
synset_list = {}
for i, l in enumerate(synset):
label, desc = l.split(' ', 1)
synset_map[label] = {"index": i, "desc": desc, }
synset_list[i] = {"label": label, "desc": desc}
def get_index(label):
return synset_map[label]['index']
def get_desc(label):
return synset_map[label]['desc']
def get_label(index):
return synset_list[index]['desc']
|
[
"renmengye@gmail.com"
] |
renmengye@gmail.com
|
6ed44f67958f79a7134e38728c3e883ea0abf140
|
bb31c0062354bbb0df70692e904c949a00973503
|
/37_while_1.py
|
b462a9aa6885cd401da0ea0fee90a49dfd5977a2
|
[] |
no_license
|
millanmilu/Learn-Python-with-Milu-
|
c42df5aa7832fba75015b7af29d6009489e00ec5
|
3b4714b849dff0a0ef3cc91fd102840fbcf00e43
|
refs/heads/master
| 2022-04-26T03:29:38.990189
| 2020-04-28T13:17:28
| 2020-04-28T13:17:28
| 259,634,227
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 42
|
py
|
i = 0
while i <6:
print(i)
i+= 1
|
[
"noreply@github.com"
] |
millanmilu.noreply@github.com
|
0eba5c2872ed5bfd2a2e9f47d8ddbd71dee310a3
|
9af701670dcbfb4500fd2d49775fb7dee9bc4b11
|
/belajar-python/keyword_argument_list.py
|
886c7e6ec4e062bc93fc43a4fed40d0a79163c18
|
[] |
no_license
|
prisetio/Python
|
1e5223087754a13c3a852e045bd27f3ff7508433
|
9f1da560ec47bfb47587980d731ce35ab5c26845
|
refs/heads/master
| 2022-12-11T01:04:15.210763
| 2020-09-15T02:08:08
| 2020-09-15T02:08:08
| 292,768,216
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 471
|
py
|
# belajar Keyword Argument List
def create_html(tag, text, **attributes):
html = f"<{tag}"
for key, value in attributes.items():
html = html + f" {key}='{value}'"
html = html + f">{text}</{tag}>"
return html
html = create_html("p", "Hello Python", style="paragraf")
print(html)
html = create_html("a", "Ini Link", href="www.google.com", style="link")
print(html)
html = create_html("div", "Ini Div", style="contoh")
print(html)
|
[
"noreply@github.com"
] |
prisetio.noreply@github.com
|
1d8a899277896a76ca310712e1c39d52a3f6f64f
|
ad2ad3e42ecd201e3004cc03518dbef245e57b5a
|
/1week.py
|
2f6ab88c3517e290dd484891baf972ae8bbe851a
|
[] |
no_license
|
choichiwo/Pstudy
|
5ed595c9e9b9dd0857b9216fb4f31eebb58574aa
|
d42df9a0e3773cd00c8262a3c6ef589a2234acf5
|
refs/heads/master
| 2023-06-27T00:56:28.355066
| 2021-07-10T07:14:50
| 2021-07-10T07:14:50
| 371,884,674
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,816
|
py
|
x="Hello"
y="world"
print("Hello")
print(x)
# 변수형에는 정수형(3)int, 실수형(3.14)float, 문자열("Hello")str
print(type(x))
print(7//2) #정수나누기 몫
print(7%2) #나머지연산자 나머지
print(7/3)
print(x+y)
x=3
print(5*"hello") #이항연사자중 *만 정수와 문자열로만 가능
print("x =", x)
x = 13
y = 25
print("x =", x,",","y =", y,",", "sum =", x+y,",", "multi =" ,x*y,",", "rem =", x%y )
pi = 3.14159265
r = 5
print("원주율 =", pi)
print("반지름 =", r)
print("원의 둘레 =", 2*pi*r)
print("원의 넓이 =", pi*r*r )
#num = input("숫자를 입력하세요")
#num = int(num)
#print(num +3)
n = str(14)
print(type(n))
x = 12
print('x = {}'.format(x))
print('x = {},y={}'.format(x,12))
print('x = {},y={}'.format(x,24))
print('x = {},y={}'.format(x,12))
y = 5
print('x = {},y={}'.format(x,5))
print('x[{}]'.format(x))
print('x[{:5d}]'.format(x)) # decimal(십진수)
print('x[{}], x[{:5d}]'.format(x,x))
fmtstr = 'x[{}], x[{:5d}]'.format(x,x)
print(fmtstr)
print(type(fmtstr))
# :d (decimal:십진수,정수)
pstr = 'x[{}], x[{:5d}]'
fmtstr = pstr.format(x,x)
print(fmtstr)
print('z[{:5.2f}]'.format(3.14))
# m.nf m:소수점을 포함한 전체길이 n: 소수점이하 자리수 f:실수
x = "Hello"
print(x)
print(x.upper())
x = x.upper()
#upper: 대문자로 변환 lower: 소문자로 전환 <- mathod 메소드, 함수
print(x.lower())
#strip: 불필요 공백 삭제 l좌 r우
x = " Hello World "
print('['+x+']')
print(x.strip())
#is000 체킹 함수
print("TrainA10".isalnum()) #문자열이 알파벳 또는 숫자로 구성되는시 확인
x = "Hello world"
# find: 왼쪽부터 rfind: 오를쪽부터 모든 문자열은 배열.(인덱스번호를 갖는다.)
print(x.rfind('l'))
# in = contain(포함)
# split() 문자열 자르기 공백을 기준으로 자르기, (',') ,기준으로 자르기
# 불 연산자 비교연산자 == 같다 , != 다르다,
# >크다, <작다, <=작거나 같다 >=크거나 같다 b=true; b=false;
a=1
b=2
if(a==b):
print(a)
else:
print(b)
# 불 논리연산자 not(반대로), and(둘다)~이고~고~그리고, or(둘중하나)~거나~또는~혹은
# 문자열 사전에 나오는 순서 "a" < "z" true "a" < "aa" true "abyss" < "abroad" false
# "HelloWorld" < "Hello World" false "공백"ABCD...Zabcd....z 순서대로 (공백<대문자<소문자)
# if 논리비교: 조건문
# elif 또 다른 조건
# else 아니면
if a != 1:
print('true')
else:
print('false')
import datetime
now = datetime.datetime.now()
print("{}년 {}월 {}일".format(now.year,now.month,now.day))
if 5< now.month <9:
print("이번달은 {}월로 봄입니다".format(now.month))
x = input("x=")
y = input("y=")
x=int(x)
y=int(y)
5
if x < y:
print(x)
else:
print(y)
|
[
"ccu12345@naver.com"
] |
ccu12345@naver.com
|
a8ea2400ab3bd4077d9515a83ac1e63f8b1cf550
|
f1488619d32e72360deb25c2ae709fdb299e3269
|
/ogbg-code/tg/dataloader.py
|
a613503fcecd948894477323bd23d54f37560500
|
[
"MIT"
] |
permissive
|
animeshbchowdhury/DAGNN
|
5cf0794d56ff4d989da36e4deab87e9536ddcad3
|
02062bd2b24c6a23ef1fa8093d082df72ece98cd
|
refs/heads/main
| 2023-04-29T10:47:36.050410
| 2021-05-07T17:41:11
| 2021-05-07T17:41:11
| 365,308,808
| 0
| 0
|
MIT
| 2021-05-07T17:36:10
| 2021-05-07T17:36:10
| null |
UTF-8
|
Python
| false
| false
| 5,335
|
py
|
import torch.utils.data
from torch.utils.data.dataloader import default_collate
from torch_geometric.data import Data, Batch
from torch._six import container_abcs, string_classes, int_classes
# from src.tg.batch import Batch
class Collater(object):
def __init__(self, follow_batch, ndevices):
self.follow_batch = follow_batch
self.ndevices = ndevices
def collate(self, batch):
elem = batch[0]
if isinstance(elem, Data):
data = batch
count = torch.tensor([data.num_nodes for data in data])
cumsum = count.cumsum(0)
cumsum = torch.cat([cumsum.new_zeros(1), cumsum], dim=0)
device_id = self.ndevices * cumsum.to(torch.float) / cumsum[-1].item()
device_id = (device_id[:-1] + device_id[1:]) / 2.0
device_id = device_id.to(torch.long) # round.
split = device_id.bincount().cumsum(
0) # Count the frequency of each value in an array of non-negative ints.
split = torch.cat([split.new_zeros(1), split], dim=0)
split = torch.unique(split, sorted=True)
split = split.tolist()
graphs = []
for i in range(len(split) - 1):
data1 = data[split[i]:split[i + 1]]
graph = Batch.from_data_list(data1, self.follow_batch)
graphs += [graph]
return graphs #Batch.from_data_list(batch, self.follow_batch)
elif isinstance(elem, torch.Tensor):
return default_collate(batch)
elif isinstance(elem, float):
return torch.tensor(batch, dtype=torch.float)
elif isinstance(elem, int_classes):
return torch.tensor(batch)
elif isinstance(elem, string_classes):
return batch
elif isinstance(elem, container_abcs.Mapping):
return {key: self.collate([d[key] for d in batch]) for key in elem}
elif isinstance(elem, tuple) and hasattr(elem, '_fields'):
return type(elem)(*(self.collate(s) for s in zip(*batch)))
elif isinstance(elem, container_abcs.Sequence):
return [self.collate(s) for s in zip(*batch)]
raise TypeError('DataLoader found invalid type: {}'.format(type(elem)))
def __call__(self, batch):
return self.collate(batch)
class DataLoader(torch.utils.data.DataLoader):
r"""Data loader which merges data objects from a
:class:`torch_geometric.data.dataset` to a mini-batch.
Args:
dataset (Dataset): The dataset from which to load the data.
batch_size (int, optional): How many samples per batch to load.
(default: :obj:`1`)
shuffle (bool, optional): If set to :obj:`True`, the data will be
reshuffled at every epoch. (default: :obj:`False`)
follow_batch (list or tuple, optional): Creates assignment batch
vectors for each key in the list. (default: :obj:`[]`)
"""
def __init__(self, dataset, batch_size=1, shuffle=False, follow_batch=[], n_devices=1,
**kwargs):
super(DataLoader,
self).__init__(dataset, batch_size, shuffle,
collate_fn=Collater(follow_batch, n_devices), **kwargs)
class DataListLoader(torch.utils.data.DataLoader):
r"""Data loader which merges data objects from a
:class:`torch_geometric.data.dataset` to a python list.
.. note::
This data loader should be used for multi-gpu support via
:class:`torch_geometric.nn.DataParallel`.
Args:
dataset (Dataset): The dataset from which to load the data.
batch_size (int, optional): How many samples per batch to load.
(default: :obj:`1`)
shuffle (bool, optional): If set to :obj:`True`, the data will be
reshuffled at every epoch (default: :obj:`False`)
"""
def __init__(self, dataset, batch_size=1, shuffle=False, **kwargs):
super(DataListLoader, self).__init__(
dataset, batch_size, shuffle,
collate_fn=lambda data_list: data_list, **kwargs)
class DenseCollater(object):
def collate(self, data_list):
batch = Batch()
for key in data_list[0].keys:
batch[key] = default_collate([d[key] for d in data_list])
return batch
def __call__(self, batch):
return self.collate(batch)
class DenseDataLoader(torch.utils.data.DataLoader):
r"""Data loader which merges data objects from a
:class:`torch_geometric.data.dataset` to a mini-batch.
.. note::
To make use of this data loader, all graphs in the dataset needs to
have the same shape for each its attributes.
Therefore, this data loader should only be used when working with
*dense* adjacency matrices.
Args:
dataset (Dataset): The dataset from which to load the data.
batch_size (int, optional): How many samples per batch to load.
(default: :obj:`1`)
shuffle (bool, optional): If set to :obj:`True`, the data will be
reshuffled at every epoch (default: :obj:`False`)
"""
def __init__(self, dataset, batch_size=1, shuffle=False, **kwargs):
super(DenseDataLoader, self).__init__(
dataset, batch_size, shuffle, collate_fn=DenseCollater(), **kwargs)
|
[
"veronika.thost@ibm.com"
] |
veronika.thost@ibm.com
|
2ed52b5c3a91f498507c3864bdca88031dafc7d2
|
2c678444e3798a2b0d23a6f37690051a5217ffd0
|
/account/admin.py
|
2786157161025fa9930fc76920ac2f764b446566
|
[] |
no_license
|
rahimifh/online-store
|
e5b1768ede0aeea890c4e1a554b6e183ee1ee6e4
|
25b59724121072743f120a42eb62fccea911f034
|
refs/heads/master
| 2023-05-24T08:33:46.544565
| 2021-05-31T04:14:38
| 2021-05-31T04:14:38
| 369,862,898
| 0
| 0
| null | 2021-05-31T04:14:39
| 2021-05-22T17:02:32
|
CSS
|
UTF-8
|
Python
| false
| false
| 310
|
py
|
from django.contrib import admin
#****003
from .models import Profile,wholesale_pass
#****003
@admin.register(Profile)
class ProfileAdmin(admin.ModelAdmin):
list_display = ['user', 'date_of_birth', 'photo']
@admin.register(wholesale_pass)
class Profilepass(admin.ModelAdmin):
list_display = ['wpass']
|
[
"rahimi.f.h@gmail.com"
] |
rahimi.f.h@gmail.com
|
794a350ce9733a4e7139b34e2046f03fd88ea508
|
068994631929aafd4a16e02844748a13da375258
|
/xos/cord/models.py
|
5a81109f5687dc09714ae4b96cb28de782a61478
|
[
"Apache-2.0"
] |
permissive
|
Intellifora/xos
|
a2e01e150f8dde37025e0dd22aebe472543bdcd5
|
192da22e69130873310c092c7a64e53a90f277a7
|
refs/heads/master
| 2021-01-21T16:21:56.699323
| 2015-07-09T02:01:56
| 2015-07-09T02:01:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 25,435
|
py
|
from django.db import models
from core.models import Service, PlCoreBase, Slice, Sliver, Tenant, Node, Image, User, Flavor, Subscriber
from core.models.plcorebase import StrippedCharField
import os
from django.db import models, transaction
from django.forms.models import model_to_dict
from django.db.models import Q
from operator import itemgetter, attrgetter, methodcaller
import traceback
from xos.exceptions import *
"""
import os
import sys
sys.path.append("/opt/xos")
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "xos.settings")
import django
from core.models import *
from hpc.models import *
from cord.models import *
django.setup()
t = VOLTTenant()
t.caller = User.objects.all()[0]
t.save()
for v in VOLTTenant.get_tenant_objects().all():
v.caller = User.objects.all()[0]
v.delete()
for v in VCPETenant.get_tenant_objects().all():
v.caller = User.objects.all()[0]
v.delete()
for v in VOLTTenant.get_tenant_objects().all():
v.caller = User.objects.all()[0]
v.delete()
for v in VOLTTenant.get_tenant_objects().all():
if not v.creator:
v.creator= User.objects.all()[0]
v.save()
for v in VCPETenant.get_tenant_objects().all():
if not v.creator:
v.creator= User.objects.all()[0]
v.save()
"""
class ConfigurationError(Exception):
pass
VOLT_KIND = "vOLT"
VCPE_KIND = "vCPE"
VBNG_KIND = "vBNG"
CORD_SUBSCRIBER_KIND = "CordSubscriberRoot"
# -------------------------------------------
# CordSubscriberRoot
# -------------------------------------------
class CordSubscriberRoot(Subscriber):
class Meta:
proxy = True
KIND = CORD_SUBSCRIBER_KIND
default_attributes = {"firewall_enable": False,
"firewall_rules": "accept all anywhere anywhere",
"url_filter_enable": False,
"url_filter_rules": "allow all",
"url_filter_level": "PG",
"cdn_enable": False,
"users": [],
"is_demo_user": False }
sync_attributes = ("firewall_enable",
"firewall_rules",
"url_filter_enable",
"url_filter_rules",
"cdn_enable",)
def __init__(self, *args, **kwargs):
super(CordSubscriberRoot, self).__init__(*args, **kwargs)
self.cached_volt = None
self._initial_url_filter_enable = self.url_filter_enable
@property
def volt(self):
volt = self.get_newest_subscribed_tenant(VOLTTenant)
if not volt:
return None
# always return the same object when possible
if (self.cached_volt) and (self.cached_volt.id == volt.id):
return self.cached_volt
#volt.caller = self.creator
self.cached_volt = volt
return volt
@property
def firewall_enable(self):
return self.get_attribute("firewall_enable", self.default_attributes["firewall_enable"])
@firewall_enable.setter
def firewall_enable(self, value):
self.set_attribute("firewall_enable", value)
@property
def firewall_rules(self):
return self.get_attribute("firewall_rules", self.default_attributes["firewall_rules"])
@firewall_rules.setter
def firewall_rules(self, value):
self.set_attribute("firewall_rules", value)
@property
def url_filter_enable(self):
return self.get_attribute("url_filter_enable", self.default_attributes["url_filter_enable"])
@url_filter_enable.setter
def url_filter_enable(self, value):
self.set_attribute("url_filter_enable", value)
@property
def url_filter_level(self):
return self.get_attribute("url_filter_level", self.default_attributes["url_filter_level"])
@url_filter_level.setter
def url_filter_level(self, value):
self.set_attribute("url_filter_level", value)
@property
def url_filter_rules(self):
return self.get_attribute("url_filter_rules", self.default_attributes["url_filter_rules"])
@url_filter_rules.setter
def url_filter_rules(self, value):
self.set_attribute("url_filter_rules", value)
@property
def cdn_enable(self):
return self.get_attribute("cdn_enable", self.default_attributes["cdn_enable"])
@cdn_enable.setter
def cdn_enable(self, value):
self.set_attribute("cdn_enable", value)
@property
def users(self):
return self.get_attribute("users", self.default_attributes["users"])
@users.setter
def users(self, value):
self.set_attribute("users", value)
def find_user(self, uid):
uid = int(uid)
for user in self.users:
if user["id"] == uid:
return user
return None
def update_user(self, uid, **kwargs):
# kwargs may be "level" or "mac"
# Setting one of these to None will cause None to be stored in the db
uid = int(uid)
users = self.users
for user in users:
if user["id"] == uid:
for arg in kwargs.keys():
user[arg] = kwargs[arg]
self.users = users
return user
raise ValueError("User %d not found" % uid)
def create_user(self, **kwargs):
if "name" not in kwargs:
raise XOSMissingField("The name field is required")
for user in self.users:
if kwargs["name"] == user["name"]:
raise XOSDuplicateKey("User %s already exists" % kwargs["name"])
uids = [x["id"] for x in self.users]
if uids:
uid = max(uids)+1
else:
uid = 0
newuser = kwargs.copy()
newuser["id"] = uid
users = self.users
users.append(newuser)
self.users = users
return newuser
def delete_user(self, uid):
uid = int(uid)
users = self.users
for user in users:
if user["id"]==uid:
users.remove(user)
self.users = users
return
raise ValueError("Users %d not found" % uid)
@property
def services(self):
return {"cdn": self.cdn_enable,
"url_filter": self.url_filter_enable,
"firewall": self.firewall_enable}
@services.setter
def services(self, value):
pass
def save(self, *args, **kwargs):
super(CordSubscriberRoot, self).save(*args, **kwargs)
if (self.volt) and (self.volt.vcpe): # and (self._initial_url_filter_enabled != self.url_filter_enable):
# 1) trigger manage_bbs_account to run
# 2) trigger vcpe observer to wake up
self.volt.vcpe.save()
@property
def is_demo_user(self):
return self.get_attribute("is_demo_user", self.default_attributes["is_demo_user"])
@is_demo_user.setter
def is_demo_user(self, value):
self.set_attribute("is_demo_user", value)
# -------------------------------------------
# VOLT
# -------------------------------------------
class VOLTService(Service):
KIND = VOLT_KIND
class Meta:
app_label = "cord"
verbose_name = "vOLT Service"
proxy = True
class VOLTTenant(Tenant):
class Meta:
proxy = True
KIND = VOLT_KIND
default_attributes = {"vlan_id": None, }
def __init__(self, *args, **kwargs):
volt_services = VOLTService.get_service_objects().all()
if volt_services:
self._meta.get_field("provider_service").default = volt_services[0].id
super(VOLTTenant, self).__init__(*args, **kwargs)
self.cached_vcpe = None
@property
def vlan_id(self):
return self.get_attribute("vlan_id", self.default_attributes["vlan_id"])
@vlan_id.setter
def vlan_id(self, value):
self.set_attribute("vlan_id", value)
@property
def vcpe(self):
vcpe = self.get_newest_subscribed_tenant(VCPETenant)
if not vcpe:
return None
# always return the same object when possible
if (self.cached_vcpe) and (self.cached_vcpe.id == vcpe.id):
return self.cached_vcpe
vcpe.caller = self.creator
self.cached_vcpe = vcpe
return vcpe
@vcpe.setter
def vcpe(self, value):
raise XOSConfigurationError("vOLT.vCPE cannot be set this way -- create a new vCPE object and set its subscriber_tenant instead")
@property
def subscriber(self):
if not self.subscriber_root:
return None
subs = CordSubscriberRoot.objects.filter(id=self.subscriber_root.id)
if not subs:
return None
return subs[0]
@property
def creator(self):
if getattr(self, "cached_creator", None):
return self.cached_creator
creator_id=self.get_attribute("creator_id")
if not creator_id:
return None
users=User.objects.filter(id=creator_id)
if not users:
return None
user=users[0]
self.cached_creator = users[0]
return user
@creator.setter
def creator(self, value):
if value:
value = value.id
if (value != self.get_attribute("creator_id", None)):
self.cached_creator=None
self.set_attribute("creator_id", value)
def manage_vcpe(self):
# Each VOLT object owns exactly one VCPE object
if self.deleted:
return
if self.vcpe is None:
vcpeServices = VCPEService.get_service_objects().all()
if not vcpeServices:
raise XOSConfigurationError("No VCPE Services available")
vcpe = VCPETenant(provider_service = vcpeServices[0],
subscriber_tenant = self)
vcpe.caller = self.creator
vcpe.save()
def manage_subscriber(self):
if (self.subscriber_root is None):
# The vOLT is not connected to a Subscriber, so either find an
# existing subscriber with the same SSID, or autogenerate a new
# subscriber.
#
# TODO: This probably goes away when we rethink the ONOS-to-XOS
# vOLT API.
subs = CordSubscriberRoot.get_tenant_objects().filter(service_specific_id = self.service_specific_id)
if subs:
sub = subs[0]
else:
sub = CordSubscriberRoot(service_specific_id = self.service_specific_id,
name = "autogenerated-for-vOLT-%s" % self.id)
sub.save()
self.subscriber_root = sub
self.save()
def cleanup_vcpe(self):
if self.vcpe:
# print "XXX cleanup vcpe", self.vcpe
self.vcpe.delete()
def cleanup_orphans(self):
# ensure vOLT only has one vCPE
cur_vcpe = self.vcpe
for vcpe in list(self.get_subscribed_tenants(VCPETenant)):
if (not cur_vcpe) or (vcpe.id != cur_vcpe.id):
# print "XXX clean up orphaned vcpe", vcpe
vcpe.delete()
def save(self, *args, **kwargs):
self.validate_unique_service_specific_id()
if (self.subscriber_root is not None):
subs = self.subscriber_root.get_subscribed_tenants(VOLTTenant)
if (subs) and (self not in subs):
raise XOSDuplicateKey("Subscriber should only be linked to one vOLT")
if not self.creator:
if not getattr(self, "caller", None):
# caller must be set when creating a vCPE since it creates a slice
raise XOSProgrammingError("VOLTTenant's self.caller was not set")
self.creator = self.caller
if not self.creator:
raise XOSProgrammingError("VOLTTenant's self.creator was not set")
super(VOLTTenant, self).save(*args, **kwargs)
model_policy_volt(self.pk)
#self.manage_vcpe()
#self.manage_subscriber()
#self.cleanup_orphans()
def delete(self, *args, **kwargs):
self.cleanup_vcpe()
super(VOLTTenant, self).delete(*args, **kwargs)
def model_policy_volt(pk):
# TODO: this should be made in to a real model_policy
with transaction.atomic():
volt = VOLTTenant.objects.select_for_update().filter(pk=pk)
if not volt:
return
volt = volt[0]
volt.manage_vcpe()
volt.manage_subscriber()
volt.cleanup_orphans()
# -------------------------------------------
# VCPE
# -------------------------------------------
class VCPEService(Service):
KIND = VCPE_KIND
class Meta:
app_label = "cord"
verbose_name = "vCPE Service"
proxy = True
def allocate_bbs_account(self):
vcpes = VCPETenant.get_tenant_objects().all()
bbs_accounts = [vcpe.bbs_account for vcpe in vcpes]
# There's a bit of a race here; some other user could be trying to
# allocate a bbs_account at the same time we are.
for i in range(2,21):
account_name = "bbs%02d@onlab.us" % i
if (account_name not in bbs_accounts):
return account_name
raise XOSConfigurationError("We've run out of available broadbandshield accounts. Delete some vcpe and try again.")
class VCPETenant(Tenant):
class Meta:
proxy = True
KIND = VCPE_KIND
sync_attributes = ("nat_ip",
"lan_ip",
"wan_ip",
"private_ip",
"hpc_client_ip",
"wan_mac")
default_attributes = {"sliver_id": None,
"users": [],
"bbs_account": None,
"last_ansible_hash": None}
def __init__(self, *args, **kwargs):
super(VCPETenant, self).__init__(*args, **kwargs)
self.cached_vbng=None
self.cached_sliver=None
self.orig_sliver_id = self.get_initial_attribute("sliver_id")
@property
def image(self):
LOOK_FOR_IMAGES=["ubuntu-vcpe4", # ONOS demo machine -- preferred vcpe image
"Ubuntu 14.04 LTS", # portal
"Ubuntu-14.04-LTS", # ONOS demo machine
]
for image_name in LOOK_FOR_IMAGES:
images = Image.objects.filter(name = image_name)
if images:
return images[0]
raise XOSProgrammingError("No VPCE image (looked for %s)" % str(LOOK_FOR_IMAGES))
@property
def sliver(self):
if getattr(self, "cached_sliver", None):
return self.cached_sliver
sliver_id=self.get_attribute("sliver_id")
if not sliver_id:
return None
slivers=Sliver.objects.filter(id=sliver_id)
if not slivers:
return None
sliver=slivers[0]
sliver.caller = self.creator
self.cached_sliver = sliver
return sliver
@sliver.setter
def sliver(self, value):
if value:
value = value.id
if (value != self.get_attribute("sliver_id", None)):
self.cached_sliver=None
self.set_attribute("sliver_id", value)
@property
def creator(self):
if getattr(self, "cached_creator", None):
return self.cached_creator
creator_id=self.get_attribute("creator_id")
if not creator_id:
return None
users=User.objects.filter(id=creator_id)
if not users:
return None
user=users[0]
self.cached_creator = users[0]
return user
@creator.setter
def creator(self, value):
if value:
value = value.id
if (value != self.get_attribute("creator_id", None)):
self.cached_creator=None
self.set_attribute("creator_id", value)
@property
def vbng(self):
vbng = self.get_newest_subscribed_tenant(VBNGTenant)
if not vbng:
return None
# always return the same object when possible
if (self.cached_vbng) and (self.cached_vbng.id == vbng.id):
return self.cached_vbng
vbng.caller = self.creator
self.cached_vbng = vbng
return vbng
@vbng.setter
def vbng(self, value):
raise XOSConfigurationError("vCPE.vBNG cannot be set this way -- create a new vBNG object and set it's subscriber_tenant instead")
@property
def volt(self):
if not self.subscriber_tenant:
return None
volts = VOLTTenant.objects.filter(id=self.subscriber_tenant.id)
if not volts:
return None
return volts[0]
@property
def bbs_account(self):
return self.get_attribute("bbs_account", self.default_attributes["bbs_account"])
@bbs_account.setter
def bbs_account(self, value):
return self.set_attribute("bbs_account", value)
@property
def last_ansible_hash(self):
return self.get_attribute("last_ansible_hash", self.default_attributes["last_ansible_hash"])
@last_ansible_hash.setter
def last_ansible_hash(self, value):
return self.set_attribute("last_ansible_hash", value)
@property
def ssh_command(self):
if self.sliver:
return self.sliver.get_ssh_command()
else:
return "no-sliver"
@ssh_command.setter
def ssh_command(self, value):
pass
@property
def addresses(self):
if not self.sliver:
return {}
addresses = {}
for ns in self.sliver.networkslivers.all():
if "lan" in ns.network.name.lower():
addresses["lan"] = ns.ip
elif "wan" in ns.network.name.lower():
addresses["wan"] = ns.ip
elif "private" in ns.network.name.lower():
addresses["private"] = ns.ip
elif "nat" in ns.network.name.lower():
addresses["nat"] = ns.ip
elif "hpc_client" in ns.network.name.lower():
addresses["hpc_client"] = ns.ip
return addresses
@property
def nat_ip(self):
return self.addresses.get("nat",None)
@property
def lan_ip(self):
return self.addresses.get("lan",None)
@property
def wan_ip(self):
return self.addresses.get("wan",None)
@property
def wan_mac(self):
ip = self.wan_ip
if not ip:
return None
try:
(a,b,c,d) = ip.split('.')
wan_mac = "02:42:%2x:%2x:%2x:%2x" % (int(a), int(b), int(c), int(d))
except:
wan_mac = "Exception"
return wan_mac
@property
def private_ip(self):
return self.addresses.get("private",None)
@property
def hpc_client_ip(self):
return self.addresses.get("hpc_client",None)
@property
def is_synced(self):
return (self.enacted is not None) and (self.enacted >= self.updated)
@is_synced.setter
def is_synced(self, value):
pass
def pick_node(self):
nodes = list(Node.objects.all())
# TODO: logic to filter nodes by which nodes are up, and which
# nodes the slice can instantiate on.
nodes = sorted(nodes, key=lambda node: node.slivers.all().count())
return nodes[0]
def manage_sliver(self):
# Each VCPE object owns exactly one sliver.
if self.deleted:
return
if (self.sliver is not None) and (self.sliver.image != self.image):
self.sliver.delete()
self.sliver = None
if self.sliver is None:
if not self.provider_service.slices.count():
raise XOSConfigurationError("The VCPE service has no slices")
flavors = Flavor.objects.filter(name="m1.small")
if not flavors:
raise XOSConfigurationError("No m1.small flavor")
node =self.pick_node()
sliver = Sliver(slice = self.provider_service.slices.all()[0],
node = node,
image = self.image,
creator = self.creator,
deployment = node.site_deployment.deployment,
flavor = flavors[0])
sliver.save()
try:
self.sliver = sliver
super(VCPETenant, self).save()
except:
sliver.delete()
raise
def cleanup_sliver(self):
if self.sliver:
# print "XXX cleanup sliver", self.sliver
self.sliver.delete()
self.sliver = None
def manage_vbng(self):
# Each vCPE object owns exactly one vBNG object
if self.deleted:
return
if self.vbng is None:
vbngServices = VBNGService.get_service_objects().all()
if not vbngServices:
raise XOSConfigurationError("No VBNG Services available")
vbng = VBNGTenant(provider_service = vbngServices[0],
subscriber_tenant = self)
vbng.caller = self.creator
vbng.save()
def cleanup_vbng(self):
if self.vbng:
# print "XXX cleanup vnbg", self.vbng
self.vbng.delete()
def cleanup_orphans(self):
# ensure vCPE only has one vBNG
cur_vbng = self.vbng
for vbng in list(self.get_subscribed_tenants(VBNGTenant)):
if (not cur_vbng) or (vbng.id != cur_vbng.id):
# print "XXX clean up orphaned vbng", vbng
vbng.delete()
if self.orig_sliver_id and (self.orig_sliver_id != self.get_attribute("sliver_id")):
slivers=Sliver.objects.filter(id=self.orig_sliver_id)
if slivers:
# print "XXX clean up orphaned sliver", slivers[0]
slivers[0].delete()
def manage_bbs_account(self):
    """Keep the BBS (URL-filter) account in sync with the subscriber setting.

    Allocates a BBS account when the subscriber has url_filter_enable set,
    releases it otherwise; persists either change via the parent save().
    No-op for deleted tenants.
    """
    if self.deleted:
        return
    if self.volt and self.volt.subscriber and self.volt.subscriber.url_filter_enable:
        if not self.bbs_account:
            # make sure we use the proxied VCPEService object, not the generic Service object
            vcpe_service = VCPEService.objects.get(id=self.provider_service.id)
            self.bbs_account = vcpe_service.allocate_bbs_account()
            super(VCPETenant, self).save()
    else:
        if self.bbs_account:
            self.bbs_account = None
            super(VCPETenant, self).save()
def save(self, *args, **kwargs):
    """Persist the tenant, defaulting creator from caller, then run the
    model policy that provisions the sliver / vBNG / BBS resources.

    Raises:
        XOSProgrammingError: if neither creator nor caller is set.
    """
    if not self.creator:
        if not getattr(self, "caller", None):
            # caller must be set when creating a vCPE since it creates a slice
            raise XOSProgrammingError("VCPETenant's self.caller was not set")
        self.creator = self.caller
        if not self.creator:
            raise XOSProgrammingError("VCPETenant's self.creator was not set")
    super(VCPETenant, self).save(*args, **kwargs)
    # Provision dependent resources; replaces the direct calls below.
    model_policy_vcpe(self.pk)
    #self.manage_sliver()
    #self.manage_vbng()
    #self.manage_bbs_account()
    #self.cleanup_orphans()
def delete(self, *args, **kwargs):
    """Tear down owned resources (vBNG, sliver) before deleting the tenant."""
    self.cleanup_vbng()
    self.cleanup_sliver()
    super(VCPETenant, self).delete(*args, **kwargs)
def model_policy_vcpe(pk):
    """Reconcile a VCPETenant's dependent resources in a single transaction.

    Locks the tenant row (select_for_update), then (re)creates or cleans up
    its sliver, vBNG tenant, BBS account and any orphans. Silently returns
    if the tenant no longer exists.
    """
    # TODO: this should be made in to a real model_policy
    with transaction.atomic():
        vcpe = VCPETenant.objects.select_for_update().filter(pk=pk)
        if not vcpe:
            return
        vcpe = vcpe[0]
        vcpe.manage_sliver()
        vcpe.manage_vbng()
        vcpe.manage_bbs_account()
        vcpe.cleanup_orphans()
#----------------------------------------------------------------------------
# vBNG
#----------------------------------------------------------------------------
class VBNGService(Service):
    """Proxy model for the vBNG (virtual Broadband Network Gateway) service."""
    KIND = VBNG_KIND

    class Meta:
        app_label = "cord"
        verbose_name = "vBNG Service"
        proxy = True
class VBNGTenant(Tenant):
    """Proxy tenant holding the routeable subnet / NAT mapping for a vBNG.

    All fields are stored as tenant attributes (get_attribute/set_attribute)
    rather than database columns, with empty-string defaults.
    """
    class Meta:
        proxy = True

    KIND = VBNG_KIND

    # Fallback values used by the property getters when an attribute
    # has not been set yet.
    default_attributes = {"routeable_subnet": "",
                          "mapped_ip": "",
                          "mapped_mac": "",
                          "mapped_hostname": ""}

    @property
    def routeable_subnet(self):
        return self.get_attribute("routeable_subnet", self.default_attributes["routeable_subnet"])

    @routeable_subnet.setter
    def routeable_subnet(self, value):
        self.set_attribute("routeable_subnet", value)

    @property
    def mapped_ip(self):
        return self.get_attribute("mapped_ip", self.default_attributes["mapped_ip"])

    @mapped_ip.setter
    def mapped_ip(self, value):
        self.set_attribute("mapped_ip", value)

    @property
    def mapped_mac(self):
        return self.get_attribute("mapped_mac", self.default_attributes["mapped_mac"])

    @mapped_mac.setter
    def mapped_mac(self, value):
        self.set_attribute("mapped_mac", value)

    @property
    def mapped_hostname(self):
        return self.get_attribute("mapped_hostname", self.default_attributes["mapped_hostname"])

    @mapped_hostname.setter
    def mapped_hostname(self, value):
        self.set_attribute("mapped_hostname", value)
|
[
"smbaker@gmail.com"
] |
smbaker@gmail.com
|
4186546486ee703fdb689e95487f89dd8bc981fd
|
b8f77ec8e4c6da4a2117140a3eeac0201ab08678
|
/script_Bonprix.py
|
30f164b32a0268527a813dfdced614b94fefdccf
|
[
"MIT"
] |
permissive
|
CyrilShch/SeleniumBase
|
b07571172c173278eaf49b88ee24dd0eac779936
|
269e367d2b7bcd3741840241c5da184d7a3b2bc7
|
refs/heads/master
| 2023-06-11T20:42:13.325171
| 2021-07-04T12:56:30
| 2021-07-04T12:56:30
| 381,798,302
| 0
| 0
|
MIT
| 2021-06-30T18:29:27
| 2021-06-30T18:29:27
| null |
UTF-8
|
Python
| false
| false
| 12,379
|
py
|
# imports
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import TimeoutException
import pandas as pd
import numpy as np
import time
import re
from tqdm import tqdm
import argparse
import warnings
from user_agents import parse
warnings.simplefilter("ignore")
# SCRIPT USAGE:
### without user-agent:
# python Personalization/script_Bonprix.py
# --exp_name BO_first_exp1
# --items_list tafellaken rok muts rugzak vloerkleed sweatshirt horloge ladekast overhemd jurk sneakers kussen colbert bank badjas badpak pyjama jas spiegel
# --web_page https://www.bonprix.nl/
# --exec_path Personalization/geckodriver.exe
### with user-agent:
# python Personalization/script_Bonprix.py
# --exp_name BO_second_exp2
# --items_list tafellaken rok muts rugzak vloerkleed sweatshirt horloge ladekast overhemd jurk sneakers kussen colbert bank badjas badpak pyjama jas spiegel
# --web_page https://www.bonprix.nl/
# --exec_path Personalization/geckodriver.exe
# --ua_string "Mozilla/5.0 (Linux; U; Android 4.0.4; en-gb; GT-I9300 Build/IMM76D) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30"
# LIST OF UA STRING:
### iPhone's user agent string
# ua_string = 'Mozilla/5.0 (iPhone; CPU iPhone OS 5_1 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9B179 Safari/7534.48.3'
### Samsung Galaxy S3
# ua_string = 'Mozilla/5.0 (Linux; U; Android 4.0.4; en-gb; GT-I9300 Build/IMM76D) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30'
### non touch Blackberry device
# ua_string = 'BlackBerry9700/5.0.0.862 Profile/MIDP-2.1 Configuration/CLDC-1.1 VendorID/331 UNTRUSTED/1.0 3gpp-gba'
### iPad's user agent string
# ua_string = 'Mozilla/5.0(iPad; U; CPU iPhone OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B314 Safari/531.21.10'
### Kindle Fire's user agent string
# ua_string = 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_3; en-us; Silk/1.1.0-80) AppleWebKit/533.16 (KHTML, like Gecko) Version/5.0 Safari/533.16 Silk-Accelerated=true'
### Touch capable Windows 8 device
# ua_string = 'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; Trident/6.0; Touch)'
def get_parser():
    """Build the command-line parser for the Bonprix scraping script.

    Returns:
        argparse.ArgumentParser exposing --exp_name, --items_list,
        --web_page, --exec_path, --ua_string and --proxy.
    """
    # parse parameters
    parser = argparse.ArgumentParser(description='Scrape Lidl website')
    parser.add_argument("--exp_name", type=str, default="", help="Experiment name")
    parser.add_argument("--items_list", nargs='+', default="", help="List of products to search")
    # The remaining options are all plain strings defaulting to empty.
    plain_string_flags = (
        ("--web_page", "Website url"),
        ("--exec_path", "Path to execute the webdriver"),
        ("--ua_string", "User agent string to specify to identify/detect devices and browsers"),
        ("--proxy", "Proxy to mimic IP Address Geolocation"),
    )
    for flag, help_text in plain_string_flags:
        parser.add_argument(flag, type=str, default="", help=help_text)
    return parser
def iteration_mobile(driver, item, delays, collected_data):
    """Search one item on the MOBILE version of the site and collect results.

    Returns to the home page via the logo, dismisses the promo dialog if it
    appears, submits *item* in the search bar, then appends one
    {'item', 'product', 'price'} dict per product found to *collected_data*.
    Random delays drawn from *delays* mimic human interaction.
    """
    # banner button BonPrix click to update the search bar
    banner_button = driver.find_element_by_class_name('logo')
    # randomly choose a delay and freeze the execution to mimic a person usage
    delay = np.random.choice(delays)
    time.sleep(delay)
    banner_button.click()  # press ENTER
    # press the button to close an advertisement
    try:
        banner = WebDriverWait(driver, 5).until(EC.visibility_of_element_located((By.ID, "dialogBox1042")))
        banner_close_button = driver.find_element_by_class_name("closeButton")
        delay = np.random.choice(delays)
        time.sleep(delay)
        banner_close_button.click()  # press ENTER
    except TimeoutException:
        # no advertisement dialog showed up -- nothing to close
        pass
    delay = np.random.choice(delays)
    time.sleep(delay)
    # put a query in the search bar (mobile layout: tap first, then type)
    search = driver.find_element_by_id("search")
    search.click()
    search = driver.find_element_by_id("search-input")
    search.send_keys(item)  # put it in the search field
    search.send_keys(Keys.RETURN)  # press ENTER
    time.sleep(5)
    timeout = 30
    try:
        main = WebDriverWait(driver, timeout).until(EC.visibility_of_element_located((By.ID, "products")))
        time.sleep(5)
        # NOTE: the class names deliberately include a trailing space --
        # they match the site's markup; do not "fix" them.
        articles = main.find_elements_by_class_name("product ")  # get all products from the page
        for article in tqdm(articles):
            price_header = article.find_elements_by_class_name("price ")  # get a price object
            if len(price_header) == 0:  # filter garbage from the scraped list of products
                pass
            else:
                _price_header = price_header[0].text  # get a price text
                product_name = article.find_elements_by_class_name('details')  # get a product name
                # temporary dictionary of the product data
                # print(product_name[0].text.split('\n')[0])
                temp = {
                    'item': item,
                    'product': product_name[0].text.split('\n')[0],
                    'price': _price_header
                }
                collected_data.append(temp)  # append the data
    except TimeoutException:
        # driver.quit()
        print("driver has not found products on the webpage")
def iteration(driver, item, delays, collected_data):
    """Search one item on the DESKTOP version of the site and collect results.

    Same flow as iteration_mobile() but with the desktop page's element ids
    and class names. Appends one {'item', 'product', 'price'} dict per
    product found to *collected_data*.
    """
    # banner button BonPrix click to update the search bar
    banner_button = driver.find_element_by_id('bonprix_logo')
    # randomly choose a delay and freeze the execution to mimic a person usage
    delay = np.random.choice(delays)
    time.sleep(delay)
    banner_button.click()  # press ENTER
    # press the button to close an advertisement
    try:
        banner = WebDriverWait(driver, 5).until(EC.visibility_of_element_located((By.ID, "dialogBox1042")))
        banner_close_button = driver.find_element_by_class_name("closeButton")
        delay = np.random.choice(delays)
        time.sleep(delay)
        banner_close_button.click()  # press ENTER
    except TimeoutException:
        # no advertisement dialog showed up -- nothing to close
        pass
    delay = np.random.choice(delays)
    time.sleep(delay)
    # put a query in the search bar
    search = driver.find_element_by_id("header_live_search_field")
    search.send_keys(item)  # put it in the search field
    search.send_keys(Keys.RETURN)  # press ENTER
    time.sleep(5)
    timeout = 30
    try:
        main = WebDriverWait(driver, timeout).until(EC.visibility_of_element_located((By.CLASS_NAME, "product-list-wrapper")))
        time.sleep(5)
        articles = main.find_elements_by_class_name("product-list-item")  # get all products from the page
        for article in tqdm(articles):
            price_header = article.find_elements_by_class_name("product-price")  # get a price object
            if len(price_header) == 0:  # filter garbage from the scraped list of products
                pass
            else:
                _price_header = price_header[0].text  # get a price text
                product_name = article.find_elements_by_class_name('product-title')  # get a product name
                # temporary dictionary of the product data
                temp = {
                    'item': item,
                    'product': product_name[0].text,
                    'price': _price_header
                }
                collected_data.append(temp)  # append the data
    except TimeoutException:
        # driver.quit()
        print("driver has not found products on the webpage")
def main(params):
    """Drive the full scraping session.

    Configures a Firefox webdriver (optionally with a custom user-agent and
    proxy), opens the target page, dismisses cookie dialogs, then searches
    every item in params.items_list -- using the mobile or desktop scraper
    depending on the user-agent -- and writes the results to
    '<exp_name>.csv'. Each item is retried up to four times before being
    skipped.
    """
    # initialize a list of the possible delays to mimic user interaction with websites
    delays = [1, 2, 3, 4, 5]
    # initialize a list where we store all collected data
    collected_data = []
    # list of items to search
    items_list = params.items_list
    # initialize webdriver options
    profile = webdriver.FirefoxProfile()
    if params.ua_string != '':
        # user agent string
        ua_string = params.ua_string
        # initialize user agent (parsed only for logging purposes)
        user_agent = parse(ua_string)
        print(f'Current user-agent: {user_agent}')
        profile.set_preference("general.useragent.override", ua_string)
    PROXY = params.proxy
    if PROXY != '':
        # route http/ftp/ssl traffic through the given proxy
        webdriver.DesiredCapabilities.FIREFOX['proxy'] = {
            "httpProxy": PROXY,
            "ftpProxy": PROXY,
            "sslProxy": PROXY,
            "proxyType": "MANUAL",
        }
    # initialize a webdriver
    driver = webdriver.Firefox(profile, executable_path=params.exec_path)
    # get the url
    driver.get(params.web_page)
    # time to wait a response from the page
    timeout = 30
    # press the button to accept cookies
    try:
        cookies = WebDriverWait(driver, timeout).until(EC.visibility_of_element_located((By.XPATH, "//button[contains(.,'" + "OK" + "')]")))
        delay = np.random.choice(delays)
        time.sleep(delay)
        cookies.send_keys(Keys.RETURN)  # press ENTER
    except TimeoutException:
        print("Didn't found the button accept cookies.")
        pass
    # press the button to accept cookie settings
    try:
        cookie_instellingen = WebDriverWait(driver, timeout).until(EC.visibility_of_element_located((By.CLASS_NAME, "cookie-settings-button")))
        delay = np.random.choice(delays)
        time.sleep(delay)
        cookie_instellingen.send_keys(Keys.RETURN)  # press ENTER
    except TimeoutException:
        print("Didn't found the button accept cookie settings.")
        pass
    # initialize a list with failed items
    skipped_items = []
    # user-agent strings that trigger the mobile page layout
    mobile_users = ['Mozilla/5.0 (iPhone; CPU iPhone OS 5_1 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9B179 Safari/7534.48.3',
                    'Mozilla/5.0 (Linux; U; Android 4.0.4; en-gb; GT-I9300 Build/IMM76D) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30',
                    'BlackBerry9700/5.0.0.862 Profile/MIDP-2.1 Configuration/CLDC-1.1 VendorID/331 UNTRUSTED/1.0 3gpp-gba']
    # collect the data in a mobile version of the webpage
    if params.ua_string in mobile_users:
        for item in tqdm(items_list):
            print("================")
            print(item)
            print("================")
            print("\n")
            # nested try blocks = up to 4 attempts per item before skipping
            try:
                try:
                    try:
                        _ = iteration_mobile(driver, item, delays, collected_data)
                    except:
                        _ = iteration_mobile(driver, item, delays, collected_data)
                except:
                    try:
                        _ = iteration_mobile(driver, item, delays, collected_data)
                    except:
                        _ = iteration_mobile(driver, item, delays, collected_data)
            except:
                print(f"{item} was skipped")
                skipped_items.append(item)
                pass
    # PC version of website
    else:
        # collect the data
        for item in tqdm(items_list):
            print("================")
            print(item)
            print("================")
            print("\n")
            # nested try blocks = up to 4 attempts per item before skipping
            try:
                try:
                    try:
                        _ = iteration(driver, item, delays, collected_data)
                    except:
                        _ = iteration(driver, item, delays, collected_data)
                except:
                    try:
                        _ = iteration(driver, item, delays, collected_data)
                    except:
                        _ = iteration(driver, item, delays, collected_data)
            except:
                print(f"{item} was skipped")
                skipped_items.append(item)
                pass
    print("Writing csv file...")
    df = pd.DataFrame(collected_data)
    df.to_csv(f'{params.exp_name}.csv', index=False)
    print("Writing finished.")
    # # close the driver
    # driver.quit()
if __name__ == '__main__':
    parser = get_parser()
    # parse_known_args tolerates unrecognized extra CLI flags
    params, unknown = parser.parse_known_args()
    # run the script
    main(params)
|
[
"noreply@github.com"
] |
CyrilShch.noreply@github.com
|
7ae17b127984f7619dc186fbff81f71f10cf3c66
|
dac0aff3add7e53152f16f4c2900ac594b590b9f
|
/FunesDroid-master/AndroLeakPR.py
|
7a5f6c2ca11ee863bb2362e9df0aac0cda40780c
|
[] |
no_license
|
PorfirioTramontana/CodiceFiscale
|
7876e826411a3a23a4e76eb8cae3bab04e76c631
|
67fa1fabfc36f447fbc5b5ece5d3049d04f560d5
|
refs/heads/master
| 2022-05-03T15:27:05.887067
| 2022-03-17T17:52:19
| 2022-03-17T17:52:19
| 129,233,499
| 0
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,469
|
py
|
# -*- coding: utf-8 -*-
import os
import sys
import shutil
from subprocess import call
import time
sys.path.insert(0, 'Utils')
import AndroLeakUtil
# SINTASSI DEL COMANDO
# python AndroLeak.py [target device] [stimulus type] [stimulus number].
# target device: the device where to execute experiments on.
# stimulus type: type of stimulus (event lifecycle). It can be "doc" or 1, "bf" or 2 , "stai" or 3.
# stimulus number: number of event lifecycles to do.
# Warning. "InputAPKs\" folder must contain all the apk files to execute.
# EXAMPLE
# python AndroLeak.py emulator-5554 doc 4
help = 'python AndroLeak.py [target device] [stimulus type] [stimulus number].\n'
help += 'target device: the device where to execute experiments on.\n'
help += 'stimulus type: type of stimulus (event lifecycle). It can be \'doc\' or 1, \'bf\' or 2 , \'stai\' or 3.'
help += '\nstimulus number: number of event lifecycles to do.\n'
help += 'Warning. \'InputAPKs\\\' folder must contain all the apk files to execute.\n'
help += 'EXAMPLE: python AndroLeak.py emulator-5554 doc 4'
WAIT_TIME=1
LONG_WAIT_TIME=2
DEVICE="emulator-5554"
# Function that restarts the emulator.
def rebootEmulator():
    """Relaunch the Android emulator with wiped data and wait for it to boot.

    os.system blocks until the launched command returns; the subsequent
    waitDeviceHasBooted() polls adb until the device reports boot complete.
    """
    #os.system("adb -s "+DEVICE+" -e reboot")
    os.system("/Users/runner/Library/Android/sdk/emulator/emulator -avd "+DEVICE+" -wipe-data")
    time.sleep(5)
    waitDeviceHasBooted()
# Function that waits until the Emulator has booted.
def waitDeviceHasBooted():
    """Poll adb's sys.boot_completed property until the emulator is up.

    Retries every 2 seconds, up to `maxiter` times; if the limit is reached
    the emulator is assumed to have crashed and the script exits.
    """
    maxiter = 1000; count = 0;
    result = os.popen("/Users/runner/Library/Android/sdk/platform-tools/adb -s "+DEVICE+" shell getprop sys.boot_completed").read()
    while("1" not in result):
        print("/Users/runner/Library/Android/sdk/platform-tools/adb -s "+DEVICE+" shell getprop sys.boot_completed")
        result = os.popen("/Users/runner/Library/Android/sdk/platform-tools/adb -s "+DEVICE+" shell getprop sys.boot_completed").read()
        print("Waiting the Emulator")
        time.sleep(2)
        count += 1;
        if(count == maxiter):  # If maxiter is reached the emulator has probably crashed.
            print("ERROR: The emulator is offline.")
            raise SystemExit(0);
# Wait, if necessary, for the device to finish booting.
waitDeviceHasBooted()
os.system('echo Device Avviato e pronto!')
# Fail early if there are no APKs to run experiments on.
if os.listdir("InputAPKs") == []:
    raise ValueError('InputAPKs\ is empty. You must put some apk files in InputAPKs\.')
# Read the target device, stimulus type/count and other options from argv.
li = sys.argv
if(len(li)==2):
    if(li[1]=='-h' or li[1]=='--help' or li[1]=='h' or li[1]=='help'):
        print(help);
        raise SystemExit(0);
    else:
        raise SyntaxError('You are using this command wrongly. Check the syntax (use the option help). ')
elif(len(li)==7):
    DEVICE = li[1]
    stimulus_type = li[2]
    num_rotations = int(li[3])
    WAIT_TIME = int(li[4])
    APK = li[5]
    num_apk = int(li[6])
    sample_size = 1
    REBOOT_TIME = 0
else:
    raise SyntaxError(str(len(li))+' You are using this command wrongly. Check the syntax (use the option help). ')
# Input Validation.
if(sample_size>10000 or sample_size <=0):
    raise SyntaxError('You are using this command wrongly. The Sample Size must be >0 and <10000. ')
if(num_rotations>10000 or num_rotations <=0):
    raise SyntaxError('You are using this command wrongly. The Stimulus Numbers must be >0 and <10000. ')
if(sample_size>num_rotations):
    raise SyntaxError('You are using this command wrongly. The Sample Size must be bigger than Stimulus Numbers. ')
# On Windows, warn that the filesystem must not limit directory name length.
if os.name == 'nt':
    print("---------------------------------------------------------")
    print("WARNING. Be sure that NTFS has LongPathsEnabled.")
    print("---------------------------------------------------------")
# Raise an error if the InputAPKs/ folder is empty.
# BUG FIX: the original compared os.listdir(...) (a list) to "" (a string),
# which is always False, so the check never fired. Use a falsy check instead.
if not os.listdir("InputAPKs/"):
    raise SyntaxError("InputAPKs/ folder is empty!")
# Create the InputAPKs directory if it does not exist.
# NOTE(review): this runs after the listdir check above, so a missing
# directory would already have raised FileNotFoundError -- confirm intent.
if not os.path.exists("InputAPKs/"):
    os.makedirs("InputAPKs")
# Create the Results directory if it does not exist.
if not os.path.exists("Results/"):
    os.makedirs("Results/")
print("---------------------------------------------------------")
print("WARNING. The experiments are going to be executed on "+DEVICE+".")
print("---------------------------------------------------------")
# Install the STAI apk on the device (used to solicit the STAI event).
if(stimulus_type=="stai" or stimulus_type=="3"):
    call(['adb','-s',DEVICE,'install','-g','-l','Utils/stai.apk'])
# Setting device's date and time MMDDhhmm[[CC]YY][.ss]
device_time = time.strftime("%d/%m/%Y "+"%I:%M:%S")
device_time_dformat = time.strftime("%m%d%H%M%Y.%S")
print("Setting Device's date and time to: "+device_time)
cmd = "/Users/runner/Library/Android/sdk/platform-tools/adb -s "+DEVICE+" shell \"su 0 toybox date "+device_time_dformat+"\""
os.system(cmd)
StartingExperimentsTime = time.strftime("%d/%m/%Y "+"%I:%M:%S")
print("Starting experiments: " + StartingExperimentsTime)
i = 0
count = 0
# NOTE(review): REBOOT_TIME is hard-coded to 0 above, so this branch never
# fires; it looks like a remnant of a loop over multiple APKs -- confirm.
if(REBOOT_TIME>0 and count==REBOOT_TIME):
    count=0
    rebootEmulator()
print("----- Starting experiments on application " + APK + " -----")
time.sleep(WAIT_TIME)
apk_to_execute = "InputAPKs/"+APK
print(apk_to_execute)
# Delegate the actual experiment run to TestExecutor.py.
cmd = "python TestExecutor.py "+DEVICE+" "+apk_to_execute+" "+stimulus_type+" "+str(num_rotations)+" "+str(WAIT_TIME)+" "
os.system(cmd)
i=i+1
count=count+1
AndroLeakUtil.printProgressBar(i,num_apk,length=30)  # Displaying Progress Bar
waitDeviceHasBooted()  # Useful in cases of Emulator crashes
EndingExperimentsTime = time.strftime("%d/%m/%Y "+"%I:%M:%S")
print("Ending experiments: " + EndingExperimentsTime)
# Re-enable the accelerometer on the device.
os.system("/Users/runner/Library/Android/sdk/platform-tools/adb -s "+DEVICE+" shell content insert --uri content://settings/system --bind name:s:accelerometer_rotation --bind value:i:1")
# Create the results.txt file.
# Create the results directory if it does not exist.
if not os.path.exists("Results/"):
    os.makedirs("Results")
file = open("Results/AndroLeak_results.txt","w")
file.write("--- RESULTS ---\n")
file.write("You can find results of each experiment into the various folders.\n")
file.write("START TIME: " + StartingExperimentsTime + "\n")
file.write("END TIME: " + EndingExperimentsTime + "\n")
file.close()
# Raffaele Sellitto. 09/12/2017.
|
[
"porfirio.tramontana@gmail.com"
] |
porfirio.tramontana@gmail.com
|
4e96e72e9b87a3d8603a2712197fd36e2f830b7f
|
0daf2422f8a6663b652d46e9e500be9419558ce7
|
/claim_frags.py
|
a3804d0e875069fc67f77d2d3a8bd4759a42e811
|
[
"Apache-2.0"
] |
permissive
|
aschamberlain/adfs-tools
|
6065d49a949fed4e645967fc2ac63c0bc0e19fa7
|
ab1e90903086d23f489c20d94241b2635f0bcbba
|
refs/heads/master
| 2022-12-28T15:16:47.226667
| 2020-10-01T11:48:18
| 2020-10-01T11:48:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,708
|
py
|
#! /usr/bin/python
from objects import Map
from utils import find_map
fd = open("Loader", "r")
fd.seek(0,2)
loader_len = fd.tell()
fd.close()
print "Loader is {0} bytes long.".format(loader_len)
if len(sys.argv) != 2:
print("Usage: claim_frags <device>")
exit(1)
fd = open(sys.argv[1], "r+b")
map_address, map_length = find_map(fd)
fd.seek(map_address)
fs_map = Map(fd.read(map_length))
loader_start = (fs_map.disc_record.idlen+1) * fs_map.disc_record.bpmb
bits_needed = loader_len / fs_map.disc_record.bpmb
start_bit = loader_start / fs_map.disc_record.bpmb
last_bit = start_bit + bits_needed
while start_bit * fs_map.disc_record.bpmb < loader_start:
start_bit += 1
while bits_needed * fs_map.disc_record.bpmb < loader_len:
bits_needed += 1
print "{0} map bits required for loader, from bit {1} to {2}.".format(bits_needed,start_bit,last_bit)
zone = 0
while True:
zone_start, zone_end = fs_map.zone_range(zone)
first_in_zone = zone_start
last_in_zone = zone_end
if zone_start < start_bit:
first_in_zone = start_bit
if last_bit < last_in_zone:
last_in_zone = last_bit
#note = ""
#if first_in_zone > zone_start:
# note = " ** {0} bits not used at start of zone".format(first_in_zone-zone_start)
#if last_in_zone < zone_end:
# note = " ** {0} bits not used at end of zone".format(zone_end-last_in_zone)
#print "Zone {0} - bits {1} to {2}{3}".format(zone,first_in_zone,last_in_zone,note)
#print zone_start
fs_map.allocate(zone, 3, first_in_zone-zone_start, last_in_zone-zone_start)
if zone_end > last_bit:
break
zone += 1
fd.seek(map_address)
fd.write(fs_map.data.tostring())
|
[
"cjohns@isotek.co.uk"
] |
cjohns@isotek.co.uk
|
85978ad9a31d7cda93360bf464bf792d3b7226bd
|
4f268ddcaf6e1b3bc7930b6fcfd5d2f1b5d0d492
|
/backend/dating/migrations/0001_initial.py
|
13b8666dc826be40e503ef6acf0a339c3b86731c
|
[] |
no_license
|
crowdbotics-apps/cuisine-culture-21918
|
00de77062779a583fc4ca1294ae7eec78a88d03f
|
c7366e19397a6f3f8ad3fc860a18be4f0b50bcd0
|
refs/heads/master
| 2023-02-08T02:57:00.029866
| 2020-12-14T22:43:54
| 2020-12-14T22:43:54
| 306,964,135
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,408
|
py
|
# Generated by Django 2.2.16 on 2020-10-24 20:50
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration for the dating app.

    Creates the Inbox, UserPhoto, Setting, Profile, Match, Like and Dislike
    models, all keyed to the project's AUTH_USER_MODEL. Auto-generated by
    Django -- do not edit by hand; create a new migration instead.
    """

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Inbox',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('slug', models.SlugField()),
                ('created', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.CreateModel(
            name='UserPhoto',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('photo', models.URLField()),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='userphoto_user', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Setting',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('maximum_distance', models.IntegerField()),
                ('gender', models.CharField(max_length=256)),
                ('age_range', models.IntegerField()),
                ('show_me_on_searches', models.BooleanField()),
                ('new_matches_notification', models.BooleanField()),
                ('message_notification', models.BooleanField()),
                ('message_likes_notification', models.BooleanField()),
                ('super_like_notification', models.BooleanField()),
                ('in_app_vibrations', models.BooleanField()),
                ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='setting_user', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('bio', models.TextField()),
                ('school', models.TextField()),
                ('date_of_birth', models.DateField()),
                ('created', models.DateField(auto_now_add=True)),
                ('modified', models.DateField(auto_now=True)),
                ('user', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='profile_user', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Match',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='match_owner', to=settings.AUTH_USER_MODEL)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='match_user', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Like',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('super_liked', models.BooleanField()),
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='like_owner', to=settings.AUTH_USER_MODEL)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='like_user', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Dislike',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='dislike_owner', to=settings.AUTH_USER_MODEL)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='dislike_user', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
eaf640b26e9f808e6e8a1f59b805d8fa3222252b
|
c3f8f9d6732427f3addbc11c820ac4f8d3243109
|
/server/routes/consumerRoute.py
|
8aab24898a2b1ca62e67a485229146f986152c8a
|
[] |
no_license
|
kirtfieldk/RamHacks
|
7588fab878387265c41d6a7e7871a63805c62306
|
17bfb5499f5445ef4d8b723afd0ac5510cdc2899
|
refs/heads/master
| 2023-05-11T07:59:13.602701
| 2019-10-24T17:08:45
| 2019-10-24T17:08:45
| 211,519,086
| 0
| 0
| null | 2023-05-01T19:38:09
| 2019-09-28T15:13:01
|
Python
|
UTF-8
|
Python
| false
| false
| 181
|
py
|
# TODO fetch all consumer
def get_consumer():
    """Fetch all consumers from the database.

    Not yet implemented. The original body contained only TODO comments and
    no statements, which is a syntax error in Python (a ``def`` needs at
    least one statement) and made the module fail to import. Raising
    NotImplementedError keeps the module importable while preserving the
    intended contract.

    Returns:
        list: the list of consumer records, once implemented.
    """
    # TODO connect to SQL
    # TODO check for errors
    # TODO return list of consumers
    # TODO Create middleware for quering
    raise NotImplementedError("get_consumer is not implemented yet")
|
[
"keithryankirtfield@gmail.com"
] |
keithryankirtfield@gmail.com
|
ad9e1f55ee886837c2a1994ab3ce4d79b4ff88c4
|
aba4cb29a0861c1900b617da852a868004b17184
|
/app/filters.py
|
48488bf0b22022a1bb88b30582a65213b31d208b
|
[] |
no_license
|
userok17/flask-shop
|
bc58b5edf3a88e043f66e1135033448b2440506c
|
66f7caf84b24a7ba2ec0794210e69b37a1bd2a37
|
refs/heads/master
| 2020-04-01T21:40:13.326265
| 2018-10-18T18:42:31
| 2018-10-18T18:42:31
| 153,669,452
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 581
|
py
|
#!/usr/bin/env python3
import re
from jinja2 import evalcontextfilter, Markup, escape
_paragraph_re = re.compile(r'(?:\r\n|\r|\n){2,}')
@evalcontextfilter
def nl2br(eval_ctx, value):
    """Jinja2 filter: render newlines in *value* as HTML line breaks.

    Splits the escaped input on blank lines, wraps each chunk in <p> tags
    and converts the remaining single newlines to <br>. The result is
    wrapped in Markup when the template context autoescapes.
    """
    result = u'\n\n'.join(u'<p>%s</p>' % p.replace('\n', '<br>\n') \
                          for p in _paragraph_re.split(escape(value)))
    if eval_ctx.autoescape:
        result = Markup(result)
    return result
def strftime_filter(value, formatDate):
    """Date formatting filter: render *value* via strftime with *formatDate*."""
    formatted = value.strftime(formatDate)
    return formatted
def format_currency(value):
    """Format a number with thousands separators and two decimal places."""
    return format(value, ",.2f")
|
[
"admin@1rpi.ru"
] |
admin@1rpi.ru
|
3fac4557184a378f037a1f8a094f75ef73791d60
|
3728feabb1ff663d1c7f16e41b1f60df8c52affb
|
/gibdd_application/wsgi.py
|
3b7ea10d86d697a1fc3e3b2ed79818a3e36c2b85
|
[] |
no_license
|
MarinaChekulina002796/GIBDD
|
4ef63fef817251adb8b5c30997d71f35b449abac
|
b26e666b6fc6dd6434cc2bbdd1b868af86de4249
|
refs/heads/master
| 2021-04-26T22:17:44.984528
| 2018-05-15T06:48:57
| 2018-05-15T06:48:57
| 124,063,950
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 411
|
py
|
"""
WSGI config for gibdd_application project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "gibdd_application.settings")
application = get_wsgi_application()
|
[
"Marina_Chekulina"
] |
Marina_Chekulina
|
228f779cf45eb59c5458794d4fbc8c469920c1fb
|
e38b0288417f55640a14ffa28c1f76b8dead9df3
|
/5x5_gaussian/lib/datasets/loadDataset.py
|
36964dc6bc4074132c9f60e5d111bfe07349b8a6
|
[] |
no_license
|
B1ueber2y/NDiv
|
e706b9026134fad8bf4c6d50d4e2354c2079dfd1
|
0729eac3b9fdbf1bd586486c7685c0afcf484597
|
refs/heads/master
| 2023-07-20T00:52:25.851635
| 2019-07-25T20:51:53
| 2019-07-25T20:51:53
| 180,195,055
| 14
| 2
| null | 2023-07-06T21:35:25
| 2019-04-08T17:01:04
|
Python
|
UTF-8
|
Python
| false
| false
| 567
|
py
|
from datasets.gaussianGridDataset import gaussianGridDataset
from datasets.gaussianRingDataset import gaussianRingDataset
def getDataset(dataset_config):
    """Instantiate the dataset named in dataset_config['name'].

    Supported names: "gaussian_grid" and "gaussian_ring"; both are built
    from the 'n', 'n_data' and 'sig' config entries.

    Raises:
        ValueError: for any other dataset name.
    """
    name = dataset_config['name']
    if name == "gaussian_grid":
        dataset_cls = gaussianGridDataset
    elif name == "gaussian_ring":
        dataset_cls = gaussianRingDataset
    else:
        raise ValueError("no such dataset called " + name)
    return dataset_cls(dataset_config['n'], dataset_config['n_data'], dataset_config['sig'])
|
[
"b1ueber2y@gmail.com"
] |
b1ueber2y@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.