blob_id (stringlengths 40–40) | directory_id (stringlengths 40–40) | path (stringlengths 2–616) | content_id (stringlengths 40–40) | detected_licenses (listlengths 0–69) | license_type (stringclasses, 2 values) | repo_name (stringlengths 5–118) | snapshot_id (stringlengths 40–40) | revision_id (stringlengths 40–40) | branch_name (stringlengths 4–63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64 2.91k–686M, nullable) | star_events_count (int64 0–209k) | fork_events_count (int64 0–110k) | gha_license_id (stringclasses, 23 values) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (stringclasses, 213 values) | src_encoding (stringclasses, 30 values) | language (stringclasses, 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64 2–10.3M) | extension (stringclasses, 246 values) | content (stringlengths 2–10.3M) | authors (listlengths 1–1) | author_id (stringlengths 0–212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
abacc0bf4cee74f8e78ac4df2df3838d91209a33
|
44afb399a813cde601a4eafb484e15afdafc1069
|
/image-noise.py
|
aa557f7703d8f433215e7a7cb429ab27c3a1cbac
|
[] |
no_license
|
paalbra/image-noise
|
2b4f80fb84a10fe87c20e533e8400de3364ac0e9
|
203f2fd8e5b2e1ff97b57a7690e6218581c2f8df
|
refs/heads/master
| 2022-05-28T19:58:47.589662
| 2020-04-27T14:58:04
| 2020-04-27T14:58:04
| 259,361,856
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 581
|
py
|
import argparse
from PIL import Image
import numpy
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Produce a noisy image.")
parser.add_argument("output", help="Path to output image")
parser.add_argument("--width", type=int, default=1080, help="Image width in pixels")
parser.add_argument("--height", type=int, default=720, help="Image height in pixels")
args = parser.parse_args()
data = numpy.random.randint(0, 20, (args.height, args.width))
data += 50
img = Image.fromarray(numpy.uint8(data))
img.save(args.output)
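# Example invocation (assuming the script is saved as image-noise.py, matching its path above):
#   python image-noise.py noise.png --width 640 --height 480
# randint(0, 20) yields values in [0, 20), so after the +50 offset every pixel
# lies in [50, 69] and the output is a dark gray grayscale noise image.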
|
[
"paalbra@gmail.com"
] |
paalbra@gmail.com
|
4c927ef002365aaa8bb8be42c42b73ecaf047dfd
|
99e514cc667236d970e970629b014325b571cfd2
|
/evaluation.py
|
ca9932249f027036c84ac343c9af59c5124d17a5
|
[] |
no_license
|
rmitbggroup/Liangjun-dasfaa-2017
|
69a3ae6dbe706a3e657db442b718798060d56959
|
19cf0d95a7b1a0a5232880715867c3af0b68288a
|
refs/heads/master
| 2020-03-29T08:04:09.255372
| 2018-09-21T01:51:50
| 2018-09-21T01:51:50
| 149,692,742
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,274
|
py
|
def readsummary(file,my=False):
summary = [[0]]
summary.remove([0])
    f = open(file, "r")  # text mode: the lines are split as strings below
lines = f.readlines()
ct=0
summary.append([])
indexappend=1
if my:
indexappend=0
for l in lines:
numbers = l.split()
if l.split(',')[0].isdigit():
summary.append([])
if len(numbers) ==2 and numbers[0].isdigit():
if int(numbers[0])==ct+indexappend:
line_togo = int(numbers[1])
summary.append([])
else:
ct=ct+1
else: summary[ct].append(l)
return summary
def Coverage(l1,l2):
ct = 0.0
for i in l2:
if i in l1:
ct=ct+1.0
return (ct)/(len(l2)*1.0)
def dissimilarity(m1,m2):
k1 = m1.split()
k2 = m2.split()
ct = 0.0
for k in k1:
if k in k2:
ct= ct + 1.0
return 1.0 - ct / (len(k1)*1.0 + len(k2)*1.0 - ct)
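# NOTE: Diversity, Effectiveness and Efficiency below read a module-level
# variable `summary` (Effectiveness also reads a module-level `k`) that must
# be set first, e.g.: summary = readsummary("centroid_res.txt")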
def Diversity(filter,k):
l1 = []
div=0.0
num_div = 0
for s in summary:
if (len(s) < filter):
continue
l1 = s[0:k]
ct = 0.0
for i in l1:
di = 1.0
for j in l1:
if(i!=j):
d=dissimilarity(i,j)
if d < di:
di = d
ct = ct + di
div = div+ ct / (len(l1)*1.0)
num_div= num_div+1
return div / num_div
def Effectiveness(filter):
sumset = []
e = 0.0
ct_e = 0
for s in summary:
if(len(s)<filter):
continue
fulls = []
for ss in range(0,len(s)-1):
fulls = fulls+s[ss].split()
if(ss<k):
sumset=sumset+s[ss].split()
r = Coverage(sumset,fulls)
e=e+r
ct_e=ct_e+1.0
    print(e / ct_e * 1.0)
return
W = 10
tau = 5
T = []
def Efficiency(filter):
T = []
pre = 0
mx =0
for mt in summary:
if(len(mt)<2 or len(mt) < filter):
continue
if(len(mt)>mx):
mx = len(mt)
if mt[-1][0].isdigit()==False:
continue
dd = mt[-1].split(',')[1]
#print len(mt),(len(mt)*1.0+100.0/ta)*1.0/(filter*1.0)
T.append((int(dd)-pre))
pre = int(dd)
return T
# ssss = readsummary("centroid_res.txt")
# for s in range(0,len(ssss)):
# if len(ssss[s]) > 10 and len(ssss[s])<20:
# print s
def Efficiency_test():
for i in range(0,10):
n = i*10+10
tau = i*5+5
T = Efficiency(n)
ss = 0.0
ct = 0.0
for s in T:
ss= ss+s
ct = ct +1.0
if ct<0.1:
            print(i * 10 + 5, 0.0)
continue
        print(ss / ct)
def PickCases(no):
summary=readsummary("centroid_res.txt",my=False)
for i in range(0,len(summary)):
if i == no:
for item in summary[i]:
                print(item)
summary=readsummary("eff_greedy_res.txt",my=True)
for i in range(0,len(summary)):
if i == no:
for item in summary[i]:
                print(item)
summary=readsummary("lex_res.txt",my=False)
for i in range(0,len(summary)):
if i == no:
for item in summary[i]:
                print(item)
PickCases(74)
|
[
"lycanlancelot@gmail.com"
] |
lycanlancelot@gmail.com
|
8b671cd00b665f2dc6ee967e7674832f91d0834c
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_201/1486.py
|
398ac7880e41de436f54e88085b54b6d17c4cf50
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,613
|
py
|
import collections
import sys
import os
sys.setrecursionlimit(500000)
class ReadWrite:
def __init__(self, file_name=None, verbose=True):
self.verbose = verbose
if file_name is None:
self.in_file = sys.stdin
self.out_file = sys.stdout
else:
self.in_file = open(file_name)
self.out_file = open(os.path.splitext(file_name)[0] + '.out', 'w')
self.case_idx = 1
def msg(self, output, end='\n'):
sys.stderr.write(str(output) + end)
def read_line(self, *types, all=None):
words = self.in_file.readline().strip().split()
if all is not None:
return [all(w) for w in words]
if len(types) == 0:
return words
assert (len(words) == len(types))
if len(types) == 1:
return types[0](words[0])
return [t(w) for t, w in zip(types, words)]
def write_case(self, output, true="YES", false="NO", join='\n'):
pfx = "Case #%d:" % self.case_idx
self.case_idx += 1
if isinstance(output, list):
text = join.join([pfx] + output)
elif isinstance(output, bool):
text = pfx + ' ' + (true if output else false)
else:
text = pfx + ' ' + str(output)
self.out_file.write(text + '\n')
if self.verbose:
self.msg(text)
else:
self.msg(pfx)
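# split_intervals simulates one "round" of seating: each free interval of
# length k splits into a left part of length k - 1 - k//2 and a right part of
# length k//2, while new_mk accumulates how many people fit once this round is
# done. solve() keeps splitting until person K lands in the current round,
# then scans interval sizes from largest to smallest to find the interval that
# person K actually takes.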
def split_intervals(max_k, intervals):
new_i = collections.defaultdict(int)
new_mk = max_k
for k, v in intervals.items():
if k == 1:
new_mk += v
elif k == 2:
new_i[1] += v
new_mk += v
else:
right = k // 2
left = k - 1 - right
new_i[right] += v
new_i[left] += v
new_mk += 2 * v
return max_k, new_mk, new_i
def lr_interval(i):
right = i // 2
left = i - 1 - right
return (right, left)
def solve(N, K):
intervals = {N: 1}
min_k = 0
max_k = 1
while not (min_k <= K <= max_k):
min_k, max_k, intervals = split_intervals(max_k, intervals)
k = min_k
isizes = sorted(list(intervals.keys()))
isizes.reverse()
for i in isizes:
n = intervals[i]
if k + n >= K:
return lr_interval(i)
k += n
assert False
if __name__ == '__main__':
input_name = sys.argv[1] if len(sys.argv) > 1 else 'C-tiny-practice.in'
rw = ReadWrite(input_name)
T = rw.read_line(int)
for t in range(T):
N, K = rw.read_line(int, int)
rw.write_case("%d %d" % solve(N, K))
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
a44d0519368984e18f08b83959161a479da50928
|
b1e6af8dffc4e88eb75a8061fd6dd903dc7ab262
|
/pokedex.py
|
d0d58b02819767636f755d8630e40cf2eb82dd21
|
[
"EFL-2.0"
] |
permissive
|
meew0/inumuta-modules
|
6c02afdf9c9c8870d9564f0cbff0e262ecea0193
|
0d0c7ee76ebb43503aa121c249c05007d5265a38
|
refs/heads/master
| 2023-08-22T02:50:42.213551
| 2014-11-09T20:35:12
| 2014-11-09T20:35:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,444
|
py
|
#coding:utf8
"""
pokedex.py - Get your Pokemon fix!
Copyright 2014 Max Gurela
Licensed under the Eiffel Forum License 2.
"""
from willie import web
from willie.module import commands, rule, thread, example
from util import timing
from bs4 import BeautifulSoup
import re
import urllib2
user_agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:17.0) Gecko/17.0' \
' Firefox/17.0'
@thread(True)
@commands('dex','pokedex',u'pok\u00e9dex')
@example('.pokedex Charmander', '8')
def pokedex(bot, trigger):
"""
.pokedex <query> - Search for a Pokemon and much, much more. See '.pokedex manual' for more information
"""
if not trigger.group(3):
bot.say(u'[Pok\u00E9dex] Please consult the Pok\u00E9dex manual (.pokedex manual).')
return
if trigger.group(3).lower() == 'welcome':
bot.say(u'<Prof. Oak> Hello there! Welcome to the world of Pok\u00E9mon! My name is Oak! People call me the Pok\u00E9mon Prof! This world is inhabited by creatures called Pok\u00E9mon! For some people, Pok\u00E9mon are pets. Other use them for fights. Myself... I study Pok\u00E9mon as a profession. {nick}! Your very own Pok\u00E9mon legend is about to unfold! A world of dreams and adventures with Pok\u00E9mon awaits! Let\'s go!'.replace('{nick}', trigger.nick))
bot.say(u'<Prof. Oak> First things first, lets get your pok\u00E9dex set up. I\'ll need you to run a couple queries to make sure it\'s working properly.')
return
if trigger.group(3).lower() == 'manual':
bot.action(u'opens the Pok\u00E9dex man pages (transmitted via notice)')
bot.notice(u'The pok\u00e9dex is operated with two base commands, either .dex or .pokedex', recipient=trigger.nick)
bot.notice(u'\u200b', recipient=trigger.nick)
bot.notice(u'You may use the pokedex to research the following things:', recipient=trigger.nick)
bot.notice(u' - Pok\u00e9mon .dex Abra or .dex 63', recipient=trigger.nick)
bot.notice(u' - Pok\u00e9mon stats .dex -s Abra or .dex -s 63', recipient=trigger.nick)
bot.notice(u' - Pok\u00e9mon moves .dex move:tackle', recipient=trigger.nick)
bot.notice(u' - Pok\u00e9mon types .dex type:psychic', recipient=trigger.nick)
bot.notice(u' - Items .dex item:master ball', recipient=trigger.nick)
bot.notice(u'For language-specific results, just prepend @<lang code>. (e.g. .dex @en:charge)', recipient=trigger.nick)
return
    query = trigger.group(2).strip()
all_stats = (trigger.group(3).lower() == '-s')
if all_stats:
query = query.replace(trigger.group(3), '').strip()
##TODO: REMOVE
#bot.say(u'[Pok\u00E9dex] There\'s a time and a place for everything. But not now. http://puu.sh/cvW4m/e510f8be5b.jpg')
#return
##TODO: REMOVE
url = 'http://veekun.com/dex/lookup?lookup='
if trigger.group(3).isdigit() and 'pokemon:' not in trigger.group(3):
url = 'http://veekun.com/dex/lookup?lookup=pokemon:'
url = follow_redirects(bot, url+query)
url = urllib2.unquote(url)
if not url:
bot.say(u'[Pok\u00E9dex] Invalid query, please try again.')
return
soup = get_soup(url)
if soup:
try:
crumb = soup.find('ul', id='breadcrumbs').text.lower()
        except AttributeError:
bot.say(u'[Pok\u00E9dex] Please return to Professor Oak, you are missing an important software patch. (404)')
return
if 'moves' in crumb:
parse_move(bot, soup)
elif u'pok\u00E9mon' in crumb:
parse_poke(bot, soup, all_stats)
elif 'item' in crumb:
parse_item(bot, soup)
elif 'type' in crumb:
parse_type(bot, soup)
elif 'disambiguation' in crumb:
parse_disambig(bot, soup, trigger.nick)
elif 'abilities' in crumb:
parse_abilities(bot, soup)
else:
bot.say(u'[Pok\u00E9dex] There is no Pok\u00E9dex function that sufficiently matches what you\'re trying.')
return
else:
bot.say(u'[Pok\u00E9dex] http://puu.sh/cvW4m/e510f8be5b.jpg')
return
def follow_redirects(bot, url):
"""
Follow HTTP 3xx redirects, and return the actual URL. Return None if
there's a problem.
"""
try:
connection = web.get_urllib_object(url, 60)
url = connection.geturl() or url
connection.close()
except Exception as e:
return None
return url
def get_soup(url):
return BeautifulSoup(web.get(url, headers={'User-Agent':user_agent}), 'lxml')
def parse_move(bot, soup):
soup = soup.find('div', id='content')
title = soup.find('p', id='dex-page-name').text
types = [ img['title'].split(':')[0] for img in soup.find('p', id='dex-page-types').find_all('img') ]
summary = soup.find('div', class_='dex-page-beside-portrait').find('p').text
soup = soup.find('div', class_='dex-column-container')
power = soup.find_all('div', class_='dex-column')[0].find_all('dd')[0].text
accuracy = soup.find_all('div', class_='dex-column')[0].find_all('dd')[1].text
pp = soup.find_all('div', class_='dex-column')[0].find_all('dd')[2].text
target = soup.find_all('div', class_='dex-column')[0].find_all('dd')[3].text
effect_chance = soup.find_all('div', class_='dex-column')[0].find_all('dd')[4].text
output = [u'[Pok\u00E9dex] ',title,' | ',types[1],'/',types[0],' | ',summary]
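    # From here on "output" grows as a list of 1-character strings: `list += str`
    # appends each character separately, which is why ''.join(output[:-2]) at the
    # end can drop the trailing ", " as the last two list items.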
output+=' | Dmg: '
output+=power.split(';')[0].strip()
output+=' | Acc: '
output+=accuracy.strip()
output+=' | PP: '
output+=pp.split(',')[0].strip()
output+=' | Flags: '
flags = [ item.find('a').text for item in soup.find_all('div', class_='dex-column')[1].find('ul').find_all('li') if not item.has_attr('class') ]
for flag in flags:
output+=flag
output+=', '
bot.say(''.join(output[:-2]))
def parse_poke(bot, soup, stats=False):
pokemon = dict()
soup = soup.find('div', id='content')
part = soup.find('div', class_='dex-page-portrait')
pokemon['name'] = part.find('p', id='dex-page-name').text
pokemon['types'] = [type.get('alt', '') for type in part.find('p', id='dex-page-types').find_all('img')]
part = soup.find('div', class_='dex-page-beside-portrait')
pokemon['abilities'] = [ability.text for ability in part.find('dl', class_='pokemon-abilities').find_all('dt')]
part = soup.find('div', class_='dex-column-container')
pokemon['gen'] = part.find_all('div', class_='dex-column')[0].find_all('dd')[0].find('img').get('alt', '')
pokemon['number'] = part.find_all('div', class_='dex-column')[0].find_all('dd')[1].text.replace('\n','').strip().zfill(3)
pokemon['gender'] = part.find_all('div', class_='dex-column')[1].find_all('dd')[0].text.replace('\n','').strip()
pokemon['egg_types'] = [egg.text for egg in part.find_all('div', class_='dex-column')[1].find('ul', class_='inline-commas').find_all('li')]
pokemon['steps_hatch'] = ' '.join(part.find_all('div', class_='dex-column')[1].find_all('dd')[3].text.replace('\n','').strip().split())
pokemon['base_exp'] = part.find_all('div', class_='dex-column')[2].find_all('dd')[0].text.replace('\n','').strip()
part = soup.find('table', class_='dex-pokemon-stats')
pokemon['base_hp'] = part.find_all('div', class_='dex-pokemon-stats-bar')[0].text
pokemon['base_atk'] = part.find_all('div', class_='dex-pokemon-stats-bar')[1].text
pokemon['base_def'] = part.find_all('div', class_='dex-pokemon-stats-bar')[2].text
pokemon['base_SpAtk'] = part.find_all('div', class_='dex-pokemon-stats-bar')[3].text
pokemon['base_SpDef'] = part.find_all('div', class_='dex-pokemon-stats-bar')[4].text
pokemon['base_speed'] = part.find_all('div', class_='dex-pokemon-stats-bar')[5].text
pokemon['base_total'] = part.find_all('div', class_='dex-pokemon-stats-bar')[6].text
output = []
if not stats:
output+=u'[Pok\u00E9dex] #'
output+=pokemon['number']
output+=' '
output+=pokemon['name']
output+=' | '
for type in pokemon['types']:
output+=type
output+='/'
output.pop()
output+=' | '
for ability in pokemon['abilities']:
output+=ability
output+='/'
output.pop()
output+=' | '
output+=pokemon['gen']
output+=' | '
output+=pokemon['gender']
output+=' | Egg: '
for ability in pokemon['egg_types']:
output+=ability
output+='/'
output.pop()
else:
output+=u'[Pok\u00E9dex] #'
output+=pokemon['number']
output+=' '
output+=pokemon['name']
output+=' | '
output+=pokemon['base_exp']
output+=' EXP | Speed '
output+=pokemon['base_speed']
output+=' | '
output+=pokemon['base_hp']
output+=' HP | Attack '
output+=pokemon['base_atk']
output+=' | Defense '
output+=pokemon['base_def']
output+=' | Sp. Atk '
output+=pokemon['base_SpAtk']
output+=' | Sp. Def '
output+=pokemon['base_SpDef']
output+=' | Total '
output+=pokemon['base_total']
bot.say(''.join(output))
return
def parse_item(bot, soup):
soup = soup.find('div', id='content')
title = soup.find('p', id='dex-page-name').text
soup = soup.find('div', class_='dex-page-beside-portrait')
summary = soup.find('p').text
cost = soup.find('dl').find_all('dd')[0].text.strip()
flags = [ item.text for item in soup.find('dl').find('ul', class_='classic-list').find_all('li') ]
output = [ u'[Pok\u00E9dex] ', title, ' | ', cost, ' | ', summary, ' | Flags: ' ]
for flag in flags:
output+=flag.lower()
output+=', '
bot.say(''.join(output[:-2]))
def parse_type(bot, soup):
soup = soup.find('div', id='content')
title = soup.find('p', id='dex-page-name').text
soup = soup.find('div', class_='dex-page-beside-portrait')
dealt = soup.find_all('ul', class_='dex-type-list')[0].find_all('li')
taken = soup.find_all('ul', class_='dex-type-list')[1].find_all('li')
for i, li in enumerate(dealt):
dealt[i] = (li.find('a').find('img').get('title'), li.text.strip())
dealt.sort(key=lambda x: x[1])
for i, li in enumerate(taken):
taken[i] = (li.find('a').find('img').get('title'), li.text.strip())
taken.sort(key=lambda x: x[1])
red=u'\x034'
green=u'\x033'
yellow=u'\x038'
reset=u'\x03'
output = [u'[Pok\u00E9dex] ',title,u' | Legend:',green,' 2x',reset,yellow,u' \xbdx',reset,red,' 0x',reset,' | Damage Dealt: ']
for type,value in dealt:
if value != '1' and type != title:
if value == '0': type = red+type+reset
elif value == u'\xbd': type = yellow+type+reset
elif value == '2': type = green+type+reset
output+=type
output+=', '
output = output[:-2]
output+=' | Damage Taken: '
for type,value in taken:
if value != '1' and type != title:
if value == '0': type = red+type+reset
elif value == u'\xbd': type = yellow+type+reset
elif value == '2': type = green+type+reset
output+=type
output+=', '
bot.say(''.join(output[:-2]))
def parse_abilities(bot, soup):
soup = soup.find('div', id='content')
title = soup.find('p', id='dex-page-name').text
summary = re.sub(r'\(.+\)$', '', soup.find('div', class_='dex-page-beside-portrait').find('div', class_="markdown").text)
bot.say(u'[Pok\u00E9dex] '+title+': '+summary)
def parse_disambig(bot, soup, sender=None):
soup = soup.find('div', id='content')
things = [' '.join(thing.text.replace('\n','').split()) for thing in soup.find('ul', class_="classic-list").find_all('li') ]
things = [thing for thing in things if 'Conquest' not in thing]
if (len(things) > 10):
things = things[:10]
bot.say(u'[Pok\u00E9dex] Sorry, I couldn\'t find exactly what you\'re looking for. I did find ' + str(len(things)) + ' possible results though. (transmitted via notice)')
[ bot.notice(' - '+re.sub(r'\(.+\)$', '', thing), recipient=sender) for thing in things ]
|
[
"maxpowa1@gmail.com"
] |
maxpowa1@gmail.com
|
43bebf0e9cd615a907f2a5930fc5f162e543af5f
|
504d181ad8c77fb2338c55773bf70dfcc28620ae
|
/lesson_1/B/solution_b.py
|
6ff8e9efbb0b533a261f900a867e22a686f17495
|
[] |
no_license
|
RomanMatiiv/training_algorithms
|
f419ecea34cf66f66321898180d5fdf59b333d36
|
9827911e02d937f2949cf15cf13172f2e3720c27
|
refs/heads/main
| 2023-06-12T06:57:50.536198
| 2021-06-25T04:58:27
| 2021-06-25T04:58:27
| 373,376,943
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 445
|
py
|
"""
https://contest.yandex.ru/contest/27393/problems/B/
"""
def triangle_exist(a: int, b: int, c: int) -> bool:
if (a + b) <= c:
return False
if (a + c) <= b:
return False
    if (b + c) <= a:
        return False
    return True
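# Example: triangle_exist(3, 4, 5) -> True, while triangle_exist(1, 2, 3) -> False,
# since the non-strict comparisons also reject degenerate (zero-area) triangles.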
if __name__ == "__main__":
a = int(input())
b = int(input())
c = int(input())
if triangle_exist(a, b, c):
print("YES")
else:
print("NO")
|
[
"matiiv.r.a@gmail.com"
] |
matiiv.r.a@gmail.com
|
42cae87b3c1b229ac8ce61c70b75cbcec5ac9622
|
ad6ba311b47de42115ec23e839e77adf005c5456
|
/Python_Solutions/1176. Diet Plan Performance.py
|
ab1853ab581f1cce3519317b93ca89ce331ef458
|
[] |
no_license
|
ajinkyad13/LeetCode_For_Interviews
|
07fbcfe25cbcac76406afc927f0ee2bf03fde16e
|
d250697262386ea59301a41b8abe689b95303502
|
refs/heads/master
| 2023-06-18T20:44:55.359673
| 2021-07-09T04:19:07
| 2021-07-09T04:19:07
| 384,317,286
| 0
| 0
| null | 2021-07-09T04:06:52
| 2021-07-09T04:06:51
| null |
UTF-8
|
Python
| false
| false
| 552
|
py
|
class Solution:
    def dietPlanPerformance(self, calories, k, lower, upper):
        countUp = 0
        countLow = 0
        # Slide a window of exactly k consecutive days across the calorie list.
        for i in range(k, len(calories) + 1):
            temp = calories[i - k:i]
            if sum(temp) > upper:
                countUp += 1
            elif sum(temp) < lower:
                countLow += 1
        # Total points: +1 per window above upper, -1 per window below lower.
        return countUp - countLow

a = Solution().dietPlanPerformance([1, 2, 3, 4, 5], 1, 3, 3)
print(a)
|
[
"ajinkyad13@gmail.com"
] |
ajinkyad13@gmail.com
|
cb1ecbcd603ed49c58fa65634f71bfc6f4943ea5
|
b7893e31c29e37619b2217633739cc20e3f3ff11
|
/src/compositekey/db/models/signals.py
|
ca014e6f6e241766ca72132825fd534c09f0b844
|
[] |
no_license
|
KACAH/django-compositekey
|
9818ce6dfee3dd319675b6ec82144d4dd75b0c44
|
ea1a29a50401a2d0f93d056fd5565ff66f7b7975
|
refs/heads/master
| 2021-01-16T19:34:52.578849
| 2012-03-06T13:56:42
| 2012-03-06T13:56:42
| 3,638,280
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,157
|
py
|
from compositekey.utils import disassemble_pk
__author__ = 'aldaran'
from django.dispatch.dispatcher import receiver
from django.db.models.signals import class_prepared
import operator
from functools import reduce  # reduce is not a builtin in Python 3
@receiver(class_prepared)
def prepare_model_and_meta(sender, **kwargs):
opts = sender._meta
unique_together = []
for field_constraints in opts.unique_together:
fields = [opts.get_field(name) for name in field_constraints]
fields = [f.fields if hasattr(f, "fields") else [f] for f in fields]
fields = reduce(operator.add, fields)
unique_together.append([f.name for f in fields])
opts.unique_together = tuple(unique_together)
# implement automatically the django natural keys
# if is not yet implemented
if getattr(sender._meta, "has_composite_primarykeys_field", False):
if not hasattr(sender, 'natural_key'):
def natural_key(self):
return disassemble_pk(self.pk, len(self._meta.composite_primarykeys_field.get_key_fields()))
sender.natural_key = natural_key
if not hasattr(sender._default_manager, 'get_by_natural_key'):
class NaturalCompositeManager(sender._default_manager.__class__):
def get_by_natural_key(self, *args):
names = [f.name for f in self.model._meta.composite_primarykeys_field.get_key_fields()]
return self.get(**dict(zip(names, args)))
sender._default_manager.__class__ = NaturalCompositeManager
# add special fields to the child
for parent, field in sender._meta.parents.items():
if hasattr(parent._meta, "composite_special_fields"):
sender._meta.composite_special_fields = list(getattr(sender._meta, "composite_special_fields", []))
sender._meta.composite_special_fields += list(parent._meta.composite_special_fields)
sender._meta.composite_special_fields = set(sender._meta.composite_special_fields)
# if getattr(opts, "enable_composite", False):
# for m2m in opts.local_many_to_many:
# m2m.rel.through._meta.enable_composite = True
|
[
"s.federici@gmail.com"
] |
s.federici@gmail.com
|
98e50f6c665078875c857f905133443663eb1620
|
d476f2a1ec5dcc292ee2bf7207634ad9fae14091
|
/routes.py
|
01338fbd40906c6af7cf4537af61ba004cfbc53c
|
[
"MIT"
] |
permissive
|
gustavohvs-dev/Simple-FlaskPython
|
16155756c2ac96029083de592768166f1972cc6e
|
845528a3d3df21b6c8a46fc46840bb19aa5b8446
|
refs/heads/main
| 2023-06-19T01:55:59.989164
| 2021-07-18T18:00:37
| 2021-07-18T18:00:37
| 387,238,210
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 495
|
py
|
from flask import Flask, request
from cadastraUsuario import insertUsuario
app = Flask("Server")
@app.route("/hello", methods=["GET"])
def hello():
return {"app": "Hello World"}
@app.route("/cadastra/usuario", methods=["POST"])
def cadastra():
body = request.get_json()
if("nome" not in body):
return {"status": 400, "info": "O parâmetro nome é obrigatório"}
usuario = insertUsuario(body["nome"], body["email"], body["senha"])
return usuario
app.run()
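# Example request (Flask's development server listens on port 5000 by default;
# the field values here are illustrative only):
#   curl -X POST http://127.0.0.1:5000/cadastra/usuario \
#        -H "Content-Type: application/json" \
#        -d '{"nome": "Ana", "email": "ana@example.com", "senha": "secret"}'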
|
[
"gustavohvs.dev@gmail.com"
] |
gustavohvs.dev@gmail.com
|
326b7893a94f523502df4f6c1da9e60607d7e4d1
|
94cc0a21642f2938b9e818354ec7748454ab4b28
|
/serverApp/src/database/tables/servicesTable/servicesTable.py
|
ee1454f172e448c72d3f3968645e89edd220c3e5
|
[] |
no_license
|
Mishal-Alajmi/ChocoAnon
|
5dbdfbcfe45b5920c980d667ae29a91e514a2be9
|
ab8b4ad96a4274f7fd1bfccbeefbf4566c2a0ea2
|
refs/heads/master
| 2020-06-07T10:09:22.110149
| 2019-06-24T03:13:13
| 2019-06-24T03:13:13
| 192,995,203
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,749
|
py
|
import constants.consts as consts
from database.tables.table.table import Table
import datetime
class ServicesTable(Table):
def __init__(self, connection, cursor, lock):
super().__init__(connection, cursor, lock, consts.SERVICES_TABLE)
self.create()
"""
Contains all of the services that have been purchased
services have the following key/value pairs:
id: a unique string, serves as the entry's primary key
code: the 6 digit service code
date: date the service was purchased
provider: the id of the provider who sold the service
member: the id of the member who bought the service
received: date and time the server received the purchase notification
comment: arbitrary text
status: consts.STATUS_PAID or consts.STATUS_UNPAID
no other keys should be defined and all of these keys must be defined
"""
def create(self):
self.lock.acquire()
try:
command = "CREATE TABLE IF NOT EXISTS '" + consts.SERVICES_TABLE + "' (" +\
"id TEXT PRIMARY KEY, " +\
"code TEXT, " +\
"date TEXT, " +\
"provider TEXT, " +\
"member TEXT, " +\
"received TEXT, " +\
"comment TEXT, " +\
"status TEXT)"
self.cursor.execute(command)
self.connection.commit()
return {"error": consts.NO_ERROR, "result": None}
finally:
self.lock.release()
# inject a received timestamp
def add(self, dict):
dict["received"] = str(datetime.datetime.now())
return super(ServicesTable, self).add(dict)
|
[
"mishal.ajm@gmail.com"
] |
mishal.ajm@gmail.com
|
3d499f76f455c25cf01c210a73ac6ee7cc2f8518
|
61319c600e18acbb0ca9c2f722cf6d180b19bade
|
/Curso_Em_Video_Python/ex046.py
|
736e1a144e960fd89ddecc2cb25c4c20cd1908a3
|
[
"MIT"
] |
permissive
|
ThallesTorres/Curso_Em_Video_Python
|
a5e93bd33387684830ed081dd127754d1223f90b
|
95ffbff5a03f11fee41df746604dfe435f385a3b
|
refs/heads/master
| 2020-09-29T06:59:33.226325
| 2020-04-19T21:17:22
| 2020-04-19T21:17:22
| 226,981,932
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 623
|
py
|
# Ex: 046 - Write a program that shows an on-screen countdown to a fireworks
# display, going from 10 down to 0, with a one-second pause between numbers.
from time import sleep
import emoji
print('''
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
--Welcome!
--Exercise 046
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
''')
for contagem in range(10, 0, -1):
sleep(1)
print(contagem)
print(emoji.emojize(':boom:' * 15, use_aliases=True))
print('''
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
--Thanks for using this program!
--Developed by Thalles Torres
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-''')
|
[
"thalles.torres@pm.me"
] |
thalles.torres@pm.me
|
4fe0c671a71ebf8b6d077cbf8113dd9dbf259363
|
5edf69877e49b5fae3b888ebe6e42aa857b3f261
|
/sql/plugins/schemasync.py
|
c4dbf50a55dd3c4f1a0130f55bdd250053a9f1a3
|
[
"Apache-2.0"
] |
permissive
|
yzypals/Archery
|
86843bb94748a0ef4f9f966b40f5cd8bc9b0f947
|
b5d90dfb91526a92f22d5c2d5959cd747c2ea4bd
|
refs/heads/master
| 2021-11-23T07:48:01.412314
| 2021-11-10T01:03:58
| 2021-11-10T01:03:58
| 183,379,295
| 1
| 0
|
Apache-2.0
| 2019-05-06T14:31:52
| 2019-04-25T07:22:18
|
Python
|
UTF-8
|
Python
| false
| false
| 1,561
|
py
|
# -*- coding: UTF-8 -*-
"""
@author: hhyo
@license: Apache Licence
@file: schemasync.py
@time: 2019/03/05
"""
__author__ = 'hhyo'
import shlex
from sql.plugins.plugin import Plugin
class SchemaSync(Plugin):
def __init__(self):
self.path = 'schemasync'
self.required_args = []
self.disable_args = []
        super(SchemaSync, self).__init__()
def generate_args2cmd(self, args, shell):
"""
转换请求参数为命令行
:param args:
:param shell:
:return:
"""
k_options = ['sync-auto-inc', 'sync-comments']
kv_options = ['tag', 'output-directory', 'log-directory']
v_options = ['source', 'target']
if shell:
cmd_args = self.path if self.path else ''
for name, value in args.items():
if name in k_options and value:
cmd_args += f' --{name}'
elif name in kv_options:
cmd_args += f' --{name}={shlex.quote(str(value))}'
elif name in v_options:
cmd_args += f' {value}'
else:
cmd_args = [self.path]
for name, value in args.items():
if name in k_options and value:
cmd_args.append(f'--{name}')
elif name in kv_options:
cmd_args.append(f'--{name}')
cmd_args.append(f'{value}')
elif name in ['source', 'target']:
cmd_args.append(f'{value}')
return cmd_args
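# Hypothetical example of the shell form (tag and connection URLs are made up):
#   SchemaSync().generate_args2cmd(
#       {"sync-auto-inc": True, "tag": "v1",
#        "source": "mysql://user:pw@h1/db", "target": "mysql://user:pw@h2/db"},
#       shell=True)
#   -> "schemasync --sync-auto-inc --tag=v1 mysql://user:pw@h1/db mysql://user:pw@h2/db"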
|
[
"rtttte@qq.com"
] |
rtttte@qq.com
|
b0dad0912a44c1ff07ae343164c94deb41b18acf
|
bbf025a5f8596e5513bd723dc78aa36c46e2c51b
|
/array + Design/41 FindMissingPos.py
|
2aa6c7b477ac6fc50a9f28c39f663bba77b8d545
|
[] |
no_license
|
AlanFermat/leetcode
|
6209bb5cf2d1b19e3fe7b619e1230f75bb0152ab
|
cacba4abaca9c4bad8e8d12526336115067dc6a0
|
refs/heads/master
| 2021-07-11T04:00:00.594820
| 2020-06-22T21:31:02
| 2020-06-22T21:31:02
| 142,341,558
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 569
|
py
|
class Solution(object):
def firstMissingPositive(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
if not nums:
return 1
m = min(nums)
M = max(nums)
if M < 1 or m > 1:
return 1
temp = list(range(1,M+1))
for i in range(len(nums)):
if nums[i] > 0:
temp[nums[i]-1] = -1
idx = 0
while idx < len(temp):
if temp[idx] != -1:
return temp[idx]
idx += 1
return M+1
|
[
"zy19@rice.edu"
] |
zy19@rice.edu
|
f145b2808be87627c1b6ab9f58a04dfa5a31d524
|
4a3e87c736b60b9e209095677a67aa96235331e0
|
/utilities/util.py
|
1a3d50af9bd5902ccf543c6f2e9664bdbb88ffad
|
[] |
no_license
|
joselitan/ViasatWebPOM
|
6078389255abd2547d1e9dd7757ae3a8a31c7aae
|
2ffb669f48471a1b3821ae470075e180981bf662
|
refs/heads/master
| 2020-05-22T18:33:19.996159
| 2019-05-30T09:10:27
| 2019-05-30T09:27:11
| 186,474,325
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,335
|
py
|
"""
@package utilities
Util class implementation
All most commonly used utilities should be implemented in this class
Example:
name = self.util.getUniqueName()
"""
import time
import traceback
import random, string
import utilities.custom_logger as cl
import logging
class Util(object):
log = cl.customLogger(logging.INFO)
def sleep(self, sec, info=""):
"""
        Pause the program for the specified number of seconds
"""
if info is not None:
self.log.info("Wait :: '" + str(sec) + "' seconds for " + info)
try:
time.sleep(sec)
except InterruptedError:
traceback.print_stack()
def getAlphaNumeric(self, length, type ="letters"):
"""
Get random string of characters
:param length: Length of string, number of characters string should have
:param type: Type of characters string should have. Default is letters
Provide lower/upper/digits for different types
"""
alpha_num = ''
if type == 'lower':
case = string.ascii_lowercase
elif type == 'upper':
case = string.ascii_uppercase
elif type == 'digits':
case = string.digits
elif type == 'mix':
case = string.ascii_letters + string.digits
else:
case = string.ascii_letters
return alpha_num.join(random.choice(case) for i in range(length))
def getUniqueName(self, charCount=10):
"""
Get a unique name
"""
return self.getAlphaNumeric(charCount, 'lower')
def getUniqueNameList(self, listSize=5, itemLength=None):
"""
        Get a list of unique names
Parameters:
listSize: Number of names. Default is 5 names in a list
itemLength: It should be a list containing number of items equal to the listSize
            This determines the length of each item in the list [1, 2, 3, 4, 5]
"""
        nameList = []
        if itemLength is None:
            itemLength = [10] * listSize  # default length for every generated name
        for i in range(0, listSize):
            nameList.append(self.getUniqueName(itemLength[i]))
        return nameList
def verifyTextContains(self, actualText, expectedText):
"""
Verify actual text contains expected text string
Parameters:
expectedList: expected Text
actualList: actual Text
"""
self.log.info("Actual Text From Application Web UI --> :: " + actualText)
self.log.info("Expected Text From Application Web UI --> :: " + expectedText)
if expectedText.lower() in actualText.lower():
self.log.info("### VERICATION CONTAINS")
return True
else:
self.log.info("### VERIFICATION DOES NOT CONTAINS!!!")
return False
def verifyTextMatch(self, actualText, expectedText):
"""
        Verify actual text matches expected text string
Parameters:
expectedList: expected Text
actualList: actual Text
"""
self.log.info("Actual Text From Application Web UI --> :: " + actualText)
self.log.info("Expected Text From Application Web UI --> :: " + expectedText)
if expectedText.lower() == actualText.lower():
self.log.info("### VERICATION CONTAINS")
return True
else:
self.log.info("### VERIFICATION DOES NOT CONTAINS!!!")
return False
def verifyListMatch(self, expectedList, actualList):
"""
Verify actual list contains elements of expected list
Parameters:
expectedList: Expected List
actualLlist: Actual List
"""
return set(expectedList) == set(actualList)
def verifyListContains(self, expectedList, actualList):
"""
Verify actual list contains elements of expected list
Parameters:
expectedList: Expected List
actualList: Actual List
"""
# self.log.info("Actual Text From Application Web UI --> :: ", actualList)
# self.log.info("Expected Text From Application Web UI --> :: ", expectedList)
        length = len(expectedList)
        for i in range(0, length):
            if expectedList[i] not in actualList:
                return False
        return True
|
[
"joselitan@gmail.com"
] |
joselitan@gmail.com
|
1fed8ac776f807d122b9792c35a09d219c922bbe
|
d867242f0676ec23a85c52320fad586b9d7af9d5
|
/mysite/application/migrations/0035_auto_20190206_1556.py
|
87bafb2c09ac6561da0a1d7b76171bb18bb43be6
|
[] |
no_license
|
RUI123/SR-Novel-Website
|
eedab866a871453e5b8039c7771196c3726f67ee
|
3f6204c5c7dd454bf7fb8e6944c008f2842ed0b0
|
refs/heads/master
| 2020-04-22T07:40:20.725636
| 2019-02-08T23:36:35
| 2019-02-08T23:36:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 635
|
py
|
# Generated by Django 2.1.5 on 2019-02-06 20:56
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('application', '0034_auto_20190206_1513'),
]
operations = [
migrations.AlterField(
model_name='book',
name='bookImage',
field=models.ImageField(default='default_book.jpg', upload_to='book_images'),
),
migrations.AlterField(
model_name='profile',
name='profile_image',
field=models.FileField(default='default_profile.jpg', upload_to='profile_images'),
),
]
|
[
"ruifeng.zhang1993@gmail.com"
] |
ruifeng.zhang1993@gmail.com
|
cb2263c8fe218f6f5f158ea1d9013992b03d3e76
|
ce76b3ef70b885d7c354b6ddb8447d111548e0f1
|
/right_case.py
|
5e6a73c6b6a8142e8333f357b70921eab9ceefe4
|
[] |
no_license
|
JingkaiTang/github-play
|
9bdca4115eee94a7b5e4ae9d3d6052514729ff21
|
51b550425a91a97480714fe9bc63cb5112f6f729
|
refs/heads/master
| 2021-01-20T20:18:21.249162
| 2016-08-19T07:20:12
| 2016-08-19T07:20:12
| 60,834,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 203
|
py
|
#! /usr/bin/env python
def person(str_arg):
place(str_arg)
print('good_company_or_thing')
def place(str_arg):
print(str_arg)
if __name__ == '__main__':
person('give_man_beneath_eye')
|
[
"jingkaitang@gmail.com"
] |
jingkaitang@gmail.com
|
4a3ccf037f048fbbeaf6108b50afcae33968e981
|
1a08284924a1eeacca6509eae29002c3e949e585
|
/script/huntapi/targets/getspawns.py
|
9f8298719527b1f6f1ebbbc23bb8a6e90d202db7
|
[] |
no_license
|
Bemoliph/FFXIV-Hunts
|
e287f99c069bc441ba23c727501556fd3675ed34
|
204b8235f918be4bf307d7257798666244ae41ef
|
refs/heads/master
| 2021-05-02T13:06:57.696926
| 2015-09-24T18:40:04
| 2017-03-01T07:52:56
| 43,085,527
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,995
|
py
|
import cherrypy
import time
import huntdb
import huntdocs
import huntutils
def getInputs(params):
inputData = {}
if "targetIDs" not in params or not params["targetIDs"]:
raise ValueError("Missing mandatory input 'targetIDs'.")
inputData["targetIDs"] = huntutils.parseCSV(int, "targetIDs", params.get("targetIDs"))
inputData["maxRecords"] = huntutils.getConstrainedValue(1, huntutils.parseValue(int, "maxRecords", params.get("maxRecords", 10)), 1000)
inputData["before"] = huntutils.getDatetime("before", huntutils.parseValue(float, "before", params.get("before", time.time())))
if "after" in params:
inputData["after"] = huntutils.getDatetime("after", huntutils.parseValue(float, "after", params.get("after")))
return inputData
def getSpawns(inputData):
dbConn, dbCursor = huntdb.getConnectionWithCursor(dictCursor = True)
query = """
SELECT DISTINCT ON (xCoord, yCoord) xCoord, yCoord
FROM hunts.sightings
WHERE targetID = %s AND xCoord IS NOT NULL AND yCoord IS NOT NULL
ORDER BY xCoord, yCoord ASC;"""
targetSpawns = {}
for tID in inputData["targetIDs"]:
queryInput = (tID, )
dbCursor.execute(query, queryInput)
targetSpawns[tID] = [dict(x) for x in dbCursor.fetchall()]
huntdb.putConnectionWithCursor(dbConn, dbCursor)
return targetSpawns
@cherrypy.expose
@cherrypy.tools.json_out()
@huntdocs.apiDoc("GET", "/api/target/getSpawns/",
"""Returns map coordinates of all spawn points of each indicated target.
* Inputs:
+ **targetIDs** - Comma separated list of target IDs.
* Output:
+ **data** - Dictionary of targetID:list pairs containing the following:
- **xCoord** - x coordinate of target on in-game map.
- **yCoord** - y coordinate of target on in-game map.
""")
def renderPage(self, **params):
try:
inputData = getInputs(params)
except ValueError as e:
return {"success": False, "message": e.message}
targetSpawns = getSpawns(inputData)
return {"success": True, "message": "OK", "data": targetSpawns}
|
[
"bemoliph@gmail.com"
] |
bemoliph@gmail.com
|
662603bdfdfc3a1706b2e401d17b085001b55f4e
|
6ba9e90cfd2a3d03806f730fbbca574c2f8de9f8
|
/extensions/.stubs/clrclasses/__clrclasses__/System/Linq/__init__.py
|
46bee5eb923ad78e189b7f427e931782e762a617
|
[] |
no_license
|
anshe80/Pycad
|
696932a2e1eb720fec83790966b64cc4ff821426
|
8c238b23269fe70279a88626e9776a9b1ae9b9a2
|
refs/heads/master
| 2023-01-29T11:17:33.286871
| 2020-12-12T05:33:34
| 2020-12-12T05:33:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 77,692
|
py
|
import __clrclasses__.System.Linq.Expressions as Expressions
from __clrclasses__.System import Func as _n_0_t_0
from __clrclasses__.System import Array as _n_0_t_1
from __clrclasses__.System import Action as _n_0_t_2
from __clrclasses__.System import Enum as _n_0_t_3
from __clrclasses__.System import IComparable as _n_0_t_4
from __clrclasses__.System import IFormattable as _n_0_t_5
from __clrclasses__.System import IConvertible as _n_0_t_6
from __clrclasses__.System.Collections import IEnumerable as _n_1_t_0
from __clrclasses__.System.Collections.Concurrent import Partitioner as _n_2_t_0
from __clrclasses__.System.Collections.Generic import IEnumerable as _n_3_t_0
from __clrclasses__.System.Collections.Generic import IEqualityComparer as _n_3_t_1
from __clrclasses__.System.Collections.Generic import IComparer as _n_3_t_2
from __clrclasses__.System.Collections.Generic import Dictionary as _n_3_t_3
from __clrclasses__.System.Collections.Generic import HashSet as _n_3_t_4
from __clrclasses__.System.Collections.Generic import List as _n_3_t_5
from __clrclasses__.System.Linq.Expressions import Expression as _n_4_t_0
from __clrclasses__.System.Threading import CancellationToken as _n_5_t_0
import typing
T = typing.TypeVar('T')
TKey = typing.TypeVar('TKey')
TElement = typing.TypeVar('TElement')
TSource = typing.TypeVar('TSource')
class Enumerable(object):
@staticmethod
def Aggregate(source: _n_3_t_0[typing.Any], seed: typing.Any, func: _n_0_t_0[typing.Any, typing.Any, typing.Any], resultSelector: _n_0_t_0[typing.Any, typing.Any]) -> typing.Any:...
@staticmethod
def Aggregate(source: _n_3_t_0[typing.Any], seed: typing.Any, func: _n_0_t_0[typing.Any, typing.Any, typing.Any]) -> typing.Any:...
@staticmethod
def Aggregate(source: _n_3_t_0[typing.Any], func: _n_0_t_0[typing.Any, typing.Any, typing.Any]) -> typing.Any:...
@staticmethod
def All(source: _n_3_t_0[typing.Any], predicate: _n_0_t_0[typing.Any, bool]) -> bool:...
@staticmethod
def Any(source: _n_3_t_0[typing.Any], predicate: _n_0_t_0[typing.Any, bool]) -> bool:...
@staticmethod
def Any(source: _n_3_t_0[typing.Any]) -> bool:...
@staticmethod
def Append(source: _n_3_t_0[typing.Any], element: typing.Any) -> _n_3_t_0[typing.Any]:...
@staticmethod
def AsEnumerable(source: _n_3_t_0[typing.Any]) -> _n_3_t_0[typing.Any]:...
@staticmethod
def Average(source: _n_3_t_0[typing.Any], selector: _n_0_t_0[typing.Any, int]) -> float:...
@staticmethod
def Average(source: _n_3_t_0[int]) -> float:...
@staticmethod
def Cast(source: _n_1_t_0) -> _n_3_t_0[typing.Any]:...
@staticmethod
def Concat(first: _n_3_t_0[typing.Any], second: _n_3_t_0[typing.Any]) -> _n_3_t_0[typing.Any]:...
@staticmethod
def Contains(source: _n_3_t_0[typing.Any], value: typing.Any, comparer: _n_3_t_1[typing.Any]) -> bool:...
@staticmethod
def Contains(source: _n_3_t_0[typing.Any], value: typing.Any) -> bool:...
@staticmethod
def Count(source: _n_3_t_0[typing.Any], predicate: _n_0_t_0[typing.Any, bool]) -> int:...
@staticmethod
def Count(source: _n_3_t_0[typing.Any]) -> int:...
@staticmethod
def DefaultIfEmpty(source: _n_3_t_0[typing.Any], defaultValue: typing.Any) -> _n_3_t_0[typing.Any]:...
@staticmethod
def DefaultIfEmpty(source: _n_3_t_0[typing.Any]) -> _n_3_t_0[typing.Any]:...
@staticmethod
def Distinct(source: _n_3_t_0[typing.Any], comparer: _n_3_t_1[typing.Any]) -> _n_3_t_0[typing.Any]:...
@staticmethod
def Distinct(source: _n_3_t_0[typing.Any]) -> _n_3_t_0[typing.Any]:...
@staticmethod
def ElementAt(source: _n_3_t_0[typing.Any], index: int) -> typing.Any:...
@staticmethod
def ElementAtOrDefault(source: _n_3_t_0[typing.Any], index: int) -> typing.Any:...
@staticmethod
def Empty() -> _n_3_t_0[typing.Any]:...
@staticmethod
def Except(first: _n_3_t_0[typing.Any], second: _n_3_t_0[typing.Any], comparer: _n_3_t_1[typing.Any]) -> _n_3_t_0[typing.Any]:...
@staticmethod
def Except(first: _n_3_t_0[typing.Any], second: _n_3_t_0[typing.Any]) -> _n_3_t_0[typing.Any]:...
@staticmethod
def First(source: _n_3_t_0[typing.Any], predicate: _n_0_t_0[typing.Any, bool]) -> typing.Any:...
@staticmethod
def First(source: _n_3_t_0[typing.Any]) -> typing.Any:...
@staticmethod
def FirstOrDefault(source: _n_3_t_0[typing.Any], predicate: _n_0_t_0[typing.Any, bool]) -> typing.Any:...
@staticmethod
def FirstOrDefault(source: _n_3_t_0[typing.Any]) -> typing.Any:...
@staticmethod
def GroupBy(source: _n_3_t_0[typing.Any], keySelector: _n_0_t_0[typing.Any, typing.Any], elementSelector: _n_0_t_0[typing.Any, typing.Any], resultSelector: _n_0_t_0[typing.Any, _n_3_t_0[typing.Any], typing.Any], comparer: _n_3_t_1[typing.Any]) -> _n_3_t_0[typing.Any]:...
@staticmethod
def GroupBy(source: _n_3_t_0[typing.Any], keySelector: _n_0_t_0[typing.Any, typing.Any], elementSelector: _n_0_t_0[typing.Any, typing.Any], resultSelector: _n_0_t_0[typing.Any, _n_3_t_0[typing.Any], typing.Any]) -> _n_3_t_0[typing.Any]:...
@staticmethod
def GroupBy(source: _n_3_t_0[typing.Any], keySelector: _n_0_t_0[typing.Any, typing.Any], elementSelector: _n_0_t_0[typing.Any, typing.Any], comparer: _n_3_t_1[typing.Any]) -> _n_3_t_0[IGrouping[typing.Any, typing.Any]]:...
@staticmethod
def GroupBy(source: _n_3_t_0[typing.Any], keySelector: _n_0_t_0[typing.Any, typing.Any], elementSelector: _n_0_t_0[typing.Any, typing.Any]) -> _n_3_t_0[IGrouping[typing.Any, typing.Any]]:...
@staticmethod
def GroupBy(source: _n_3_t_0[typing.Any], keySelector: _n_0_t_0[typing.Any, typing.Any], comparer: _n_3_t_1[typing.Any]) -> _n_3_t_0[IGrouping[typing.Any, typing.Any]]:...
@staticmethod
def GroupBy(source: _n_3_t_0[typing.Any], keySelector: _n_0_t_0[typing.Any, typing.Any]) -> _n_3_t_0[IGrouping[typing.Any, typing.Any]]:...
@staticmethod
def GroupJoin(outer: _n_3_t_0[typing.Any], inner: _n_3_t_0[typing.Any], outerKeySelector: _n_0_t_0[typing.Any, typing.Any], innerKeySelector: _n_0_t_0[typing.Any, typing.Any], resultSelector: _n_0_t_0[typing.Any, _n_3_t_0[typing.Any], typing.Any], comparer: _n_3_t_1[typing.Any]) -> _n_3_t_0[typing.Any]:...
@staticmethod
def GroupJoin(outer: _n_3_t_0[typing.Any], inner: _n_3_t_0[typing.Any], outerKeySelector: _n_0_t_0[typing.Any, typing.Any], innerKeySelector: _n_0_t_0[typing.Any, typing.Any], resultSelector: _n_0_t_0[typing.Any, _n_3_t_0[typing.Any], typing.Any]) -> _n_3_t_0[typing.Any]:...
@staticmethod
def Intersect(first: _n_3_t_0[typing.Any], second: _n_3_t_0[typing.Any], comparer: _n_3_t_1[typing.Any]) -> _n_3_t_0[typing.Any]:...
@staticmethod
def Intersect(first: _n_3_t_0[typing.Any], second: _n_3_t_0[typing.Any]) -> _n_3_t_0[typing.Any]:...
@staticmethod
def Join(outer: _n_3_t_0[typing.Any], inner: _n_3_t_0[typing.Any], outerKeySelector: _n_0_t_0[typing.Any, typing.Any], innerKeySelector: _n_0_t_0[typing.Any, typing.Any], resultSelector: _n_0_t_0[typing.Any, typing.Any, typing.Any], comparer: _n_3_t_1[typing.Any]) -> _n_3_t_0[typing.Any]:...
@staticmethod
def Join(outer: _n_3_t_0[typing.Any], inner: _n_3_t_0[typing.Any], outerKeySelector: _n_0_t_0[typing.Any, typing.Any], innerKeySelector: _n_0_t_0[typing.Any, typing.Any], resultSelector: _n_0_t_0[typing.Any, typing.Any, typing.Any]) -> _n_3_t_0[typing.Any]:...
@staticmethod
def Last(source: _n_3_t_0[typing.Any], predicate: _n_0_t_0[typing.Any, bool]) -> typing.Any:...
@staticmethod
def Last(source: _n_3_t_0[typing.Any]) -> typing.Any:...
@staticmethod
def LastOrDefault(source: _n_3_t_0[typing.Any], predicate: _n_0_t_0[typing.Any, bool]) -> typing.Any:...
@staticmethod
def LastOrDefault(source: _n_3_t_0[typing.Any]) -> typing.Any:...
@staticmethod
def LongCount(source: _n_3_t_0[typing.Any], predicate: _n_0_t_0[typing.Any, bool]) -> int:...
@staticmethod
def LongCount(source: _n_3_t_0[typing.Any]) -> int:...
@staticmethod
def Max(source: _n_3_t_0[typing.Any], selector: _n_0_t_0[typing.Any, int]) -> int:...
@staticmethod
def Max(source: _n_3_t_0[int]) -> int:...
@staticmethod
def Min(source: _n_3_t_0[typing.Any], selector: _n_0_t_0[typing.Any, int]) -> int:...
@staticmethod
def Min(source: _n_3_t_0[int]) -> int:...
@staticmethod
def OfType(source: _n_1_t_0) -> _n_3_t_0[typing.Any]:...
@staticmethod
def OrderBy(source: _n_3_t_0[typing.Any], keySelector: _n_0_t_0[typing.Any, typing.Any], comparer: _n_3_t_2[typing.Any]) -> IOrderedEnumerable[typing.Any]:...
@staticmethod
def OrderBy(source: _n_3_t_0[typing.Any], keySelector: _n_0_t_0[typing.Any, typing.Any]) -> IOrderedEnumerable[typing.Any]:...
@staticmethod
def OrderByDescending(source: _n_3_t_0[typing.Any], keySelector: _n_0_t_0[typing.Any, typing.Any], comparer: _n_3_t_2[typing.Any]) -> IOrderedEnumerable[typing.Any]:...
@staticmethod
def OrderByDescending(source: _n_3_t_0[typing.Any], keySelector: _n_0_t_0[typing.Any, typing.Any]) -> IOrderedEnumerable[typing.Any]:...
@staticmethod
def Prepend(source: _n_3_t_0[typing.Any], element: typing.Any) -> _n_3_t_0[typing.Any]:...
@staticmethod
def Range(start: int, count: int) -> _n_3_t_0[int]:...
@staticmethod
def Repeat(element: typing.Any, count: int) -> _n_3_t_0[typing.Any]:...
@staticmethod
def Reverse(source: _n_3_t_0[typing.Any]) -> _n_3_t_0[typing.Any]:...
@staticmethod
def Select(source: _n_3_t_0[typing.Any], selector: _n_0_t_0[typing.Any, typing.Any]) -> _n_3_t_0[typing.Any]:...
@staticmethod
def SelectMany(source: _n_3_t_0[typing.Any], collectionSelector: _n_0_t_0[typing.Any, int, _n_3_t_0[typing.Any]], resultSelector: _n_0_t_0[typing.Any, typing.Any, typing.Any]) -> _n_3_t_0[typing.Any]:...
@staticmethod
def SelectMany(source: _n_3_t_0[typing.Any], selector: _n_0_t_0[typing.Any, _n_3_t_0[typing.Any]]) -> _n_3_t_0[typing.Any]:...
@staticmethod
def SequenceEqual(first: _n_3_t_0[typing.Any], second: _n_3_t_0[typing.Any], comparer: _n_3_t_1[typing.Any]) -> bool:...
@staticmethod
def SequenceEqual(first: _n_3_t_0[typing.Any], second: _n_3_t_0[typing.Any]) -> bool:...
@staticmethod
def Single(source: _n_3_t_0[typing.Any], predicate: _n_0_t_0[typing.Any, bool]) -> typing.Any:...
@staticmethod
def Single(source: _n_3_t_0[typing.Any]) -> typing.Any:...
@staticmethod
def SingleOrDefault(source: _n_3_t_0[typing.Any], predicate: _n_0_t_0[typing.Any, bool]) -> typing.Any:...
@staticmethod
def SingleOrDefault(source: _n_3_t_0[typing.Any]) -> typing.Any:...
@staticmethod
def Skip(source: _n_3_t_0[typing.Any], count: int) -> _n_3_t_0[typing.Any]:...
@staticmethod
def SkipWhile(source: _n_3_t_0[typing.Any], predicate: _n_0_t_0[typing.Any, bool]) -> _n_3_t_0[typing.Any]:...
@staticmethod
def Sum(source: _n_3_t_0[typing.Any], selector: _n_0_t_0[typing.Any, int]) -> int:...
@staticmethod
def Sum(source: _n_3_t_0[int]) -> int:...
@staticmethod
def Take(source: _n_3_t_0[typing.Any], count: int) -> _n_3_t_0[typing.Any]:...
@staticmethod
def TakeWhile(source: _n_3_t_0[typing.Any], predicate: _n_0_t_0[typing.Any, bool]) -> _n_3_t_0[typing.Any]:...
@staticmethod
def ThenBy(source: IOrderedEnumerable[typing.Any], keySelector: _n_0_t_0[typing.Any, typing.Any], comparer: _n_3_t_2[typing.Any]) -> IOrderedEnumerable[typing.Any]:...
@staticmethod
def ThenBy(source: IOrderedEnumerable[typing.Any], keySelector: _n_0_t_0[typing.Any, typing.Any]) -> IOrderedEnumerable[typing.Any]:...
@staticmethod
def ThenByDescending(source: IOrderedEnumerable[typing.Any], keySelector: _n_0_t_0[typing.Any, typing.Any], comparer: _n_3_t_2[typing.Any]) -> IOrderedEnumerable[typing.Any]:...
@staticmethod
def ThenByDescending(source: IOrderedEnumerable[typing.Any], keySelector: _n_0_t_0[typing.Any, typing.Any]) -> IOrderedEnumerable[typing.Any]:...
@staticmethod
def ToArray(source: _n_3_t_0[typing.Any]) -> _n_0_t_1[typing.Any]:...
@staticmethod
def ToDictionary(source: _n_3_t_0[typing.Any], keySelector: _n_0_t_0[typing.Any, typing.Any], elementSelector: _n_0_t_0[typing.Any, typing.Any], comparer: _n_3_t_1[typing.Any]) -> _n_3_t_3[typing.Any, typing.Any]:...
@staticmethod
def ToDictionary(source: _n_3_t_0[typing.Any], keySelector: _n_0_t_0[typing.Any, typing.Any], elementSelector: _n_0_t_0[typing.Any, typing.Any]) -> _n_3_t_3[typing.Any, typing.Any]:...
@staticmethod
def ToDictionary(source: _n_3_t_0[typing.Any], keySelector: _n_0_t_0[typing.Any, typing.Any], comparer: _n_3_t_1[typing.Any]) -> _n_3_t_3[typing.Any, typing.Any]:...
@staticmethod
def ToDictionary(source: _n_3_t_0[typing.Any], keySelector: _n_0_t_0[typing.Any, typing.Any]) -> _n_3_t_3[typing.Any, typing.Any]:...
@staticmethod
def ToHashSet(source: _n_3_t_0[typing.Any], comparer: _n_3_t_1[typing.Any]) -> _n_3_t_4[typing.Any]:...
@staticmethod
def ToHashSet(source: _n_3_t_0[typing.Any]) -> _n_3_t_4[typing.Any]:...
@staticmethod
def ToList(source: _n_3_t_0[typing.Any]) -> _n_3_t_5[typing.Any]:...
@staticmethod
def ToLookup(source: _n_3_t_0[typing.Any], keySelector: _n_0_t_0[typing.Any, typing.Any], elementSelector: _n_0_t_0[typing.Any, typing.Any], comparer: _n_3_t_1[typing.Any]) -> ILookup[typing.Any, typing.Any]:...
@staticmethod
def ToLookup(source: _n_3_t_0[typing.Any], keySelector: _n_0_t_0[typing.Any, typing.Any], elementSelector: _n_0_t_0[typing.Any, typing.Any]) -> ILookup[typing.Any, typing.Any]:...
@staticmethod
def ToLookup(source: _n_3_t_0[typing.Any], keySelector: _n_0_t_0[typing.Any, typing.Any], comparer: _n_3_t_1[typing.Any]) -> ILookup[typing.Any, typing.Any]:...
@staticmethod
def ToLookup(source: _n_3_t_0[typing.Any], keySelector: _n_0_t_0[typing.Any, typing.Any]) -> ILookup[typing.Any, typing.Any]:...
@staticmethod
def Union(first: _n_3_t_0[typing.Any], second: _n_3_t_0[typing.Any], comparer: _n_3_t_1[typing.Any]) -> _n_3_t_0[typing.Any]:...
@staticmethod
def Union(first: _n_3_t_0[typing.Any], second: _n_3_t_0[typing.Any]) -> _n_3_t_0[typing.Any]:...
@staticmethod
def Where(source: _n_3_t_0[typing.Any], predicate: _n_0_t_0[typing.Any, bool]) -> _n_3_t_0[typing.Any]:...
@staticmethod
def Zip(first: _n_3_t_0[typing.Any], second: _n_3_t_0[typing.Any], resultSelector: _n_0_t_0[typing.Any, typing.Any, typing.Any]) -> _n_3_t_0[typing.Any]:...
class EnumerableExecutor(EnumerableExecutor, typing.Generic[T]):
def __init__(self, expression: _n_4_t_0) -> EnumerableExecutor:...
class EnumerableQuery(EnumerableQuery, IOrderedQueryable[T], IQueryProvider, typing.Generic[T]):
def __init__(self, expression: _n_4_t_0) -> EnumerableQuery:...
def __init__(self, enumerable: _n_3_t_0[T]) -> EnumerableQuery:...
class IGrouping(_n_3_t_0[TElement], typing.Generic[TKey, TElement]):
@property
def Key(self) -> TKey:"""Key { get; } -> TKey"""
class ILookup(_n_3_t_0[IGrouping[TKey, TElement]], typing.Generic[TKey, TElement], typing.Iterable[_n_3_t_0[TElement]]):
@property
def Count(self) -> int:"""Count { get; } -> int"""
@property
def Item(self) -> _n_3_t_0[TElement]:"""Item { get; } -> IEnumerable"""
def Contains(self, key: TKey) -> bool:...
class IOrderedEnumerable(_n_3_t_0[TElement], typing.Generic[TElement]):
def CreateOrderedEnumerable(self, keySelector: _n_0_t_0[TElement, typing.Any], comparer: _n_3_t_2[typing.Any], descending: bool) -> IOrderedEnumerable[TElement]:...
def ThenBy(self, keySelector: _n_0_t_0[typing.Any, typing.Any], comparer: _n_3_t_2[typing.Any]) -> IOrderedEnumerable[typing.Any]:
"""Extension from: System.Linq.Enumerable"""
def ThenBy(self, keySelector: _n_0_t_0[typing.Any, typing.Any]) -> IOrderedEnumerable[typing.Any]:
"""Extension from: System.Linq.Enumerable"""
def ThenByDescending(self, keySelector: _n_0_t_0[typing.Any, typing.Any], comparer: _n_3_t_2[typing.Any]) -> IOrderedEnumerable[typing.Any]:
"""Extension from: System.Linq.Enumerable"""
def ThenByDescending(self, keySelector: _n_0_t_0[typing.Any, typing.Any]) -> IOrderedEnumerable[typing.Any]:
"""Extension from: System.Linq.Enumerable"""
class IOrderedQueryable(IQueryable[T], IOrderedQueryable, typing.Generic[T]):
def ThenBy(self, keySelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]], comparer: _n_3_t_2[typing.Any]) -> IOrderedQueryable[typing.Any]:
"""Extension from: System.Linq.Queryable"""
def ThenBy(self, keySelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]]) -> IOrderedQueryable[typing.Any]:
"""Extension from: System.Linq.Queryable"""
def ThenByDescending(self, keySelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]], comparer: _n_3_t_2[typing.Any]) -> IOrderedQueryable[typing.Any]:
"""Extension from: System.Linq.Queryable"""
def ThenByDescending(self, keySelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]]) -> IOrderedQueryable[typing.Any]:
"""Extension from: System.Linq.Queryable"""
class IQueryable(_n_3_t_0[T], IQueryable, typing.Generic[T]):
def Aggregate(self, seed: typing.Any, func: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any, typing.Any]], selector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]]) -> typing.Any:
"""Extension from: System.Linq.Queryable"""
def Aggregate(self, seed: typing.Any, func: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any, typing.Any]]) -> typing.Any:
"""Extension from: System.Linq.Queryable"""
def Aggregate(self, func: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any, typing.Any]]) -> typing.Any:
"""Extension from: System.Linq.Queryable"""
def All(self, predicate: _n_4_t_0[_n_0_t_0[typing.Any, bool]]) -> bool:
"""Extension from: System.Linq.Queryable"""
def Any(self, predicate: _n_4_t_0[_n_0_t_0[typing.Any, bool]]) -> bool:
"""Extension from: System.Linq.Queryable"""
def Any(self) -> bool:
"""Extension from: System.Linq.Queryable"""
def Average(self, selector: _n_4_t_0[_n_0_t_0[typing.Any, int]]) -> float:
"""Extension from: System.Linq.Queryable"""
def Average(self) -> float:
"""Extension from: System.Linq.Queryable"""
def Cast(self) -> IQueryable[typing.Any]:
"""Extension from: System.Linq.Queryable"""
def Concat(self, source2: _n_3_t_0[typing.Any]) -> IQueryable[typing.Any]:
"""Extension from: System.Linq.Queryable"""
def Contains(self, item: typing.Any, comparer: _n_3_t_1[typing.Any]) -> bool:
"""Extension from: System.Linq.Queryable"""
def Contains(self, item: typing.Any) -> bool:
"""Extension from: System.Linq.Queryable"""
def Count(self, predicate: _n_4_t_0[_n_0_t_0[typing.Any, bool]]) -> int:
"""Extension from: System.Linq.Queryable"""
def Count(self) -> int:
"""Extension from: System.Linq.Queryable"""
def DefaultIfEmpty(self, defaultValue: typing.Any) -> IQueryable[typing.Any]:
"""Extension from: System.Linq.Queryable"""
def DefaultIfEmpty(self) -> IQueryable[typing.Any]:
"""Extension from: System.Linq.Queryable"""
def Distinct(self, comparer: _n_3_t_1[typing.Any]) -> IQueryable[typing.Any]:
"""Extension from: System.Linq.Queryable"""
def Distinct(self) -> IQueryable[typing.Any]:
"""Extension from: System.Linq.Queryable"""
def ElementAt(self, index: int) -> typing.Any:
"""Extension from: System.Linq.Queryable"""
def ElementAtOrDefault(self, index: int) -> typing.Any:
"""Extension from: System.Linq.Queryable"""
def Except(self, source2: _n_3_t_0[typing.Any], comparer: _n_3_t_1[typing.Any]) -> IQueryable[typing.Any]:
"""Extension from: System.Linq.Queryable"""
def Except(self, source2: _n_3_t_0[typing.Any]) -> IQueryable[typing.Any]:
"""Extension from: System.Linq.Queryable"""
def First(self, predicate: _n_4_t_0[_n_0_t_0[typing.Any, bool]]) -> typing.Any:
"""Extension from: System.Linq.Queryable"""
def First(self) -> typing.Any:
"""Extension from: System.Linq.Queryable"""
def FirstOrDefault(self, predicate: _n_4_t_0[_n_0_t_0[typing.Any, bool]]) -> typing.Any:
"""Extension from: System.Linq.Queryable"""
def FirstOrDefault(self) -> typing.Any:
"""Extension from: System.Linq.Queryable"""
def GroupBy(self, keySelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]], elementSelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]], resultSelector: _n_4_t_0[_n_0_t_0[typing.Any, _n_3_t_0[typing.Any], typing.Any]], comparer: _n_3_t_1[typing.Any]) -> IQueryable[typing.Any]:
"""Extension from: System.Linq.Queryable"""
def GroupBy(self, keySelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]], elementSelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]], resultSelector: _n_4_t_0[_n_0_t_0[typing.Any, _n_3_t_0[typing.Any], typing.Any]]) -> IQueryable[typing.Any]:
"""Extension from: System.Linq.Queryable"""
def GroupBy(self, keySelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]], elementSelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]], comparer: _n_3_t_1[typing.Any]) -> IQueryable[IGrouping[typing.Any, typing.Any]]:
"""Extension from: System.Linq.Queryable"""
def GroupBy(self, keySelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]], comparer: _n_3_t_1[typing.Any]) -> IQueryable[IGrouping[typing.Any, typing.Any]]:
"""Extension from: System.Linq.Queryable"""
def GroupBy(self, keySelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]], elementSelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]]) -> IQueryable[IGrouping[typing.Any, typing.Any]]:
"""Extension from: System.Linq.Queryable"""
def GroupBy(self, keySelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]]) -> IQueryable[IGrouping[typing.Any, typing.Any]]:
"""Extension from: System.Linq.Queryable"""
def GroupJoin(self, inner: _n_3_t_0[typing.Any], outerKeySelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]], innerKeySelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]], resultSelector: _n_4_t_0[_n_0_t_0[typing.Any, _n_3_t_0[typing.Any], typing.Any]], comparer: _n_3_t_1[typing.Any]) -> IQueryable[typing.Any]:
"""Extension from: System.Linq.Queryable"""
def GroupJoin(self, inner: _n_3_t_0[typing.Any], outerKeySelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]], innerKeySelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]], resultSelector: _n_4_t_0[_n_0_t_0[typing.Any, _n_3_t_0[typing.Any], typing.Any]]) -> IQueryable[typing.Any]:
"""Extension from: System.Linq.Queryable"""
def Intersect(self, source2: _n_3_t_0[typing.Any], comparer: _n_3_t_1[typing.Any]) -> IQueryable[typing.Any]:
"""Extension from: System.Linq.Queryable"""
def Intersect(self, source2: _n_3_t_0[typing.Any]) -> IQueryable[typing.Any]:
"""Extension from: System.Linq.Queryable"""
def Join(self, inner: _n_3_t_0[typing.Any], outerKeySelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]], innerKeySelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]], resultSelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any, typing.Any]], comparer: _n_3_t_1[typing.Any]) -> IQueryable[typing.Any]:
"""Extension from: System.Linq.Queryable"""
def Join(self, inner: _n_3_t_0[typing.Any], outerKeySelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]], innerKeySelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]], resultSelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any, typing.Any]]) -> IQueryable[typing.Any]:
"""Extension from: System.Linq.Queryable"""
def Last(self, predicate: _n_4_t_0[_n_0_t_0[typing.Any, bool]]) -> typing.Any:
"""Extension from: System.Linq.Queryable"""
def Last(self) -> typing.Any:
"""Extension from: System.Linq.Queryable"""
def LastOrDefault(self, predicate: _n_4_t_0[_n_0_t_0[typing.Any, bool]]) -> typing.Any:
"""Extension from: System.Linq.Queryable"""
def LastOrDefault(self) -> typing.Any:
"""Extension from: System.Linq.Queryable"""
def LongCount(self, predicate: _n_4_t_0[_n_0_t_0[typing.Any, bool]]) -> int:
"""Extension from: System.Linq.Queryable"""
def LongCount(self) -> int:
"""Extension from: System.Linq.Queryable"""
def Max(self, selector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]]) -> typing.Any:
"""Extension from: System.Linq.Queryable"""
def Max(self) -> typing.Any:
"""Extension from: System.Linq.Queryable"""
def Min(self, selector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]]) -> typing.Any:
"""Extension from: System.Linq.Queryable"""
def Min(self) -> typing.Any:
"""Extension from: System.Linq.Queryable"""
def OfType(self) -> IQueryable[typing.Any]:
"""Extension from: System.Linq.Queryable"""
def OrderBy(self, keySelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]], comparer: _n_3_t_2[typing.Any]) -> IOrderedQueryable[typing.Any]:
"""Extension from: System.Linq.Queryable"""
def OrderBy(self, keySelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]]) -> IOrderedQueryable[typing.Any]:
"""Extension from: System.Linq.Queryable"""
def OrderByDescending(self, keySelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]], comparer: _n_3_t_2[typing.Any]) -> IOrderedQueryable[typing.Any]:
"""Extension from: System.Linq.Queryable"""
def OrderByDescending(self, keySelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]]) -> IOrderedQueryable[typing.Any]:
"""Extension from: System.Linq.Queryable"""
def Reverse(self) -> IQueryable[typing.Any]:
"""Extension from: System.Linq.Queryable"""
def Select(self, selector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]]) -> IQueryable[typing.Any]:
"""Extension from: System.Linq.Queryable"""
def SelectMany(self, collectionSelector: _n_4_t_0[_n_0_t_0[typing.Any, int, _n_3_t_0[typing.Any]]], resultSelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any, typing.Any]]) -> IQueryable[typing.Any]:
"""Extension from: System.Linq.Queryable"""
def SelectMany(self, selector: _n_4_t_0[_n_0_t_0[typing.Any, _n_3_t_0[typing.Any]]]) -> IQueryable[typing.Any]:
"""Extension from: System.Linq.Queryable"""
def SequenceEqual(self, source2: _n_3_t_0[typing.Any], comparer: _n_3_t_1[typing.Any]) -> bool:
"""Extension from: System.Linq.Queryable"""
def SequenceEqual(self, source2: _n_3_t_0[typing.Any]) -> bool:
"""Extension from: System.Linq.Queryable"""
def Single(self, predicate: _n_4_t_0[_n_0_t_0[typing.Any, bool]]) -> typing.Any:
"""Extension from: System.Linq.Queryable"""
def Single(self) -> typing.Any:
"""Extension from: System.Linq.Queryable"""
def SingleOrDefault(self, predicate: _n_4_t_0[_n_0_t_0[typing.Any, bool]]) -> typing.Any:
"""Extension from: System.Linq.Queryable"""
def SingleOrDefault(self) -> typing.Any:
"""Extension from: System.Linq.Queryable"""
def Skip(self, count: int) -> IQueryable[typing.Any]:
"""Extension from: System.Linq.Queryable"""
def SkipWhile(self, predicate: _n_4_t_0[_n_0_t_0[typing.Any, bool]]) -> IQueryable[typing.Any]:
"""Extension from: System.Linq.Queryable"""
def Sum(self, selector: _n_4_t_0[_n_0_t_0[typing.Any, int]]) -> int:
"""Extension from: System.Linq.Queryable"""
def Sum(self) -> int:
"""Extension from: System.Linq.Queryable"""
def Take(self, count: int) -> IQueryable[typing.Any]:
"""Extension from: System.Linq.Queryable"""
def TakeWhile(self, predicate: _n_4_t_0[_n_0_t_0[typing.Any, bool]]) -> IQueryable[typing.Any]:
"""Extension from: System.Linq.Queryable"""
def Union(self, source2: _n_3_t_0[typing.Any], comparer: _n_3_t_1[typing.Any]) -> IQueryable[typing.Any]:
"""Extension from: System.Linq.Queryable"""
def Union(self, source2: _n_3_t_0[typing.Any]) -> IQueryable[typing.Any]:
"""Extension from: System.Linq.Queryable"""
def Where(self, predicate: _n_4_t_0[_n_0_t_0[typing.Any, bool]]) -> IQueryable[typing.Any]:
"""Extension from: System.Linq.Queryable"""
def Zip(self, source2: _n_3_t_0[typing.Any], resultSelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any, typing.Any]]) -> IQueryable[typing.Any]:
"""Extension from: System.Linq.Queryable"""
class IQueryProvider():
def CreateQuery(self, expression: _n_4_t_0) -> IQueryable:...
def Execute(self, expression: _n_4_t_0) -> object:...
class Lookup(_n_3_t_0[IGrouping[TKey, TElement]], ILookup[TKey, TElement], typing.Generic[TKey, TElement], typing.Iterable[_n_3_t_0[TElement]]):
def ApplyResultSelector(self, resultSelector: _n_0_t_0[TKey, _n_3_t_0[TElement], typing.Any]) -> _n_3_t_0[typing.Any]:...
class OrderedParallelQuery(ParallelQuery[TSource], _n_1_t_0, _n_3_t_0[TSource], typing.Generic[TSource]):
def ThenBy(self, keySelector: _n_0_t_0[TSource, typing.Any], comparer: _n_3_t_2[typing.Any]) -> OrderedParallelQuery[TSource]:
"""Extension from: System.Linq.ParallelEnumerable"""
def ThenBy(self, keySelector: _n_0_t_0[TSource, typing.Any]) -> OrderedParallelQuery[TSource]:
"""Extension from: System.Linq.ParallelEnumerable"""
def ThenByDescending(self, keySelector: _n_0_t_0[TSource, typing.Any], comparer: _n_3_t_2[typing.Any]) -> OrderedParallelQuery[TSource]:
"""Extension from: System.Linq.ParallelEnumerable"""
def ThenByDescending(self, keySelector: _n_0_t_0[TSource, typing.Any]) -> OrderedParallelQuery[TSource]:
"""Extension from: System.Linq.ParallelEnumerable"""
class ParallelEnumerable(object):
@staticmethod
def Aggregate(source: ParallelQuery[typing.Any], seedFactory: _n_0_t_0[typing.Any], updateAccumulatorFunc: _n_0_t_0[typing.Any, typing.Any, typing.Any], combineAccumulatorsFunc: _n_0_t_0[typing.Any, typing.Any, typing.Any], resultSelector: _n_0_t_0[typing.Any, typing.Any]) -> typing.Any:...
@staticmethod
def Aggregate(source: ParallelQuery[typing.Any], seed: typing.Any, updateAccumulatorFunc: _n_0_t_0[typing.Any, typing.Any, typing.Any], combineAccumulatorsFunc: _n_0_t_0[typing.Any, typing.Any, typing.Any], resultSelector: _n_0_t_0[typing.Any, typing.Any]) -> typing.Any:...
@staticmethod
def Aggregate(source: ParallelQuery[typing.Any], seed: typing.Any, func: _n_0_t_0[typing.Any, typing.Any, typing.Any], resultSelector: _n_0_t_0[typing.Any, typing.Any]) -> typing.Any:...
@staticmethod
def Aggregate(source: ParallelQuery[typing.Any], seed: typing.Any, func: _n_0_t_0[typing.Any, typing.Any, typing.Any]) -> typing.Any:...
@staticmethod
def Aggregate(source: ParallelQuery[typing.Any], func: _n_0_t_0[typing.Any, typing.Any, typing.Any]) -> typing.Any:...
@staticmethod
def All(source: ParallelQuery[typing.Any], predicate: _n_0_t_0[typing.Any, bool]) -> bool:...
@staticmethod
def Any(source: ParallelQuery[typing.Any]) -> bool:...
@staticmethod
def Any(source: ParallelQuery[typing.Any], predicate: _n_0_t_0[typing.Any, bool]) -> bool:...
@staticmethod
def AsEnumerable(source: ParallelQuery[typing.Any]) -> _n_3_t_0[typing.Any]:...
@staticmethod
def AsOrdered(source: ParallelQuery) -> ParallelQuery:...
@staticmethod
def AsOrdered(source: ParallelQuery[typing.Any]) -> ParallelQuery[typing.Any]:...
@staticmethod
def AsParallel(source: _n_1_t_0) -> ParallelQuery:...
@staticmethod
def AsParallel(source: _n_2_t_0[typing.Any]) -> ParallelQuery[typing.Any]:...
@staticmethod
def AsParallel(source: _n_3_t_0[typing.Any]) -> ParallelQuery[typing.Any]:...
@staticmethod
def AsSequential(source: ParallelQuery[typing.Any]) -> _n_3_t_0[typing.Any]:...
@staticmethod
def AsUnordered(source: ParallelQuery[typing.Any]) -> ParallelQuery[typing.Any]:...
@staticmethod
def Average(source: ParallelQuery[typing.Any], selector: _n_0_t_0[typing.Any, int]) -> float:...
@staticmethod
def Average(source: ParallelQuery[int]) -> float:...
@staticmethod
def Cast(source: ParallelQuery) -> ParallelQuery[typing.Any]:...
@staticmethod
def Concat(first: ParallelQuery[typing.Any], second: _n_3_t_0[typing.Any]) -> ParallelQuery[typing.Any]:...
@staticmethod
def Concat(first: ParallelQuery[typing.Any], second: ParallelQuery[typing.Any]) -> ParallelQuery[typing.Any]:...
@staticmethod
def Contains(source: ParallelQuery[typing.Any], value: typing.Any, comparer: _n_3_t_1[typing.Any]) -> bool:...
@staticmethod
def Contains(source: ParallelQuery[typing.Any], value: typing.Any) -> bool:...
@staticmethod
def Count(source: ParallelQuery[typing.Any], predicate: _n_0_t_0[typing.Any, bool]) -> int:...
@staticmethod
def Count(source: ParallelQuery[typing.Any]) -> int:...
@staticmethod
def DefaultIfEmpty(source: ParallelQuery[typing.Any], defaultValue: typing.Any) -> ParallelQuery[typing.Any]:...
@staticmethod
def DefaultIfEmpty(source: ParallelQuery[typing.Any]) -> ParallelQuery[typing.Any]:...
@staticmethod
def Distinct(source: ParallelQuery[typing.Any], comparer: _n_3_t_1[typing.Any]) -> ParallelQuery[typing.Any]:...
@staticmethod
def Distinct(source: ParallelQuery[typing.Any]) -> ParallelQuery[typing.Any]:...
@staticmethod
def ElementAt(source: ParallelQuery[typing.Any], index: int) -> typing.Any:...
@staticmethod
def ElementAtOrDefault(source: ParallelQuery[typing.Any], index: int) -> typing.Any:...
@staticmethod
def Empty() -> ParallelQuery[typing.Any]:...
@staticmethod
def Except(first: ParallelQuery[typing.Any], second: _n_3_t_0[typing.Any], comparer: _n_3_t_1[typing.Any]) -> ParallelQuery[typing.Any]:...
@staticmethod
def Except(first: ParallelQuery[typing.Any], second: ParallelQuery[typing.Any], comparer: _n_3_t_1[typing.Any]) -> ParallelQuery[typing.Any]:...
@staticmethod
def Except(first: ParallelQuery[typing.Any], second: _n_3_t_0[typing.Any]) -> ParallelQuery[typing.Any]:...
@staticmethod
def Except(first: ParallelQuery[typing.Any], second: ParallelQuery[typing.Any]) -> ParallelQuery[typing.Any]:...
@staticmethod
def First(source: ParallelQuery[typing.Any], predicate: _n_0_t_0[typing.Any, bool]) -> typing.Any:...
@staticmethod
def First(source: ParallelQuery[typing.Any]) -> typing.Any:...
@staticmethod
def FirstOrDefault(source: ParallelQuery[typing.Any], predicate: _n_0_t_0[typing.Any, bool]) -> typing.Any:...
@staticmethod
def FirstOrDefault(source: ParallelQuery[typing.Any]) -> typing.Any:...
@staticmethod
def ForAll(source: ParallelQuery[typing.Any], action: _n_0_t_2[typing.Any]):...
@staticmethod
def GroupBy(source: ParallelQuery[typing.Any], keySelector: _n_0_t_0[typing.Any, typing.Any], elementSelector: _n_0_t_0[typing.Any, typing.Any], resultSelector: _n_0_t_0[typing.Any, _n_3_t_0[typing.Any], typing.Any], comparer: _n_3_t_1[typing.Any]) -> ParallelQuery[typing.Any]:...
@staticmethod
def GroupBy(source: ParallelQuery[typing.Any], keySelector: _n_0_t_0[typing.Any, typing.Any], elementSelector: _n_0_t_0[typing.Any, typing.Any], resultSelector: _n_0_t_0[typing.Any, _n_3_t_0[typing.Any], typing.Any]) -> ParallelQuery[typing.Any]:...
@staticmethod
def GroupBy(source: ParallelQuery[typing.Any], keySelector: _n_0_t_0[typing.Any, typing.Any], elementSelector: _n_0_t_0[typing.Any, typing.Any], comparer: _n_3_t_1[typing.Any]) -> ParallelQuery[IGrouping[typing.Any, typing.Any]]:...
@staticmethod
def GroupBy(source: ParallelQuery[typing.Any], keySelector: _n_0_t_0[typing.Any, typing.Any], elementSelector: _n_0_t_0[typing.Any, typing.Any]) -> ParallelQuery[IGrouping[typing.Any, typing.Any]]:...
@staticmethod
def GroupBy(source: ParallelQuery[typing.Any], keySelector: _n_0_t_0[typing.Any, typing.Any], comparer: _n_3_t_1[typing.Any]) -> ParallelQuery[IGrouping[typing.Any, typing.Any]]:...
@staticmethod
def GroupBy(source: ParallelQuery[typing.Any], keySelector: _n_0_t_0[typing.Any, typing.Any]) -> ParallelQuery[IGrouping[typing.Any, typing.Any]]:...
@staticmethod
def GroupJoin(outer: ParallelQuery[typing.Any], inner: _n_3_t_0[typing.Any], outerKeySelector: _n_0_t_0[typing.Any, typing.Any], innerKeySelector: _n_0_t_0[typing.Any, typing.Any], resultSelector: _n_0_t_0[typing.Any, _n_3_t_0[typing.Any], typing.Any], comparer: _n_3_t_1[typing.Any]) -> ParallelQuery[typing.Any]:...
@staticmethod
def GroupJoin(outer: ParallelQuery[typing.Any], inner: ParallelQuery[typing.Any], outerKeySelector: _n_0_t_0[typing.Any, typing.Any], innerKeySelector: _n_0_t_0[typing.Any, typing.Any], resultSelector: _n_0_t_0[typing.Any, _n_3_t_0[typing.Any], typing.Any], comparer: _n_3_t_1[typing.Any]) -> ParallelQuery[typing.Any]:...
@staticmethod
def GroupJoin(outer: ParallelQuery[typing.Any], inner: _n_3_t_0[typing.Any], outerKeySelector: _n_0_t_0[typing.Any, typing.Any], innerKeySelector: _n_0_t_0[typing.Any, typing.Any], resultSelector: _n_0_t_0[typing.Any, _n_3_t_0[typing.Any], typing.Any]) -> ParallelQuery[typing.Any]:...
@staticmethod
def GroupJoin(outer: ParallelQuery[typing.Any], inner: ParallelQuery[typing.Any], outerKeySelector: _n_0_t_0[typing.Any, typing.Any], innerKeySelector: _n_0_t_0[typing.Any, typing.Any], resultSelector: _n_0_t_0[typing.Any, _n_3_t_0[typing.Any], typing.Any]) -> ParallelQuery[typing.Any]:...
@staticmethod
def Intersect(first: ParallelQuery[typing.Any], second: _n_3_t_0[typing.Any], comparer: _n_3_t_1[typing.Any]) -> ParallelQuery[typing.Any]:...
@staticmethod
def Intersect(first: ParallelQuery[typing.Any], second: ParallelQuery[typing.Any], comparer: _n_3_t_1[typing.Any]) -> ParallelQuery[typing.Any]:...
@staticmethod
def Intersect(first: ParallelQuery[typing.Any], second: _n_3_t_0[typing.Any]) -> ParallelQuery[typing.Any]:...
@staticmethod
def Intersect(first: ParallelQuery[typing.Any], second: ParallelQuery[typing.Any]) -> ParallelQuery[typing.Any]:...
@staticmethod
def Join(outer: ParallelQuery[typing.Any], inner: _n_3_t_0[typing.Any], outerKeySelector: _n_0_t_0[typing.Any, typing.Any], innerKeySelector: _n_0_t_0[typing.Any, typing.Any], resultSelector: _n_0_t_0[typing.Any, typing.Any, typing.Any], comparer: _n_3_t_1[typing.Any]) -> ParallelQuery[typing.Any]:...
@staticmethod
def Join(outer: ParallelQuery[typing.Any], inner: ParallelQuery[typing.Any], outerKeySelector: _n_0_t_0[typing.Any, typing.Any], innerKeySelector: _n_0_t_0[typing.Any, typing.Any], resultSelector: _n_0_t_0[typing.Any, typing.Any, typing.Any], comparer: _n_3_t_1[typing.Any]) -> ParallelQuery[typing.Any]:...
@staticmethod
def Join(outer: ParallelQuery[typing.Any], inner: _n_3_t_0[typing.Any], outerKeySelector: _n_0_t_0[typing.Any, typing.Any], innerKeySelector: _n_0_t_0[typing.Any, typing.Any], resultSelector: _n_0_t_0[typing.Any, typing.Any, typing.Any]) -> ParallelQuery[typing.Any]:...
@staticmethod
def Join(outer: ParallelQuery[typing.Any], inner: ParallelQuery[typing.Any], outerKeySelector: _n_0_t_0[typing.Any, typing.Any], innerKeySelector: _n_0_t_0[typing.Any, typing.Any], resultSelector: _n_0_t_0[typing.Any, typing.Any, typing.Any]) -> ParallelQuery[typing.Any]:...
@staticmethod
def Last(source: ParallelQuery[typing.Any], predicate: _n_0_t_0[typing.Any, bool]) -> typing.Any:...
@staticmethod
def Last(source: ParallelQuery[typing.Any]) -> typing.Any:...
@staticmethod
def LastOrDefault(source: ParallelQuery[typing.Any], predicate: _n_0_t_0[typing.Any, bool]) -> typing.Any:...
@staticmethod
def LastOrDefault(source: ParallelQuery[typing.Any]) -> typing.Any:...
@staticmethod
def LongCount(source: ParallelQuery[typing.Any], predicate: _n_0_t_0[typing.Any, bool]) -> int:...
@staticmethod
def LongCount(source: ParallelQuery[typing.Any]) -> int:...
@staticmethod
def Max(source: ParallelQuery[typing.Any], selector: _n_0_t_0[typing.Any, int]) -> int:...
@staticmethod
def Max(source: ParallelQuery[int]) -> int:...
@staticmethod
def Min(source: ParallelQuery[typing.Any], selector: _n_0_t_0[typing.Any, int]) -> int:...
@staticmethod
def Min(source: ParallelQuery[int]) -> int:...
@staticmethod
def OfType(source: ParallelQuery) -> ParallelQuery[typing.Any]:...
@staticmethod
def OrderBy(source: ParallelQuery[typing.Any], keySelector: _n_0_t_0[typing.Any, typing.Any], comparer: _n_3_t_2[typing.Any]) -> OrderedParallelQuery[typing.Any]:...
@staticmethod
def OrderBy(source: ParallelQuery[typing.Any], keySelector: _n_0_t_0[typing.Any, typing.Any]) -> OrderedParallelQuery[typing.Any]:...
@staticmethod
def OrderByDescending(source: ParallelQuery[typing.Any], keySelector: _n_0_t_0[typing.Any, typing.Any], comparer: _n_3_t_2[typing.Any]) -> OrderedParallelQuery[typing.Any]:...
@staticmethod
def OrderByDescending(source: ParallelQuery[typing.Any], keySelector: _n_0_t_0[typing.Any, typing.Any]) -> OrderedParallelQuery[typing.Any]:...
@staticmethod
def Range(start: int, count: int) -> ParallelQuery[int]:...
@staticmethod
def Repeat(element: typing.Any, count: int) -> ParallelQuery[typing.Any]:...
@staticmethod
def Reverse(source: ParallelQuery[typing.Any]) -> ParallelQuery[typing.Any]:...
@staticmethod
def Select(source: ParallelQuery[typing.Any], selector: _n_0_t_0[typing.Any, typing.Any]) -> ParallelQuery[typing.Any]:...
@staticmethod
def SelectMany(source: ParallelQuery[typing.Any], collectionSelector: _n_0_t_0[typing.Any, _n_3_t_0[typing.Any]], resultSelector: _n_0_t_0[typing.Any, typing.Any, typing.Any]) -> ParallelQuery[typing.Any]:...
@staticmethod
def SelectMany(source: ParallelQuery[typing.Any], selector: _n_0_t_0[typing.Any, _n_3_t_0[typing.Any]]) -> ParallelQuery[typing.Any]:...
@staticmethod
def SequenceEqual(first: ParallelQuery[typing.Any], second: _n_3_t_0[typing.Any], comparer: _n_3_t_1[typing.Any]) -> bool:...
@staticmethod
def SequenceEqual(first: ParallelQuery[typing.Any], second: ParallelQuery[typing.Any], comparer: _n_3_t_1[typing.Any]) -> bool:...
@staticmethod
def SequenceEqual(first: ParallelQuery[typing.Any], second: _n_3_t_0[typing.Any]) -> bool:...
@staticmethod
def SequenceEqual(first: ParallelQuery[typing.Any], second: ParallelQuery[typing.Any]) -> bool:...
@staticmethod
def Single(source: ParallelQuery[typing.Any], predicate: _n_0_t_0[typing.Any, bool]) -> typing.Any:...
@staticmethod
def Single(source: ParallelQuery[typing.Any]) -> typing.Any:...
@staticmethod
def SingleOrDefault(source: ParallelQuery[typing.Any], predicate: _n_0_t_0[typing.Any, bool]) -> typing.Any:...
@staticmethod
def SingleOrDefault(source: ParallelQuery[typing.Any]) -> typing.Any:...
@staticmethod
def Skip(source: ParallelQuery[typing.Any], count: int) -> ParallelQuery[typing.Any]:...
@staticmethod
def SkipWhile(source: ParallelQuery[typing.Any], predicate: _n_0_t_0[typing.Any, bool]) -> ParallelQuery[typing.Any]:...
@staticmethod
def Sum(source: ParallelQuery[typing.Any], selector: _n_0_t_0[typing.Any, int]) -> int:...
@staticmethod
def Sum(source: ParallelQuery[int]) -> int:...
@staticmethod
def Take(source: ParallelQuery[typing.Any], count: int) -> ParallelQuery[typing.Any]:...
@staticmethod
def TakeWhile(source: ParallelQuery[typing.Any], predicate: _n_0_t_0[typing.Any, bool]) -> ParallelQuery[typing.Any]:...
@staticmethod
def ThenBy(source: OrderedParallelQuery[typing.Any], keySelector: _n_0_t_0[typing.Any, typing.Any], comparer: _n_3_t_2[typing.Any]) -> OrderedParallelQuery[typing.Any]:...
@staticmethod
def ThenBy(source: OrderedParallelQuery[typing.Any], keySelector: _n_0_t_0[typing.Any, typing.Any]) -> OrderedParallelQuery[typing.Any]:...
@staticmethod
def ThenByDescending(source: OrderedParallelQuery[typing.Any], keySelector: _n_0_t_0[typing.Any, typing.Any], comparer: _n_3_t_2[typing.Any]) -> OrderedParallelQuery[typing.Any]:...
@staticmethod
def ThenByDescending(source: OrderedParallelQuery[typing.Any], keySelector: _n_0_t_0[typing.Any, typing.Any]) -> OrderedParallelQuery[typing.Any]:...
@staticmethod
def ToArray(source: ParallelQuery[typing.Any]) -> _n_0_t_1[typing.Any]:...
@staticmethod
def ToDictionary(source: ParallelQuery[typing.Any], keySelector: _n_0_t_0[typing.Any, typing.Any], elementSelector: _n_0_t_0[typing.Any, typing.Any], comparer: _n_3_t_1[typing.Any]) -> _n_3_t_3[typing.Any, typing.Any]:...
@staticmethod
def ToDictionary(source: ParallelQuery[typing.Any], keySelector: _n_0_t_0[typing.Any, typing.Any], elementSelector: _n_0_t_0[typing.Any, typing.Any]) -> _n_3_t_3[typing.Any, typing.Any]:...
@staticmethod
def ToDictionary(source: ParallelQuery[typing.Any], keySelector: _n_0_t_0[typing.Any, typing.Any], comparer: _n_3_t_1[typing.Any]) -> _n_3_t_3[typing.Any, typing.Any]:...
@staticmethod
def ToDictionary(source: ParallelQuery[typing.Any], keySelector: _n_0_t_0[typing.Any, typing.Any]) -> _n_3_t_3[typing.Any, typing.Any]:...
@staticmethod
def ToList(source: ParallelQuery[typing.Any]) -> _n_3_t_5[typing.Any]:...
@staticmethod
def ToLookup(source: ParallelQuery[typing.Any], keySelector: _n_0_t_0[typing.Any, typing.Any], elementSelector: _n_0_t_0[typing.Any, typing.Any], comparer: _n_3_t_1[typing.Any]) -> ILookup[typing.Any, typing.Any]:...
@staticmethod
def ToLookup(source: ParallelQuery[typing.Any], keySelector: _n_0_t_0[typing.Any, typing.Any], elementSelector: _n_0_t_0[typing.Any, typing.Any]) -> ILookup[typing.Any, typing.Any]:...
@staticmethod
def ToLookup(source: ParallelQuery[typing.Any], keySelector: _n_0_t_0[typing.Any, typing.Any], comparer: _n_3_t_1[typing.Any]) -> ILookup[typing.Any, typing.Any]:...
@staticmethod
def ToLookup(source: ParallelQuery[typing.Any], keySelector: _n_0_t_0[typing.Any, typing.Any]) -> ILookup[typing.Any, typing.Any]:...
@staticmethod
def Union(first: ParallelQuery[typing.Any], second: _n_3_t_0[typing.Any], comparer: _n_3_t_1[typing.Any]) -> ParallelQuery[typing.Any]:...
@staticmethod
def Union(first: ParallelQuery[typing.Any], second: ParallelQuery[typing.Any], comparer: _n_3_t_1[typing.Any]) -> ParallelQuery[typing.Any]:...
@staticmethod
def Union(first: ParallelQuery[typing.Any], second: _n_3_t_0[typing.Any]) -> ParallelQuery[typing.Any]:...
@staticmethod
def Union(first: ParallelQuery[typing.Any], second: ParallelQuery[typing.Any]) -> ParallelQuery[typing.Any]:...
@staticmethod
def Where(source: ParallelQuery[typing.Any], predicate: _n_0_t_0[typing.Any, bool]) -> ParallelQuery[typing.Any]:...
@staticmethod
def WithCancellation(source: ParallelQuery[typing.Any], cancellationToken: _n_5_t_0) -> ParallelQuery[typing.Any]:...
@staticmethod
def WithDegreeOfParallelism(source: ParallelQuery[typing.Any], degreeOfParallelism: int) -> ParallelQuery[typing.Any]:...
@staticmethod
def WithExecutionMode(source: ParallelQuery[typing.Any], executionMode: ParallelExecutionMode) -> ParallelQuery[typing.Any]:...
@staticmethod
def WithMergeOptions(source: ParallelQuery[typing.Any], mergeOptions: ParallelMergeOptions) -> ParallelQuery[typing.Any]:...
@staticmethod
def Zip(first: ParallelQuery[typing.Any], second: _n_3_t_0[typing.Any], resultSelector: _n_0_t_0[typing.Any, typing.Any, typing.Any]) -> ParallelQuery[typing.Any]:...
@staticmethod
def Zip(first: ParallelQuery[typing.Any], second: ParallelQuery[typing.Any], resultSelector: _n_0_t_0[typing.Any, typing.Any, typing.Any]) -> ParallelQuery[typing.Any]:...
class ParallelExecutionMode(_n_0_t_3, _n_0_t_4, _n_0_t_5, _n_0_t_6):
Default: int
ForceParallelism: int
value__: int
class ParallelMergeOptions(_n_0_t_3, _n_0_t_4, _n_0_t_5, _n_0_t_6):
AutoBuffered: int
Default: int
FullyBuffered: int
NotBuffered: int
value__: int
class ParallelQuery(ParallelQuery, _n_1_t_0, _n_3_t_0[TSource], typing.Generic[TSource]):
def Aggregate(self, seedFactory: _n_0_t_0[typing.Any], updateAccumulatorFunc: _n_0_t_0[typing.Any, TSource, typing.Any], combineAccumulatorsFunc: _n_0_t_0[typing.Any, typing.Any, typing.Any], resultSelector: _n_0_t_0[typing.Any, typing.Any]) -> typing.Any:
"""Extension from: System.Linq.ParallelEnumerable"""
def Aggregate(self, seed: typing.Any, updateAccumulatorFunc: _n_0_t_0[typing.Any, TSource, typing.Any], combineAccumulatorsFunc: _n_0_t_0[typing.Any, typing.Any, typing.Any], resultSelector: _n_0_t_0[typing.Any, typing.Any]) -> typing.Any:
"""Extension from: System.Linq.ParallelEnumerable"""
def Aggregate(self, seed: typing.Any, func: _n_0_t_0[typing.Any, TSource, typing.Any], resultSelector: _n_0_t_0[typing.Any, typing.Any]) -> typing.Any:
"""Extension from: System.Linq.ParallelEnumerable"""
def Aggregate(self, seed: typing.Any, func: _n_0_t_0[typing.Any, TSource, typing.Any]) -> typing.Any:
"""Extension from: System.Linq.ParallelEnumerable"""
def Aggregate(self, func: _n_0_t_0[TSource, TSource, TSource]) -> TSource:
"""Extension from: System.Linq.ParallelEnumerable"""
def All(self, predicate: _n_0_t_0[TSource, bool]) -> bool:
"""Extension from: System.Linq.ParallelEnumerable"""
def Any(self) -> bool:
"""Extension from: System.Linq.ParallelEnumerable"""
def Any(self, predicate: _n_0_t_0[TSource, bool]) -> bool:
"""Extension from: System.Linq.ParallelEnumerable"""
def AsEnumerable(self) -> _n_3_t_0[TSource]:
"""Extension from: System.Linq.ParallelEnumerable"""
def AsOrdered(self) -> ParallelQuery[TSource]:
"""Extension from: System.Linq.ParallelEnumerable"""
def AsSequential(self) -> _n_3_t_0[TSource]:
"""Extension from: System.Linq.ParallelEnumerable"""
def AsUnordered(self) -> ParallelQuery[TSource]:
"""Extension from: System.Linq.ParallelEnumerable"""
def Average(self, selector: _n_0_t_0[TSource, int]) -> float:
"""Extension from: System.Linq.ParallelEnumerable"""
def Average(self) -> float:
"""Extension from: System.Linq.ParallelEnumerable"""
def Cast(self) -> ParallelQuery[typing.Any]:
"""Extension from: System.Linq.ParallelEnumerable"""
def Concat(self, second: _n_3_t_0[TSource]) -> ParallelQuery[TSource]:
"""Extension from: System.Linq.ParallelEnumerable"""
def Concat(self, second: ParallelQuery[TSource]) -> ParallelQuery[TSource]:
"""Extension from: System.Linq.ParallelEnumerable"""
def Contains(self, value: TSource, comparer: _n_3_t_1[TSource]) -> bool:
"""Extension from: System.Linq.ParallelEnumerable"""
def Contains(self, value: TSource) -> bool:
"""Extension from: System.Linq.ParallelEnumerable"""
def Count(self, predicate: _n_0_t_0[TSource, bool]) -> int:
"""Extension from: System.Linq.ParallelEnumerable"""
def Count(self) -> int:
"""Extension from: System.Linq.ParallelEnumerable"""
def DefaultIfEmpty(self, defaultValue: TSource) -> ParallelQuery[TSource]:
"""Extension from: System.Linq.ParallelEnumerable"""
def DefaultIfEmpty(self) -> ParallelQuery[TSource]:
"""Extension from: System.Linq.ParallelEnumerable"""
def Distinct(self, comparer: _n_3_t_1[TSource]) -> ParallelQuery[TSource]:
"""Extension from: System.Linq.ParallelEnumerable"""
def Distinct(self) -> ParallelQuery[TSource]:
"""Extension from: System.Linq.ParallelEnumerable"""
def ElementAt(self, index: int) -> TSource:
"""Extension from: System.Linq.ParallelEnumerable"""
def ElementAtOrDefault(self, index: int) -> TSource:
"""Extension from: System.Linq.ParallelEnumerable"""
def Except(self, second: _n_3_t_0[TSource], comparer: _n_3_t_1[TSource]) -> ParallelQuery[TSource]:
"""Extension from: System.Linq.ParallelEnumerable"""
def Except(self, second: ParallelQuery[TSource], comparer: _n_3_t_1[TSource]) -> ParallelQuery[TSource]:
"""Extension from: System.Linq.ParallelEnumerable"""
def Except(self, second: _n_3_t_0[TSource]) -> ParallelQuery[TSource]:
"""Extension from: System.Linq.ParallelEnumerable"""
def Except(self, second: ParallelQuery[TSource]) -> ParallelQuery[TSource]:
"""Extension from: System.Linq.ParallelEnumerable"""
def First(self, predicate: _n_0_t_0[TSource, bool]) -> TSource:
"""Extension from: System.Linq.ParallelEnumerable"""
def First(self) -> TSource:
"""Extension from: System.Linq.ParallelEnumerable"""
def FirstOrDefault(self, predicate: _n_0_t_0[TSource, bool]) -> TSource:
"""Extension from: System.Linq.ParallelEnumerable"""
def FirstOrDefault(self) -> TSource:
"""Extension from: System.Linq.ParallelEnumerable"""
def ForAll(self, action: _n_0_t_2[TSource]):
"""Extension from: System.Linq.ParallelEnumerable"""
def GroupBy(self, keySelector: _n_0_t_0[TSource, typing.Any], elementSelector: _n_0_t_0[TSource, typing.Any], resultSelector: _n_0_t_0[typing.Any, _n_3_t_0[typing.Any], typing.Any], comparer: _n_3_t_1[typing.Any]) -> ParallelQuery[typing.Any]:
"""Extension from: System.Linq.ParallelEnumerable"""
def GroupBy(self, keySelector: _n_0_t_0[TSource, typing.Any], elementSelector: _n_0_t_0[TSource, typing.Any], resultSelector: _n_0_t_0[typing.Any, _n_3_t_0[typing.Any], typing.Any]) -> ParallelQuery[typing.Any]:
"""Extension from: System.Linq.ParallelEnumerable"""
def GroupBy(self, keySelector: _n_0_t_0[TSource, typing.Any], elementSelector: _n_0_t_0[TSource, typing.Any], comparer: _n_3_t_1[typing.Any]) -> ParallelQuery[IGrouping[typing.Any, typing.Any]]:
"""Extension from: System.Linq.ParallelEnumerable"""
def GroupBy(self, keySelector: _n_0_t_0[TSource, typing.Any], elementSelector: _n_0_t_0[TSource, typing.Any]) -> ParallelQuery[IGrouping[typing.Any, typing.Any]]:
"""Extension from: System.Linq.ParallelEnumerable"""
def GroupBy(self, keySelector: _n_0_t_0[TSource, typing.Any], comparer: _n_3_t_1[typing.Any]) -> ParallelQuery[IGrouping[typing.Any, TSource]]:
"""Extension from: System.Linq.ParallelEnumerable"""
def GroupBy(self, keySelector: _n_0_t_0[TSource, typing.Any]) -> ParallelQuery[IGrouping[typing.Any, TSource]]:
"""Extension from: System.Linq.ParallelEnumerable"""
def GroupJoin(self, inner: _n_3_t_0[typing.Any], outerKeySelector: _n_0_t_0[typing.Any, typing.Any], innerKeySelector: _n_0_t_0[typing.Any, typing.Any], resultSelector: _n_0_t_0[typing.Any, _n_3_t_0[typing.Any], typing.Any], comparer: _n_3_t_1[typing.Any]) -> ParallelQuery[typing.Any]:
"""Extension from: System.Linq.ParallelEnumerable"""
def GroupJoin(self, inner: ParallelQuery[typing.Any], outerKeySelector: _n_0_t_0[typing.Any, typing.Any], innerKeySelector: _n_0_t_0[typing.Any, typing.Any], resultSelector: _n_0_t_0[typing.Any, _n_3_t_0[typing.Any], typing.Any], comparer: _n_3_t_1[typing.Any]) -> ParallelQuery[typing.Any]:
"""Extension from: System.Linq.ParallelEnumerable"""
def GroupJoin(self, inner: _n_3_t_0[typing.Any], outerKeySelector: _n_0_t_0[typing.Any, typing.Any], innerKeySelector: _n_0_t_0[typing.Any, typing.Any], resultSelector: _n_0_t_0[typing.Any, _n_3_t_0[typing.Any], typing.Any]) -> ParallelQuery[typing.Any]:
"""Extension from: System.Linq.ParallelEnumerable"""
def GroupJoin(self, inner: ParallelQuery[typing.Any], outerKeySelector: _n_0_t_0[typing.Any, typing.Any], innerKeySelector: _n_0_t_0[typing.Any, typing.Any], resultSelector: _n_0_t_0[typing.Any, _n_3_t_0[typing.Any], typing.Any]) -> ParallelQuery[typing.Any]:
"""Extension from: System.Linq.ParallelEnumerable"""
def Intersect(self, second: _n_3_t_0[TSource], comparer: _n_3_t_1[TSource]) -> ParallelQuery[TSource]:
"""Extension from: System.Linq.ParallelEnumerable"""
def Intersect(self, second: ParallelQuery[TSource], comparer: _n_3_t_1[TSource]) -> ParallelQuery[TSource]:
"""Extension from: System.Linq.ParallelEnumerable"""
def Intersect(self, second: _n_3_t_0[TSource]) -> ParallelQuery[TSource]:
"""Extension from: System.Linq.ParallelEnumerable"""
def Intersect(self, second: ParallelQuery[TSource]) -> ParallelQuery[TSource]:
"""Extension from: System.Linq.ParallelEnumerable"""
def Join(self, inner: _n_3_t_0[typing.Any], outerKeySelector: _n_0_t_0[typing.Any, typing.Any], innerKeySelector: _n_0_t_0[typing.Any, typing.Any], resultSelector: _n_0_t_0[typing.Any, typing.Any, typing.Any], comparer: _n_3_t_1[typing.Any]) -> ParallelQuery[typing.Any]:
"""Extension from: System.Linq.ParallelEnumerable"""
def Join(self, inner: ParallelQuery[typing.Any], outerKeySelector: _n_0_t_0[typing.Any, typing.Any], innerKeySelector: _n_0_t_0[typing.Any, typing.Any], resultSelector: _n_0_t_0[typing.Any, typing.Any, typing.Any], comparer: _n_3_t_1[typing.Any]) -> ParallelQuery[typing.Any]:
"""Extension from: System.Linq.ParallelEnumerable"""
def Join(self, inner: _n_3_t_0[typing.Any], outerKeySelector: _n_0_t_0[typing.Any, typing.Any], innerKeySelector: _n_0_t_0[typing.Any, typing.Any], resultSelector: _n_0_t_0[typing.Any, typing.Any, typing.Any]) -> ParallelQuery[typing.Any]:
"""Extension from: System.Linq.ParallelEnumerable"""
def Join(self, inner: ParallelQuery[typing.Any], outerKeySelector: _n_0_t_0[typing.Any, typing.Any], innerKeySelector: _n_0_t_0[typing.Any, typing.Any], resultSelector: _n_0_t_0[typing.Any, typing.Any, typing.Any]) -> ParallelQuery[typing.Any]:
"""Extension from: System.Linq.ParallelEnumerable"""
def Last(self, predicate: _n_0_t_0[TSource, bool]) -> TSource:
"""Extension from: System.Linq.ParallelEnumerable"""
def Last(self) -> TSource:
"""Extension from: System.Linq.ParallelEnumerable"""
def LastOrDefault(self, predicate: _n_0_t_0[TSource, bool]) -> TSource:
"""Extension from: System.Linq.ParallelEnumerable"""
def LastOrDefault(self) -> TSource:
"""Extension from: System.Linq.ParallelEnumerable"""
def LongCount(self, predicate: _n_0_t_0[TSource, bool]) -> int:
"""Extension from: System.Linq.ParallelEnumerable"""
def LongCount(self) -> int:
"""Extension from: System.Linq.ParallelEnumerable"""
def Max(self, selector: _n_0_t_0[TSource, int]) -> int:
"""Extension from: System.Linq.ParallelEnumerable"""
def Max(self) -> int:
"""Extension from: System.Linq.ParallelEnumerable"""
def Min(self, selector: _n_0_t_0[TSource, int]) -> int:
"""Extension from: System.Linq.ParallelEnumerable"""
def Min(self) -> int:
"""Extension from: System.Linq.ParallelEnumerable"""
def OfType(self) -> ParallelQuery[typing.Any]:
"""Extension from: System.Linq.ParallelEnumerable"""
def OrderBy(self, keySelector: _n_0_t_0[TSource, typing.Any], comparer: _n_3_t_2[typing.Any]) -> OrderedParallelQuery[TSource]:
"""Extension from: System.Linq.ParallelEnumerable"""
def OrderBy(self, keySelector: _n_0_t_0[TSource, typing.Any]) -> OrderedParallelQuery[TSource]:
"""Extension from: System.Linq.ParallelEnumerable"""
def OrderByDescending(self, keySelector: _n_0_t_0[TSource, typing.Any], comparer: _n_3_t_2[typing.Any]) -> OrderedParallelQuery[TSource]:
"""Extension from: System.Linq.ParallelEnumerable"""
def OrderByDescending(self, keySelector: _n_0_t_0[TSource, typing.Any]) -> OrderedParallelQuery[TSource]:
"""Extension from: System.Linq.ParallelEnumerable"""
def Reverse(self) -> ParallelQuery[TSource]:
"""Extension from: System.Linq.ParallelEnumerable"""
def Select(self, selector: _n_0_t_0[TSource, typing.Any]) -> ParallelQuery[typing.Any]:
"""Extension from: System.Linq.ParallelEnumerable"""
def SelectMany(self, collectionSelector: _n_0_t_0[TSource, _n_3_t_0[typing.Any]], resultSelector: _n_0_t_0[TSource, typing.Any, typing.Any]) -> ParallelQuery[typing.Any]:
"""Extension from: System.Linq.ParallelEnumerable"""
def SelectMany(self, selector: _n_0_t_0[TSource, _n_3_t_0[typing.Any]]) -> ParallelQuery[typing.Any]:
"""Extension from: System.Linq.ParallelEnumerable"""
def SequenceEqual(self, second: _n_3_t_0[TSource], comparer: _n_3_t_1[TSource]) -> bool:
"""Extension from: System.Linq.ParallelEnumerable"""
def SequenceEqual(self, second: ParallelQuery[TSource], comparer: _n_3_t_1[TSource]) -> bool:
"""Extension from: System.Linq.ParallelEnumerable"""
def SequenceEqual(self, second: _n_3_t_0[TSource]) -> bool:
"""Extension from: System.Linq.ParallelEnumerable"""
def SequenceEqual(self, second: ParallelQuery[TSource]) -> bool:
"""Extension from: System.Linq.ParallelEnumerable"""
def Single(self, predicate: _n_0_t_0[TSource, bool]) -> TSource:
"""Extension from: System.Linq.ParallelEnumerable"""
def Single(self) -> TSource:
"""Extension from: System.Linq.ParallelEnumerable"""
def SingleOrDefault(self, predicate: _n_0_t_0[TSource, bool]) -> TSource:
"""Extension from: System.Linq.ParallelEnumerable"""
def SingleOrDefault(self) -> TSource:
"""Extension from: System.Linq.ParallelEnumerable"""
def Skip(self, count: int) -> ParallelQuery[TSource]:
"""Extension from: System.Linq.ParallelEnumerable"""
def SkipWhile(self, predicate: _n_0_t_0[TSource, bool]) -> ParallelQuery[TSource]:
"""Extension from: System.Linq.ParallelEnumerable"""
def Sum(self, selector: _n_0_t_0[TSource, int]) -> int:
"""Extension from: System.Linq.ParallelEnumerable"""
def Sum(self) -> int:
"""Extension from: System.Linq.ParallelEnumerable"""
def Take(self, count: int) -> ParallelQuery[TSource]:
"""Extension from: System.Linq.ParallelEnumerable"""
def TakeWhile(self, predicate: _n_0_t_0[TSource, bool]) -> ParallelQuery[TSource]:
"""Extension from: System.Linq.ParallelEnumerable"""
def ToArray(self) -> _n_0_t_1[TSource]:
"""Extension from: System.Linq.ParallelEnumerable"""
def ToDictionary(self, keySelector: _n_0_t_0[TSource, typing.Any], elementSelector: _n_0_t_0[TSource, typing.Any], comparer: _n_3_t_1[typing.Any]) -> _n_3_t_3[typing.Any, typing.Any]:
"""Extension from: System.Linq.ParallelEnumerable"""
def ToDictionary(self, keySelector: _n_0_t_0[TSource, typing.Any], elementSelector: _n_0_t_0[TSource, typing.Any]) -> _n_3_t_3[typing.Any, typing.Any]:
"""Extension from: System.Linq.ParallelEnumerable"""
def ToDictionary(self, keySelector: _n_0_t_0[TSource, typing.Any], comparer: _n_3_t_1[typing.Any]) -> _n_3_t_3[typing.Any, TSource]:
"""Extension from: System.Linq.ParallelEnumerable"""
def ToDictionary(self, keySelector: _n_0_t_0[TSource, typing.Any]) -> _n_3_t_3[typing.Any, TSource]:
"""Extension from: System.Linq.ParallelEnumerable"""
def ToList(self) -> _n_3_t_5[TSource]:
"""Extension from: System.Linq.ParallelEnumerable"""
def ToLookup(self, keySelector: _n_0_t_0[TSource, typing.Any], elementSelector: _n_0_t_0[TSource, typing.Any], comparer: _n_3_t_1[typing.Any]) -> ILookup[typing.Any, typing.Any]:
"""Extension from: System.Linq.ParallelEnumerable"""
def ToLookup(self, keySelector: _n_0_t_0[TSource, typing.Any], elementSelector: _n_0_t_0[TSource, typing.Any]) -> ILookup[typing.Any, typing.Any]:
"""Extension from: System.Linq.ParallelEnumerable"""
def ToLookup(self, keySelector: _n_0_t_0[TSource, typing.Any], comparer: _n_3_t_1[typing.Any]) -> ILookup[typing.Any, TSource]:
"""Extension from: System.Linq.ParallelEnumerable"""
def ToLookup(self, keySelector: _n_0_t_0[TSource, typing.Any]) -> ILookup[typing.Any, TSource]:
"""Extension from: System.Linq.ParallelEnumerable"""
def Union(self, second: _n_3_t_0[TSource], comparer: _n_3_t_1[TSource]) -> ParallelQuery[TSource]:
"""Extension from: System.Linq.ParallelEnumerable"""
def Union(self, second: ParallelQuery[TSource], comparer: _n_3_t_1[TSource]) -> ParallelQuery[TSource]:
"""Extension from: System.Linq.ParallelEnumerable"""
def Union(self, second: _n_3_t_0[TSource]) -> ParallelQuery[TSource]:
"""Extension from: System.Linq.ParallelEnumerable"""
def Union(self, second: ParallelQuery[TSource]) -> ParallelQuery[TSource]:
"""Extension from: System.Linq.ParallelEnumerable"""
def Where(self, predicate: _n_0_t_0[TSource, bool]) -> ParallelQuery[TSource]:
"""Extension from: System.Linq.ParallelEnumerable"""
def WithCancellation(self, cancellationToken: _n_5_t_0) -> ParallelQuery[TSource]:
"""Extension from: System.Linq.ParallelEnumerable"""
def WithDegreeOfParallelism(self, degreeOfParallelism: int) -> ParallelQuery[TSource]:
"""Extension from: System.Linq.ParallelEnumerable"""
def WithExecutionMode(self, executionMode: ParallelExecutionMode) -> ParallelQuery[TSource]:
"""Extension from: System.Linq.ParallelEnumerable"""
def WithMergeOptions(self, mergeOptions: ParallelMergeOptions) -> ParallelQuery[TSource]:
"""Extension from: System.Linq.ParallelEnumerable"""
def Zip(self, second: _n_3_t_0[typing.Any], resultSelector: _n_0_t_0[typing.Any, typing.Any, typing.Any]) -> ParallelQuery[typing.Any]:
"""Extension from: System.Linq.ParallelEnumerable"""
def Zip(self, second: ParallelQuery[typing.Any], resultSelector: _n_0_t_0[typing.Any, typing.Any, typing.Any]) -> ParallelQuery[typing.Any]:
"""Extension from: System.Linq.ParallelEnumerable"""
class Queryable(object):
@staticmethod
def Aggregate(source: IQueryable[typing.Any], seed: typing.Any, func: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any, typing.Any]], selector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]]) -> typing.Any:...
@staticmethod
def Aggregate(source: IQueryable[typing.Any], seed: typing.Any, func: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any, typing.Any]]) -> typing.Any:...
@staticmethod
def Aggregate(source: IQueryable[typing.Any], func: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any, typing.Any]]) -> typing.Any:...
@staticmethod
def All(source: IQueryable[typing.Any], predicate: _n_4_t_0[_n_0_t_0[typing.Any, bool]]) -> bool:...
@staticmethod
def Any(source: IQueryable[typing.Any], predicate: _n_4_t_0[_n_0_t_0[typing.Any, bool]]) -> bool:...
@staticmethod
def Any(source: IQueryable[typing.Any]) -> bool:...
@staticmethod
def AsQueryable(source: _n_1_t_0) -> IQueryable:...
@staticmethod
def AsQueryable(source: _n_3_t_0[typing.Any]) -> IQueryable[typing.Any]:...
@staticmethod
def Average(source: IQueryable[typing.Any], selector: _n_4_t_0[_n_0_t_0[typing.Any, int]]) -> float:...
@staticmethod
def Average(source: IQueryable[int]) -> float:...
@staticmethod
def Cast(source: IQueryable) -> IQueryable[typing.Any]:...
@staticmethod
def Concat(source1: IQueryable[typing.Any], source2: _n_3_t_0[typing.Any]) -> IQueryable[typing.Any]:...
@staticmethod
def Contains(source: IQueryable[typing.Any], item: typing.Any, comparer: _n_3_t_1[typing.Any]) -> bool:...
@staticmethod
def Contains(source: IQueryable[typing.Any], item: typing.Any) -> bool:...
@staticmethod
def Count(source: IQueryable[typing.Any], predicate: _n_4_t_0[_n_0_t_0[typing.Any, bool]]) -> int:...
@staticmethod
def Count(source: IQueryable[typing.Any]) -> int:...
@staticmethod
def DefaultIfEmpty(source: IQueryable[typing.Any], defaultValue: typing.Any) -> IQueryable[typing.Any]:...
@staticmethod
def DefaultIfEmpty(source: IQueryable[typing.Any]) -> IQueryable[typing.Any]:...
@staticmethod
def Distinct(source: IQueryable[typing.Any], comparer: _n_3_t_1[typing.Any]) -> IQueryable[typing.Any]:...
@staticmethod
def Distinct(source: IQueryable[typing.Any]) -> IQueryable[typing.Any]:...
@staticmethod
def ElementAt(source: IQueryable[typing.Any], index: int) -> typing.Any:...
@staticmethod
def ElementAtOrDefault(source: IQueryable[typing.Any], index: int) -> typing.Any:...
@staticmethod
def Except(source1: IQueryable[typing.Any], source2: _n_3_t_0[typing.Any], comparer: _n_3_t_1[typing.Any]) -> IQueryable[typing.Any]:...
@staticmethod
def Except(source1: IQueryable[typing.Any], source2: _n_3_t_0[typing.Any]) -> IQueryable[typing.Any]:...
@staticmethod
def First(source: IQueryable[typing.Any], predicate: _n_4_t_0[_n_0_t_0[typing.Any, bool]]) -> typing.Any:...
@staticmethod
def First(source: IQueryable[typing.Any]) -> typing.Any:...
@staticmethod
def FirstOrDefault(source: IQueryable[typing.Any], predicate: _n_4_t_0[_n_0_t_0[typing.Any, bool]]) -> typing.Any:...
@staticmethod
def FirstOrDefault(source: IQueryable[typing.Any]) -> typing.Any:...
@staticmethod
def GroupBy(source: IQueryable[typing.Any], keySelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]], elementSelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]], resultSelector: _n_4_t_0[_n_0_t_0[typing.Any, _n_3_t_0[typing.Any], typing.Any]], comparer: _n_3_t_1[typing.Any]) -> IQueryable[typing.Any]:...
@staticmethod
def GroupBy(source: IQueryable[typing.Any], keySelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]], elementSelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]], resultSelector: _n_4_t_0[_n_0_t_0[typing.Any, _n_3_t_0[typing.Any], typing.Any]]) -> IQueryable[typing.Any]:...
@staticmethod
def GroupBy(source: IQueryable[typing.Any], keySelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]], elementSelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]], comparer: _n_3_t_1[typing.Any]) -> IQueryable[IGrouping[typing.Any, typing.Any]]:...
@staticmethod
def GroupBy(source: IQueryable[typing.Any], keySelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]], comparer: _n_3_t_1[typing.Any]) -> IQueryable[IGrouping[typing.Any, typing.Any]]:...
@staticmethod
def GroupBy(source: IQueryable[typing.Any], keySelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]], elementSelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]]) -> IQueryable[IGrouping[typing.Any, typing.Any]]:...
@staticmethod
def GroupBy(source: IQueryable[typing.Any], keySelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]]) -> IQueryable[IGrouping[typing.Any, typing.Any]]:...
@staticmethod
def GroupJoin(outer: IQueryable[typing.Any], inner: _n_3_t_0[typing.Any], outerKeySelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]], innerKeySelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]], resultSelector: _n_4_t_0[_n_0_t_0[typing.Any, _n_3_t_0[typing.Any], typing.Any]], comparer: _n_3_t_1[typing.Any]) -> IQueryable[typing.Any]:...
@staticmethod
def GroupJoin(outer: IQueryable[typing.Any], inner: _n_3_t_0[typing.Any], outerKeySelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]], innerKeySelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]], resultSelector: _n_4_t_0[_n_0_t_0[typing.Any, _n_3_t_0[typing.Any], typing.Any]]) -> IQueryable[typing.Any]:...
@staticmethod
def Intersect(source1: IQueryable[typing.Any], source2: _n_3_t_0[typing.Any], comparer: _n_3_t_1[typing.Any]) -> IQueryable[typing.Any]:...
@staticmethod
def Intersect(source1: IQueryable[typing.Any], source2: _n_3_t_0[typing.Any]) -> IQueryable[typing.Any]:...
@staticmethod
def Join(outer: IQueryable[typing.Any], inner: _n_3_t_0[typing.Any], outerKeySelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]], innerKeySelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]], resultSelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any, typing.Any]], comparer: _n_3_t_1[typing.Any]) -> IQueryable[typing.Any]:...
@staticmethod
def Join(outer: IQueryable[typing.Any], inner: _n_3_t_0[typing.Any], outerKeySelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]], innerKeySelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]], resultSelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any, typing.Any]]) -> IQueryable[typing.Any]:...
@staticmethod
def Last(source: IQueryable[typing.Any], predicate: _n_4_t_0[_n_0_t_0[typing.Any, bool]]) -> typing.Any:...
@staticmethod
def Last(source: IQueryable[typing.Any]) -> typing.Any:...
@staticmethod
def LastOrDefault(source: IQueryable[typing.Any], predicate: _n_4_t_0[_n_0_t_0[typing.Any, bool]]) -> typing.Any:...
@staticmethod
def LastOrDefault(source: IQueryable[typing.Any]) -> typing.Any:...
@staticmethod
def LongCount(source: IQueryable[typing.Any], predicate: _n_4_t_0[_n_0_t_0[typing.Any, bool]]) -> int:...
@staticmethod
def LongCount(source: IQueryable[typing.Any]) -> int:...
@staticmethod
def Max(source: IQueryable[typing.Any], selector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]]) -> typing.Any:...
@staticmethod
def Max(source: IQueryable[typing.Any]) -> typing.Any:...
@staticmethod
def Min(source: IQueryable[typing.Any], selector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]]) -> typing.Any:...
@staticmethod
def Min(source: IQueryable[typing.Any]) -> typing.Any:...
@staticmethod
def OfType(source: IQueryable) -> IQueryable[typing.Any]:...
@staticmethod
def OrderBy(source: IQueryable[typing.Any], keySelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]], comparer: _n_3_t_2[typing.Any]) -> IOrderedQueryable[typing.Any]:...
@staticmethod
def OrderBy(source: IQueryable[typing.Any], keySelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]]) -> IOrderedQueryable[typing.Any]:...
@staticmethod
def OrderByDescending(source: IQueryable[typing.Any], keySelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]], comparer: _n_3_t_2[typing.Any]) -> IOrderedQueryable[typing.Any]:...
@staticmethod
def OrderByDescending(source: IQueryable[typing.Any], keySelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]]) -> IOrderedQueryable[typing.Any]:...
@staticmethod
def Reverse(source: IQueryable[typing.Any]) -> IQueryable[typing.Any]:...
@staticmethod
def Select(source: IQueryable[typing.Any], selector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]]) -> IQueryable[typing.Any]:...
@staticmethod
def SelectMany(source: IQueryable[typing.Any], collectionSelector: _n_4_t_0[_n_0_t_0[typing.Any, int, _n_3_t_0[typing.Any]]], resultSelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any, typing.Any]]) -> IQueryable[typing.Any]:...
@staticmethod
def SelectMany(source: IQueryable[typing.Any], selector: _n_4_t_0[_n_0_t_0[typing.Any, _n_3_t_0[typing.Any]]]) -> IQueryable[typing.Any]:...
@staticmethod
def SequenceEqual(source1: IQueryable[typing.Any], source2: _n_3_t_0[typing.Any], comparer: _n_3_t_1[typing.Any]) -> bool:...
@staticmethod
def SequenceEqual(source1: IQueryable[typing.Any], source2: _n_3_t_0[typing.Any]) -> bool:...
@staticmethod
def Single(source: IQueryable[typing.Any], predicate: _n_4_t_0[_n_0_t_0[typing.Any, bool]]) -> typing.Any:...
@staticmethod
def Single(source: IQueryable[typing.Any]) -> typing.Any:...
@staticmethod
def SingleOrDefault(source: IQueryable[typing.Any], predicate: _n_4_t_0[_n_0_t_0[typing.Any, bool]]) -> typing.Any:...
@staticmethod
def SingleOrDefault(source: IQueryable[typing.Any]) -> typing.Any:...
@staticmethod
def Skip(source: IQueryable[typing.Any], count: int) -> IQueryable[typing.Any]:...
@staticmethod
def SkipWhile(source: IQueryable[typing.Any], predicate: _n_4_t_0[_n_0_t_0[typing.Any, bool]]) -> IQueryable[typing.Any]:...
@staticmethod
def Sum(source: IQueryable[typing.Any], selector: _n_4_t_0[_n_0_t_0[typing.Any, int]]) -> int:...
@staticmethod
def Sum(source: IQueryable[int]) -> int:...
@staticmethod
def Take(source: IQueryable[typing.Any], count: int) -> IQueryable[typing.Any]:...
@staticmethod
def TakeWhile(source: IQueryable[typing.Any], predicate: _n_4_t_0[_n_0_t_0[typing.Any, bool]]) -> IQueryable[typing.Any]:...
@staticmethod
def ThenBy(source: IOrderedQueryable[typing.Any], keySelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]], comparer: _n_3_t_2[typing.Any]) -> IOrderedQueryable[typing.Any]:...
@staticmethod
def ThenBy(source: IOrderedQueryable[typing.Any], keySelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]]) -> IOrderedQueryable[typing.Any]:...
@staticmethod
def ThenByDescending(source: IOrderedQueryable[typing.Any], keySelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]], comparer: _n_3_t_2[typing.Any]) -> IOrderedQueryable[typing.Any]:...
@staticmethod
def ThenByDescending(source: IOrderedQueryable[typing.Any], keySelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any]]) -> IOrderedQueryable[typing.Any]:...
@staticmethod
def Union(source1: IQueryable[typing.Any], source2: _n_3_t_0[typing.Any], comparer: _n_3_t_1[typing.Any]) -> IQueryable[typing.Any]:...
@staticmethod
def Union(source1: IQueryable[typing.Any], source2: _n_3_t_0[typing.Any]) -> IQueryable[typing.Any]:...
@staticmethod
def Where(source: IQueryable[typing.Any], predicate: _n_4_t_0[_n_0_t_0[typing.Any, bool]]) -> IQueryable[typing.Any]:...
@staticmethod
def Zip(source1: IQueryable[typing.Any], source2: _n_3_t_0[typing.Any], resultSelector: _n_4_t_0[_n_0_t_0[typing.Any, typing.Any, typing.Any]]) -> IQueryable[typing.Any]:...
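
# Editor's note, not part of the generated bindings above: the stubs list
# every .NET overload as a repeated `def`, which a Python type checker only
# accepts when each variant is marked with @typing.overload. A minimal
# sketch of that pattern, using a hypothetical _QueryableSketch class:
_T_sketch = typing.TypeVar("_T_sketch")

class _QueryableSketch(typing.Generic[_T_sketch]):
    def __init__(self, items: typing.Iterable[_T_sketch]):
        self._items = list(items)

    @typing.overload
    def Count(self) -> int: ...
    @typing.overload
    def Count(self, predicate: typing.Callable[[_T_sketch], bool]) -> int: ...

    def Count(self, predicate=None):
        # One runtime implementation stands behind the two typed overloads.
        return sum(1 for x in self._items
                   if predicate is None or predicate(x))

# Usage: _QueryableSketch([1, 2, 3]).Count(lambda x: x > 1)  -> 2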
# ----------------------------------------------------------------------
# End of System.Linq stub record (author: 34087817+xsfhlzh@users.noreply.github.com).
# Next file: /343.py — repo sunyi1001/leetcode, no license, Python, UTF-8, 424 bytes.
# ----------------------------------------------------------------------
class Solution:
    def integerBreak(self, n):
        """
        :type n: int
        :rtype: int
        """
        # memo[i] = maximum product obtainable by splitting i into at
        # least two positive integers.
        memo = [-1 for _ in range(n + 1)]
        memo[1] = 1
        for i in range(1, n + 1):
            for j in range(1, i):
                # Split i into j and (i - j): keep (i - j) whole or break
                # it further via memo[i - j].
                memo[i] = max(j * memo[i - j], j * (i - j), memo[i])
        return memo[n]

sol = Solution()
res = sol.integerBreak(11)
print(res)  # 54 == 3 * 3 * 3 * 2
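
# A hedged aside (editor's addition): the O(n^2) DP above agrees with a
# well-known closed form — for n >= 4 the optimal split uses as many 3s as
# possible, trading a leftover 1 for 2 + 2.
def integer_break_greedy(n):
    if n <= 3:
        return n - 1                 # at least two parts are required
    q, r = divmod(n, 3)
    if r == 0:
        return 3 ** q
    if r == 1:
        return 3 ** (q - 1) * 4      # replace 3 + 1 with 2 + 2
    return 3 ** q * 2

assert integer_break_greedy(11) == sol.integerBreak(11) == 54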
# ----------------------------------------------------------------------
# End of /343.py record (author: wenjunlin@100tal.com).
# Next file: /PandaModel.py — repo jaylion321/UITest, no license, Python, UTF-8, 7,228 bytes.
# ----------------------------------------------------------------------
import os
import pyqtgraph as pg
import pandas as pd
from pyqtgraph.Qt import QtGui, QtCore, QtWidgets
from functools import partial
class DataFrameModel(QtCore.QAbstractTableModel):
DtypeRole = QtCore.Qt.UserRole + 1000
ValueRole = QtCore.Qt.UserRole + 1001
def __init__(self, df=pd.DataFrame(), parent=None):
super(DataFrameModel, self).__init__(parent)
self._dataframe = df
def setDataFrame(self, dataframe):
self.beginResetModel()
self._dataframe = dataframe.copy()
self.endResetModel()
def dataFrame(self):
return self._dataframe
dataFrame = QtCore.pyqtProperty(pd.DataFrame, fget=dataFrame, fset=setDataFrame)
@QtCore.pyqtSlot(int, QtCore.Qt.Orientation, result=str)
def headerData(self, section: int, orientation: QtCore.Qt.Orientation, role: int = QtCore.Qt.DisplayRole):
if role == QtCore.Qt.DisplayRole:
if orientation == QtCore.Qt.Horizontal:
return self._dataframe.columns[section]
else:
return str(self._dataframe.index[section])
return QtCore.QVariant()
def rowCount(self, parent=QtCore.QModelIndex()):
if parent.isValid():
return 0
return len(self._dataframe.index)
def columnCount(self, parent=QtCore.QModelIndex()):
if parent.isValid():
return 0
return self._dataframe.columns.size
def data(self, index, role=QtCore.Qt.DisplayRole):
if not index.isValid() or not (0 <= index.row() < self.rowCount() \
and 0 <= index.column() < self.columnCount()):
return QtCore.QVariant()
row = self._dataframe.index[index.row()]
col = self._dataframe.columns[index.column()]
# print(type(self._dataframe[col]),col)
dt = self._dataframe[col].dtype
        val = self._dataframe.iloc[index.row()][col]  # positional row; 'row' above is the index label, not a position
if role == QtCore.Qt.DisplayRole:
return str(val)
elif role == DataFrameModel.ValueRole:
return val
if role == DataFrameModel.DtypeRole:
return dt
return QtCore.QVariant()
def roleNames(self):
roles = {
QtCore.Qt.DisplayRole: b'display',
DataFrameModel.DtypeRole: b'dtype',
DataFrameModel.ValueRole: b'value'
}
return roles
class CustomNode(object):
def __init__(self, data):
self._data = data
if type(data) == tuple:
self._data = list(data)
if type(data) is str or not hasattr(data, '__getitem__'):
self._data = [data]
self._columncount = len(self._data)
self._children = []
self._parent = None
self._row = 0
def data(self, column):
if column >= 0 and column < len(self._data):
return self._data[column]
def columnCount(self):
return self._columncount
def childCount(self):
return len(self._children)
def child(self, row):
if row >= 0 and row < self.childCount():
return self._children[row]
def parent(self):
return self._parent
def row(self):
return self._row
def addChild(self, child):
child._parent = self
child._row = len(self._children)
self._children.append(child)
self._columncount = max(child.columnCount(), self._columncount)
class TreeViewModel(QtCore.QAbstractItemModel):
def __init__(self, header : list, nodes : list):
QtCore.QAbstractItemModel.__init__(self)
self._root = CustomNode(None)
for node in nodes:
self._root.addChild(node)
self.header = header
def flags(self, index):
return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable
def rowCount(self, index):
        if index is not None and index.isValid():
return index.internalPointer().childCount()
return self._root.childCount()
def addChild(self, node, _parent):
if not _parent or not _parent.isValid():
parent = self._root
else:
parent = _parent.internalPointer()
parent.addChild(node)
def index(self, row, column, _parent=None):
if not _parent or not _parent.isValid():
parent = self._root
else:
parent = _parent.internalPointer()
# if not QtCore.QAbstractItemModel.hasIndex(self, row, column, _parent):
# return QtCore.QModelIndex()
child = parent.child(row)
if child:
return QtCore.QAbstractItemModel.createIndex(self, row, column, child)
else:
return QtCore.QModelIndex()
def parent(self, index):
if index.isValid():
p = index.internalPointer().parent()
if p:
return QtCore.QAbstractItemModel.createIndex(self, p.row(), 0, p)
return QtCore.QModelIndex()
def columnCount(self, index):
if index.isValid():
return index.internalPointer().columnCount()
return self._root.columnCount()
def data(self, index, role):
if not index.isValid():
return None
node = index.internalPointer()
if role == QtCore.Qt.DisplayRole:
return node.data(index.column())
return None
@QtCore.pyqtSlot(int, QtCore.Qt.Orientation, result=str)
def headerData(self, section: int, orientation: QtCore.Qt.Orientation, role: int = QtCore.Qt.DisplayRole):
if role == QtCore.Qt.DisplayRole:
if orientation == QtCore.Qt.Horizontal:
                if self.header is not None:
return self.header[section]
else:
# return str(self._dataframe.index[section])
return QtCore.QVariant()
return QtCore.QVariant()
class TableWidget():
def __init__(self,width,height,Tbl=None):
self.Tbl = Tbl
self.width = width
self.height = height
self.container = None
self.container = QtWidgets.QWidget()
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHeightForWidth(self.container.sizePolicy().hasHeightForWidth())
self.container.setSizePolicy(sizePolicy)
self.container.setFocusPolicy(QtCore.Qt.NoFocus)
self.container.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu)
self.container.setObjectName("Proxy")
self.container.layout = QtWidgets.QHBoxLayout()
self.tableView = QtWidgets.QTableView()
self.construct_widget()
def construct_widget(self):
self.model = DataFrameModel(self.Tbl)
self.tableView.setModel(self.model)
self.fitsize()
self.container.layout.addWidget(self.tableView)
self.container.setLayout(self.container.layout)
return
def fitsize(self):
self.tableView.setFixedWidth(self.width-100)
print (self.width,self.height)
self.tableView.resizeColumnsToContents()
self.tableView.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
return
def ret_widget(self):
return self.container
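# A minimal usage sketch (assumption: a PyQt5 backend is available through
# pyqtgraph.Qt); it wires a small DataFrame into the DataFrameModel above.
if __name__ == '__main__':
    import sys
    app = QtWidgets.QApplication(sys.argv)
    frame = pd.DataFrame({'name': ['a', 'b'], 'value': [1, 2]})
    view = QtWidgets.QTableView()
    view.setModel(DataFrameModel(frame))
    view.show()
    sys.exit(app.exec_())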
|
[
"jaykeelung@gmail.com"
] |
jaykeelung@gmail.com
|
43465af14d1a4c65e8c2260bcbed63781848f850
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2410/58575/300641.py
|
d1db791c85930be8170843531285fd8900b6b8d1
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 336
|
py
|
nums=list(map(int,input().split(",")))
diff=int(input())
res={}
for i in nums:
if i not in res:
res[i]=""
if i-diff in res:
res[i-diff]=i
maxLength=1
for i in res:
length=1
next=res[i]
while next!="":
next=res[next]
length=length+1
maxLength=max(maxLength,length)
print(maxLength)
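# Worked example (assumed input format: comma-separated values, then the
# difference): for "1,5,7,8,5,3,4,2,1" and -2, res chains 7 -> 5 -> 3 -> 1,
# so the longest arithmetic subsequence with difference -2 has length 4.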
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
5643a9ebd599e7db19f2826393c2a5714feed09b
|
d17fabdb0045a81234ebd23e802a9368ec150638
|
/bank_details/models.py
|
006e4becc4558b310be97d5c8f1c46b2af964c8c
|
[] |
no_license
|
asperaa/bank-app-jwt
|
8d0d94903e153259779ce732d5a532e50cbb23cd
|
cd2b6597aba4d21f37dddd04f33f6e7377d3b2a1
|
refs/heads/master
| 2022-05-26T14:34:44.500669
| 2019-07-13T17:24:59
| 2019-07-13T17:24:59
| 195,416,035
| 0
| 0
| null | 2022-05-25T03:44:58
| 2019-07-05T13:42:19
|
TSQL
|
UTF-8
|
Python
| false
| false
| 824
|
py
|
from django.db import models
class Banks(models.Model):
name = models.CharField(max_length=49, blank=True, null=True)
id = models.BigIntegerField(primary_key=True)
class Meta:
managed = False
db_table = 'banks'
class Branches(models.Model):
ifsc = models.CharField(primary_key=True, max_length=11)
bank = models.ForeignKey(Banks, models.DO_NOTHING, blank=True, null=True)
branch = models.CharField(max_length=74, blank=True, null=True)
address = models.CharField(max_length=195, blank=True, null=True)
city = models.CharField(max_length=50, blank=True, null=True)
district = models.CharField(max_length=50, blank=True, null=True)
state = models.CharField(max_length=26, blank=True, null=True)
class Meta:
managed = False
db_table = 'branches'
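# Example ORM usage (a sketch; 'SBI' and 'MUMBAI' are illustrative values):
# Branches.objects.filter(bank__name='SBI', city='MUMBAI').values('ifsc', 'branch')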
|
[
"adityaankur44@gmail.com"
] |
adityaankur44@gmail.com
|
43eb6f73990c2320a544c9f99a6c739bad1e0a14
|
0cb3253af733efabd7e5d1b0214f34d4aa8052d5
|
/h_only_snli_kaushik.py
|
78fa9c19b5067802558e2ddea0382224f405808e
|
[] |
no_license
|
anony-hons/nli-measure-artifact
|
ea26cd3bf6bd3e54fb6245f875dbaa740929f364
|
82ea0aed9f2b05c5bc306a1b2df3eb5a1448b2eb
|
refs/heads/master
| 2023-01-31T22:47:37.197655
| 2020-12-14T15:10:50
| 2020-12-14T15:10:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,106
|
py
|
from transformers import AlbertTokenizer, AlbertForSequenceClassification, AdamW, get_linear_schedule_with_warmup
# from transformers import BertTokenizer, BertForSequenceClassification, AdamW, get_linear_schedule_with_warmup
from torch.utils.data import TensorDataset, random_split
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from imblearn.over_sampling import RandomOverSampler
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score, confusion_matrix
from sklearn.dummy import DummyClassifier
from collections import defaultdict
import torch
import gc
import pandas as pd
import numpy as np
import time
import datetime
import random
device = torch.device("cuda:2")
def format_time(elapsed):
'''
Takes a time in seconds and returns a string hh:mm:ss
'''
elapsed_rounded = int(round((elapsed)))
return str(datetime.timedelta(seconds=elapsed_rounded))
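# e.g. format_time(3661) -> '1:01:01'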
def load_data(dataset, num_sample):
sampled_sentences = []
sampled_labels = []
sample_per_class = int(num_sample/12)
for label, sentences in dataset.items():
s_pairs = random.choices(sentences, k=sample_per_class)
for pair in s_pairs:
sampled_sentences += [pair[0], pair[1]]
sampled_labels += list(label) * sample_per_class
assert(len(sampled_sentences) == sample_per_class * 12)
assert(len(sampled_labels) == sample_per_class * 12)
tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')
encoding = tokenizer(sampled_sentences, return_tensors='pt', padding=True, truncation=True, max_length=128).to(device)
input_ids = encoding['input_ids']
attention_masks = encoding['attention_mask']
labels = torch.tensor(sampled_labels).unsqueeze(1).to(device)
return input_ids, attention_masks, labels
def load_sentences(file_path):
label_map ={
'entailment': 0,
'neutral': 1,
'contradiction': 2
}
df = pd.read_csv(file_path, sep='\t')
sentences = df.sentences.values
labels = df.labels.values
l = list(range(len(sentences)))
sentence_per_class = defaultdict(list)
for i in range(0, len(sentences), 2):
i1 = i
i2 = i + 1
sentence_per_class[(label_map[labels[i1]], label_map[labels[i2]])]\
.append((sentences[i1], sentences[i2]))
return sentence_per_class
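# Expected TSV layout (inferred from the columns read above): 'sentences' and
# 'labels', where rows are read two at a time, so rows 0 and 1 form one
# premise/hypothesis pair keyed by (label_0, label_1), rows 2 and 3 the next,
# and so on.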
def run(datasets, seed=42):
## META VARIABLES
random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
np.random.seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
epochs = 20
batch_size = 32
X_train, mask_train, y_train = load_data(datasets['train'], num_sample=int(625*0.7))
X_val, mask_val, y_val = load_data(datasets['val'], num_sample=int(625*0.1))
X_test, mask_test, y_test = load_data(datasets['test'], num_sample=int(625*0.2))
train_dataset = TensorDataset(X_train, mask_train, y_train)
val_dataset = TensorDataset(X_val, mask_val, y_val)
test_dataset = TensorDataset(X_test, mask_test, y_test)
# Dataloading
train_dataloader = DataLoader(train_dataset, sampler=RandomSampler(train_dataset), batch_size=batch_size)
validation_dataloader = DataLoader(val_dataset, sampler=RandomSampler(val_dataset), batch_size=batch_size)
prediction_sampler = SequentialSampler(test_dataset)
test_dataloader = DataLoader(test_dataset, sampler=prediction_sampler, batch_size=batch_size)
model = AlbertForSequenceClassification.from_pretrained('albert-base-v2', num_labels = 3).to(device)
optimizer = AdamW(filter(lambda p: p.requires_grad, model.parameters()), lr=1e-5)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=0, num_training_steps=len(train_dataloader) * epochs)
total_t0 = time.time()
best_state_dict = None
best_val = 0
for epoch_i in range(epochs):
# ========================================
# Training
# ========================================
print("")
print('======== Epoch {:} / {:} ========'.format(epoch_i + 1, epochs))
print('Training...')
# Measure how long the training epoch takes.
t0 = time.time()
total_train_loss = 0
model.train()
predictions_train = np.array([])
true_label_train = np.array([])
for step, batch in enumerate(train_dataloader):
# Progress update every 50 batches.
if step % 50 == 0 and not step == 0:
elapsed = format_time(time.time() - t0)
print(' Batch {:>5,} of {:>5,}. Elapsed: {:}.'.format(step, len(train_dataloader), elapsed))
b_input_ids = batch[0].to(device)
b_input_mask = batch[1].to(device)
b_labels = batch[2].to(device)
model.zero_grad()
loss, logits = model(b_input_ids,
token_type_ids=None,
attention_mask=b_input_mask,
labels=b_labels)
total_train_loss += loss.item() * b_labels.shape[0]
loss.backward()
optimizer.step()
scheduler.step()
gc.collect()
logits = logits.detach().cpu().numpy()
label_ids = b_labels.to('cpu').numpy()
predictions_train = np.append(predictions_train, np.argmax(logits, axis=1).flatten())
true_label_train = np.append(true_label_train, label_ids)
# Calculate the average loss over all of the batches.
accuracy_train = np.sum(predictions_train == true_label_train) / true_label_train.shape[0]
f1_macro_train = f1_score(true_label_train, predictions_train, average='macro')
f1_micro_train = f1_score(true_label_train, predictions_train, average='micro')
print("\n Training Accuracy: {0:.2f}".format(accuracy_train))
print(" Training F1-MACRO: {0:.2f}".format(f1_macro_train))
print(" Training F1-MICRO: {0:.2f}".format(f1_micro_train))
avg_train_loss = total_train_loss / true_label_train.shape[0]
training_time = format_time(time.time() - t0)
print(" Average training loss: {0:.2f}".format(avg_train_loss))
print(" Training epcoh took: {:}".format(training_time))
# ========================================
# Validation
# ========================================
print("\nRunning Validation...")
t0 = time.time()
model.eval()
total_val_loss = 0
predictions_val = np.array([])
true_label_val = np.array([])
for batch in validation_dataloader:
b_input_ids = batch[0].to(device)
b_input_mask = batch[1].to(device)
b_labels = batch[2].to(device)
with torch.no_grad():
(loss, logits) = model(b_input_ids,
token_type_ids=None,
attention_mask=b_input_mask,
labels=b_labels)
total_val_loss += loss.item() * b_labels.shape[0]
logits = logits.detach().cpu().numpy()
label_ids = b_labels.to('cpu').numpy()
predictions_val = np.append(predictions_val, np.argmax(logits, axis=1).flatten())
true_label_val = np.append(true_label_val, label_ids)
accuracy_val = np.sum(predictions_val == true_label_val) / true_label_val.shape[0]
f1_macro_val = f1_score(true_label_val, predictions_val, average='macro')
f1_micro_val = f1_score(true_label_val, predictions_val, average='micro')
print(" Accuracy: {0:.2f}".format(accuracy_val))
print(" F1-MACRO: {0:.2f}".format(f1_macro_val))
print(" F1-MICRO: {0:.2f}".format(f1_micro_val))
performance_metric = f1_macro_val
if performance_metric > best_val:
print("Best Model Updated.")
best_val = performance_metric
best_state_dict = model.state_dict()
avg_val_loss = total_val_loss / true_label_val.shape[0]
validation_time = format_time(time.time() - t0)
print(" Validation Loss: {0:.2f}".format(avg_val_loss))
print(" Validation took: {:}".format(validation_time))
print("\nTraining complete!")
print("Total training took {:} (h:mm:ss)".format(format_time(time.time()-total_t0)))
# ========================================
# Test
# ========================================
model.load_state_dict(best_state_dict)
model.eval()
predictions_test = np.array([])
true_label_test = np.array([])
for batch in test_dataloader:
batch = tuple(t.to(device) for t in batch)
b_input_ids, b_input_mask, b_labels = batch
with torch.no_grad():
outputs = model(b_input_ids, token_type_ids=None,
attention_mask=b_input_mask)
logits = outputs[0]
logits = logits.detach().cpu().numpy()
label_ids = b_labels.to('cpu').numpy()
predictions_test = np.append(predictions_test, np.argmax(logits, axis=1).flatten())
true_label_test = np.append(true_label_test, label_ids)
best_accr = np.sum(predictions_test == true_label_test) / true_label_test.shape[0]
best_macro_f1 = f1_score(true_label_test, predictions_test, average='macro')
best_micro_f1 = f1_score(true_label_test, predictions_test, average='micro')
best_confusion_matrix = confusion_matrix(true_label_test, predictions_test)
print(" Test Accuracy: {0:.2f}".format(best_accr))
print(" Test F1-MACRO: {0:.2f}".format(best_macro_f1))
print(" Test F1-MICRO: {0:.2f}".format(best_micro_f1))
# ========================================
# Dummy Test
# ========================================
X_train = X_train.detach().cpu().numpy()
X_test = X_test.detach().cpu().numpy()
y_train = y_train.detach().cpu().numpy().squeeze(1)
y_test = y_test.detach().cpu().numpy().squeeze(1)
dummy_clf = DummyClassifier(strategy="uniform")
dummy_clf.fit(X_train, y_train)
predictions_dummy = dummy_clf.predict(X_test)
dummy_accr = np.sum(predictions_dummy == y_test) / y_test.shape[0]
dummy_macro_f1 = f1_score(y_test, predictions_dummy, average='macro')
dummy_micro_f1 = f1_score(y_test, predictions_dummy, average='micro')
print(" Dummy Accuracy: {0:.2f}".format(dummy_accr))
print(" Dummy F1-MACRO: {0:.2f}".format(dummy_macro_f1))
print(" Dummy F1-MICRO: {0:.2f}".format(dummy_micro_f1))
# torch.save(best_state_dict, './output/best_model_kaushik_sample.pt')
return {
'seed': seed,
'best_accr': best_accr,
'best_macro_f1': best_macro_f1,
'best_micro_f1': best_micro_f1,
'dummy_accr': dummy_accr,
'dummy_macro_f1': dummy_macro_f1,
'dummy_micro_f1': dummy_micro_f1
}
def run_many_times():
run_time = 34
seed = 108
result_record = pd.DataFrame([], columns=['seed', 'best_accr', 'best_macro_f1', 'best_micro_f1', 'dummy_accr', 'dummy_macro_f1', 'dummy_micro_f1'])
# Data PREP
train_dict = load_sentences('data/kaushik_train.tsv')
val_dict = load_sentences('data/kaushik_dev.tsv')
test_dict = load_sentences('data/kaushik_test.tsv')
datasets = {
'train': train_dict,
'val': val_dict,
'test': test_dict
}
for i in range(run_time):
result_df = run(datasets, seed=seed+i)
result_record = result_record.append(result_df, ignore_index=True)
result_record.to_csv('./output/result_kaushik_sample_{0}.csv'.format(seed))
# result_record.to_pickle('./output/result_kaushik_sample.pkl')
print("RUNTIME is", i)
def main():
run_many_times()
if __name__ == "__main__":
main()
|
[
"hoonhan.d@gmail.com"
] |
hoonhan.d@gmail.com
|
9f595d6dec06c5be029b8edb0074663b91c4ce88
|
7f781c4739d75c302b8635ff953dff923b774dd1
|
/npcr_ScanNet.py
|
4475ebf96906536bc00b4a86ad29afbbb10c38b4
|
[
"MIT"
] |
permissive
|
daipengwa/Neural-Point-Cloud-Rendering-via-Multi-Plane-Projection
|
68cdc53651236532851310ad755f945867a1a371
|
62c7930de0131842bb2e948f3d1fb1ce242a9f5a
|
refs/heads/master
| 2023-08-30T11:45:37.147671
| 2023-08-22T13:11:04
| 2023-08-22T13:11:04
| 253,273,490
| 50
| 8
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,130
|
py
|
from network import *
import cv2, os, time, math
import glob
import scipy.io as io
from loss import *
from utils import *
is_training = False # if test, set this 'False'
use_viewdirection = True # use view direction
renew_input = True # optimize input point features.
constant_initial = True # use constant value for initialization.
use_RGB = True # use RGB information for initialization.
random_crop = True # crop image.
d = 32 # how many planes are used, identity with pre-processing.
h = 480 # image height, identity with pre-processing.
w = 640 # image width, identity with pre-processing.
top_left_v = 0 # top left position
top_left_u = 0 # top left position
h_croped = 240 # crop size height
w_croped = 320 # crop size width
forward_time = 4 # optimize input point features after cropping 4 times on one image.
overlap = 32 # size of overlap region of crops.
channels_i = int(8) # dimension of input point features
channels_o = 3 # output image dimensions
channels_v = 3 # view direction dimensions
gpu_id = 3
num_epoch = 21
decrease_epoch = 7 # epochs, learning_rate_1 decreased.
learning_rate = 0.0001 # learning rate for network parameters optimization
learning_rate_1 = 0.01 # initial learning rate for input point features.
dataset = 'ScanNet' # datasets
scene = 'scene0010_00' # scene name
task = '%s_npcr_%s' % (dataset, scene) # task name, also path of checkpoints file
dir1 = 'data/%s/%s/color/' % (dataset, scene) # path of color image
dir2 = 'data/%s/%s/pose/' % (dataset, scene) # path of camera poses.
dir3 = 'pre_processing_results/%s/%s/reproject_results_%s/' % (dataset, scene, d) # voxelization information path.
dir4 = 'pre_processing_results/%s/%s/weight_%s/' % (dataset, scene, d) # aggregation information path.
dir5 = 'pre_processing_results/%s/%s/point_clouds_simplified.ply' % (dataset, scene) # point clouds file path
num_image = len(glob.glob(os.path.join(dir1, '*.jpg')))
image_names_train, index_names_train, camera_names_train, index_names_1_train,\
image_names_test, index_names_test, camera_names_test, index_names_1_test = prepare_data_ScanNet(dir1, dir2, dir3, dir4, num_image)
# load point clouds information
point_clouds, point_clouds_colors = loadfile(dir5)
num_points = point_clouds.shape[1]
# initial descriptor
descriptors = np.random.normal(0, 1, (1, num_points, channels_i))
if os.path.isfile('%s/descriptor.mat' % task):
content = io.loadmat('%s/descriptor.mat' % task)
descriptors = content['descriptors']
print('loaded descriptors.')
else:
if constant_initial:
descriptors = np.ones((1, num_points, channels_i), dtype=np.float32) * 0.5
if use_RGB:
descriptors[0, :, 0:3] = np.transpose(point_clouds_colors) / 255.0
os.environ["CUDA_VISIBLE_DEVICES"] = "%s" % gpu_id
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
sess = tf.Session()
input1 = tf.placeholder(dtype=tf.float32, shape=[1, d, None, None, channels_i])
input2 = tf.placeholder(dtype=tf.float32, shape=[1, d, None, None, channels_v])
output = tf.placeholder(dtype=tf.float32, shape=[1, None, None, channels_o])
with tf.variable_scope(tf.get_variable_scope()):
inputs = input1
total_channels = channels_i
if use_viewdirection:
inputs = tf.concat([input1, input2], axis=4)
total_channels = total_channels + channels_v
color_layer, alpha, network = neural_render(input=inputs, reuse=False, use_dilation=True)
loss, p0, p1, p2, p3, p4, p5 = VGG_loss(network, output, reuse=False)
loss_all = loss
# calculate gradient for aggregated point features.
gradient = tf.gradients(loss_all, input1)
var_list_all = [var for var in tf.trainable_variables()]
opt = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss_all, var_list=var_list_all)
saver = tf.train.Saver(var_list=var_list_all, max_to_keep=1000)
sess.run(tf.global_variables_initializer())
ckpt = tf.train.get_checkpoint_state(task)
if ckpt:
print('load ' + ckpt.model_checkpoint_path)
saver.restore(sess, ckpt.model_checkpoint_path)
##############################################################################################
if is_training:
print('begin training!')
all = np.zeros(20000, dtype=float)
cnt = 0
for epoch in range(num_epoch):
if epoch >= decrease_epoch:
learning_rate_1 = 0.005
if epoch >= decrease_epoch*2:
learning_rate_1 = 0.001
if os.path.isdir("%s/%04d" % (task, epoch)):
continue
for i in np.random.permutation(len(image_names_train)):
# for i in range(4):
st = time.time()
image_descriptor = np.zeros([1, d, h, w, channels_i], dtype=np.float32)
view_direction = np.zeros([1, d, h, w, channels_v], dtype=np.float32)
input_gradient_all = np.zeros([1, d, h, w, channels_i], dtype=np.float32)
count = np.zeros([1, d, h, w, 1], dtype=np.float32)
camera_name = camera_names_train[i]
index_name = index_names_train[i]
image_name = image_names_train[i]
index_name_1 = index_names_1_train[i]
if not (os.path.isfile(camera_name) and os.path.isfile(image_name) and os.path.isfile(index_name) and os.path.isfile(index_name_1)):
print("Missing file!")
continue
# we pre-process the voxelization and aggregation, in order to save time.
npzfile = np.load(index_name)
u = npzfile['u'] # u position on image plane
v = npzfile['v'] # v position on image plane
n = npzfile['d'] # indicates which plane
select_index = npzfile['select_index'] # select index of all points.
group_belongs = npzfile['group_belongs'] # points belong to which group/voxel
index_in_each_group = npzfile['index_in_each_group'] # index in each group/voxel
distance = npzfile['distance'] # distance to grid center
each_split_max_num = npzfile['each_split_max_num'] # max num of points in one group/voxel in each plane.
# load weight
npzfile_weight = np.load(index_name_1)
weight = npzfile_weight['weight_average'] # normalized weights for points aggregation.
distance_to_depth_min = npzfile_weight['distance_to_depth_min'] # distance to minimum depth value in one group/voxel.
# calculate update weight of each point feature
descriptor_renew_weight = (1-distance)*(1/(1+distance_to_depth_min))
extrinsic_matrix = CameraPoseRead(camera_name) # camera to world
camera_position = np.transpose(extrinsic_matrix[0:3, 3])
max_num = np.max(each_split_max_num) # max number of points in all group/voxel
group_descriptor = np.zeros([(max(group_belongs+1)), max_num, channels_i], dtype=np.float32)
group_descriptor[group_belongs, index_in_each_group, :] = descriptors[0, select_index, :] * np.expand_dims(weight, axis=1)
image_descriptor[0, n, v, u, :] = np.sum(group_descriptor, axis=1)[group_belongs, :]
view_direction[0, n, v, u, :] = np.transpose(point_clouds[0:3, select_index]) - camera_position
view_direction[0, n, v, u, :] = view_direction[0, n, v, u, :] / (np.tile(np.linalg.norm(view_direction[0, n, v, u, :], axis=1, keepdims=True), (1, 3)) + 1e-10)
image_output = np.expand_dims(cv2.resize(cv2.imread(image_name, -1), (w, h)), axis=0) / 255.0
if random_crop:
# limitation of memory etc, we crop the image.
# Also, we hope crops almost cover the whole image to uniformly optimize point features.
for j in np.random.permutation(forward_time):
movement_v = np.random.randint(0, overlap)
movement_u = np.random.randint(0, overlap)
if j==0:
top_left_u = 0 + movement_u
top_left_v = 0 + movement_v
if j==1:
top_left_u = w_croped - movement_u
top_left_v = 0 + movement_v
if j==2:
top_left_u = 0 + movement_u
top_left_v = h_croped - movement_v
if j==3:
top_left_u = w_croped - movement_u
top_left_v = h_croped - movement_v
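                    # The four j-branches anchor the 240x320 crop near the four
                    # quadrants of the 480x640 frame, each jittered by up to
                    # `overlap` pixels, so repeated passes cover the whole image
                    # with overlapping regions.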
[_, current_loss, l1, input_gradient] = sess.run([opt, loss_all, loss, gradient],
feed_dict={input1: image_descriptor[:, :, top_left_v:(top_left_v + h_croped), top_left_u:(top_left_u + w_croped), :],
input2: view_direction[:, :, top_left_v:(top_left_v + h_croped), top_left_u:(top_left_u + w_croped), :],
output: image_output[:, top_left_v:(top_left_v + h_croped), top_left_u:(top_left_u + w_croped), :]
})
input_gradient_all[:, :, top_left_v:(top_left_v + h_croped), top_left_u:(top_left_u + w_croped), :] = input_gradient[0] + input_gradient_all[:, :, top_left_v:(top_left_v + h_croped), top_left_u:(top_left_u + w_croped), :]
count[:, :, top_left_v:(top_left_v + h_croped), top_left_u:(top_left_u + w_croped), :] = count[:, :, top_left_v:(top_left_v + h_croped), top_left_u:(top_left_u + w_croped), :] + 1
# print(np.max(count))
if renew_input:
input_gradient_all = input_gradient_all/(count+1e-10)
descriptors[0, select_index, :] = descriptors[0, select_index, :] - learning_rate_1 * np.expand_dims(descriptor_renew_weight, axis=1) * input_gradient_all[0, n, v, u, :]
else:
[_, current_loss, l1, input_gradient] = sess.run([opt, loss_all, loss, gradient],
feed_dict={input1: image_descriptor,
input2: view_direction,
output: image_output
})
if renew_input:
descriptors[0, select_index, :] = descriptors[0, select_index, :] - learning_rate_1 * np.expand_dims(descriptor_renew_weight, axis=1) * input_gradient[0][0, n, v, u, :]
all[i] = current_loss * 255.0
cnt = cnt + 1
print('%s %s %s %.2f %.2f %s' % (epoch, i, cnt, current_loss, np.mean(all[np.where(all)]), time.time() - st))
os.makedirs("%s/%04d" % (task, epoch))
saver.save(sess, "%s/model.ckpt" % (task))
io.savemat("%s/" % task + 'descriptor.mat', {'descriptors': descriptors})
if epoch % 5 == 0:
saver.save(sess, "%s/%04d/model.ckpt" % (task, epoch))
io.savemat("%s/%04d/" % (task, epoch) + 'descriptor.mat', {'descriptors': descriptors})
for id in range(len(image_names_test)):
top_left_v = 120
top_left_u = 160
st = time.time()
image_descriptor = np.zeros([1, d, h, w, channels_i])
view_direction = np.zeros([1, d, h, w, channels_v])
camera_name = camera_names_test[id]
index_name = index_names_test[id]
index_name_1 = index_names_1_test[id]
if not (os.path.isfile(index_name) and os.path.isfile(camera_name) and os.path.isfile(index_name_1)):
                print('Missing file!')
continue
npzfile = np.load(index_name)
u = npzfile['u']
v = npzfile['v']
n = npzfile['d']
select_index = npzfile['select_index']
group_belongs = npzfile['group_belongs']
index_in_each_group = npzfile['index_in_each_group']
distance = npzfile['distance']
each_split_max_num = npzfile['each_split_max_num']
# load weight
npzfile_weight = np.load(index_name_1)
weight = npzfile_weight['weight_average']
distance_to_depth_min = npzfile_weight['distance_to_depth_min']
extrinsic_matrix = CameraPoseRead(camera_name) # camera to world
camera_position = np.transpose(extrinsic_matrix[0:3, 3])
max_num = np.max(each_split_max_num)
group_descriptor = np.zeros([(max(group_belongs + 1)), max_num, channels_i], dtype=np.float32)
group_descriptor[group_belongs, index_in_each_group, :] = descriptors[0, select_index, :] * np.expand_dims(weight, axis=1)
image_descriptor[0, n, v, u, :] = np.sum(group_descriptor, axis=1)[group_belongs, :]
view_direction[0, n, v, u, :] = np.transpose(point_clouds[0:3, select_index]) - camera_position
view_direction[0, n, v, u, :] = view_direction[0, n, v, u, :] / (np.tile(np.linalg.norm(view_direction[0, n, v, u, :], axis=1, keepdims=True), (1, 3)) + 1e-10)
[result] = sess.run([network], feed_dict={input1: image_descriptor[:, :, top_left_v:(top_left_v + h_croped), top_left_u:(top_left_u + w_croped), :],
input2: view_direction[:, :, top_left_v:(top_left_v + h_croped), top_left_u:(top_left_u + w_croped), :]})
result = np.minimum(np.maximum(result, 0.0), 1.0) * 255.0
cv2.imwrite('%s/%04d/%06d.png' % (task, epoch, id), np.uint8(result[0, :, :, :]))
print(time.time() - st)
else:
output_path = "%s/Test_Result/" % (task)
if not os.path.isdir(output_path):
os.makedirs(output_path)
for id in range(len(camera_names_test)):
st = time.time()
image_descriptor = np.zeros([1, d, h, w, channels_i])
view_direction = np.zeros([1, d, h, w, channels_v])
camera_name = camera_names_test[id]
index_name = index_names_test[id]
index_name_1 = index_names_1_test[id]
if not (os.path.isfile(index_name) and os.path.isfile(camera_name) and os.path.isfile(index_name_1)):
            print('Missing file!')
continue
npzfile = np.load(index_name)
u = npzfile['u']
v = npzfile['v']
n = npzfile['d']
select_index = npzfile['select_index']
group_belongs = npzfile['group_belongs']
index_in_each_group = npzfile['index_in_each_group']
distance = npzfile['distance']
each_split_max_num = npzfile['each_split_max_num']
# load weight
npzfile_weight = np.load(index_name_1)
weight = npzfile_weight['weight_average']
distance_to_depth_min = npzfile_weight['distance_to_depth_min']
extrinsic_matrix = CameraPoseRead(camera_name) # camera to world
camera_position = np.transpose(extrinsic_matrix[0:3, 3])
max_num = np.max(each_split_max_num)
group_descriptor = np.zeros([(max(group_belongs + 1)), max_num, channels_i], dtype=np.float32)
group_descriptor[group_belongs, index_in_each_group, :] = descriptors[0, select_index, :] * np.expand_dims(weight, axis=1)
image_descriptor[0, n, v, u, :] = np.sum(group_descriptor, axis=1)[group_belongs, :]
view_direction[0, n, v, u, :] = np.transpose(point_clouds[0:3, select_index]) - camera_position
view_direction[0, n, v, u, :] = view_direction[0, n, v, u, :] / (
np.tile(np.linalg.norm(view_direction[0, n, v, u, :], axis=1, keepdims=True), (1, 3)) + 1e-10)
[result] = sess.run([network], feed_dict={input1: image_descriptor, input2: view_direction})
result = np.minimum(np.maximum(result, 0.0), 1.0) * 255.0
cv2.imwrite(output_path + '%06d.png' % id, np.uint8(result[0, :, :, :]))
print(time.time() - st)
if __name__ == '__main__':
pass
|
[
"noreply@github.com"
] |
daipengwa.noreply@github.com
|
6e2e04254d994780052dbc45ed6cd356c70d8c58
|
837ee4c2f5ee891ef00d74113ceffad4da3abe42
|
/mapapp/migrations/0004_remove_uploadfile_title.py
|
1f305e5a4de281862ce0bf597611532b2216b839
|
[] |
no_license
|
Jayleh/map-lookup
|
21b3df6591af22824d21faff7f4b5d40c347b8c8
|
44d45a2c5c903478d3f50123642e5ba7f5629969
|
refs/heads/master
| 2022-12-11T01:32:56.705726
| 2018-11-08T00:55:46
| 2018-11-08T00:55:46
| 151,786,651
| 0
| 0
| null | 2022-12-08T01:15:06
| 2018-10-05T23:02:48
|
Python
|
UTF-8
|
Python
| false
| false
| 328
|
py
|
# Generated by Django 2.1.2 on 2018-10-29 21:17
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('mapapp', '0003_uploadfile_title'),
]
operations = [
migrations.RemoveField(
model_name='uploadfile',
name='title',
),
]
|
[
"jaylehyun@gmail.com"
] |
jaylehyun@gmail.com
|
fcc52b34f0fbd988cb5dde877eff1c8999dac5ee
|
fc2d86ecfef05273b6195d8c4958dfda92c3f01c
|
/performance_engine/apis_performance/portfolio_performance_api.py
|
d84446794f218e2abbfaed4c53c3768e2310990c
|
[] |
no_license
|
finbourne/performance-engine-poc
|
fd9356667143569474838fb3cfab96d4e5643600
|
b15d2ba0b604ddb94848d5c0353129c2b7229f1e
|
refs/heads/master
| 2023-05-27T11:24:26.780388
| 2021-01-11T09:54:03
| 2021-01-11T09:54:03
| 282,113,867
| 0
| 1
| null | 2023-05-23T00:12:18
| 2020-07-24T03:31:40
|
Python
|
UTF-8
|
Python
| false
| false
| 4,795
|
py
|
from typing import List
import pandas as pd
from pandas import Timestamp
from config.config import global_config
from ext_fields import get_ext_fields
from fields import *
from interfaces import IPerformanceSource, IBlockStore
from lusid.utilities.api_client_factory import ApiClientFactory
from misc import as_dates, now
from perf import Performance
from performance_sources.lusid_src import LusidSource
class PortfolioPerformanceApi:
"""
The responsibility of this class is to produce performance reports for a portfolio
"""
def __init__(self, block_store: IBlockStore, portfolio_performance_source: IPerformanceSource,
api_factory: ApiClientFactory = None):
"""
:param IBlockStore block_store: The block store to use to get performance to generate reports
:param IPerformanceSource portfolio_performance_source: The source to use to get performance for a portfolio when
there are missing blocks or working with an unlocked period
:param ApiClientFactory api_factory: The API factory to use to interact with LUSID
"""
self.block_store = block_store
self.portfolio_performance_source = portfolio_performance_source
self.api_factory = api_factory
def prepare_portfolio_performance(self, portfolio_scope: str, portfolio_code: str):
"""
The responsibility of this method is to prepare an instance of the Performance class for the specified
portfolio which can be used to generate performance reports.
:param str portfolio_scope: The scope of the portfolio.
:param str portfolio_code: The code of the portfolio, together with the scope this uniquely identifies the
portfolio.
:return: Performance: The instance of the performance class which can be used to generate performance reports.
"""
return Performance(
entity_scope=portfolio_scope,
entity_code=portfolio_code,
src=self.portfolio_performance_source,
block_store=self.block_store,
perf_start=None,
)
@as_dates
def get_portfolio_performance_report(self, portfolio_scope: str, portfolio_code: str, performance_scope: str,
from_date: Timestamp, to_date: Timestamp, asat: Timestamp = None,
locked: bool = True, fields: List[str] = None):
"""
The responsibility of this method is to generate a performance report for the specified composite.
:param str portfolio_scope: The scope of the portfolio.
:param str portfolio_code: The code of the portfolio, together with the scope this uniquely identifies the
portfolio.
:param str performance_scope: The scope to use when fetching performance data to generate the report
:param Timestamp from_date: The effectiveAt date to generate performance from
:param Timestamp to_date: The effectiveAt date to generate performance until
:param Timestamp asat: The asAt date to generate performance at
:param bool locked: Whether or not the performance to use in generation the report is locked
:param List[str] fields: The fields to have in the report e.g. WTD (week to date), Daily etc.
:return: DataFrame: The Pandas DataFrame containing the performance report
"""
# Default the fields to only provide the daily return
fields = fields or [DAY]
asat = asat or now()
config = global_config
if self.api_factory is not None:
# Look for extension fields, e.g. arbitrary inception dates
ext_fields = get_ext_fields(
api_factory=self.api_factory,
entity_type="portfolio",
entity_scope=portfolio_scope,
entity_code=portfolio_code,
effective_date=from_date,
asat=asat,
fields=fields,
config=config)
else:
ext_fields = {}
# Prepare the portfolio performance which can be used to generate a report
prf = self.prepare_portfolio_performance(
portfolio_scope=portfolio_scope,
portfolio_code=portfolio_code
)
# Generate the report and convert it into a DataFrame
return pd.DataFrame.from_records(
prf.report(
locked=locked,
start_date=from_date,
end_date=to_date,
                asat=asat,
performance_scope=performance_scope,
fields=fields,
ext_fields=ext_fields
)
)[['date', 'mv', 'inception', 'flows'] + fields]
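# A hedged usage sketch: `my_block_store` and `my_source` stand in for concrete
# IBlockStore / IPerformanceSource implementations from this repo; the scopes,
# codes and dates below are illustrative, not real values.
#
# api = PortfolioPerformanceApi(block_store=my_block_store,
#                               portfolio_performance_source=my_source)
# report = api.get_portfolio_performance_report(
#     portfolio_scope='Demo', portfolio_code='UK_EQUITY',
#     performance_scope='Daily', from_date=pd.Timestamp('2020-01-01'),
#     to_date=pd.Timestamp('2020-03-31'), fields=[DAY])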
|
[
"mike.mcgarry@finbourne.com"
] |
mike.mcgarry@finbourne.com
|
4580eb918c041d6559a72bd6cb82f8e259f870ab
|
b8f741bff5db708a57ffbf965ddff4c092a4e361
|
/Naive/naivebayes_implementation.py
|
52d88e008a3fbb3d04a7ed5cd980f31c33793045
|
[] |
no_license
|
F2Binary/Py2Cube
|
23cdd32007110cfa047d39460bbca9d25b093153
|
1749a79656eacb863054a67dec8e35e0fca83ada
|
refs/heads/main
| 2023-01-20T12:25:01.029228
| 2020-11-22T03:04:44
| 2020-11-22T03:04:44
| 303,846,727
| 0
| 1
| null | 2020-11-08T21:46:42
| 2020-10-13T22:52:02
|
Python
|
UTF-8
|
Python
| false
| false
| 3,002
|
py
|
# Example of Naive Bayes implemented from Scratch in Python
import csv
import random
import math
def loadCsv(filename):
lines = csv.reader(open(filename, "r"))
dataset = list(lines)
for i in range(len(dataset)):
dataset[i] = [float(x) for x in dataset[i]]
return dataset
def splitDataset(dataset, splitRatio):
trainSize = int(len(dataset) * splitRatio)
trainSet = []
copy = list(dataset)
while len(trainSet) < trainSize:
index = random.randrange(len(copy))
trainSet.append(copy.pop(index))
return [trainSet, copy]
def separateByClass(dataset):
separated = {}
for i in range(len(dataset)):
vector = dataset[i]
if (vector[-1] not in separated):
separated[vector[-1]] = []
separated[vector[-1]].append(vector)
return separated
def mean(numbers):
return sum(numbers)/float(len(numbers))
def stdev(numbers):
avg = mean(numbers)
variance = sum([pow(x-avg,2) for x in numbers])/float(len(numbers)-1)
return math.sqrt(variance)
def summarize(dataset):
summaries = [(mean(attribute), stdev(attribute)) for attribute in zip(*dataset)]
del summaries[-1]
return summaries
def summarizeByClass(dataset):
separated = separateByClass(dataset)
summaries = {}
for classValue, instances in separated.items():
summaries[classValue] = summarize(instances)
return summaries
def calculateProbability(x, mean, stdev):
exponent = math.exp(-(math.pow(x-mean,2)/(2*math.pow(stdev,2))))
return (1 / (math.sqrt(2*math.pi) * stdev)) * exponent
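# Sanity check: calculateProbability(71.5, 73, 6.2) evaluates the Gaussian
# density N(mean=73, stdev=6.2) at x=71.5, roughly 0.0624.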
def calculateClassProbabilities(summaries, inputVector):
probabilities = {}
for classValue, classSummaries in summaries.items():
probabilities[classValue] = 1
for i in range(len(classSummaries)):
mean, stdev = classSummaries[i]
x = inputVector[i]
probabilities[classValue] *= calculateProbability(x, mean, stdev)
return probabilities
def predict(summaries, inputVector):
probabilities = calculateClassProbabilities(summaries, inputVector)
bestLabel, bestProb = None, -1
for classValue, probability in probabilities.items():
if bestLabel is None or probability > bestProb:
bestProb = probability
bestLabel = classValue
return bestLabel
def getPredictions(summaries, testSet):
predictions = []
for i in range(len(testSet)):
result = predict(summaries, testSet[i])
predictions.append(result)
return predictions
def getAccuracy(testSet, predictions):
correct = 0
for i in range(len(testSet)):
if testSet[i][-1] == predictions[i]:
correct += 1
return (correct/float(len(testSet))) * 100.0
def main():
filename = 'output.csv'
splitRatio = 0.67
dataset = loadCsv(filename)
trainingSet, testSet = splitDataset(dataset, splitRatio)
print('Split {0} rows into train={1} and test={2} rows'.format(len(dataset), len(trainingSet), len(testSet)))
# prepare model
summaries = summarizeByClass(trainingSet)
# test model
predictions = getPredictions(summaries, testSet)
accuracy = getAccuracy(testSet, predictions)
print('Accuracy: {0}%'.format(accuracy))
if __name__ == "__main__":
main()
|
[
"pulok.rahman.630@my.csun.edu"
] |
pulok.rahman.630@my.csun.edu
|
15480fc9e47afb8ebcb1c30d2da528ef59ae7608
|
c1abd1446948bef87d84711cf08b2188e63f492e
|
/week02/httpbin/httpbin/middlewares.py
|
114120fc0592104e3d319440891d38f62059d081
|
[] |
no_license
|
zgzguangguang/Python001-class01
|
b969769c59ff98f18a96c31d72f1b08e70e564e0
|
9be1558b7fce17550975714c5457c3c20331bef0
|
refs/heads/master
| 2022-12-06T13:00:25.443049
| 2020-08-20T17:29:44
| 2020-08-20T17:29:44
| 273,928,284
| 0
| 0
| null | 2020-06-21T15:10:07
| 2020-06-21T15:10:07
| null |
UTF-8
|
Python
| false
| false
| 3,646
|
py
|
# -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class HttpbinSpiderMiddleware:
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Request, dict
# or Item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class HttpbinDownloaderMiddleware:
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the downloader middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
# Called for each request that goes through the downloader
# middleware.
# Must either:
# - return None: continue processing this request
# - or return a Response object
# - or return a Request object
# - or raise IgnoreRequest: process_exception() methods of
# installed downloader middleware will be called
request.meta['proxy'] = "http://183.162.168.232:23564"
return None
def process_response(self, request, response, spider):
# Called with the response returned from the downloader.
# Must either;
# - return a Response object
# - return a Request object
# - or raise IgnoreRequest
return response
def process_exception(self, request, exception, spider):
# Called when a download handler or a process_request()
# (from other downloader middleware) raises an exception.
# Must either:
# - return None: continue processing this exception
# - return a Response object: stops process_exception() chain
# - return a Request object: stops process_exception() chain
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
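# To enable these middlewares, register them in the project's settings.py
# (the priority value 543 below is the conventional Scrapy placeholder;
# adjust as needed):
#
# SPIDER_MIDDLEWARES = {
#     'httpbin.middlewares.HttpbinSpiderMiddleware': 543,
# }
# DOWNLOADER_MIDDLEWARES = {
#     'httpbin.middlewares.HttpbinDownloaderMiddleware': 543,
# }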
|
[
"917044026@qq.com"
] |
917044026@qq.com
|
c0eb0d6128c571616ee8a48c75b8f2e2d267fd5d
|
b2d2527a244111745f67a86942c5392bb58bbfbc
|
/3_Group_14_Assignment3/train_data/ngram.py
|
6464cad00705c7f20f0e712b1aec8ebd7abdb9a6
|
[] |
no_license
|
puevigreven/AI-Assignments
|
804aaefe6817c0c6edc5a0da6bf5e5d153e847d0
|
efbc66071d9dbfde2243ae563bedc9e5b6896c3b
|
refs/heads/master
| 2021-07-19T15:40:01.894839
| 2017-10-26T20:16:14
| 2017-10-26T20:16:14
| 106,114,965
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,684
|
py
|
import nltk
from nltk import bigrams
from nltk import trigrams
def find_n_grams(source, des) :
"""
Change in .write function while dealing with Train data.
"""
with open(source, 'r') as myfile:
text=myfile.read()
# type(data)
    stopwords = ['all', 'just', 'being', 'over', 'through', 'yourselves', 'its', 'before', 'hadn', 'with', 'll', 'had', 'should', 'to', 'only', 'won', 'under', 'ours', 'has', 'wouldn', 'them', 'his', 'very', 'they', 'not', 'during', 'now', 'him', 'nor', 'd', 'did', 'didn', 'these', 't', 'each', 'because', 'doing', 'theirs', 'some', 'hasn', 'are', 'our', 'ourselves', 'out', 'for', 'weren', 're', 'does', 'above', 'between', 'mustn', 'she', 'be', 'we', 'here', 'shouldn', 'hers', 'by', 'on', 'about', 'couldn', 'of', 'against', 's', 'isn', 'or', 'own', 'into', 'yourself', 'down', 'mightn', 'wasn', 'your', 'from', 'her', 'whom', 'aren', 'there', 'been', 'few', 'too', 'then', 'themselves', 'was', 'until', 'more', 'himself', 'both', 'but', 'off', 'herself', 'than', 'those', 'he', 'me', 'myself', 'ma', 'this', 'up', 'will', 'while', 'ain', 'below', 'can', 'were', 'my', 'at', 'and', 've', 'do', 'is', 'in', 'am', 'it', 'doesn', 'an', 'as', 'itself', 'o', 'have', 'further', 'their', 'if', 'again', 'no', 'that', 'same', 'any', 'other', 'you', 'shan', 'needn', 'haven', 'after', 'most', 'such', 'a', 'don', 'i', 'm', 'having', 'so', 'y', 'the', 'yours', 'once']
unigram_file = open(des, "w")
# count_file = open("total_words_in_classes.txt", "a")
# bigram_file = open("ABBR_bigram.txt", "w")
# trigram_file = open("ABBR_trigram.txt", "w")
tokens = nltk.word_tokenize(text)
tokens = [token.lower() for token in tokens if len(token) > 1] #same as unigrams
bi_tokens = bigrams(tokens)
tri_tokens = trigrams(tokens)
trigramToken = list(tri_tokens)
bigramToken = list(bi_tokens)
total_count = 0
uni_count = 500
uc = 0
unigrmdic = {}
for item in sorted(set(tokens)):
unigrmdic[item] = tokens.count(item)
for key, value in sorted(unigrmdic.iteritems(), key=lambda (k,v): (v,k) ,reverse=True):
total_count = total_count + value
if key not in stopwords and uc < uni_count:
print "%s: %s" % (key, value)
unigram_file.write("%s : %s\n" % (key ,value))
uc = uc + 1
# unigram_file.close()
bc = 0
bigrmdic = {}
for item in sorted(set(bigramToken)):
bigrmdic[item] = bigramToken.count(item)
for key, value in sorted(bigrmdic.iteritems(), key=lambda (k,v): (v,k) ,reverse=True):
if bc < 300:
print "%s: %s" % (key, value)
total_count = total_count + value
unigram_file.write("%s : %s\n" % (key ,value))
bc = bc + 1
# bigram_file.close()
tc = 0
trigrmdic = {}
for item in sorted(set(trigramToken)):
trigrmdic[item] = trigramToken.count(item)
for key, value in sorted(trigrmdic.iteritems(), key=lambda (k,v): (v,k) ,reverse=True):
if tc < 200:
print "%s: %s" % (key, value)
total_count = total_count + value
unigram_file.write("%s : %s\n" % (key ,value))
tc = tc + 1
# count_file.write("%s : %s" % (source , str(total_count)))
# count_file.close()
unigram_file.close()
find_n_grams('final_train.txt', 'train-ngram.txt')
# find_n_grams('NUM-Group.txt' , 'NUM_ngram.txt')
# find_n_grams('LOC-Group.txt' , "LOC_ngram.txt")
# find_n_grams('HUM-Group.txt' , "HUM_ngram.txt")
# find_n_grams('ABBR-Group.txt' , "ABBR_ngram.txt")
# find_n_grams('DESC-Group.txt' , "DESC_ngram.txt")
# find_n_grams('ENTY-Group.txt' , "ENTY_ngram.txt")
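# Output format: each selected n-gram is written as a "token : count" line --
# the top 500 non-stopword unigrams, top 300 bigrams, and top 200 trigrams
# (stopword filtering is applied to the unigrams only).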
|
[
"pralay22@gmail.com"
] |
pralay22@gmail.com
|
9a54788b5caeb44648f0e8a6f8fb8abe20320e0b
|
5cb920055a4ef6175690d991dc30ef6686366089
|
/price_web/venv/lib/python2.7/site-packages/twisted/internet/test/_posixifaces.py
|
21bcf310f0a21d49b43c87e3e33ec01a94088dc3
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
kewei/priceReminder
|
a3749a4a9d920fa4f0530d3d702b706be813f5a7
|
60c92bb0342d1342eec246c5a9783586a1a310ff
|
refs/heads/master
| 2023-06-22T04:02:01.084906
| 2023-06-18T21:15:13
| 2023-06-18T21:15:13
| 273,979,265
| 0
| 0
| null | null | null | null |
IBM852
|
Python
| false
| false
| 4,694
|
py
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
POSIX implementation of local network interface enumeration.
"""
from __future__ import division, absolute_import
import sys, socket
from socket import AF_INET, AF_INET6, inet_ntop
from ctypes import (
CDLL, POINTER, Structure, c_char_p, c_ushort, c_int,
c_uint32, c_uint8, c_void_p, c_ubyte, pointer, cast)
from ctypes.util import find_library
from twisted.python.compat import _PY3, nativeString
if _PY3:
# Once #6070 is implemented, this can be replaced with the implementation
# from that ticket:
def chr(i):
"""
Python 3 implementation of Python 2 chr(), i.e. convert an integer to
corresponding byte.
"""
return bytes([i])
libc = CDLL(find_library("c"))
if sys.platform.startswith('freebsd') or sys.platform == 'darwin':
_sockaddrCommon = [
("sin_len", c_uint8),
("sin_family", c_uint8),
]
else:
_sockaddrCommon = [
("sin_family", c_ushort),
]
class in_addr(Structure):
_fields_ = [
("in_addr", c_ubyte * 4),
]
class in6_addr(Structure):
_fields_ = [
("in_addr", c_ubyte * 16),
]
class sockaddr(Structure):
_fields_ = _sockaddrCommon + [
("sin_port", c_ushort),
]
class sockaddr_in(Structure):
_fields_ = _sockaddrCommon + [
("sin_port", c_ushort),
("sin_addr", in_addr),
]
class sockaddr_in6(Structure):
_fields_ = _sockaddrCommon + [
("sin_port", c_ushort),
("sin_flowinfo", c_uint32),
("sin_addr", in6_addr),
]
class ifaddrs(Structure):
pass
ifaddrs_p = POINTER(ifaddrs)
ifaddrs._fields_ = [
('ifa_next', ifaddrs_p),
('ifa_name', c_char_p),
('ifa_flags', c_uint32),
('ifa_addr', POINTER(sockaddr)),
('ifa_netmask', POINTER(sockaddr)),
('ifa_dstaddr', POINTER(sockaddr)),
('ifa_data', c_void_p)]
getifaddrs = libc.getifaddrs
getifaddrs.argtypes = [POINTER(ifaddrs_p)]
getifaddrs.restype = c_int
freeifaddrs = libc.freeifaddrs
freeifaddrs.argtypes = [ifaddrs_p]
def _maybeCleanupScopeIndex(family, packed):
"""
On FreeBSD, kill the embedded interface indices in link-local scoped
addresses.
@param family: The address family of the packed address - one of the
I{socket.AF_*} constants.
@param packed: The packed representation of the address (ie, the bytes of a
I{in_addr} field).
@type packed: L{bytes}
@return: The packed address with any FreeBSD-specific extra bits cleared.
@rtype: L{bytes}
@see: U{https://twistedmatrix.com/trac/ticket/6843}
@see: U{http://www.freebsd.org/doc/en/books/developers-handbook/ipv6.html#ipv6-scope-index}
@note: Indications are that the need for this will be gone in FreeBSD >=10.
"""
if sys.platform.startswith('freebsd') and packed[:2] == b"\xfe\x80":
return packed[:2] + b"\x00\x00" + packed[4:]
return packed
def _interfaces():
"""
Call C{getifaddrs(3)} and return a list of tuples of interface name, address
family, and human-readable address representing its results.
"""
ifaddrs = ifaddrs_p()
if getifaddrs(pointer(ifaddrs)) < 0:
raise OSError()
results = []
try:
while ifaddrs:
if ifaddrs[0].ifa_addr:
family = ifaddrs[0].ifa_addr[0].sin_family
if family == AF_INET:
addr = cast(ifaddrs[0].ifa_addr, POINTER(sockaddr_in))
elif family == AF_INET6:
addr = cast(ifaddrs[0].ifa_addr, POINTER(sockaddr_in6))
else:
addr = None
if addr:
packed = b''.join(map(chr, addr[0].sin_addr.in_addr[:]))
packed = _maybeCleanupScopeIndex(family, packed)
results.append((
ifaddrs[0].ifa_name,
family,
inet_ntop(family, packed)))
            ifaddrs = ifaddrs[0].ifa_next
    finally:
        freeifaddrs(ifaddrs)
    return results
def posixGetLinkLocalIPv6Addresses():
    """
    Return a list of strings in colon-hex format representing all the link local
    IPv6 addresses available on the system, as reported by I{getifaddrs(3)}.
    """
    retList = []
    for (interface, family, address) in _interfaces():
        interface = nativeString(interface)
        address = nativeString(address)
        if family == socket.AF_INET6 and address.startswith('fe80:'):
            retList.append('%s%%%s' % (address, interface))
    return retList
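# Example (illustrative output only; actual values depend on the host's
# network interfaces): posixGetLinkLocalIPv6Addresses() might return
# ['fe80::1%lo0'] on macOS.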
|
[
"keweizhang@Keweis-MacBook-Pro.local"
] |
keweizhang@Keweis-MacBook-Pro.local
|
2d6d525941776079e81b192d90d31b19bbcf6e04
|
ba0e07b34def26c37ee22b9dac1714867f001fa5
|
/azure-mgmt-compute/azure/mgmt/compute/models/diagnostics_profile.py
|
f205742d3fa9977c57f4c9f91a3e571a709552a7
|
[
"MIT"
] |
permissive
|
CharaD7/azure-sdk-for-python
|
b11a08ac7d24a22a808a18203072b4c7bd264dfa
|
9fdf0aac0cec8a15a5bb2a0ea27dd331dbfa2f5c
|
refs/heads/master
| 2023-05-12T12:34:26.172873
| 2016-10-26T21:35:20
| 2016-10-26T21:35:20
| 72,448,760
| 1
| 0
|
MIT
| 2023-05-04T17:15:01
| 2016-10-31T15:14:09
|
Python
|
UTF-8
|
Python
| false
| false
| 961
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class DiagnosticsProfile(Model):
"""Describes a diagnostics profile.
:param boot_diagnostics: the boot diagnostics.
:type boot_diagnostics: :class:`BootDiagnostics
<azure.mgmt.compute.models.BootDiagnostics>`
"""
_attribute_map = {
'boot_diagnostics': {'key': 'bootDiagnostics', 'type': 'BootDiagnostics'},
}
def __init__(self, boot_diagnostics=None):
self.boot_diagnostics = boot_diagnostics
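# A usage sketch (assumption: the BootDiagnostics model from this same
# generated package accepts these keyword arguments; the storage URI is
# illustrative):
# profile = DiagnosticsProfile(boot_diagnostics=BootDiagnostics(
#     enabled=True, storage_uri='https://mystorage.blob.core.windows.net/'))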
|
[
"lmazuel@microsoft.com"
] |
lmazuel@microsoft.com
|
cd3c4cd0b6b89e0d6393d87753c76e9ef8b14bdb
|
7d14da6757137aec09801784ee197d677adcafbf
|
/tools/extract_messages.py
|
24d7d674ae49468c9cd170b5d6ac56e410095f34
|
[
"Apache-2.0"
] |
permissive
|
Princessgladys/googleresourcefinder
|
6be3e0e341e3e9207fe0d5cb5b269739566c57f1
|
7715276b3c588f7c457de04944559052c8170f7e
|
refs/heads/master
| 2021-01-01T16:50:51.245416
| 2011-01-15T01:35:09
| 2011-01-15T01:35:09
| 32,346,537
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,049
|
py
|
#!/usr/bin/python2.5
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Extracts translations to a .po file using django's makemessages script
and additional custom support for js- and python-formatted strings.
Also supports message descriptions and meanings provided by
specially-formatted comments directly above a message to be translated.
In javascript, comments look like:
// Some other comment, must not separate i18n comments from the code
//i18n: Label for an administrative division of a country
messages.DEPARTMENT = 'Department';
In python:
# Some other comment, must not separate i18n comments from the code
#i18n: Label for an administrative division of a country
dept = _('Department')
And in a Django template:
{% comment %}
#Some other comment, must not separate i18n comments from the code
#i18n: Label for an administrative division of a country
{% endcomment %}
<span>{% trans "Department" %}</span>
Warning: This code technically also supports an i18n_meaning tag to create
msgctxt lines in the .po file, but these are not supported by the current
django version used by appengine (if msgctxt lines appear, not only are they
ignored, but they prevent the correct translation from being returned),
so they are not used.
Instead of running this script directly, use the 'extract_messages' shell
script, which sets up the PYTHONPATH and other necessary environment variables.
NOTE: Although this can be run from any directory, the filenames on the
command line must be specified relative to the app/ directory.
Example:
../tools/extract_messages ../tools/setup.py static/locale.js
"""
import codecs
import os
import re
import sys
DJANGO_END_COMMENT_PATTERN = '{\% endcomment \%}'
DJANGO_STRING_PATTERN = '''['"](.*)['"]\s*$'''
STRING_LITERAL_PATTERN = r'''\s*(["'])((\\.|[^\\])*?)\1'''
DJANGO_BIN = os.environ['APPENGINE_DIR'] + '/lib/django/django/bin'
PATTERNS = {
'js' : {
'start': r'\s*(messages\.[A-Z_1-9]+)\s*=',
'string': STRING_LITERAL_PATTERN,
'end': r';\s*$',
'description': r'^\s*//i18n:\s*(.*)',
'meaning': r'\s*//i18n_meaning:\s*(.*)'
},
'py' : {
'start': r'\s*[a-z]+_message\(', # precedes a message in setup.py
'string': r'en\s*=' + STRING_LITERAL_PATTERN,
'end': r'\),?\s*$',
'description': r'^\s*#i18n:\s*(.*)',
'meaning': r'^\s*#i18n_meaning:\s*(.*)'
},
'html': {
'description': r'^\s*#i18n:\s*(.*)',
'meaning': r'^\s*#i18n_meaning:\s*(.*)'
},
}
class Message:
""" Describes a message, with optional description and meaning"""
def __init__(self, msgid, description='', meaning='', msgstr=''):
self.msgid = msgid
self.description = description
self.meaning = meaning
self.msgstr = msgstr
def __eq__(self, other):
"""Only message and meaning factor into equality and hash."""
if not isinstance(other, type(self)):
return False
return self.msgid == other.msgid and self.meaning == other.meaning
def __hash__(self):
"""Only message and meaning factor into equality and hash."""
return hash(self.msgid) ^ hash(self.meaning)
def __cmp__(self, other):
"""Compare based on msgid."""
if type(other) is not type(self):
return NotImplemented
return cmp(self.msgid, other.msgid)
def django_makemessages():
"""Run django's makemessages routine to extract messages from python and
html files."""
if os.system(os.path.join(DJANGO_BIN, 'make-messages.py') + ' -a'):
raise SystemExit('make-messages.py failed')
def parse_django_po(po_filename):
"""Return the header from the django-generated .po file
and a dict from Message to a list of file:line_num references where that
Message was extracted"""
# Holds the header at the top of the django po file
header = ''
# A sentinel to know when to stop considering lines part of the header
header_done = False
# The return dict of Message to code ref 'file:line_num'
message_to_ref = {}
    # The current file:line_num ref, which occurs on a line preceding its
    # corresponding message
current_ref = ''
# The current Message
current_msg = Message(None, None, None, None)
for line in codecs.open(po_filename, encoding='utf-8'):
if line.startswith('#:') or line.startswith('#.'):
header_done = True
if not header_done:
if line.startswith('"POT-Creation-Date'):
# The POT-Creation-Date line changes on every run to include
# the current date and time, creating unnecessary changesets.
# Skipping this line makes extract_messages idempotent.
continue
header += line
continue
line = line.strip()
        if not line and current_msg.msgid:
refs = current_ref.split(' ')
if not current_msg.description and not current_msg.meaning:
(desc, meaning) = find_description_meaning(refs)
current_msg.description = desc
current_msg.meaning = meaning
if not current_msg.description:
current_msg.description = ''
if not current_msg.meaning:
current_msg.meaning = ''
message_to_ref[current_msg] = set(refs)
current_ref = ''
current_msg = Message(None, None, None, None)
elif line.startswith('#:'):
current_ref = line[3:]
elif line.startswith('#.'):
current_msg.description = line[3:]
elif line.startswith('msgstr'):
current_msg.msgstr = parse_po_tagline(line, 'msgstr')
elif current_msg.msgstr is not None:
current_msg.msgstr += parse_po_tagline(line)
elif line.startswith('msgid'):
current_msg.msgid = parse_po_tagline(line, 'msgid')
elif current_msg.msgid is not None:
current_msg.msgid += parse_po_tagline(line)
elif line.startswith('msgctxt'):
current_msg.meaning = parse_po_tagline(line, 'msgctxt')
elif current_msg.meaning is not None:
current_msg.meaning += parse_po_tagline(line)
if current_msg.msgid:
refs = current_ref.split(' ')
if not current_msg.description and not current_msg.meaning:
(desc, meaning) = find_description_meaning(refs)
current_msg.description = desc
current_msg.meaning = meaning
if not current_msg.description:
current_msg.description = ''
if not current_msg.meaning:
current_msg.meaning = ''
message_to_ref[current_msg] = set(refs)
return (header, message_to_ref)
def parse_po_tagline(line, tag=''):
"""Parses a line consisting of the given tag followed by a quoted string."""
match = re.match((tag and (tag + ' ') or '') + DJANGO_STRING_PATTERN, line)
return match and match.group(1) or ''
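# Illustrative behaviour (hypothetical inputs): parse_po_tagline('msgid "Dept"',
# 'msgid') returns 'Dept', while parse_po_tagline('"more text"') with no tag
# handles the bare continuation lines of a multi-line msgid or msgstr.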
def find_description_meaning(refs):
"""Given a list of references (in the form "filename:line_num") to where a
message occurs, find the description and meaning in comments preceding any
occurrence of the message and returns a (description, meaning) pair.
(Horribly inefficient, but needed because django makemessages doesn't
parse them out for us.)"""
description = meaning = ''
for ref in refs:
(file, line_num) = ref.split(':')
line_num = int(line_num)
# django makemessages hacks in support for html files by appending .py
# to the end and treating them like py files. Remove that hack here.
file = file.replace('.html.py', '.html')
# Look for description/meaning patterns appropriate for the file type.
patterns = PATTERNS[file.split('.')[-1]]
# Hold the description and meaning, if we find them
description_lines = []
meaning_lines = []
# Start at the line before the message and proceed backwards.
lines = open(file).readlines()
for line in reversed(lines[:line_num - 1]):
match = re.match(patterns['description'], line)
if match:
description_lines.insert(0, match.group(1))
continue
match = re.match(patterns['meaning'], line)
if match:
meaning_lines.insert(0, match.group(1))
continue
# For html files, need to skip over the django end comment marker
# to get to the meaning lines
if re.search(DJANGO_END_COMMENT_PATTERN, line):
continue
            # The line is not part of a description or meaning comment,
            # so no such comment exists for this message; stop scanning.
            break
description = description or ' '.join(description_lines)
meaning = meaning or ' '.join(meaning_lines)
return (description, meaning)
def parse_file(input_filename):
"""Parses the given file, extracting messages. Returns a list of tuples
of 'input_filename:line_number' to a tuple of
(message string, description, meaning)."""
# Patterns for the given input file
patterns = PATTERNS[input_filename.split('.')[-1]]
# The return list of pairs of ref 'file:line_num' to message
ref_msg_pairs = []
# Description lines for the current message
current_description = []
# Meaning lines for the current message
current_meaning = []
# The current message being parsed. This is a local var as the msg
# can span multiple lines.
current_message = ''
# The line number to assign to the current message, usually the first line
# of the statement containing the message.
current_message_line_num = -1
# Current line number in the input file
line_num = 0
for line in file(input_filename):
line_num += 1
match = re.match(patterns['description'], line)
if match:
current_description.append(match.group(1))
continue
match = re.match(patterns['meaning'], line)
if match:
current_meaning.append(match.group(1))
continue
if re.match(patterns['start'], line):
# Remember that we've started a message for multi-line messages
current_message_line_num = line_num
if current_message_line_num != -1:
current_message += parse_message(patterns['string'], line)
if re.search(patterns['end'], line):
# End of the current message
ref = input_filename + ':' + str(current_message_line_num)
ref_msg_pairs.append(
(ref, Message(current_message,
' '.join(current_description),
' '.join(current_meaning))))
current_message_line_num = -1
current_message = ''
current_description = []
current_meaning = []
return ref_msg_pairs
def parse_message(pattern, line):
match = re.search(pattern, line)
msg_part = ''
if match:
# Unescape the type of quote (single or double) that surrounded
# the message, then escape double-quotes, which we use to
# surround the message in the .po file
quote = match.group(1)
msg_part = match.group(2).replace('\\' + quote, quote).replace(
'"', '\\"')
return msg_part
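# Example (hypothetical line): for  messages.DEPT = 'It\'s here';  the matched
# quote is a single quote, so the escaped \' is unescaped and any double quotes
# are re-escaped, yielding  It's here  ready to be wrapped in "..." in the .po
# file.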
def merge(msg_to_ref, ref_msg_pairs):
""" Merge ref_msg_pairs into msg_to_ref """
for (ref, msg) in ref_msg_pairs:
msg_to_ref.setdefault(msg, set()).add(ref)
def output_po_file(output_filename, header, msg_to_ref):
"""Write a po file to the file specified by output_filename, using the
given header text and a msg_to_ref dictionary that maps each message to
a list of "filename:line_num" references where the message appears."""
output = codecs.open(output_filename, 'w', 'utf-8')
output.write(header)
for message, refs in sorted(msg_to_ref.items()):
msgid = message.msgid
description = message.description
meaning = message.meaning
if not description and not meaning:
description = 'TODO: Add missing message description or meaning'
print >>output, '#. %s' % description
print >>output, '#: %s' % ' '.join(refs)
if has_sh_placeholders(msgid):
print >>output, '#, sh-format'
elif has_python_placeholders(msgid):
print >>output, '#, python-format'
if meaning:
print >>output, 'msgctxt "%s"' % meaning
print >>output, 'msgid "%s"' % msgid
print >>output, 'msgstr "%s"\n' % message.msgstr
output.close()
def has_sh_placeholders(message):
"""Returns true if the message has placeholders."""
return re.search(r'\$\{(\w+)\}', message) is not None
def has_python_placeholders(message):
"""Returns true if the message has placeholders."""
return re.search(r'%\(\w+\)s', message) is not None
if __name__ == '__main__':
os.chdir(os.environ['APP_DIR'])
po_filenames = [
os.path.join('locale', locale, 'LC_MESSAGES', 'django.po')
for locale in os.listdir('locale')]
# Parse input files
print 'Parsing input files'
en_ref_msg_pairs = []
for input_filename in sys.argv[1:]:
en_ref_msg_pairs.extend(parse_file(input_filename))
# For each language, grab translations for existing messages
# (descriptions and meanings get blown away by makemessages)
ref_msg_pairs = {}
for po_filename in po_filenames:
(header, message_to_ref) = parse_django_po(po_filename)
msgs = message_to_ref.keys()
ref_msg_pairs[po_filename] = []
for (ref, msg) in en_ref_msg_pairs:
msgstr = (msg in msgs) and msgs[msgs.index(msg)].msgstr or ''
ref_msg_pairs[po_filename].append(
(ref, Message(msg.msgid, msg.description, msg.meaning, msgstr)))
# Run Django's makemessages
print 'Running django makemessages'
django_makemessages()
# For each language, overwrite the django makemessages output with ours
for po_filename in po_filenames:
print 'Writing %s' % po_filename
(header, message_to_ref) = parse_django_po(po_filename)
merge(message_to_ref, ref_msg_pairs[po_filename])
output_po_file(po_filename, header, message_to_ref)
|
[
"shakusa at google.com@localhost"
] |
shakusa at google.com@localhost
|
eafd96dc5738d36286b9767c9ef1559f2c8d8d46
|
b1222a858bac5a667afb4f6c1ba48731b0210a0d
|
/bin/program.py
|
faf72c499980998f22e69039bc054cb0baa5551f
|
[] |
no_license
|
xhsky/autodep
|
5ffbe490d8021bef4b7c79fbddc74910a307f18b
|
5bccd8ddef5ea3b970e8c5125de4789b8c8aaddf
|
refs/heads/master
| 2023-03-17T07:54:22.531503
| 2023-01-06T07:00:39
| 2023-01-06T07:00:39
| 233,543,213
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21,308
|
py
|
#!/opt/python3/bin/python3
# *-* coding:utf8 *-*
# sky
import sys, json, os, requests, yaml, tarfile, shutil
from libs import common
from libs.env import log_remote_level, program_sh_name, backup_dir, program_license_file, node_license_path, \
normal_code, error_code, activated_code, stopped_code, abnormal_code
"""
def main():
softname, action, conf_json=sys.argv[1:]
conf_dict=json.loads(conf_json)
located=conf_dict.get("located")
program_info_dict=conf_dict[f"{softname}_info"]
port_list=[program_info_dict["port"]]
program_dir=program_info_dict['program_dir']
log=common.Logger({"remote": log_remote_level}, loggger_name="jar")
    # install
flag=0
if action=="install":
sys.exit(flag)
elif action=="run":
sys.exit(flag)
elif action=="start":
jvm_mem=program_info_dict["jvm_mem"]
for jar_name in os.listdir(program_dir):
if jar_name.endswith(".jar"):
jar=jar_name
break
config=["application-prod.yml", "application-prod.properties"]
for config_file in os.listdir(program_dir):
if config_file in config:
config_name=config_file
break
start_command=f"cd {program_dir} ; nohup java -Xms{jvm_mem} -Xmx{jvm_mem} -jar {jar} --server.port={port_list[0]} --spring.profiles.active=prod &> jar.log &"
log.logger.debug(f"{start_command=}")
result, msg=common.exec_command(start_command)
if result:
log.logger.debug(f"检测端口: {port_list=}")
if not common.port_exist(port_list, seconds=600):
flag=2
else:
log.logger.error(msg)
flag=1
sys.exit(flag)
elif action=="stop":
for port in port_list:
pid=common.find_pid(port)
log.logger.debug(f"{port=}, {pid=}")
if pid != 0:
stop_command=f"kill -9 {pid}"
log.logger.debug(f"{stop_command=}")
result, msg=common.exec_command(stop_command)
if result:
log.logger.debug(f"检测端口: {port_list=}")
if not common.port_exist(port_list, exist_or_not=False):
flag=2
else:
log.logger.error(msg)
flag=1
else:
log.logger.warning(f"{softname}未运行")
flag=1
sys.exit(flag)
elif action=="heapdump":
command=f"jmap -dump:format=b, file=heapdump.dump {pid}"
log.logger.debug(f"{start_command=}")
result, msg=common.exec_command(start_command)
if result:
log.logger.debug(f"检测端口: {port_list=}")
if not common.port_exist(port_list, seconds=600):
flag=2
else:
log.logger.error(msg)
flag=1
sys.exit(flag)
"""
def dict_to_yaml_file(config_dict, config_file):
"""生成将dict转为yaml文件
"""
try:
with open(config_file, "w", encoding="utf8") as f:
yaml.safe_dump(config_dict, f)
return True, config_file
except Exception as e:
return False, str(e)
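# Minimal usage sketch (hypothetical path):
#   ok, info = dict_to_yaml_file({"server": {"port": 8080}}, "/tmp/app.yaml")
# On success info is the written file path; on failure it is the error message.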
def generate_local_config():
"""在本地生成一份配置文件
"""
db_type=program_info_dict["db_type"].lower()
db_host=program_info_dict[f"{db_type}_config"]["db_host"]
db_port=program_info_dict[f"{db_type}_config"]["db_port"]
db_name=program_info_dict[f"{db_type}_config"]["db_name"]
db_user=program_info_dict[f"{db_type}_config"]["db_user"]
db_password=program_info_dict[f"{db_type}_config"]["db_password"]
if db_type=="mysql":
druids=[
{
"url": f"jdbc:mysql://{db_host}:{db_port}/{db_name}?zeroDateTimeBehavior=CONVERT_TO_NULL&serverTimezone=Asia/Shanghai&useUnicode=true&characterEncoding=utf-8&useOldAliasMetadataBehavior=true&useSSL=false",
"username": db_user,
"password": db_password
}
]
config_dict={
"server":{
"port": program_info_dict["port"],
"servlet": {
"session": {
"timeout": "720m"
}
}
},
"spring": {
"datasource": {
"monitor": {
"loginUsername": "_admin2",
"loginPassword": "dreamsoft"
},
"druids": druids
},
"redis": {
"host": program_info_dict["redis_config"]["redis_host"],
"port": program_info_dict["redis_config"]["redis_port"],
"password": program_info_dict["redis_config"]["redis_password"],
"database": program_info_dict["redis_config"]["redis_db"],
"jedis": {
"pool": {
"max-idle": 200,
"min-idle": 10
}
}
},
"servlet": {
"multipart": {
"max-file-size": "1024MB",
"max-request-size": "1024MB"
}
}
}
}
if not (softname.endswith("gateway") and softname.startswith("program")):
config_dict["dsfa"]={
"session": {
"rule": {
"valiRepeat": False
}
}
}
config_dict["dsf"]={
"file": {
"upload-virtual-path": program_info_dict["upload_path"]
}
}
config_dict["dubbo"]={
"scan": {
"base-packages": "com.dsfa"
},
"protocol": {
"name": "dubbo",
"port": -1
},
"registry": {
"address": "nacos://${spring.cloud.nacos.server-addr}/"
},
"cloud": {
"subscribed-services": "${spring.application.name}"
}
}
if program_info_dict.get("routes"):
config_dict["spring"]["cloud"]={
"gateway": {
"discovery": {
"locator": {
"enabled": True,
"filters": ["StripPrefix=1"]
}
},
"routes": program_info_dict.get("routes")
}
}
    result, msg=dict_to_yaml_file(config_dict, config_file)
    if result:
        log.logger.debug(f"Generated readable config file: {msg}")
        return True
    else:
        log.logger.error(f"Unable to generate readable config file: {msg}")
        return False
def generate_sh(jar_file):
"""生成控制脚本
"""
nacos_addr=f"{nacos_host}:{nacos_port}"
jvm_mem=program_info_dict["jvm_mem"]
#jar_file=f"{program_dir}/{pkg_file.split('/')[-1]}"
jar_file=f"{program_dir}/{jar_file}"
log_file=f"{program_dir}/{service_name}.log"
program_sh_text=f"""\
#!/bin/bash
# sky
action=$1
jar_file={jar_file}
jar_name=`echo $jar_file | rev | cut -d "/" -f 1 | rev`
nacos_addr={nacos_addr}
nacos_namespace={namespace_name}
nacos_group={group_name}
nacos_config_file_extension={config_file_type}
nacos_application_name={service_name} # must match jar_name
nacos_profiles_active={config_active} # must match jar_name
if [ -z "$1" ]; then
echo "Usage: $0 start|stop|publish"
exit {error_code}
elif [ "$action" == "start" ]; then
jvm_mem={jvm_mem}
accept_count=1000
threads=500
max_connections=8192
log_file={log_file}
nohup java -jar -Xms${{jvm_mem}} -Xmx${{jvm_mem}} ${{jar_file}} \\
--spring.cloud.nacos.server-addr=$nacos_addr \\
--spring.cloud.nacos.config.namespace=$nacos_namespace \\
--spring.cloud.nacos.config.group=$nacos_group \\
--spring.cloud.nacos.config.file-extension=$nacos_config_file_extension \\
--spring.cloud.nacos.config.enabled=True \\
--spring.cloud.nacos.discovery.enabled=True \\
--spring.cloud.nacos.discovery.namespace=$nacos_namespace \\
--spring.cloud.nacos.discovery.group=$nacos_group \\
--spring.application.name=$nacos_application_name \\
--spring.profiles.active=$nacos_profiles_active \\
--server.tomcat.accept-count=$accept_count \\
--server.tomcat.min-spare-threads=$threads \\
--server.tomcat.max-threads=$threads \\
--server.tomcat.max-connections=$max_connections \\
&> $log_file &
echo "$jar_name启动中, 详细请查看日志文件($log_file)."
exit {normal_code}
elif [ "$action" == "stop" ]; then
N=0
while : ;do
N=$((N+1))
Pid=`ps ax | grep java | grep "$jar_name" | grep -v grep | awk '{{print $1}}'`
if [ -z "$Pid" ]; then
if [ $N == 1 ]; then
echo "${{jar_name}}未运行. "
exit {stopped_code}
else
echo "${{jar_name}}已关闭."
exit {normal_code}
fi
else
if [ $N == 1 ]; then
echo "Pid: $Pid"
echo "${{jar_name}}关闭中..."
kill $Pid
fi
if [ $N == 30 ]; then
kill -9 $Pid
fi
fi
sleep 1
done
elif [ "$action" == "publish" ]; then
content=`cat {program_dir}/app.${{nacos_config_file_extension}}`
curl -X POST "http://${{nacos_addr}}{configs_path}" -d tenant=${{nacos_namespace}} -d dataId=${{nacos_application_name}}-${{nacos_profiles_active}}.${{nacos_config_file_extension}} -d group=${{nacos_group}} --data-urlencode content="${{content}}" -d type=${{nacos_config_file_extension}}
else
echo "Usage: $0 start|stop|publish"
fi
"""
config_dict={
"program_sh": {
"config_file": program_sh_file,
"config_context": program_sh_text,
"mode": "w"
}
}
log.logger.debug(f"写入配置文件: {program_sh_file}")
result, msg=common.config(config_dict)
if not result:
log.logger.error(msg)
return False
return True
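# Sketch of what the generated control script supports (script name and values
# hypothetical):
#   bash program.sh start    # launch the jar with Nacos config/discovery flags
#   bash program.sh stop     # kill by jar name, escalating to kill -9 after 30s
#   bash program.sh publish  # POST app.<ext> to Nacos /nacos/v1/cs/configs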
def install():
"""安装
"""
pkg_file=conf_dict["pkg_file"]
if pkg_file.endswith(".tar.gz"):
value, msg=common.install(pkg_file, "tar.gz", None, None, program_dir)
if not value:
log.logger.error(msg)
return error_code
for file_ in os.listdir(program_dir):
if file_.endswith(".jar"):
jar_file=file_
break
else:
log.logger.error(f"jar文件不存在")
return error_code
if os.path.exists(config_file):
log.logger.debug(f"已存在可读配置文件: {config_file}")
else:
log.logger.error(f"{config_file}不存在")
return error_code
        try:
            if os.path.exists(jar_license_path):
                log.logger.info("Installing license")
                shutil.move(jar_license_path, node_license_path)
            else:
                log.logger.warning("No license present")
        except Exception as e:
            log.logger.error(f"Failed to move license: {str(e)}")
        try:
            if upload_dir is not None:
                if os.path.exists(upload_dir):
                    log.logger.warning(f"Directory {upload_dir} already exists")
                else:
                    log.logger.info(f"Creating upload data directory {upload_dir}")
                    os.makedirs(upload_dir, exist_ok=True)
        except Exception as e:
            log.logger.error(f"Failed to create upload directory: {str(e)}")
        if os.path.exists(program_sh_file):
            log.logger.debug(f"Control script already exists: {program_sh_file}")
else:
result=generate_sh(jar_file)
if not result:
return error_code
#elif pkg_file.endswith(".jar"):
# value, msg=common.install(pkg_file, "jar", None, None, program_dir)
# if not value:
# log.logger.error(msg)
# return error_code
# result=generate_local_config()
# if not result:
# return error_code
# result=generate_sh(pkg_file)
# if not result:
# return error_code
else:
log.logger.error(f"未知文件后缀: {pkg_file}")
return error_code
return normal_code
def run():
"""运行
"""
# 创建namespace
namespace_path="/nacos/v1/console/namespaces"
get_namespace_url=f"{nacos_addr_url}{namespace_path}"
try:
result=requests.get(get_namespace_url)
if result.status_code==200:
for namespace_dict in result.json()["data"]:
if namespace_dict["namespace"] == namespace_name:
break
else:
namespace_data={
"customNamespaceId": namespace_name,
"namespaceName": namespace_name
}
create_namespace_url=f"{nacos_addr_url}{namespace_path}"
result=requests.post(create_namespace_url, data=namespace_data)
                if result.status_code == 200:
                    log.logger.info(f"Created namespace: {namespace_name}")
                else:
                    log.logger.error(f"Failed to create namespace: {result.status_code}")
                    return error_code
        else:
            log.logger.error(f"Unable to query namespaces: {result.status_code}")
            return error_code
    except Exception as e:
        log.logger.error(f"Unable to connect to nacos: {str(e)}")
        return error_code
    # Publish the configuration
with open(config_file, "r", encoding="utf8") as f:
config_yaml_str=f.read()
config_data={
"tenant": namespace_name,
"dataId": data_id,
"group": group_name,
"content": config_yaml_str,
"type": config_file_type
}
create_configs_url=f"{nacos_addr_url}{configs_path}"
try:
result=requests.post(create_configs_url, data=config_data)
        if result.status_code==200:
            log.logger.info(f"Configuration published: {data_id}")
        else:
            log.logger.error(f"Failed to publish configuration: {result.status_code}")
            return error_code
    except Exception as e:
        log.logger.error(f"Unable to connect to nacos: {str(e)}")
return error_code
    # Start
return start()
def start():
"""启动
"""
return_value=normal_code
start_command=f"bash -lc 'cd {program_dir} ; bash {program_sh_file} start'"
log.logger.debug(f"{start_command=}")
result, msg=common.exec_command(start_command)
if result:
log.logger.debug(f"检测端口: {port_list=}")
if not common.port_exist(port_list, seconds=1200):
return_value=error_code
else:
log.logger.error(msg)
return_value=error_code
return return_value
def stop():
"""停止
"""
return_value=normal_code
stop_command=f"bash -lc 'cd {program_dir} ; bash {program_sh_file} stop'"
log.logger.debug(f"{stop_command=}")
result, msg=common.exec_command(stop_command)
if result:
log.logger.debug(f"检测端口: {port_list=}")
if not common.port_exist(port_list, exist_or_not=False):
return_value=error_code
else:
log.logger.error(msg)
return_value=error_code
return return_value
def monitor():
"""监控
return:
启动, 未启动, 启动但不正常
"""
return common.soft_monitor("localhost", port_list)
def heapdump():
    """Dump the JVM heap."""
    return_value=normal_code
    # NOTE: the original referenced an undefined `pid`; resolve it from the
    # service port the same way the legacy code above did.
    pid=common.find_pid(port_list[0])
    command=f"jmap -dump:format=b,file=heapdump.dump {pid}"
    log.logger.debug(f"{command=}")
    result, msg=common.exec_command(command)
    if result:
        log.logger.debug(f"Checking ports: {port_list=}")
        if not common.port_exist(port_list, seconds=600):
            return_value=error_code
    else:
        log.logger.error(msg)
        return_value=error_code
    return return_value
def backup():
"""program备份
"""
# 获取最新配置并写入文件
log.logger.info("备份配置文件...")
config_data={
"tenant": namespace_name,
"dataId": data_id,
"group": group_name
}
get_configs_url=f"{nacos_addr_url}{configs_path}"
try:
result=requests.get(get_configs_url, params=config_data)
if result.status_code==200:
log.logger.debug(f"配置获取成功: {data_id}")
config_dict={
"config_sh": {
"config_file": config_file,
"config_context": result.text,
"mode": "w"
}
}
log.logger.debug(f"写入配置文件: {config_file}")
result, msg=common.config(config_dict)
if not result:
log.logger.error(msg)
return error_code
else:
log.logger.error(f"配置获取失败: {result.status_code}: {result.text}")
return error_code
except Exception as e:
log.logger.error(f"无法连接nacos: {str(e)}")
return error_code
log.logger.info("备份代码...")
backup_version=conf_dict["backup_version"]
    backup_file_list=[]
    for backup_file in os.listdir(program_dir):
        if backup_file.endswith(".log") or backup_file.endswith(".bak"):
            pass
        else:
            backup_file_list.append(os.path.basename(backup_file))
    # NOTE: backup_file_list is collected but never used; the original code
    # passes an empty list to tar_backup, and that behaviour is preserved here.
    result, msg=common.tar_backup(backup_version, backup_dir, softname, program_dir, [])
if result:
return normal_code
else:
log.logger.error(msg)
return error_code
if __name__ == "__main__":
softname, action, conf_json=sys.argv[1:]
conf_dict=json.loads(conf_json)
#located=conf_dict.get("located")
log=common.Logger({"remote": log_remote_level}, loggger_name="jar")
program_info_dict=conf_dict[f"{softname}_info"]
port_list=[program_info_dict["port"]]
program_dir=program_info_dict['program_dir']
upload_dir=program_info_dict.get("upload_dir")
jar_license_path=f"{program_dir}/{program_license_file}"
nacos_host=program_info_dict["nacos_config"]["nacos_host"]
nacos_port=program_info_dict["nacos_config"]["nacos_port"]
namespace_name=program_info_dict["nacos_config"]["nacos_namespace"]
group_name=program_info_dict["nacos_config"]["nacos_group"]
service_name=program_info_dict["nacos_config"]["service_name"]
config_active=program_info_dict["nacos_config"]["active"]
config_file_type=program_info_dict["nacos_config"]["file-extension"]
if config_active is None or len(config_active.strip())==0:
data_id=f"{service_name}.{config_file_type}"
else:
data_id=f"{service_name}-{config_active}.{config_file_type}"
#config_file=f"{program_dir}/{data_id}"
config_file=f"{program_dir}/app.{config_file_type}"
nacos_addr_url=f"http://{nacos_host}:{nacos_port}"
configs_path="/nacos/v1/cs/configs"
program_sh_file=f"{program_dir}/{program_sh_name}"
if action=="install":
sys.exit(install())
elif action=="run":
sys.exit(run())
elif action=="start":
status_value=monitor()
if status_value==activated_code:
sys.exit(activated_code)
elif status_value==stopped_code:
sys.exit(start())
elif status_value==abnormal_code:
if stop()==normal_code:
sys.exit(start())
else:
sys.exit(error_code)
elif action=="stop":
status_value=monitor()
if status_value==activated_code:
sys.exit(stop())
elif status_value==stopped_code:
sys.exit(stopped_code)
elif status_value==abnormal_code:
if stop()==normal_code:
sys.exit(normal_code)
else:
sys.exit(error_code)
elif action=="monitor":
sys.exit(monitor())
elif action=="backup":
sys.exit(backup())
else:
sys.exit(error_code)
|
[
"x_hsky@163.com"
] |
x_hsky@163.com
|
9ea7ea63ba0540a2f1d76094330d865d36204722
|
8a102033a266d39128e4b64aa0780cf67055e196
|
/13909.py
|
4dc6a2bf1bca9a9ec819ec4c1e59c07ced214d34
|
[] |
no_license
|
yuseungwoo/baekjoon
|
4dec0798b8689b9378121b9d178713c9cf14a53f
|
099031e2c4401e27edcdc05bd6c9e6a558b09bb9
|
refs/heads/master
| 2020-09-03T15:25:40.764723
| 2018-10-08T02:35:27
| 2018-10-08T02:35:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 198
|
py
|
# coding: utf-8
from math import ceil, sqrt
n = int(input())
for i in range(ceil(sqrt(n))+1):
    if n < i ** 2:
        print(i - 1)
        break
    elif n == i ** 2:
        print(i)
        break
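# Equivalent sketch on Python 3.8+ (not used above): math.isqrt(n) returns the
# floor of the square root directly, e.g. math.isqrt(10) == 3.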
|
[
"blacksangi14@naver.com"
] |
blacksangi14@naver.com
|
2e2d116fe63a89f4b24ca97503f14dd0e02c6e65
|
3fc00c49c6b5a5d3edb4f5a97a86ecc8f59a3035
|
/projects2/test/test_reports.py
|
ef6eb55dc18e371258b1d8efe835a55c3ab3ed55
|
[] |
no_license
|
yc-hu/dm_apps
|
9e640ef08da8ecefcd7008ee2d4f8f268ec9062e
|
483f855b19876fd60c0017a270df74e076aa0d8b
|
refs/heads/master
| 2023-04-07T13:13:55.999058
| 2021-04-12T10:19:21
| 2021-04-12T10:19:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 375
|
py
|
import os
from django.test import TestCase, tag
from django.conf import settings
from .. import reports
from ..test import FactoryFloor as FactoryFloor
from ..test.common_tests import CommonProjectTest
from shared_models import models as shared_models
from shared_models.test.SharedModelsFactoryFloor import RegionFactory, BranchFactory, DivisionFactory, SectionFactory
|
[
"davjfish@gmail.com"
] |
davjfish@gmail.com
|
67f203e8645cb88f51770274680e3692ac87e49a
|
80ade32ea5d0b50a8685b032dce7bb6e1c431283
|
/tests/components/onewire/const.py
|
777cb1f3d2531cb7c43fc2f50f1f3c76434f8d5a
|
[
"Apache-2.0"
] |
permissive
|
mtandersson/home-assistant
|
c47df9021058db4c75e7689ac8687dc5eee87b39
|
3693b9bd040c98ea3476c1042cca8c653183c1f5
|
refs/heads/dev
| 2023-03-08T14:11:42.781466
| 2021-11-06T01:54:51
| 2021-11-06T01:54:51
| 160,022,263
| 0
| 0
|
Apache-2.0
| 2023-02-22T06:18:29
| 2018-12-02T07:02:41
|
Python
|
UTF-8
|
Python
| false
| false
| 40,114
|
py
|
"""Constants for 1-Wire integration."""
from pi1wire import InvalidCRCException, UnsupportResponseException
from pyownet.protocol import Error as ProtocolError
from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR_DOMAIN
from homeassistant.components.onewire.const import (
DOMAIN,
MANUFACTURER_EDS,
MANUFACTURER_HOBBYBOARDS,
MANUFACTURER_MAXIM,
PRESSURE_CBAR,
)
from homeassistant.components.sensor import (
ATTR_STATE_CLASS,
DOMAIN as SENSOR_DOMAIN,
STATE_CLASS_MEASUREMENT,
STATE_CLASS_TOTAL_INCREASING,
)
from homeassistant.components.switch import DOMAIN as SWITCH_DOMAIN
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_ENTITY_ID,
ATTR_IDENTIFIERS,
ATTR_MANUFACTURER,
ATTR_MODEL,
ATTR_NAME,
ATTR_STATE,
ATTR_UNIT_OF_MEASUREMENT,
ATTR_VIA_DEVICE,
DEVICE_CLASS_CURRENT,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_ILLUMINANCE,
DEVICE_CLASS_PRESSURE,
DEVICE_CLASS_TEMPERATURE,
DEVICE_CLASS_VOLTAGE,
ELECTRIC_CURRENT_AMPERE,
ELECTRIC_POTENTIAL_VOLT,
LIGHT_LUX,
PERCENTAGE,
PRESSURE_MBAR,
STATE_OFF,
STATE_ON,
STATE_UNKNOWN,
TEMP_CELSIUS,
)
ATTR_DEFAULT_DISABLED = "default_disabled"
ATTR_DEVICE_FILE = "device_file"
ATTR_DEVICE_INFO = "device_info"
ATTR_INJECT_READS = "inject_reads"
ATTR_UNIQUE_ID = "unique_id"
FIXED_ATTRIBUTES = (
ATTR_DEVICE_CLASS,
ATTR_STATE_CLASS,
ATTR_UNIT_OF_MEASUREMENT,
)
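# The mock trees below mirror what an owserver or sysfs scan would return:
# each device id maps to the raw reads to inject plus the entities (and the
# states they should report) that the integration is expected to create.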
MOCK_OWPROXY_DEVICES = {
"00.111111111111": {
ATTR_INJECT_READS: [
b"", # read device type
],
SENSOR_DOMAIN: [],
},
"05.111111111111": {
ATTR_INJECT_READS: [
b"DS2405", # read device type
],
ATTR_DEVICE_INFO: {
ATTR_IDENTIFIERS: {(DOMAIN, "05.111111111111")},
ATTR_MANUFACTURER: MANUFACTURER_MAXIM,
ATTR_MODEL: "DS2405",
ATTR_NAME: "05.111111111111",
},
SWITCH_DOMAIN: [
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "switch.05_111111111111_pio",
ATTR_INJECT_READS: b" 1",
ATTR_STATE: STATE_ON,
ATTR_UNIQUE_ID: "/05.111111111111/PIO",
},
],
},
"10.111111111111": {
ATTR_INJECT_READS: [
b"DS18S20", # read device type
],
ATTR_DEVICE_INFO: {
ATTR_IDENTIFIERS: {(DOMAIN, "10.111111111111")},
ATTR_MANUFACTURER: MANUFACTURER_MAXIM,
ATTR_MODEL: "DS18S20",
ATTR_NAME: "10.111111111111",
},
SENSOR_DOMAIN: [
{
ATTR_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
ATTR_ENTITY_ID: "sensor.my_ds18b20_temperature",
ATTR_INJECT_READS: b" 25.123",
ATTR_STATE: "25.1",
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
ATTR_UNIQUE_ID: "/10.111111111111/temperature",
ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS,
},
],
},
"12.111111111111": {
ATTR_INJECT_READS: [
b"DS2406", # read device type
],
ATTR_DEVICE_INFO: {
ATTR_IDENTIFIERS: {(DOMAIN, "12.111111111111")},
ATTR_MANUFACTURER: MANUFACTURER_MAXIM,
ATTR_MODEL: "DS2406",
ATTR_NAME: "12.111111111111",
},
BINARY_SENSOR_DOMAIN: [
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "binary_sensor.12_111111111111_sensed_a",
ATTR_INJECT_READS: b" 1",
ATTR_STATE: STATE_ON,
ATTR_UNIQUE_ID: "/12.111111111111/sensed.A",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "binary_sensor.12_111111111111_sensed_b",
ATTR_INJECT_READS: b" 0",
ATTR_STATE: STATE_OFF,
ATTR_UNIQUE_ID: "/12.111111111111/sensed.B",
},
],
SENSOR_DOMAIN: [
{
ATTR_DEFAULT_DISABLED: True,
ATTR_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
ATTR_ENTITY_ID: "sensor.12_111111111111_temperature",
ATTR_INJECT_READS: b" 25.123",
ATTR_STATE: "25.1",
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
ATTR_UNIQUE_ID: "/12.111111111111/TAI8570/temperature",
ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS,
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_DEVICE_CLASS: DEVICE_CLASS_PRESSURE,
ATTR_ENTITY_ID: "sensor.12_111111111111_pressure",
ATTR_INJECT_READS: b" 1025.123",
ATTR_STATE: "1025.1",
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
ATTR_UNIQUE_ID: "/12.111111111111/TAI8570/pressure",
ATTR_UNIT_OF_MEASUREMENT: PRESSURE_MBAR,
},
],
SWITCH_DOMAIN: [
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "switch.12_111111111111_pio_a",
ATTR_INJECT_READS: b" 1",
ATTR_STATE: STATE_ON,
ATTR_UNIQUE_ID: "/12.111111111111/PIO.A",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "switch.12_111111111111_pio_b",
ATTR_INJECT_READS: b" 0",
ATTR_STATE: STATE_OFF,
ATTR_UNIQUE_ID: "/12.111111111111/PIO.B",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "switch.12_111111111111_latch_a",
ATTR_INJECT_READS: b" 1",
ATTR_STATE: STATE_ON,
ATTR_UNIQUE_ID: "/12.111111111111/latch.A",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "switch.12_111111111111_latch_b",
ATTR_INJECT_READS: b" 0",
ATTR_STATE: STATE_OFF,
ATTR_UNIQUE_ID: "/12.111111111111/latch.B",
},
],
},
"1D.111111111111": {
ATTR_INJECT_READS: [
b"DS2423", # read device type
],
ATTR_DEVICE_INFO: {
ATTR_IDENTIFIERS: {(DOMAIN, "1D.111111111111")},
ATTR_MANUFACTURER: MANUFACTURER_MAXIM,
ATTR_MODEL: "DS2423",
ATTR_NAME: "1D.111111111111",
},
SENSOR_DOMAIN: [
{
ATTR_ENTITY_ID: "sensor.1d_111111111111_counter_a",
ATTR_INJECT_READS: b" 251123",
ATTR_STATE: "251123",
ATTR_STATE_CLASS: STATE_CLASS_TOTAL_INCREASING,
ATTR_UNIQUE_ID: "/1D.111111111111/counter.A",
ATTR_UNIT_OF_MEASUREMENT: "count",
},
{
ATTR_ENTITY_ID: "sensor.1d_111111111111_counter_b",
ATTR_INJECT_READS: b" 248125",
ATTR_STATE: "248125",
ATTR_STATE_CLASS: STATE_CLASS_TOTAL_INCREASING,
ATTR_UNIQUE_ID: "/1D.111111111111/counter.B",
ATTR_UNIT_OF_MEASUREMENT: "count",
},
],
},
"1F.111111111111": {
ATTR_INJECT_READS: [
b"DS2409", # read device type
],
ATTR_DEVICE_INFO: [
{
ATTR_IDENTIFIERS: {(DOMAIN, "1F.111111111111")},
ATTR_MANUFACTURER: MANUFACTURER_MAXIM,
ATTR_MODEL: "DS2409",
ATTR_NAME: "1F.111111111111",
},
{
ATTR_IDENTIFIERS: {(DOMAIN, "1D.111111111111")},
ATTR_MANUFACTURER: MANUFACTURER_MAXIM,
ATTR_MODEL: "DS2423",
ATTR_NAME: "1D.111111111111",
ATTR_VIA_DEVICE: (DOMAIN, "1F.111111111111"),
},
],
"branches": {
"aux": {},
"main": {
"1D.111111111111": {
ATTR_INJECT_READS: [
b"DS2423", # read device type
],
SENSOR_DOMAIN: [
{
ATTR_DEVICE_FILE: "/1F.111111111111/main/1D.111111111111/counter.A",
ATTR_ENTITY_ID: "sensor.1d_111111111111_counter_a",
ATTR_INJECT_READS: b" 251123",
ATTR_STATE: "251123",
ATTR_STATE_CLASS: STATE_CLASS_TOTAL_INCREASING,
ATTR_UNIQUE_ID: "/1D.111111111111/counter.A",
ATTR_UNIT_OF_MEASUREMENT: "count",
},
{
ATTR_DEVICE_FILE: "/1F.111111111111/main/1D.111111111111/counter.B",
ATTR_ENTITY_ID: "sensor.1d_111111111111_counter_b",
ATTR_INJECT_READS: b" 248125",
ATTR_STATE: "248125",
ATTR_STATE_CLASS: STATE_CLASS_TOTAL_INCREASING,
ATTR_UNIQUE_ID: "/1D.111111111111/counter.B",
ATTR_UNIT_OF_MEASUREMENT: "count",
},
],
},
},
},
},
"22.111111111111": {
ATTR_INJECT_READS: [
b"DS1822", # read device type
],
ATTR_DEVICE_INFO: {
ATTR_IDENTIFIERS: {(DOMAIN, "22.111111111111")},
ATTR_MANUFACTURER: MANUFACTURER_MAXIM,
ATTR_MODEL: "DS1822",
ATTR_NAME: "22.111111111111",
},
SENSOR_DOMAIN: [
{
ATTR_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
ATTR_ENTITY_ID: "sensor.22_111111111111_temperature",
ATTR_INJECT_READS: ProtocolError,
ATTR_STATE: STATE_UNKNOWN,
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
ATTR_UNIQUE_ID: "/22.111111111111/temperature",
ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS,
},
],
},
"26.111111111111": {
ATTR_INJECT_READS: [
b"DS2438", # read device type
],
ATTR_DEVICE_INFO: {
ATTR_IDENTIFIERS: {(DOMAIN, "26.111111111111")},
ATTR_MANUFACTURER: MANUFACTURER_MAXIM,
ATTR_MODEL: "DS2438",
ATTR_NAME: "26.111111111111",
},
SENSOR_DOMAIN: [
{
ATTR_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
ATTR_ENTITY_ID: "sensor.26_111111111111_temperature",
ATTR_INJECT_READS: b" 25.123",
ATTR_STATE: "25.1",
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
ATTR_UNIQUE_ID: "/26.111111111111/temperature",
ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS,
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_DEVICE_CLASS: DEVICE_CLASS_HUMIDITY,
ATTR_ENTITY_ID: "sensor.26_111111111111_humidity",
ATTR_INJECT_READS: b" 72.7563",
ATTR_STATE: "72.8",
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
ATTR_UNIQUE_ID: "/26.111111111111/humidity",
ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE,
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_DEVICE_CLASS: DEVICE_CLASS_HUMIDITY,
ATTR_ENTITY_ID: "sensor.26_111111111111_humidity_hih3600",
ATTR_INJECT_READS: b" 73.7563",
ATTR_STATE: "73.8",
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
ATTR_UNIQUE_ID: "/26.111111111111/HIH3600/humidity",
ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE,
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_DEVICE_CLASS: DEVICE_CLASS_HUMIDITY,
ATTR_ENTITY_ID: "sensor.26_111111111111_humidity_hih4000",
ATTR_INJECT_READS: b" 74.7563",
ATTR_STATE: "74.8",
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
ATTR_UNIQUE_ID: "/26.111111111111/HIH4000/humidity",
ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE,
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_DEVICE_CLASS: DEVICE_CLASS_HUMIDITY,
ATTR_ENTITY_ID: "sensor.26_111111111111_humidity_hih5030",
ATTR_INJECT_READS: b" 75.7563",
ATTR_STATE: "75.8",
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
ATTR_UNIQUE_ID: "/26.111111111111/HIH5030/humidity",
ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE,
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_DEVICE_CLASS: DEVICE_CLASS_HUMIDITY,
ATTR_ENTITY_ID: "sensor.26_111111111111_humidity_htm1735",
ATTR_INJECT_READS: ProtocolError,
ATTR_STATE: STATE_UNKNOWN,
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
ATTR_UNIQUE_ID: "/26.111111111111/HTM1735/humidity",
ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE,
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_DEVICE_CLASS: DEVICE_CLASS_PRESSURE,
ATTR_ENTITY_ID: "sensor.26_111111111111_pressure",
ATTR_INJECT_READS: b" 969.265",
ATTR_STATE: "969.3",
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
ATTR_UNIQUE_ID: "/26.111111111111/B1-R1-A/pressure",
ATTR_UNIT_OF_MEASUREMENT: PRESSURE_MBAR,
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_DEVICE_CLASS: DEVICE_CLASS_ILLUMINANCE,
ATTR_ENTITY_ID: "sensor.26_111111111111_illuminance",
ATTR_INJECT_READS: b" 65.8839",
ATTR_STATE: "65.9",
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
ATTR_UNIQUE_ID: "/26.111111111111/S3-R1-A/illuminance",
ATTR_UNIT_OF_MEASUREMENT: LIGHT_LUX,
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_DEVICE_CLASS: DEVICE_CLASS_VOLTAGE,
ATTR_ENTITY_ID: "sensor.26_111111111111_voltage_vad",
ATTR_INJECT_READS: b" 2.97",
ATTR_STATE: "3.0",
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
ATTR_UNIQUE_ID: "/26.111111111111/VAD",
ATTR_UNIT_OF_MEASUREMENT: ELECTRIC_POTENTIAL_VOLT,
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_DEVICE_CLASS: DEVICE_CLASS_VOLTAGE,
ATTR_ENTITY_ID: "sensor.26_111111111111_voltage_vdd",
ATTR_INJECT_READS: b" 4.74",
ATTR_STATE: "4.7",
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
ATTR_UNIQUE_ID: "/26.111111111111/VDD",
ATTR_UNIT_OF_MEASUREMENT: ELECTRIC_POTENTIAL_VOLT,
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_DEVICE_CLASS: DEVICE_CLASS_CURRENT,
ATTR_ENTITY_ID: "sensor.26_111111111111_current",
ATTR_INJECT_READS: b" 1",
ATTR_STATE: "1.0",
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
ATTR_UNIQUE_ID: "/26.111111111111/IAD",
ATTR_UNIT_OF_MEASUREMENT: ELECTRIC_CURRENT_AMPERE,
},
],
},
"28.111111111111": {
ATTR_INJECT_READS: [
b"DS18B20", # read device type
],
ATTR_DEVICE_INFO: {
ATTR_IDENTIFIERS: {(DOMAIN, "28.111111111111")},
ATTR_MANUFACTURER: MANUFACTURER_MAXIM,
ATTR_MODEL: "DS18B20",
ATTR_NAME: "28.111111111111",
},
SENSOR_DOMAIN: [
{
ATTR_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
ATTR_ENTITY_ID: "sensor.28_111111111111_temperature",
ATTR_INJECT_READS: b" 26.984",
ATTR_STATE: "27.0",
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
ATTR_UNIQUE_ID: "/28.111111111111/temperature",
ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS,
},
],
},
"29.111111111111": {
ATTR_INJECT_READS: [
b"DS2408", # read device type
],
ATTR_DEVICE_INFO: {
ATTR_IDENTIFIERS: {(DOMAIN, "29.111111111111")},
ATTR_MANUFACTURER: MANUFACTURER_MAXIM,
ATTR_MODEL: "DS2408",
ATTR_NAME: "29.111111111111",
},
BINARY_SENSOR_DOMAIN: [
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "binary_sensor.29_111111111111_sensed_0",
ATTR_INJECT_READS: b" 1",
ATTR_STATE: STATE_ON,
ATTR_UNIQUE_ID: "/29.111111111111/sensed.0",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "binary_sensor.29_111111111111_sensed_1",
ATTR_INJECT_READS: b" 0",
ATTR_STATE: STATE_OFF,
ATTR_UNIQUE_ID: "/29.111111111111/sensed.1",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "binary_sensor.29_111111111111_sensed_2",
ATTR_INJECT_READS: b" 0",
ATTR_STATE: STATE_OFF,
ATTR_UNIQUE_ID: "/29.111111111111/sensed.2",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "binary_sensor.29_111111111111_sensed_3",
ATTR_INJECT_READS: b" 0",
ATTR_STATE: STATE_OFF,
ATTR_UNIQUE_ID: "/29.111111111111/sensed.3",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "binary_sensor.29_111111111111_sensed_4",
ATTR_INJECT_READS: b" 0",
ATTR_STATE: STATE_OFF,
ATTR_UNIQUE_ID: "/29.111111111111/sensed.4",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "binary_sensor.29_111111111111_sensed_5",
ATTR_INJECT_READS: b" 0",
ATTR_STATE: STATE_OFF,
ATTR_UNIQUE_ID: "/29.111111111111/sensed.5",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "binary_sensor.29_111111111111_sensed_6",
ATTR_INJECT_READS: b" 0",
ATTR_STATE: STATE_OFF,
ATTR_UNIQUE_ID: "/29.111111111111/sensed.6",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "binary_sensor.29_111111111111_sensed_7",
ATTR_INJECT_READS: b" 0",
ATTR_STATE: STATE_OFF,
ATTR_UNIQUE_ID: "/29.111111111111/sensed.7",
},
],
SWITCH_DOMAIN: [
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "switch.29_111111111111_pio_0",
ATTR_INJECT_READS: b" 1",
ATTR_STATE: STATE_ON,
ATTR_UNIQUE_ID: "/29.111111111111/PIO.0",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "switch.29_111111111111_pio_1",
ATTR_INJECT_READS: b" 0",
ATTR_STATE: STATE_OFF,
ATTR_UNIQUE_ID: "/29.111111111111/PIO.1",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "switch.29_111111111111_pio_2",
ATTR_INJECT_READS: b" 1",
ATTR_STATE: STATE_ON,
ATTR_UNIQUE_ID: "/29.111111111111/PIO.2",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "switch.29_111111111111_pio_3",
ATTR_INJECT_READS: b" 0",
ATTR_STATE: STATE_OFF,
ATTR_UNIQUE_ID: "/29.111111111111/PIO.3",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "switch.29_111111111111_pio_4",
ATTR_INJECT_READS: b" 1",
ATTR_STATE: STATE_ON,
ATTR_UNIQUE_ID: "/29.111111111111/PIO.4",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "switch.29_111111111111_pio_5",
ATTR_INJECT_READS: b" 0",
ATTR_STATE: STATE_OFF,
ATTR_UNIQUE_ID: "/29.111111111111/PIO.5",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "switch.29_111111111111_pio_6",
ATTR_INJECT_READS: b" 1",
ATTR_STATE: STATE_ON,
ATTR_UNIQUE_ID: "/29.111111111111/PIO.6",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "switch.29_111111111111_pio_7",
ATTR_INJECT_READS: b" 0",
ATTR_STATE: STATE_OFF,
ATTR_UNIQUE_ID: "/29.111111111111/PIO.7",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "switch.29_111111111111_latch_0",
ATTR_INJECT_READS: b" 1",
ATTR_STATE: STATE_ON,
ATTR_UNIQUE_ID: "/29.111111111111/latch.0",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "switch.29_111111111111_latch_1",
ATTR_INJECT_READS: b" 0",
ATTR_STATE: STATE_OFF,
ATTR_UNIQUE_ID: "/29.111111111111/latch.1",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "switch.29_111111111111_latch_2",
ATTR_INJECT_READS: b" 1",
ATTR_STATE: STATE_ON,
ATTR_UNIQUE_ID: "/29.111111111111/latch.2",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "switch.29_111111111111_latch_3",
ATTR_INJECT_READS: b" 0",
ATTR_STATE: STATE_OFF,
ATTR_UNIQUE_ID: "/29.111111111111/latch.3",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "switch.29_111111111111_latch_4",
ATTR_INJECT_READS: b" 1",
ATTR_STATE: STATE_ON,
ATTR_UNIQUE_ID: "/29.111111111111/latch.4",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "switch.29_111111111111_latch_5",
ATTR_INJECT_READS: b" 0",
ATTR_STATE: STATE_OFF,
ATTR_UNIQUE_ID: "/29.111111111111/latch.5",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "switch.29_111111111111_latch_6",
ATTR_INJECT_READS: b" 1",
ATTR_STATE: STATE_ON,
ATTR_UNIQUE_ID: "/29.111111111111/latch.6",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "switch.29_111111111111_latch_7",
ATTR_INJECT_READS: b" 0",
ATTR_STATE: STATE_OFF,
ATTR_UNIQUE_ID: "/29.111111111111/latch.7",
},
],
},
"3A.111111111111": {
ATTR_INJECT_READS: [
b"DS2413", # read device type
],
ATTR_DEVICE_INFO: {
ATTR_IDENTIFIERS: {(DOMAIN, "3A.111111111111")},
ATTR_MANUFACTURER: MANUFACTURER_MAXIM,
ATTR_MODEL: "DS2413",
ATTR_NAME: "3A.111111111111",
},
BINARY_SENSOR_DOMAIN: [
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "binary_sensor.3a_111111111111_sensed_a",
ATTR_INJECT_READS: b" 1",
ATTR_STATE: STATE_ON,
ATTR_UNIQUE_ID: "/3A.111111111111/sensed.A",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "binary_sensor.3a_111111111111_sensed_b",
ATTR_INJECT_READS: b" 0",
ATTR_STATE: STATE_OFF,
ATTR_UNIQUE_ID: "/3A.111111111111/sensed.B",
},
],
SWITCH_DOMAIN: [
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "switch.3a_111111111111_pio_a",
ATTR_INJECT_READS: b" 1",
ATTR_STATE: STATE_ON,
ATTR_UNIQUE_ID: "/3A.111111111111/PIO.A",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "switch.3a_111111111111_pio_b",
ATTR_INJECT_READS: b" 0",
ATTR_STATE: STATE_OFF,
ATTR_UNIQUE_ID: "/3A.111111111111/PIO.B",
},
],
},
"3B.111111111111": {
ATTR_INJECT_READS: [
b"DS1825", # read device type
],
ATTR_DEVICE_INFO: {
ATTR_IDENTIFIERS: {(DOMAIN, "3B.111111111111")},
ATTR_MANUFACTURER: MANUFACTURER_MAXIM,
ATTR_MODEL: "DS1825",
ATTR_NAME: "3B.111111111111",
},
SENSOR_DOMAIN: [
{
ATTR_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
ATTR_ENTITY_ID: "sensor.3b_111111111111_temperature",
ATTR_INJECT_READS: b" 28.243",
ATTR_STATE: "28.2",
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
ATTR_UNIQUE_ID: "/3B.111111111111/temperature",
ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS,
},
],
},
"42.111111111111": {
ATTR_INJECT_READS: [
b"DS28EA00", # read device type
],
ATTR_DEVICE_INFO: {
ATTR_IDENTIFIERS: {(DOMAIN, "42.111111111111")},
ATTR_MANUFACTURER: MANUFACTURER_MAXIM,
ATTR_MODEL: "DS28EA00",
ATTR_NAME: "42.111111111111",
},
SENSOR_DOMAIN: [
{
ATTR_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
ATTR_ENTITY_ID: "sensor.42_111111111111_temperature",
ATTR_INJECT_READS: b" 29.123",
ATTR_STATE: "29.1",
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
ATTR_UNIQUE_ID: "/42.111111111111/temperature",
ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS,
},
],
},
"EF.111111111111": {
ATTR_INJECT_READS: [
b"HobbyBoards_EF", # read type
],
ATTR_DEVICE_INFO: {
ATTR_IDENTIFIERS: {(DOMAIN, "EF.111111111111")},
ATTR_MANUFACTURER: MANUFACTURER_HOBBYBOARDS,
ATTR_MODEL: "HobbyBoards_EF",
ATTR_NAME: "EF.111111111111",
},
SENSOR_DOMAIN: [
{
ATTR_DEVICE_CLASS: DEVICE_CLASS_HUMIDITY,
ATTR_ENTITY_ID: "sensor.ef_111111111111_humidity",
ATTR_INJECT_READS: b" 67.745",
ATTR_STATE: "67.7",
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
ATTR_UNIQUE_ID: "/EF.111111111111/humidity/humidity_corrected",
ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE,
},
{
ATTR_DEVICE_CLASS: DEVICE_CLASS_HUMIDITY,
ATTR_ENTITY_ID: "sensor.ef_111111111111_humidity_raw",
ATTR_INJECT_READS: b" 65.541",
ATTR_STATE: "65.5",
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
ATTR_UNIQUE_ID: "/EF.111111111111/humidity/humidity_raw",
ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE,
},
{
ATTR_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
ATTR_ENTITY_ID: "sensor.ef_111111111111_temperature",
ATTR_INJECT_READS: b" 25.123",
ATTR_STATE: "25.1",
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
ATTR_UNIQUE_ID: "/EF.111111111111/humidity/temperature",
ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS,
},
],
},
"EF.111111111112": {
ATTR_INJECT_READS: [
b"HB_MOISTURE_METER", # read type
b" 1", # read is_leaf_0
b" 1", # read is_leaf_1
b" 0", # read is_leaf_2
b" 0", # read is_leaf_3
],
ATTR_DEVICE_INFO: {
ATTR_IDENTIFIERS: {(DOMAIN, "EF.111111111112")},
ATTR_MANUFACTURER: MANUFACTURER_HOBBYBOARDS,
ATTR_MODEL: "HB_MOISTURE_METER",
ATTR_NAME: "EF.111111111112",
},
SENSOR_DOMAIN: [
{
ATTR_DEVICE_CLASS: DEVICE_CLASS_HUMIDITY,
ATTR_ENTITY_ID: "sensor.ef_111111111112_wetness_0",
ATTR_INJECT_READS: b" 41.745",
ATTR_STATE: "41.7",
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
ATTR_UNIQUE_ID: "/EF.111111111112/moisture/sensor.0",
ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE,
},
{
ATTR_DEVICE_CLASS: DEVICE_CLASS_HUMIDITY,
ATTR_ENTITY_ID: "sensor.ef_111111111112_wetness_1",
ATTR_INJECT_READS: b" 42.541",
ATTR_STATE: "42.5",
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
ATTR_UNIQUE_ID: "/EF.111111111112/moisture/sensor.1",
ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE,
},
{
ATTR_DEVICE_CLASS: DEVICE_CLASS_PRESSURE,
ATTR_ENTITY_ID: "sensor.ef_111111111112_moisture_2",
ATTR_INJECT_READS: b" 43.123",
ATTR_STATE: "43.1",
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
ATTR_UNIQUE_ID: "/EF.111111111112/moisture/sensor.2",
ATTR_UNIT_OF_MEASUREMENT: PRESSURE_CBAR,
},
{
ATTR_DEVICE_CLASS: DEVICE_CLASS_PRESSURE,
ATTR_ENTITY_ID: "sensor.ef_111111111112_moisture_3",
ATTR_INJECT_READS: b" 44.123",
ATTR_STATE: "44.1",
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
ATTR_UNIQUE_ID: "/EF.111111111112/moisture/sensor.3",
ATTR_UNIT_OF_MEASUREMENT: PRESSURE_CBAR,
},
],
},
"7E.111111111111": {
ATTR_INJECT_READS: [
b"EDS", # read type
b"EDS0068", # read device_type - note EDS specific
],
ATTR_DEVICE_INFO: {
ATTR_IDENTIFIERS: {(DOMAIN, "7E.111111111111")},
ATTR_MANUFACTURER: MANUFACTURER_EDS,
ATTR_MODEL: "EDS0068",
ATTR_NAME: "7E.111111111111",
},
SENSOR_DOMAIN: [
{
ATTR_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
ATTR_ENTITY_ID: "sensor.7e_111111111111_temperature",
ATTR_INJECT_READS: b" 13.9375",
ATTR_STATE: "13.9",
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
ATTR_UNIQUE_ID: "/7E.111111111111/EDS0068/temperature",
ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS,
},
{
ATTR_DEVICE_CLASS: DEVICE_CLASS_PRESSURE,
ATTR_ENTITY_ID: "sensor.7e_111111111111_pressure",
ATTR_INJECT_READS: b" 1012.21",
ATTR_STATE: "1012.2",
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
ATTR_UNIQUE_ID: "/7E.111111111111/EDS0068/pressure",
ATTR_UNIT_OF_MEASUREMENT: PRESSURE_MBAR,
},
{
ATTR_DEVICE_CLASS: DEVICE_CLASS_ILLUMINANCE,
ATTR_ENTITY_ID: "sensor.7e_111111111111_illuminance",
ATTR_INJECT_READS: b" 65.8839",
ATTR_STATE: "65.9",
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
ATTR_UNIQUE_ID: "/7E.111111111111/EDS0068/light",
ATTR_UNIT_OF_MEASUREMENT: LIGHT_LUX,
},
{
ATTR_DEVICE_CLASS: DEVICE_CLASS_HUMIDITY,
ATTR_ENTITY_ID: "sensor.7e_111111111111_humidity",
ATTR_INJECT_READS: b" 41.375",
ATTR_STATE: "41.4",
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
ATTR_UNIQUE_ID: "/7E.111111111111/EDS0068/humidity",
ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE,
},
],
},
"7E.222222222222": {
ATTR_INJECT_READS: [
b"EDS", # read type
b"EDS0066", # read device_type - note EDS specific
],
ATTR_DEVICE_INFO: {
ATTR_IDENTIFIERS: {(DOMAIN, "7E.222222222222")},
ATTR_MANUFACTURER: MANUFACTURER_EDS,
ATTR_MODEL: "EDS0066",
ATTR_NAME: "7E.222222222222",
},
SENSOR_DOMAIN: [
{
ATTR_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
ATTR_ENTITY_ID: "sensor.7e_222222222222_temperature",
ATTR_INJECT_READS: b" 13.9375",
ATTR_STATE: "13.9",
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
ATTR_UNIQUE_ID: "/7E.222222222222/EDS0066/temperature",
ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS,
},
{
ATTR_DEVICE_CLASS: DEVICE_CLASS_PRESSURE,
ATTR_ENTITY_ID: "sensor.7e_222222222222_pressure",
ATTR_INJECT_READS: b" 1012.21",
ATTR_STATE: "1012.2",
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
ATTR_UNIQUE_ID: "/7E.222222222222/EDS0066/pressure",
ATTR_UNIT_OF_MEASUREMENT: PRESSURE_MBAR,
},
],
},
}
MOCK_SYSBUS_DEVICES = {
"00-111111111111": {
SENSOR_DOMAIN: [],
},
"10-111111111111": {
ATTR_DEVICE_INFO: {
ATTR_IDENTIFIERS: {(DOMAIN, "10-111111111111")},
ATTR_MANUFACTURER: MANUFACTURER_MAXIM,
ATTR_MODEL: "10",
ATTR_NAME: "10-111111111111",
},
SENSOR_DOMAIN: [
{
ATTR_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
ATTR_ENTITY_ID: "sensor.my_ds18b20_temperature",
ATTR_INJECT_READS: 25.123,
ATTR_STATE: "25.1",
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
ATTR_UNIQUE_ID: "/sys/bus/w1/devices/10-111111111111/w1_slave",
ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS,
},
],
},
"12-111111111111": {
SENSOR_DOMAIN: [],
},
"1D-111111111111": {
SENSOR_DOMAIN: [],
},
"22-111111111111": {
ATTR_DEVICE_INFO: {
ATTR_IDENTIFIERS: {(DOMAIN, "22-111111111111")},
ATTR_MANUFACTURER: MANUFACTURER_MAXIM,
ATTR_MODEL: "22",
ATTR_NAME: "22-111111111111",
},
"sensor": [
{
ATTR_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
ATTR_ENTITY_ID: "sensor.22_111111111111_temperature",
ATTR_INJECT_READS: FileNotFoundError,
ATTR_STATE: STATE_UNKNOWN,
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
ATTR_UNIQUE_ID: "/sys/bus/w1/devices/22-111111111111/w1_slave",
ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS,
},
],
},
"26-111111111111": {
SENSOR_DOMAIN: [],
},
"28-111111111111": {
ATTR_DEVICE_INFO: {
ATTR_IDENTIFIERS: {(DOMAIN, "28-111111111111")},
ATTR_MANUFACTURER: MANUFACTURER_MAXIM,
ATTR_MODEL: "28",
ATTR_NAME: "28-111111111111",
},
SENSOR_DOMAIN: [
{
ATTR_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
ATTR_ENTITY_ID: "sensor.28_111111111111_temperature",
ATTR_INJECT_READS: InvalidCRCException,
ATTR_STATE: STATE_UNKNOWN,
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
ATTR_UNIQUE_ID: "/sys/bus/w1/devices/28-111111111111/w1_slave",
ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS,
},
],
},
"29-111111111111": {
SENSOR_DOMAIN: [],
},
"3A-111111111111": {
SENSOR_DOMAIN: [],
},
"3B-111111111111": {
ATTR_DEVICE_INFO: {
ATTR_IDENTIFIERS: {(DOMAIN, "3B-111111111111")},
ATTR_MANUFACTURER: MANUFACTURER_MAXIM,
ATTR_MODEL: "3B",
ATTR_NAME: "3B-111111111111",
},
SENSOR_DOMAIN: [
{
ATTR_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
ATTR_ENTITY_ID: "sensor.3b_111111111111_temperature",
ATTR_INJECT_READS: 29.993,
ATTR_STATE: "30.0",
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
ATTR_UNIQUE_ID: "/sys/bus/w1/devices/3B-111111111111/w1_slave",
ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS,
},
],
},
"42-111111111111": {
ATTR_DEVICE_INFO: {
ATTR_IDENTIFIERS: {(DOMAIN, "42-111111111111")},
ATTR_MANUFACTURER: MANUFACTURER_MAXIM,
ATTR_MODEL: "42",
ATTR_NAME: "42-111111111111",
},
SENSOR_DOMAIN: [
{
ATTR_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
ATTR_ENTITY_ID: "sensor.42_111111111111_temperature",
ATTR_INJECT_READS: UnsupportResponseException,
ATTR_STATE: STATE_UNKNOWN,
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
ATTR_UNIQUE_ID: "/sys/bus/w1/devices/42-111111111111/w1_slave",
ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS,
},
],
},
"42-111111111112": {
ATTR_DEVICE_INFO: {
ATTR_IDENTIFIERS: {(DOMAIN, "42-111111111112")},
ATTR_MANUFACTURER: MANUFACTURER_MAXIM,
ATTR_MODEL: "42",
ATTR_NAME: "42-111111111112",
},
SENSOR_DOMAIN: [
{
ATTR_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
ATTR_ENTITY_ID: "sensor.42_111111111112_temperature",
ATTR_INJECT_READS: [UnsupportResponseException] * 9 + [27.993],
ATTR_STATE: "28.0",
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
ATTR_UNIQUE_ID: "/sys/bus/w1/devices/42-111111111112/w1_slave",
ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS,
},
],
},
"42-111111111113": {
ATTR_DEVICE_INFO: {
ATTR_IDENTIFIERS: {(DOMAIN, "42-111111111113")},
ATTR_MANUFACTURER: MANUFACTURER_MAXIM,
ATTR_MODEL: "42",
ATTR_NAME: "42-111111111113",
},
SENSOR_DOMAIN: [
{
ATTR_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
ATTR_ENTITY_ID: "sensor.42_111111111113_temperature",
ATTR_INJECT_READS: [UnsupportResponseException] * 10 + [27.993],
ATTR_STATE: STATE_UNKNOWN,
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
ATTR_UNIQUE_ID: "/sys/bus/w1/devices/42-111111111113/w1_slave",
ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS,
},
],
},
"EF-111111111111": {
SENSOR_DOMAIN: [],
},
"EF-111111111112": {
SENSOR_DOMAIN: [],
},
}
|
[
"noreply@github.com"
] |
mtandersson.noreply@github.com
|
0ba8009534b8eefae9322169ff3b650414e29b1f
|
2652310cd1b78c3216eae399d1633e86e0c0a2e3
|
/nca_dj/wsgi.py
|
eb8cb6d8ba58b6b9c606b6864972c65291609a32
|
[] |
no_license
|
synestematic/nca_kpi
|
5cd27351a7c7aea0177229928203b02ef5b309c1
|
a01058b8d148f99c0ecc6e53e967509eae390049
|
refs/heads/master
| 2021-01-01T04:41:16.193823
| 2017-07-21T08:49:05
| 2017-07-21T08:49:05
| 97,222,742
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 390
|
py
|
"""
WSGI config for nca_dj project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "nca_dj.settings")
application = get_wsgi_application()
|
[
"federico.rizzo@auto1.com"
] |
federico.rizzo@auto1.com
|
83994c38f9144ce347181331f846f2ff90ca9862
|
26b03e26df3189a1ab1fb56f75247446ee5959eb
|
/src/models/base_model.py
|
c6694860200255a1e4b827fb92e8b8f631e5c9e1
|
[
"MIT"
] |
permissive
|
Delismachado/probable-journey
|
968598080e69f022b6badcaea479152f6063c27e
|
e9af22fe989795de3d277f5d9bcc90bc57cc0bf3
|
refs/heads/main
| 2023-03-15T23:38:16.388602
| 2021-03-02T12:43:20
| 2021-03-02T12:43:20
| 333,796,521
| 0
| 0
|
MIT
| 2021-03-02T12:43:21
| 2021-01-28T15:14:53
| null |
UTF-8
|
Python
| false
| false
| 220
|
py
|
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer
Base = declarative_base()
class BaseModel(Base):
__abstract__ = True
id = Column('id', Integer, primary_key=True)
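# Hypothetical usage sketch: concrete models inherit the shared integer
# primary key, e.g.
#
#     from sqlalchemy import String
#
#     class User(BaseModel):
#         __tablename__ = 'users'
#         name = Column(String(80))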
|
[
"dam.machado1@gmail.com"
] |
dam.machado1@gmail.com
|
0d95d67ad12cf5375764d59f2944b84232e97e92
|
d9a6267c609cefa1f44cc2c5c1d993a50ec313f5
|
/app.py
|
a082c924dfda5a8c6fbb833fe98a04aa917d5bc8
|
[] |
no_license
|
AdityaRajPurohit/Co2-Emission-Model-tek-System-Hackathon
|
086947a08ec609d4d1b5cc1e4667ec2b3c607790
|
c820ff46c270712db99ebb7ac8b319826d107be8
|
refs/heads/master
| 2020-07-06T10:42:17.890653
| 2019-08-18T21:03:48
| 2019-08-18T21:03:48
| 202,990,431
| 1
| 0
| null | 2019-08-18T16:00:22
| 2019-08-18T10:42:35
|
Python
|
UTF-8
|
Python
| false
| false
| 1,711
|
py
|
import flask
import pickle
import pandas as pd
# Use pickle to load in the pre-trained model
with open('model/Co2_emission.pkl', 'rb') as f:
model = pickle.load(f)
# Initialise the Flask app
app = flask.Flask(__name__, template_folder='templates')
# Set up the main route
@app.route('/', methods=['GET', 'POST'])
def main():
if flask.request.method == 'GET':
# Just render the initial form, to get input
return(flask.render_template('main.html'))
if flask.request.method == 'POST':
# Extract the input
ENGINESIZE = flask.request.form['ENGINESIZE']
CYLINDERS = flask.request.form['CYLINDERS']
FUELCONSUMPTION_COMB = flask.request.form['FUELCONSUMPTION_COMB']
# Make DataFrame for model
input_variables = pd.DataFrame([[ENGINESIZE, CYLINDERS, FUELCONSUMPTION_COMB]],
columns=['ENGINESIZE','CYLINDERS','FUELCONSUMPTION_COMB'],
dtype=float,
index=['input'])
# Get the model's prediction
prediction = model.predict(input_variables)[0]
# Render the form again, but add in the prediction and remind user
# of the values they input before
return flask.render_template('main.html',
original_input={'ENGINESIZE':ENGINESIZE,
'CYLINDERS':CYLINDERS,
'FUELCONSUMPTION_COMB':FUELCONSUMPTION_COMB},
result=prediction,
)
if __name__ == '__main__':
app.run()
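# Example form submission against a locally running instance; the field names
# are taken from the handler above, host/port are Flask's defaults:
#
#     curl -X POST http://127.0.0.1:5000/ \
#          -d ENGINESIZE=2.0 -d CYLINDERS=4 -d FUELCONSUMPTION_COMB=8.5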
|
[
"taxena111@gmail.com"
] |
taxena111@gmail.com
|
8a30effc252cb2fa7110200228e5bb569ad7a0ed
|
77df4b9ad320d544c0e48e2176bcce302343f70d
|
/label_source_affection.py
|
35b17bd1c214e5de255f7e8f4b3515e60d8ac7af
|
[] |
no_license
|
wangmk126978/e-bike-evaluator-2.0
|
ec805cb74849ddd0c0f9e251cddae367a87725db
|
a27f0708dcc2fbace253d3f60548919b915310a8
|
refs/heads/master
| 2020-08-07T21:32:35.574728
| 2019-10-08T11:29:20
| 2019-10-08T11:29:20
| 213,592,205
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,848
|
py
|
import datetime
import time
import re
import kangkang_tools_box as kk
import bike_evaluator as be
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
from sklearn.externals import joblib
import matplotlib.pyplot as plt
import numpy as np
import random
def date_2_string(date):
string_list=kk.check_symbol('-',re.sub(r' 00:00:00','',str(date)))
string=string_list[0]+string_list[1]+string_list[2]
return string
def time_duration_perlong(start_date,end_date):
time_spot_list=[date_2_string(start_date)]
time_spot=start_date
while(time_spot+datetime.timedelta(days=30)<end_date):
time_spot+=datetime.timedelta(days=30)
time_spot_list.append(date_2_string(time_spot))
return time_spot_list
def get_mse(feature_vectors,labels):
regr = RandomForestRegressor(n_estimators=200,oob_score=True)
regr.fit(feature_vectors, labels)
oob=1 - regr.oob_score_
return oob
def string_2_date(date):
if int(date)%20190000 > 20180000:
date_year=2018
date_date=(int(date)%20180000)%100
date_month=int(((int(date)%20180000)-(int(date)%20180000)%100)/100)
else:
date_year=2019
date_date=(int(date)%20190000)%100
date_month=int(((int(date)%20190000)-(int(date)%20190000)%100)/100)
date=datetime.datetime(int(date_year),int(date_month),int(date_date))
return date
def test_performance(train_x,train_y,test_x,test_y,n):
regr = RandomForestRegressor(n_estimators=n,oob_score=False,bootstrap=True)
regr.fit(train_x, train_y)
mse=mean_squared_error(regr.predict(test_x),test_y)
return mse
def k_fold_cross_validation_T_times(k,X_set,Y_set,function,T,n):
    # run T times
t_times_mse=[]
for t in range(T):
        # assemble the data set
data_set=[]
for i in range(len(X_set)):
            # add an index so we can sample without replacement
data_set.append([i])
for j in range(len(X_set[i])):
data_set[i].append(X_set[i][j])
data_set[i].append(Y_set[i])
        # draw k-1 folds of int(len(data_set)/k) samples each, rebuilding data_set after every draw (sampling without replacement)
fold_size=int(len(data_set)/k)
k_folder=[]
current_data_set=data_set
for i in range(k-1):
k_folder.append(random.sample(current_data_set,fold_size))
new_data_set=[]
for j in range(len(current_data_set)):
for z in range(len(k_folder[i])):
if current_data_set[j][0] == k_folder[i][z][0]:
break
if z == len(k_folder[i])-1:
new_data_set.append(current_data_set[j])
current_data_set=new_data_set
k_folder.append(current_data_set)
        # strip the index column
for i in range(len(k_folder)):
for j in range(len(k_folder[i])):
del k_folder[i][j][0]
        # build k train/test splits and average their MSE
mse_set=[]
for i in range(k):
test_index=i
train_set=[]
for j in range(len(k_folder)):
if j == test_index:
test_set=k_folder[j]
if j != test_index:
for z in range(len(k_folder[j])):
train_set.append(k_folder[j][z])
            # separate x and y
train_x=[]
train_y=[]
test_x=[]
test_y=[]
for j in range(len(train_set)):
train_y.append(train_set[j][-1][0])
train_x.append([])
for z in range(len(train_set[j])-1):
train_x[j].append(train_set[j][z])
for j in range(len(test_set)):
test_y.append(test_set[j][-1][0])
test_x.append([])
for z in range(len(test_set[j])-1):
test_x[j].append(test_set[j][z])
mse_set.append(function(train_x,train_y,test_x,test_y,n))
avg_mse=np.mean(mse_set)
t_times_mse.append(avg_mse)
t_times_avg_mse=np.mean(t_times_mse)
return t_times_avg_mse
source='elektrischefietsen'
start_date=datetime.datetime(int('2018'),int('10'),int('1'))
end_date=datetime.datetime(int('2019'),int('7'),int('10'))
days_delta=datetime.timedelta(days=1)
time_spot_list=time_duration_perlong(start_date,end_date)
# how MSE changes across different time spans and start dates
duration_shift=[]
duration=30
index=0
while(duration+30 < (end_date-start_date).days):
duration_shift.append([])
for i in range(len(time_spot_list)):
if string_2_date(time_spot_list[i])+datetime.timedelta(days=duration) < end_date:
selected_column,labels_source,en_dic,uncode_feature_vectors,feature_vectors,labels,bike_names=be.creating_time_series_data(time_spot_list[i],date_2_string(string_2_date(time_spot_list[i])+datetime.timedelta(days=duration)),source)
print('duration: '+str(duration)+', start date: '+str(time_spot_list[i]))
mse=k_fold_cross_validation_T_times(5,feature_vectors,labels,test_performance,5,40)
duration_shift[index].append([duration+30*(1+i),mse])
print("time_span: "+str(duration)+", start_date: "+str(time_spot_list[i])+", mse: "+str(mse))
else:
break
duration+=30
index+=1
for i in range(len(duration_shift)):
duration=0
for j in range(len(duration_shift[i])):
duration_shift[i][j][0]=duration
duration+=30
duration_shift.append([])
selected_column,labels_source,en_dic,uncode_feature_vectors,feature_vectors,labels,bike_names=be.creating_time_series_data(date_2_string(start_date),date_2_string(end_date),source)
duration_shift[len(duration_shift)-1].append([0,k_fold_cross_validation_T_times(5,feature_vectors,labels,test_performance,5,40)])
avg_X=[]
avg_Y=[]
plt.figure()
for i in range(len(duration_shift)):
X=[]
Y=[]
label='time span(s) = '+str(30*(i+1))
avg_X.append(30*(i+1))
for j in range(len(duration_shift[i])):
X.append(duration_shift[i][j][0])
Y.append(duration_shift[i][j][1])
avg_Y.append(np.mean(Y))
plt.scatter(X,Y,label = label,marker='v')
ave_MSE_d=[]
d=[]
for i in range(len(duration_shift[0])):
sum_one_s=0
num_s=0
for j in range(len(duration_shift)):
try:
sum_one_s+=duration_shift[j][i][1]
num_s+=1
except:
break
ave_MSE_d.append(sum_one_s/num_s)
d.append(30*i)
start_date_coef=np.corrcoef([ave_MSE_d,d])
plt.plot(d,ave_MSE_d,label='mean(MSE_st)')
#plt.title('The relationship between the duration settings and the MSE of RF(n=200)') # title
plt.xlabel('start date(d)') # x-axis label
plt.ylabel('MSE') # y-axis label
plt.legend() # show the legend
plt.show()
plt.figure()
plt.plot(avg_X,avg_Y)
plt.title('The relationship between time span(s) and the average MSE of RF(n=200)')
plt.xlabel('time span(s)') # x-axis label
plt.ylabel('average MSE of RF')
plt.show()
# relationship between start date and MSE
plt.figure()
plt.plot(d,d)
plt.plot(d,ave_MSE_d)
plt.show()
# effect of the number of decision trees
# to save time, only compute the tree-count curves for the different time spans at the first start date
time_span=30
MSE_ts=[]
index=0
while(time_span+30 < (end_date-start_date).days):
MSE_ts.append([['time span=',time_span],[]])
selected_column,labels_source,en_dic,uncode_feature_vectors,feature_vectors,labels,bike_names=be.creating_time_series_data(time_spot_list[0],date_2_string(string_2_date(time_spot_list[0])+datetime.timedelta(days=time_span)),source)
for i in range(100):
mse=k_fold_cross_validation_T_times(5,feature_vectors,labels,test_performance,5,i+1)
MSE_ts[index][1].append(mse)
print('time span: '+str(time_span)+', n = '+str(i)+', mse = '+str(mse))
time_span+=30
index+=1
# add the last time span
MSE_ts.append([['time span=',time_span],[]])
selected_column,labels_source,en_dic,uncode_feature_vectors,feature_vectors,labels,bike_names=be.creating_time_series_data(time_spot_list[0],date_2_string(string_2_date(time_spot_list[0])+datetime.timedelta(days=270)),source)
for i in range(100):
mse=k_fold_cross_validation_T_times(5,feature_vectors,labels,test_performance,5,i+1)
MSE_ts[len(MSE_ts)-1][1].append(mse)
print('time span: '+str(time_span)+', n = '+str(i)+', mse = '+str(mse))
plt.figure()
for i in range(len(MSE_ts)):
X=list(range(100))
Y=MSE_ts[i][1]
label='time span(s) = '+str(MSE_ts[i][0][1])
plt.plot(X,Y,label=label)
#plt.title('How the number of decision trees(n) setting affect RF MSE')
plt.xlabel('number of decision trees(n)') # x-axis label
plt.ylabel('MSE')
plt.legend()
plt.show()
#计算n=40时各个time span的影响
X=[]
Y=[]
for i in range(len(MSE_ts)):
X.append(MSE_ts[i][0][1])
Y.append(MSE_ts[i][1][40])
plt.figure()
plt.plot(X,Y)
#plt.title('How the time span(s) setting affect RF MSE')
plt.xlabel('time span(s)') # x-axis label
plt.ylabel('MSE')
plt.legend()
'''
# extending the time period
time_spot_list=time_duration_perlong(start_date,end_date)
extend_mse_tracer=[]
for i in range(len(time_spot_list)+1):
try:
selected_column,labels_source,en_dic,uncode_feature_vectors,feature_vectors,labels,bike_names=be.creating_time_series_data(date_2_string(start_date),time_spot_list[i],source)
except:
selected_column,labels_source,en_dic,uncode_feature_vectors,feature_vectors,labels,bike_names=be.creating_time_series_data(date_2_string(start_date),date_2_string(end_date),source)
extend_mse_tracer.append(get_mse(feature_vectors,labels))
shift_mse_tracer=[]
for i in range(len(time_spot_list)+1):
try:
selected_column,labels_source,en_dic,uncode_feature_vectors,feature_vectors,labels,bike_names=be.creating_time_series_data(date_2_string(start_date),time_spot_list[i],source)
except:
selected_column,labels_source,en_dic,uncode_feature_vectors,feature_vectors,labels,bike_names=be.creating_time_series_data(date_2_string(start_date),date_2_string(end_date),source)
shift_mse_tracer.append(get_mse(feature_vectors,labels))
duration=[]
for i in range(len(time_spot_list)+1):
duration.append(30*(1+i))
plt.plot(duration,mse_tracer,'b',label = 'original y')
plt.title('Time period length(days) affection on MSE of random forest OOBs')
plt.xlabel('Time period length(days)') #x轴的标签
plt.ylabel('MSE of random forest OOBs')
'''
|
[
"noreply@github.com"
] |
wangmk126978.noreply@github.com
|
392bcb43413b4178823c67da1782ad5ac834ab96
|
5c9ec84d07158b2a4f3d9b35ca2989c1ab7812d7
|
/Natural Language Processing/Sentiment Scoring.py
|
ad9d3a0dfa852ab9c458bf741b9ff9e2d27ebef3
|
[] |
no_license
|
ianmilanian/code-reference
|
4951924dea07151a5a25519df956110da6324da3
|
bb362cdb3d5538ca9308b117721f0778496f7c8c
|
refs/heads/master
| 2021-06-06T05:48:24.006163
| 2020-05-08T23:57:36
| 2020-05-08T23:57:36
| 136,516,518
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,534
|
py
|
import nltk
import numpy as np
import pandas as pd  # used for the part-of-speech DataFrame below
from matplotlib import cm
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
%matplotlib inline
# Simple Sentiment Scoring (English)
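# 'tweets' is assumed to be a list of tweet strings prepared earlier in the
# notebook this snippet was exported from.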
neg_words = set(open("../data/negative.txt").read().split())
pos_words = set(open("../data/positive.txt").read().split())
tk_tweets = nltk.Text(nltk.TweetTokenizer().tokenize("".join(tweets)))
tweet_fd = nltk.FreqDist(tk_tweets)
tweet_hapaxes = tweet_fd.hapaxes()
tweet_fd.plot(15, cumulative=True)
tweet_fd.tabulate(15, cumulative=True)
print("\nHapaxes:", " ".join(tweet_hapaxes[:5]), "...")
similar_word = "trump"
print("\nWords Near {}:".format(similar_word))
tk_tweets.similar(similar_word)
print("\nCollocations:")
tk_tweets.collocations()
count = 0
for tweet in tweets:
words = tweet.split()
sentiment = tweet, len(pos_words.intersection(words)), len(neg_words.intersection(words))
print(tweet)
count += 1
if count > 20:
break
print("\nTweet Sentiment:", sentiment)
freq = 125
df_tags = pd.DataFrame(nltk.pos_tag(tk_tweets), columns=["word", "grammar"])
df_nouns = df_tags[(df_tags["grammar"] == "NN") & (df_tags["word"].str.len() > 1)]["word"].value_counts()
df_nouns = df_nouns[df_nouns > freq]
df_nouns.plot.pie(title="Tweeted Nouns With %d+ Occurrences" % freq, figsize=(12,12), colormap=cm.get_cmap('viridis'))
# Sentiment Analysis with VADER
vader = SentimentIntensityAnalyzer()
vscore = [vader.polarity_scores(tweet) for tweet in tweets]
np.mean([score["neu"] for score in vscore])
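# For reference, polarity_scores() returns a dict per tweet shaped like
# {'neg': 0.0, 'neu': 0.49, 'pos': 0.51, 'compound': 0.66}, so the mean
# above is the average neutral proportion across all tweets.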
|
[
"noreply@github.com"
] |
ianmilanian.noreply@github.com
|
0bcaa123e1cde841df3bc5bc99ca80ecd9c05025
|
cb476c5fdf7e7da1abb4229635a79ea88f465399
|
/cookieOper/cookieLogin.py
|
ee24c3ae2a9b7da7402120ccceb5c1dda28c1144
|
[] |
no_license
|
ZHAISHENKING/spiderapp
|
cb66f68f3ec40f027d8cdbdd1ec357f25c668cda
|
51b09dd98e64e6d0612a313ff6c90f6fbd0f63dc
|
refs/heads/master
| 2022-12-15T00:30:44.168143
| 2018-07-17T13:42:57
| 2018-07-17T13:42:57
| 136,556,921
| 0
| 0
| null | 2022-07-06T19:49:39
| 2018-06-08T02:42:16
|
Python
|
UTF-8
|
Python
| false
| false
| 653
|
py
|
from http import cookiejar
import requests
from urllib import request
url = 'http://zskin.xin/admin/'
headers = {
'user-agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36',
}
def save_cookie():
cookie = cookiejar.MozillaCookieJar('cookie.txt')
cookie_processor = request.HTTPCookieProcessor(cookie)
opener = request.build_opener(cookie_processor)
response = opener.open(url)
cookie.save()
def cookie_login():
cookie = cookiejar.MozillaCookieJar()
cookie.load('cookie.txt')
r = requests.get(url, cookies=cookie)
print(r.content)
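# A minimal driver for the two helpers above (assumes cookie.txt can be
# created in the working directory):
if __name__ == '__main__':
    save_cookie()   # fetch the page once and persist cookies to cookie.txt
    cookie_login()  # reuse the saved cookies for the next request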
|
[
"18700790825@163.com"
] |
18700790825@163.com
|
8c6ca185f0f3ad11b5cca0a9957aed3a5b5a0c8f
|
e708f81011d4a09a8ad1ee85f0c1872e1634f35c
|
/Testing/Python/EventCollectionTest.py
|
a03a6dcb3a2fd28e6f27fe2b2d67a8782b7f9d1e
|
[
"BSD-3-Clause"
] |
permissive
|
allardhoeve/BTKCore
|
ef26f818acf06780c3e57147097f65f2d72c1c27
|
f6805699fe37f64ac6434a92736c6b59f0ea148d
|
refs/heads/master
| 2020-04-27T07:58:51.948297
| 2019-03-28T11:03:41
| 2019-03-28T11:03:41
| 174,155,043
| 0
| 1
|
NOASSERTION
| 2019-03-06T14:27:02
| 2019-03-06T13:55:59
|
C++
|
UTF-8
|
Python
| false
| false
| 20,173
|
py
|
import btk
import unittest
class EventCollectionTest(unittest.TestCase):
def test_default_ctor(self):
ec = btk.btkEventCollection()
self.assertEqual(ec.GetItemNumber(), 0)
def test_item_number(self):
ec = btk.btkEventCollection()
ec.SetItemNumber(5)
self.assertEqual(ec.GetItemNumber(), 5)
def test_insert(self):
ec = btk.btkEventCollection()
ec.InsertItem(btk.btkEvent('FOO', 1.52, 'Right', btk.btkEvent.Manual, 'Bar', '', 1))
self.assertEqual(ec.GetItemNumber(), 1)
e1 = ec.GetItem(0)
self.assertEqual(e1.GetLabel(), 'FOO')
self.assertEqual(e1.GetTime(), 1.52)
self.assertEqual(e1.GetFrame(), -1)
self.assertEqual(e1.GetContext(), 'Right')
self.assertEqual(e1.GetDetectionFlags(), btk.btkEvent.Manual)
self.assertEqual(e1.GetSubject(), 'Bar')
self.assertEqual(e1.GetDescription(), '')
self.assertEqual(e1.GetId(), 1)
e2 = btk.btkEvent('BAR', 2.03, 'Left', btk.btkEvent.Automatic, 'Foo', '', 2)
ec.InsertItem(e2)
        e2_ = ec.GetItem(1)
self.assertEqual(e2.GetLabel(), e2_.GetLabel())
self.assertEqual(e2.GetTime(), e2_.GetTime())
self.assertEqual(e2.GetFrame(), e2_.GetFrame())
self.assertEqual(e2.GetContext(), e2_.GetContext())
self.assertEqual(e2.GetDetectionFlags(), e2_.GetDetectionFlags())
self.assertEqual(e2.GetSubject(), e2_.GetSubject())
self.assertEqual(e2.GetDescription(), e2_.GetDescription())
self.assertEqual(e2.GetId(), e2_.GetId())
e2.SetLabel('Unknown')
self.assertEqual(e2.GetLabel(), e2_.GetLabel())
self.assertEqual(e2.GetTime(), e2_.GetTime())
self.assertEqual(e2.GetFrame(), e2_.GetFrame())
self.assertEqual(e2.GetContext(), e2_.GetContext())
self.assertEqual(e2.GetDetectionFlags(), e2_.GetDetectionFlags())
self.assertEqual(e2.GetSubject(), e2_.GetSubject())
self.assertEqual(e2.GetDescription(), e2_.GetDescription())
self.assertEqual(e2.GetId(), e2_.GetId())
e2_.SetTime(1.45)
self.assertEqual(e2.GetLabel(), e2_.GetLabel())
self.assertEqual(e2.GetTime(), e2_.GetTime())
self.assertEqual(e2.GetFrame(), e2_.GetFrame())
self.assertEqual(e2.GetContext(), e2_.GetContext())
self.assertEqual(e2.GetDetectionFlags(), e2_.GetDetectionFlags())
self.assertEqual(e2.GetSubject(), e2_.GetSubject())
self.assertEqual(e2.GetDescription(), e2_.GetDescription())
self.assertEqual(e2.GetId(), e2_.GetId())
def test_insert_index_first(self):
ec = btk.btkEventCollection()
ec.InsertItem(0, btk.btkEvent('FOO', 1.52, 'Right', btk.btkEvent.Manual, 'Bar', '', 1))
ec.InsertItem(0, btk.btkEvent('BAR', 2.03, 'Left', btk.btkEvent.Automatic, 'Foo', '', 2))
e2 = ec.GetItem(0)
self.assertEqual(e2.GetLabel(), 'BAR')
self.assertEqual(e2.GetTime(), 2.03)
self.assertEqual(e2.GetFrame(), -1)
self.assertEqual(e2.GetContext(), 'Left')
self.assertEqual(e2.GetDetectionFlags(), btk.btkEvent.Automatic)
self.assertEqual(e2.GetSubject(), 'Foo')
self.assertEqual(e2.GetDescription(), '')
self.assertEqual(e2.GetId(), 2)
e1 = ec.GetItem(1)
self.assertEqual(e1.GetLabel(), 'FOO')
self.assertEqual(e1.GetTime(), 1.52)
self.assertEqual(e1.GetFrame(), -1)
self.assertEqual(e1.GetContext(), 'Right')
self.assertEqual(e1.GetDetectionFlags(), btk.btkEvent.Manual)
self.assertEqual(e1.GetSubject(), 'Bar')
self.assertEqual(e1.GetDescription(), '')
self.assertEqual(e1.GetId(), 1)
def test_insert_index_last(self):
ec = btk.btkEventCollection()
ec.InsertItem(0, btk.btkEvent('FOO', 1.52, 'Right', btk.btkEvent.Manual, 'Bar', '', 1))
ec.InsertItem(1, btk.btkEvent('BAR', 2.03, 'Left', btk.btkEvent.Automatic, 'Foo', '', 2))
e2 = ec.GetItem(1)
self.assertEqual(e2.GetLabel(), 'BAR')
self.assertEqual(e2.GetTime(), 2.03)
self.assertEqual(e2.GetFrame(), -1)
self.assertEqual(e2.GetContext(), 'Left')
self.assertEqual(e2.GetDetectionFlags(), btk.btkEvent.Automatic)
self.assertEqual(e2.GetSubject(), 'Foo')
self.assertEqual(e2.GetDescription(), '')
self.assertEqual(e2.GetId(), 2)
e1 = ec.GetItem(0)
self.assertEqual(e1.GetLabel(), 'FOO')
self.assertEqual(e1.GetTime(), 1.52)
self.assertEqual(e1.GetFrame(), -1)
self.assertEqual(e1.GetContext(), 'Right')
self.assertEqual(e1.GetDetectionFlags(), btk.btkEvent.Manual)
self.assertEqual(e1.GetSubject(), 'Bar')
self.assertEqual(e1.GetDescription(), '')
self.assertEqual(e1.GetId(), 1)
def test_insert_index_middle(self):
ec = btk.btkEventCollection()
ec.InsertItem(btk.btkEvent('FOO', 1.52, 'Right', btk.btkEvent.Manual, 'Bar', '', 1))
ec.InsertItem(btk.btkEvent('BAR', 2.03, 'Left', btk.btkEvent.Automatic, 'Foo', '', 2))
ec.InsertItem(1, btk.btkEvent('FOOBAR', 5.05, 'General', btk.btkEvent.Automatic, 'FooBar', '', 3))
self.assertEqual(ec.GetItemNumber(), 3)
e = ec.GetItem(1)
self.assertEqual(e.GetLabel(), 'FOOBAR')
self.assertEqual(e.GetTime(), 5.05)
self.assertEqual(e.GetFrame(), -1)
self.assertEqual(e.GetContext(), 'General')
self.assertEqual(e.GetDetectionFlags(), btk.btkEvent.Automatic)
self.assertEqual(e.GetSubject(), 'FooBar')
self.assertEqual(e.GetDescription(), '')
self.assertEqual(e.GetId(), 3)
def test_take(self):
ec = btk.btkEventCollection()
ec.InsertItem(btk.btkEvent('FOO', 1.52, 'Right', btk.btkEvent.Manual, 'Bar', '', 1))
ec.InsertItem(btk.btkEvent('BAR', 2.03, 'Left', btk.btkEvent.Automatic, 'Foo', '', 2))
self.assertEqual(ec.GetItemNumber(), 2)
e1 = ec.TakeItem(0)
self.assertEqual(ec.GetItemNumber(), 1)
self.assertEqual(e1.GetLabel(), 'FOO')
self.assertEqual(e1.GetTime(), 1.52)
self.assertEqual(e1.GetFrame(), -1)
self.assertEqual(e1.GetContext(), 'Right')
self.assertEqual(e1.GetDetectionFlags(), btk.btkEvent.Manual)
self.assertEqual(e1.GetSubject(), 'Bar')
self.assertEqual(e1.GetDescription(), '')
self.assertEqual(e1.GetId(), 1)
e2 = ec.GetItem(0)
self.assertEqual(e2.GetLabel(), 'BAR')
self.assertEqual(e2.GetTime(), 2.03)
self.assertEqual(e2.GetFrame(), -1)
self.assertEqual(e2.GetContext(), 'Left')
self.assertEqual(e2.GetDetectionFlags(), btk.btkEvent.Automatic)
self.assertEqual(e2.GetSubject(), 'Foo')
self.assertEqual(e2.GetDescription(), '')
self.assertEqual(e2.GetId(), 2)
ec.Clear()
self.assertEqual(ec.GetItemNumber(), 0)
def test_iterator(self):
ec = btk.btkEventCollection()
ec.SetItemNumber(5)
for i in range(0,5):
e = btk.btkEvent()
e.SetId(i)
ec.SetItem(i, e)
inc = 0
i = ec.Begin()
while i != ec.End():
self.assertEqual(i.value().GetId(), inc)
inc = inc + 1
i.incr()
inc = 0
i = ec.Begin()
while i != ec.End():
i.value().SetId(inc * 10)
self.assertEqual(i.value().GetId(), inc * 10)
inc = inc + 1
i.incr()
def test_iterator_insert_begin(self):
ec = btk.btkEventCollection()
ec.InsertItem(ec.Begin(), btk.btkEvent('FOO', 1.52, 'Right', btk.btkEvent.Manual, 'Bar', '', 1))
ec.InsertItem(ec.Begin(), btk.btkEvent('BAR', 2.03, 'Left', btk.btkEvent.Automatic, 'Foo', '', 2))
e2 = ec.GetItem(0)
self.assertEqual(e2.GetLabel(), 'BAR')
self.assertEqual(e2.GetTime(), 2.03)
self.assertEqual(e2.GetFrame(), -1)
self.assertEqual(e2.GetContext(), 'Left')
self.assertEqual(e2.GetDetectionFlags(), btk.btkEvent.Automatic)
self.assertEqual(e2.GetSubject(), 'Foo')
self.assertEqual(e2.GetDescription(), '')
self.assertEqual(e2.GetId(), 2)
e1 = ec.GetItem(1)
self.assertEqual(e1.GetLabel(), 'FOO')
self.assertEqual(e1.GetTime(), 1.52)
self.assertEqual(e1.GetFrame(), -1)
self.assertEqual(e1.GetContext(), 'Right')
self.assertEqual(e1.GetDetectionFlags(), btk.btkEvent.Manual)
self.assertEqual(e1.GetSubject(), 'Bar')
self.assertEqual(e1.GetDescription(), '')
self.assertEqual(e1.GetId(), 1)
def test_iterator_insert_end(self):
ec = btk.btkEventCollection()
ec.InsertItem(ec.End(), btk.btkEvent('FOO', 1.52, 'Right', btk.btkEvent.Manual, 'Bar', '', 1))
ec.InsertItem(ec.End(), btk.btkEvent('BAR', 2.03, 'Left', btk.btkEvent.Automatic, 'Foo', '', 2))
e2 = ec.GetItem(1)
self.assertEqual(e2.GetLabel(), 'BAR')
self.assertEqual(e2.GetTime(), 2.03)
self.assertEqual(e2.GetFrame(), -1)
self.assertEqual(e2.GetContext(), 'Left')
self.assertEqual(e2.GetDetectionFlags(), btk.btkEvent.Automatic)
self.assertEqual(e2.GetSubject(), 'Foo')
self.assertEqual(e2.GetDescription(), '')
self.assertEqual(e2.GetId(), 2)
e1 = ec.GetItem(0)
self.assertEqual(e1.GetLabel(), 'FOO')
self.assertEqual(e1.GetTime(), 1.52)
self.assertEqual(e1.GetFrame(), -1)
self.assertEqual(e1.GetContext(), 'Right')
self.assertEqual(e1.GetDetectionFlags(), btk.btkEvent.Manual)
self.assertEqual(e1.GetSubject(), 'Bar')
self.assertEqual(e1.GetDescription(), '')
self.assertEqual(e1.GetId(), 1)
def test_iterator_insert_middle(self):
ec = btk.btkEventCollection()
ec.InsertItem(btk.btkEvent('FOO', 1.52, 'Right', btk.btkEvent.Manual, 'Bar', '', 1))
ec.InsertItem(btk.btkEvent('BAR', 2.03, 'Left', btk.btkEvent.Automatic, 'Foo', '', 2))
i = ec.Begin()
i.incr()
ec.InsertItem(i, btk.btkEvent('FOOBAR', 5.05, 'General', btk.btkEvent.Automatic, 'FooBar', '', 3))
self.assertEqual(ec.GetItemNumber(), 3)
e = ec.GetItem(1)
self.assertEqual(e.GetLabel(), 'FOOBAR')
self.assertEqual(e.GetTime(), 5.05)
self.assertEqual(e.GetFrame(), -1)
self.assertEqual(e.GetContext(), 'General')
self.assertEqual(e.GetDetectionFlags(), btk.btkEvent.Automatic)
self.assertEqual(e.GetSubject(), 'FooBar')
self.assertEqual(e.GetDescription(), '')
self.assertEqual(e.GetId(), 3)
def test_iterator_remove_begin(self):
ec = btk.btkEventCollection()
ec.InsertItem(btk.btkEvent('FOO', 1.52, 'Right', btk.btkEvent.Manual, 'Bar', '', 1))
ec.InsertItem(btk.btkEvent('BAR', 2.03, 'Left', btk.btkEvent.Automatic, 'Foo', '', 2))
ec.InsertItem(btk.btkEvent('FOOBAR', 5.05, 'General', btk.btkEvent.Automatic, 'FooBar', '', 3))
self.assertEqual(ec.GetItemNumber(), 3)
ec.RemoveItem(ec.Begin())
self.assertEqual(ec.GetItemNumber(), 2)
e2 = ec.GetItem(0)
self.assertEqual(e2.GetLabel(), 'BAR')
self.assertEqual(e2.GetTime(), 2.03)
self.assertEqual(e2.GetFrame(), -1)
self.assertEqual(e2.GetContext(), 'Left')
self.assertEqual(e2.GetDetectionFlags(), btk.btkEvent.Automatic)
self.assertEqual(e2.GetSubject(), 'Foo')
self.assertEqual(e2.GetDescription(), '')
self.assertEqual(e2.GetId(), 2)
e3 = ec.GetItem(1)
self.assertEqual(e3.GetLabel(), 'FOOBAR')
self.assertEqual(e3.GetTime(), 5.05)
self.assertEqual(e3.GetFrame(), -1)
self.assertEqual(e3.GetContext(), 'General')
self.assertEqual(e3.GetDetectionFlags(), btk.btkEvent.Automatic)
self.assertEqual(e3.GetSubject(), 'FooBar')
self.assertEqual(e3.GetDescription(), '')
self.assertEqual(e3.GetId(), 3)
def test_iterator_remove_end(self):
ec = btk.btkEventCollection()
ec.InsertItem(btk.btkEvent('FOO', 1.52, 'Right', btk.btkEvent.Manual, 'Bar', '', 1))
ec.InsertItem(btk.btkEvent('BAR', 2.03, 'Left', btk.btkEvent.Automatic, 'Foo', '', 2))
ec.InsertItem(btk.btkEvent('FOOBAR', 5.05, 'General', btk.btkEvent.Automatic, 'FooBar', '', 3))
self.assertEqual(ec.GetItemNumber(), 3)
i = ec.End()
i.decr()
ec.RemoveItem(i)
self.assertEqual(ec.GetItemNumber(), 2)
e1 = ec.GetItem(0)
self.assertEqual(e1.GetLabel(), 'FOO')
self.assertEqual(e1.GetTime(), 1.52)
self.assertEqual(e1.GetFrame(), -1)
self.assertEqual(e1.GetContext(), 'Right')
self.assertEqual(e1.GetDetectionFlags(), btk.btkEvent.Manual)
self.assertEqual(e1.GetSubject(), 'Bar')
self.assertEqual(e1.GetDescription(), '')
self.assertEqual(e1.GetId(), 1)
e2 = ec.GetItem(1)
self.assertEqual(e2.GetLabel(), 'BAR')
self.assertEqual(e2.GetTime(), 2.03)
self.assertEqual(e2.GetFrame(), -1)
self.assertEqual(e2.GetContext(), 'Left')
self.assertEqual(e2.GetDetectionFlags(), btk.btkEvent.Automatic)
self.assertEqual(e2.GetSubject(), 'Foo')
self.assertEqual(e2.GetDescription(), '')
self.assertEqual(e2.GetId(), 2)
def test_iterator_remove_middle(self):
ec = btk.btkEventCollection()
ec.InsertItem(btk.btkEvent('FOO', 1.52, 'Right', btk.btkEvent.Manual, 'Bar', '', 1))
ec.InsertItem(btk.btkEvent('BAR', 2.03, 'Left', btk.btkEvent.Automatic, 'Foo', '', 2))
ec.InsertItem(btk.btkEvent('FOOBAR', 5.05, 'General', btk.btkEvent.Automatic, 'FooBar', '', 3))
i = ec.Begin()
i.incr()
ec.RemoveItem(i)
self.assertEqual(ec.GetItemNumber(), 2)
e = ec.GetItem(1)
self.assertEqual(e.GetLabel(), 'FOOBAR')
self.assertEqual(e.GetTime(), 5.05)
self.assertEqual(e.GetFrame(), -1)
self.assertEqual(e.GetContext(), 'General')
self.assertEqual(e.GetDetectionFlags(), btk.btkEvent.Automatic)
self.assertEqual(e.GetSubject(), 'FooBar')
self.assertEqual(e.GetDescription(), '')
self.assertEqual(e.GetId(), 3)
def test_iterator_remove_clear(self):
ec = btk.btkEventCollection()
ec.SetItemNumber(5)
for i in range(0,5):
e = btk.btkEvent()
e.SetId(i)
ec.SetItem(i, e)
i = ec.Begin()
while i != ec.End():
i = ec.RemoveItem(i)
self.assertEqual(ec.GetItemNumber(), 0)
def test_iterator_take_begin(self):
ec = btk.btkEventCollection()
ec.InsertItem(btk.btkEvent('FOO', 1.52, 'Right', btk.btkEvent.Manual, 'Bar', '', 1))
ec.InsertItem(btk.btkEvent('BAR', 2.03, 'Left', btk.btkEvent.Automatic, 'Foo', '', 2))
ec.InsertItem(btk.btkEvent('FOOBAR', 5.05, 'General', btk.btkEvent.Automatic, 'FooBar', '', 3))
self.assertEqual(ec.GetItemNumber(), 3)
e1 = ec.TakeItem(ec.Begin())
self.assertEqual(ec.GetItemNumber(), 2)
self.assertEqual(e1.GetLabel(), 'FOO')
self.assertEqual(e1.GetTime(), 1.52)
self.assertEqual(e1.GetFrame(), -1)
self.assertEqual(e1.GetContext(), 'Right')
self.assertEqual(e1.GetDetectionFlags(), btk.btkEvent.Manual)
self.assertEqual(e1.GetSubject(), 'Bar')
self.assertEqual(e1.GetDescription(), '')
self.assertEqual(e1.GetId(), 1)
e2 = ec.GetItem(0)
self.assertEqual(e2.GetLabel(), 'BAR')
self.assertEqual(e2.GetTime(), 2.03)
self.assertEqual(e2.GetFrame(), -1)
self.assertEqual(e2.GetContext(), 'Left')
self.assertEqual(e2.GetDetectionFlags(), btk.btkEvent.Automatic)
self.assertEqual(e2.GetSubject(), 'Foo')
self.assertEqual(e2.GetDescription(), '')
self.assertEqual(e2.GetId(), 2)
e3 = ec.GetItem(1)
self.assertEqual(e3.GetLabel(), 'FOOBAR')
self.assertEqual(e3.GetTime(), 5.05)
self.assertEqual(e3.GetFrame(), -1)
self.assertEqual(e3.GetContext(), 'General')
self.assertEqual(e3.GetDetectionFlags(), btk.btkEvent.Automatic)
self.assertEqual(e3.GetSubject(), 'FooBar')
self.assertEqual(e3.GetDescription(), '')
self.assertEqual(e3.GetId(), 3)
def test_iterator_take_end(self):
ec = btk.btkEventCollection()
ec.InsertItem(btk.btkEvent('FOO', 1.52, 'Right', btk.btkEvent.Manual, 'Bar', '', 1))
ec.InsertItem(btk.btkEvent('BAR', 2.03, 'Left', btk.btkEvent.Automatic, 'Foo', '', 2))
ec.InsertItem(btk.btkEvent('FOOBAR', 5.05, 'General', btk.btkEvent.Automatic, 'FooBar', '', 3))
self.assertEqual(ec.GetItemNumber(), 3)
i = ec.End(); i.decr()
e3 = ec.TakeItem(i)
e1 = ec.GetItem(0)
self.assertEqual(ec.GetItemNumber(), 2)
self.assertEqual(e1.GetLabel(), 'FOO')
self.assertEqual(e1.GetTime(), 1.52)
self.assertEqual(e1.GetFrame(), -1)
self.assertEqual(e1.GetContext(), 'Right')
self.assertEqual(e1.GetDetectionFlags(), btk.btkEvent.Manual)
self.assertEqual(e1.GetSubject(), 'Bar')
self.assertEqual(e1.GetDescription(), '')
self.assertEqual(e1.GetId(), 1)
e2 = ec.GetItem(1)
self.assertEqual(e2.GetLabel(), 'BAR')
self.assertEqual(e2.GetTime(), 2.03)
self.assertEqual(e2.GetFrame(), -1)
self.assertEqual(e2.GetContext(), 'Left')
self.assertEqual(e2.GetDetectionFlags(), btk.btkEvent.Automatic)
self.assertEqual(e2.GetSubject(), 'Foo')
self.assertEqual(e2.GetDescription(), '')
self.assertEqual(e2.GetId(), 2)
# e3
self.assertEqual(e3.GetLabel(), 'FOOBAR')
self.assertEqual(e3.GetTime(), 5.05)
self.assertEqual(e3.GetFrame(), -1)
self.assertEqual(e3.GetContext(), 'General')
self.assertEqual(e3.GetDetectionFlags(), btk.btkEvent.Automatic)
self.assertEqual(e3.GetSubject(), 'FooBar')
self.assertEqual(e3.GetDescription(), '')
self.assertEqual(e3.GetId(), 3)
def test_iterator_take_middle(self):
ec = btk.btkEventCollection()
ec.InsertItem(btk.btkEvent('FOO', 1.52, 'Right', btk.btkEvent.Manual, 'Bar', '', 1))
ec.InsertItem(btk.btkEvent('BAR', 2.03, 'Left', btk.btkEvent.Automatic, 'Foo', '', 2))
ec.InsertItem(btk.btkEvent('FOOBAR', 5.05, 'General', btk.btkEvent.Automatic, 'FooBar', '', 3))
self.assertEqual(ec.GetItemNumber(), 3)
i = ec.Begin(); i.incr()
e2 = ec.TakeItem(i)
e1 = ec.GetItem(0)
self.assertEqual(ec.GetItemNumber(), 2)
self.assertEqual(e1.GetLabel(), 'FOO')
self.assertEqual(e1.GetTime(), 1.52)
self.assertEqual(e1.GetFrame(), -1)
self.assertEqual(e1.GetContext(), 'Right')
self.assertEqual(e1.GetDetectionFlags(), btk.btkEvent.Manual)
self.assertEqual(e1.GetSubject(), 'Bar')
self.assertEqual(e1.GetDescription(), '')
self.assertEqual(e1.GetId(), 1)
# e2
self.assertEqual(e2.GetLabel(), 'BAR')
self.assertEqual(e2.GetTime(), 2.03)
self.assertEqual(e2.GetFrame(), -1)
self.assertEqual(e2.GetContext(), 'Left')
self.assertEqual(e2.GetDetectionFlags(), btk.btkEvent.Automatic)
self.assertEqual(e2.GetSubject(), 'Foo')
self.assertEqual(e2.GetDescription(), '')
self.assertEqual(e2.GetId(), 2)
e3 = ec.GetItem(1)
self.assertEqual(e3.GetLabel(), 'FOOBAR')
self.assertEqual(e3.GetTime(), 5.05)
self.assertEqual(e3.GetFrame(), -1)
self.assertEqual(e3.GetContext(), 'General')
self.assertEqual(e3.GetDetectionFlags(), btk.btkEvent.Automatic)
self.assertEqual(e3.GetSubject(), 'FooBar')
self.assertEqual(e3.GetDescription(), '')
self.assertEqual(e3.GetId(), 3)
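if __name__ == '__main__':
    # Added guard so the suite can also be run standalone; the BTK build
    # normally drives these tests through its own harness.
    unittest.main()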
|
[
"arnaud.barre@93be6e1e-6fcc-11de-baec-69240fdc299a"
] |
arnaud.barre@93be6e1e-6fcc-11de-baec-69240fdc299a
|
3baf54842e0c5280872ddaec253b4c17be062884
|
8e08f73e10cf82a127c2dfe119fc74fd5a339d08
|
/presentation/images/src/results.py
|
442fa00470985faca5bf3ea0d93c7ea83b2c243b
|
[] |
no_license
|
ghaith-khlifi/master-thesis
|
39215489797a270edde348563856a06fee66626b
|
d4af74584afdf4de1f03a1b36bc9ae697ffc8380
|
refs/heads/master
| 2021-07-08T06:19:20.354057
| 2017-10-04T14:21:53
| 2017-10-04T14:21:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,948
|
py
|
import numpy as np
import matplotlib.pyplot as plt

algs = ('Random', 'Ours', 'DQN')
y_pos = np.arange(len(algs))
colors = ['#1f77b4', '#203651', '#65829b']
# Max score
score = [1, 17, 319]
plt.subplot(2, 3, 1)
plt.bar(y_pos, score, align='center', color=colors)
plt.xticks(y_pos, algs)
plt.title('Max score (Breakout)')
plt.ylabel('Score')
score = np.array([-20, -9, 19]) + 21
plt.subplot(2, 3, 2)
plt.bar(y_pos, score, align='center', color=colors)
plt.xticks(y_pos, algs)
plt.title('Max score (Pong)')
plt.ylabel('Score')
score = [174, 469, 1125]
plt.subplot(2, 3, 3)
plt.bar(y_pos, score, align='center', color=colors)
plt.xticks(y_pos, algs)
plt.title('Max score (Space Invaders)')
plt.ylabel('Score')
# Limited score
score = [1, 17, 2]
plt.subplot(2, 3, 4)
plt.bar(y_pos, score, align='center', color=colors)
plt.xticks(y_pos, algs)
plt.title('Score @ 100k samples (Breakout)')
plt.ylabel('Score')
score = np.array([-20, -9, -20]) + 21
plt.subplot(2, 3, 5)
plt.bar(y_pos, score, align='center', color=colors)
plt.xticks(y_pos, algs)
plt.title('Score @ 100k samples (Pong)')
plt.ylabel('Score')
score = [174, 469, 145]
plt.subplot(2, 3, 6)
plt.bar(y_pos, score, align='center', color=colors)
plt.xticks(y_pos, algs)
plt.title('Score @ 100k samples (Space Invaders)')
plt.ylabel('Score')
################################################################################
plt.figure()
algs = ('Ours', 'DQN')
y_pos = np.arange(len(algs))
colors = ['#203651', '#65829b']
# Log samples
score = np.array([100000, 3500000])
plt.subplot(2, 3, 1)
plt.bar(y_pos, score, align='center', color=colors, log=True)
plt.xticks(y_pos, algs)
plt.title('Samples @ our best score (Breakout)')
plt.ylabel('Number of samples')
score = np.array([100000, 5000000])
plt.subplot(2, 3, 2)
plt.bar(y_pos, score, align='center', color=colors, log=True)
plt.xticks(y_pos, algs)
plt.title('Samples @ our best score (Pong)')
plt.ylabel('Number of samples')
score = np.array([100000, 4750000])
plt.subplot(2, 3, 3)
plt.bar(y_pos, score, align='center', color=colors, log=True)
plt.xticks(y_pos, algs)
plt.title('Samples @ our best score (Space Invaders)')
plt.ylabel('Number of samples')
# Efficiency
np.set_printoptions(suppress=True)
score = np.array([17/100000., 319/19250000.])
print(score)
plt.subplot(2, 3, 4)
plt.bar(y_pos, score, align='center', color=colors, log=True)
plt.xticks(y_pos, algs)
plt.title('Efficiency (Breakout)')
plt.ylabel('Reward per sample')
score = np.array([12/100000., 40/35250000.])
print(score)
plt.subplot(2, 3, 5)
plt.bar(y_pos, score, align='center', color=colors, log=True)
plt.xticks(y_pos, algs)
plt.title('Efficiency (Pong)')
plt.ylabel('Reward per sample')
score = np.array([460/100000., 1125/31500000.])
print(score)
plt.subplot(2, 3, 6)
plt.bar(y_pos, score, align='center', color=colors, log=True)
plt.xticks(y_pos, algs)
plt.title('Efficiency (Space Invaders)')
plt.ylabel('Reward per sample')
################################################################################
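# Render the assembled figures when run as a script (assumes an interactive
# matplotlib backend; use plt.savefig(...) instead for headless runs).
plt.show()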
|
[
"daniele.grattarola@gmail.com"
] |
daniele.grattarola@gmail.com
|
bdf69feb2d88899d24b253a4cf5590cdf2087959
|
cf430de98028e8dccd65f7e8f4b538e4cf1b253f
|
/textdisplay.py
|
50cade259b09216f8286204ccfd2dea305fc7e58
|
[] |
no_license
|
malaika-chandler/triple-triad-python
|
1f45f9c144c5a1b2bc6eabce85515eaa278168c7
|
1576a8eb1de3174dfff73dba954bf9f498775661
|
refs/heads/master
| 2022-11-20T00:49:02.981046
| 2020-07-20T20:35:36
| 2020-07-20T20:35:36
| 263,379,952
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,992
|
py
|
import colorama # Fore, Back, Style
from components import Direction, Element
class TripleTriadColors:
COLOR_RESET = colorama.Style.RESET_ALL
AGENT_COLORS = [
colorama.Fore.LIGHTBLUE_EX,
colorama.Fore.LIGHTMAGENTA_EX
]
COLOR_NEGATIVE = colorama.Fore.RED
COLOR_POSITIVE = colorama.Fore.GREEN
class TripleTriadGraphics:
def __init__(self):
colorama.init()
self.colors = TripleTriadColors
def display_game_state(self, state):
non_turn_agent = [agent for agent in state.get_agents() if not agent.index == state.get_current_player().index]
non_turn_agent = non_turn_agent[0]
# Draw agents' hands and game board based on state
self.draw_cards(non_turn_agent.hand, non_turn_agent.index)
self.display_score(non_turn_agent)
self.draw_game_board(state.get_game_board())
self.display_score(state.get_current_player())
self.draw_cards(state.get_current_player().hand, state.get_current_player().index)
def display_end_game(self, state):
self.draw_game_board(state.get_game_board())
for agent in state.get_agents():
self.display_score(agent)
if state.winner:
print(state.winner.name + " wins!")
else:
print("Draw")
def draw_game_board(self, game_board):
height = game_board.height
for row_index in range(height):
row = game_board.get_row(row_index)
row_to_draw = []
individual_grid_representations = []
for i, place in enumerate(row):
representation = []
if place.has_card:
# Create the entire card as a list
placed_card = place.placed_card
color = self.colors.AGENT_COLORS[place.owner.index]
representation.append(color + ' ▁▁▁▁▁ ' + self.colors.COLOR_RESET)
representation.append(color + '| {} {}|'.format(
placed_card.get_rank(Direction.TOP, as_string=True),
self.get_elemental_result(place) + color) + self.colors.COLOR_RESET),
representation.append(color + '| {}{}{} |'.format(
placed_card.get_rank(Direction.LEFT, as_string=True),
TripleTriadGraphics._get_element_char(placed_card.element) or ' ',
placed_card.get_rank(Direction.RIGHT, as_string=True)) + self.colors.COLOR_RESET)
representation.append(color + '| {} |'.format(
placed_card.get_rank(Direction.BOTTOM, as_string=True)) + self.colors.COLOR_RESET)
representation.append(color + ' ▔▔▔▔▔ ' + self.colors.COLOR_RESET)
else:
# Create the entire grid space as a list
x, y = place.get_coordinates()
representation.append(' ▁▁▁▁▁ ')
representation.append('| {} |'.format(
TripleTriadGraphics._get_element_char(place.element) or ' '))
representation.append('| {},{} |'.format(x, y))
representation.append('| |')
representation.append(' ▔▔▔▔▔ ')
individual_grid_representations.append(representation)
# Get each first list entry, concat, and append to row object
count_representations = len(individual_grid_representations)
for line_index in range(len(individual_grid_representations[0])):
row_to_draw.append(' '.join(
individual_grid_representations[i][line_index] for i in range(count_representations)
))
print('\n'.join(row_to_draw), sep='\n')
def get_elemental_result(self, place):
if place.has_card:
if place.has_elemental_conflict():
return self.colors.COLOR_NEGATIVE + '↓' + self.colors.COLOR_RESET
elif place.has_elemental_agreement():
return self.colors.COLOR_POSITIVE + '↑' + self.colors.COLOR_RESET
return ' '
def draw_cards(self, cards, agent_index):
# Horizontally
top_edge = ' '.join([' ▁▁{}▁▁'.format(i + 1) for i in range(len(cards))])
top_rank = ' '.join(['| {} |'.format(card.get_rank(Direction.TOP, as_string=True)) for card in cards])
middle_ranks = ' '.join(['| {}{}{} |'.format(
card.get_rank(Direction.LEFT, as_string=True),
TripleTriadGraphics._get_element_char(card.element) or ' ',
card.get_rank(Direction.RIGHT, as_string=True)) for card in cards])
bottom_rank = ' '.join(['| {} |'.format(card.get_rank(Direction.BOTTOM, as_string=True)) for card in cards])
bottom_edge = ' '.join([' ▔▔▔▔▔' for _ in cards])
print(self.colors.AGENT_COLORS[agent_index], top_edge, top_rank,
middle_ranks, bottom_rank, bottom_edge, self.colors.COLOR_RESET, sep="\n")
def display_score(self, agent):
print('Score for {}{}{}: {}'.format(self.colors.AGENT_COLORS[agent.index], agent.name,
self.colors.COLOR_RESET, agent.score))
@staticmethod
def _get_element_char(element):
if element == Element.FIRE:
return '♨' # '🔥'
elif element == Element.EARTH:
return '☄' # '🌏'
elif element == Element.ICE:
return '❄' # '❄️'
elif element == Element.THUNDER:
return '⌁︎' # '⚡'
elif element == Element.HOLY:
return '✟' # '✨'
elif element == Element.POISON:
return '☠'
elif element == Element.WIND:
return '᭝' # '🌪'
elif element == Element.WATER:
return '~' # '☔︎' '💧'
else:
return ''
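# A sketch of intended use; 'state' comes from the game engine built on the
# 'components' module, which lives outside this file:
#
#     graphics = TripleTriadGraphics()
#     graphics.display_game_state(state)   # during play
#     graphics.display_end_game(state)     # once the board is full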
|
[
"malaika.r.chandler@gmail.com"
] |
malaika.r.chandler@gmail.com
|
e3849b5f37505cdd670245897a12b763f7587eba
|
b40d1a26ea04a19ec0da7bf55db84b7ee36cc898
|
/leetcode.com/python/209_Minimum_Size_Subarray_Sum.py
|
3a443b920ac3185bc68b968f19be59136835ae78
|
[
"MIT"
] |
permissive
|
partho-maple/coding-interview-gym
|
5e8af7d404c28d4b9b52e5cffc540fd51d8025cf
|
20ae1a048eddbc9a32c819cf61258e2b57572f05
|
refs/heads/master
| 2022-09-11T16:36:01.702626
| 2022-03-14T08:39:47
| 2022-03-14T08:39:47
| 69,802,909
| 862
| 438
|
MIT
| 2022-08-18T06:42:46
| 2016-10-02T14:51:31
|
Python
|
UTF-8
|
Python
| false
| false
| 588
|
py
|
# Approach 1: Sliding window. O(n)
class Solution(object):
def minSubArrayLen(self, s, nums):
"""
:type s: int
:type nums: List[int]
:rtype: int
"""
left, right, length = 0, 0, float("inf")
sLen = len(nums)
currentSum = 0
while right < sLen:
currentSum += nums[right]
right += 1
while currentSum >= s:
length = min(length, right - left)
currentSum -= nums[left]
left += 1
return length if length != float("inf") else 0
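# Quick sanity check: the shortest subarray of [2,3,1,2,4,3] with sum >= 7
# is [4,3], so the expected length is 2.
if __name__ == '__main__':
    assert Solution().minSubArrayLen(7, [2, 3, 1, 2, 4, 3]) == 2
    assert Solution().minSubArrayLen(100, [1, 2, 3]) == 0  # no such subarray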
|
[
"partho.biswas@aurea.com"
] |
partho.biswas@aurea.com
|
098f70f31be722225d1a55db0edff291c05de633
|
8e7a2b9efbc0d25111f01f4cddb781961032685a
|
/python-1025/web/4_django/django_notes/p3/scripts/pushnotes
|
dcc2151431530fe49851c9e770836170c3858852
|
[] |
no_license
|
Dituohgasirre/python
|
e044aa2e1fb2233b6ccd59701b834ab01e4e24c2
|
05f036d2723f75cd89e4412aaed7ee0ba5d3a502
|
refs/heads/master
| 2023-06-03T13:50:18.641433
| 2021-06-17T10:23:40
| 2021-06-17T10:23:40
| 366,942,423
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 888
|
#!/home/joshua/.pyenv/versions/miniconda3/bin/python3
import pexpect
import os
import sys
# dir is the path to the local directory of the git repository
dir = "/home/joshua/git-works/zelin-python-20171025"
# put the username and password for logging in to the git server here
user = "username"
passwd = "password"
os.chdir(dir)
# the git operations to execute go here
git_cmd = ['push', 'origin', 'master']
# path to the git executable
git_executable = '/usr/bin/git'
child = pexpect.spawn(git_executable, git_cmd)
child.logfile = open('/tmp/pushnote_log', 'wb')
try:
child.expect("Username for 'https://gitee.com':")
child.sendline(user)
child.expect("Password for 'https://iesugrace@gitee.com':")
child.sendline(passwd)
child.expect(pexpect.EOF)
child.logfile.close()
except pexpect.EOF:
print("finished")
except pexpect.TIMEOUT:
print("timeout, failed")
else:
print("finished")
|
[
"linfeiji4729289@126.com"
] |
linfeiji4729289@126.com
|
|
cddfea3f4b5aed63838d631daa59b82101a06236
|
752ebc9cd5cf80e1036e4e619a637c9639b0d9b5
|
/src_code/partial_infection.py
|
f96420f975cfc63fe92e46ced6c2dea14010f957
|
[] |
no_license
|
nrkfeller/user_infection
|
41faa60882a7c7f26651e1f86d4478a266d4c525
|
f0ec156863585bbc47d2625d71b6612f4938b887
|
refs/heads/master
| 2020-12-27T02:00:36.128457
| 2015-11-10T20:20:31
| 2015-11-10T20:20:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,917
|
py
|
from userclass import User
from py2neo import Node, Relationship, Path, Rev
from py2neo import authenticate, Graph
from itertools import *
import requests
import numpy as np
import unicodedata
import time
import datetime
import random
import names
import matplotlib.pyplot as plt
import networkx as nx
%matplotlib inline
#INFECT ARGUMENT PASSED NUMBER OF USERS
def partial_infection(usersToInfect):
# get all subgraph in a dictionary
subgraphs = grabSubgraphs()
    # place the subgraph sizes in a list for computation
    subgraph_sizes = list(subgraphs.values())
    # get all the possible combinations that can sum to the value passed in the argument 'usersToInfect'
    c = chain(*[combinations(subgraph_sizes, i) for i in range(len(subgraph_sizes)+1)])
combinationToInfect = [n for n in c if sum(n) == usersToInfect]
# crawl through and infect the selected subgraphs
if (len(combinationToInfect) > 0):
for name, val in subgraphs.items():
if val in combinationToInfect[0]:
for user in AllUsers:
if user.name == name:
crawl(user)
# tell user if it is not possible to infect exact amount of users
else:
print "Not possible to infect exaclty", usersToInfect, "users. Please chose any combination of", list(subgraphs.values())
visualizeNXGraph()
def grabSubgraphs():
"""
Get all the subgraph size and the name of any node in that subgraph. place into dictionary
"""
# grab all nodes directly from the database. Only nodes that are in previous version
nodes = %cypher MATCH n WHERE n.khanAcademyVersion = "B" RETURN n
# put nodes into a dafaframe and initialize containers for results
nodes = nodes.dataframe
allnodes=[]
subgraphChunks = {}
# put the rows from the dataframe into a list
for index, row in nodes.iterrows():
allnodes.append(row[0])
# grab a name associated with a subgraph and and the quantity of nodes in that subgraph
while(len(allnodes) > 0):
name = unicodedata.normalize('NFKD', allnodes[0]['name']).encode('ascii','ignore')
subgraph = %cypher MATCH p=(n { name: {name}})-[r*0..]-(m) WITH NODES(p) AS nodes UNWIND nodes AS node RETURN DISTINCT node
subgraph = subgraph.dataframe
subarray = []
for index, row in subgraph.iterrows():
subarray.append(row[0])
allnodes = filter(lambda x:x not in subarray, allnodes)
subgraphChunks[name] = len(subarray)
return subgraphChunks
def crawl(user):
"""
Recursively infect everyone with a coach or student relationship
"""
iuser = user
iuser.infect()
for u in user.coaches:
if u.khanAcademyVersion == 'A':
crawl(u)
for u in user.students:
if u.khanAcademyVersion == 'A':
crawl(u)
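# Example entry point (assumes the Neo4j graph above is populated and the
# 'AllUsers' list has been built earlier in the notebook):
#
#     partial_infection(5)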
|
[
"nrkfeller@gmail.com"
] |
nrkfeller@gmail.com
|
64455813715717669daa0ecadd9ed8c20051e493
|
f0b33d42741f3c470cc7f616c70a4b10a73fc012
|
/exps/resnet_steering_single_channel.py
|
ebca5efe05cb1a1c646e4a257e3a323c0fc671fa
|
[
"MIT"
] |
permissive
|
duguyue100/ddd20-itsc20
|
1e51a7a76fe1f2759746814ae58f4e1e21c0c4e6
|
667bb5e702a06cfff30b20de669697f3271baf04
|
refs/heads/master
| 2021-09-17T06:34:17.545026
| 2018-06-28T16:35:39
| 2018-06-28T16:35:39
| 114,002,116
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,634
|
py
|
"""ResNets for Steering Prediction.
Author: Yuhuang Hu
Email : duguyue100@gmail.com
"""
from __future__ import print_function
import os
from sacred import Experiment
import numpy as np
from keras.utils.vis_utils import plot_model
from keras import optimizers
from keras.callbacks import ModelCheckpoint
from keras.callbacks import LearningRateScheduler
from keras.callbacks import CSVLogger
import spiker
from spiker import log
from spiker.models import resnet
from spiker.data import ddd17
exp = Experiment("ResNet - Steering - Single Channel - Experiment")
exp.add_config({
"model_name": "", # the model name
"data_name": "", # the data name
"channel_id": 0, # which channel to chose, 0: dvs, 1: aps
"stages": 0, # number of stages
"blocks": 0, # number of blocks of each stage
"filter_list": [], # number of filters per stage
"nb_epoch": 0, # number of training epochs
"batch_size": 0, # batch size
})
@exp.automain
def resnet_exp(model_name, data_name, channel_id, stages, blocks, filter_list,
nb_epoch, batch_size):
"""Perform ResNet experiment."""
model_path = os.path.join(spiker.SPIKER_EXPS, model_name)
if not os.path.isdir(model_path):
os.makedirs(model_path)
else:
raise ValueError("[MESSAGE] This experiment has been done before."
" Create a new config model if you need.")
model_pic = os.path.join(model_path, model_name+"-model-pic.png")
model_file_base = os.path.join(model_path, model_name)
# print model info
log.log("[MESSAGE] Model Name: %s" % (model_name))
log.log("[MESSAGE] Number of epochs: %d" % (nb_epoch))
log.log("[MESSAGE] Batch Size: %d" % (batch_size))
log.log("[MESSAGE] Number of stages: %d" % (stages))
log.log("[MESSAGE] Number of blocks: %d" % (blocks))
# load data
data_path = os.path.join(spiker.SPIKER_DATA, "ddd17",
data_name)
if not os.path.isfile(data_path):
raise ValueError("This dataset does not exist at %s" % (data_path))
log.log("[MESSAGE] Dataset %s" % (data_path))
frames, steering = ddd17.prepare_train_data(data_path)
frames = frames[50:-350]/255.
frames -= np.mean(frames, keepdims=True)
steering = steering[50:-350]
num_samples = frames.shape[0]
num_train = int(num_samples*0.7)
X_train = frames[:num_train]
Y_train = steering[:num_train]
X_test = frames[num_train:]
Y_test = steering[num_train:]
del frames, steering
X_train = X_train[:, :, :, channel_id][..., np.newaxis]
X_test = X_test[:, :, :, channel_id][..., np.newaxis]
log.log("[MESSAGE] Number of samples %d" % (num_samples))
log.log("[MESSAGE] Number of train samples %d" % (X_train.shape[0]))
log.log("[MESSAGE] Number of test samples %d" % (X_test.shape[0]))
# setup image shape
input_shape = (X_train.shape[1], X_train.shape[2], X_train.shape[3])
log.log(input_shape)
# Build model
model = resnet.resnet_builder(
model_name=model_name, input_shape=input_shape,
batch_size=batch_size,
filter_list=filter_list, kernel_size=(3, 3),
output_dim=1, stages=stages, blocks=blocks,
bottleneck=False)
model.summary()
plot_model(model, to_file=model_pic, show_shapes=True,
show_layer_names=True)
# configure optimizer
# def step_decay(epoch):
# "step decay callback."""
# if epoch >= 80 and epoch < 120:
# return float(0.01)
# elif epoch >= 120:
# return float(0.001)
# else:
# return float(0.1)
# sgd = optimizers.SGD(lr=0.0, momentum=0.9, nesterov=True)
model.compile(loss='mean_squared_error',
optimizer="adam",
metrics=["mse"])
print ("[MESSAGE] Model is compiled.")
model_file = model_file_base + \
"-{epoch:02d}-{val_mean_squared_error:.2f}.hdf5"
checkpoint = ModelCheckpoint(model_file,
monitor='val_mean_squared_error',
verbose=1,
save_best_only=True,
mode='min')
# scheduler = LearningRateScheduler(step_decay)
csv_his_log = os.path.join(model_path, "csv_history.log")
csv_logger = CSVLogger(csv_his_log, append=True)
callbacks_list = [checkpoint, csv_logger]
# training
model.fit(
x=X_train, y=Y_train,
batch_size=batch_size,
epochs=nb_epoch,
validation_data=(X_test, Y_test),
callbacks=callbacks_list)
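# Sacred experiments are configured from the command line; an invocation
# would look like the following (all values are placeholders, not tested
# configurations):
#
#     python resnet_steering_single_channel.py with \
#         model_name=resnet-dvs data_name=run1.hdf5 channel_id=0 \
#         stages=3 blocks=2 "filter_list=[16, 32, 64, 128]" \
#         nb_epoch=100 batch_size=64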
|
[
"duguyue100@gmail.com"
] |
duguyue100@gmail.com
|
4e5d0707e8f7fb6e64cd93da40d40126021d948b
|
2281d52ccd9a7cb8217653ac3461a80eccdcbe9f
|
/Course_1_Algorithms/Week4_Divide&Conquer/6_closest_points/closest.py
|
41733589c0639db1dee296387c76ee6f59fb5aa2
|
[] |
no_license
|
MadLily/Data-Structures-and-Algorithms
|
fc58242a7337969540df8dedd3d1624027aa8147
|
fd3fb42cd9aa118c2cca291b177ccbd933fd352a
|
refs/heads/master
| 2020-03-24T22:25:28.580023
| 2019-03-23T20:55:53
| 2019-03-23T20:55:53
| 143,084,756
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,547
|
py
|
#Uses python3
import sys
import math
def minimum_distance(x, y):
#write your code here
if len(x) <=1:
return 10 ** 18
elif len(x) ==2:
return math.hypot(x[0]-x[1],y[0]-y[1])
x_index = sorted(range(len(x)), key=lambda k: x[k])
left_x = [x[i] for i in x_index[:len(x_index)//2]]
left_y = [y[i] for i in x_index[:len(x_index)//2]]
right_x = [x[i] for i in x_index[len(x_index)//2:]]
right_y = [y[i] for i in x_index[len(x_index)//2:]]
mid_x = left_x[-1]
min_left = minimum_distance(left_x,left_y)
min_right = minimum_distance(right_x,right_y)
d = min(min_left,min_right)
mid_xs = [k for k in x if mid_x-d <= k <= mid_x + d]
if len(mid_xs) <=1:
return d
else:
mid_ys = [ky for (kx,ky) in zip(x,y) if mid_x-d <= kx <= mid_x + d]
y_index = sorted(range(len(mid_ys)), key=lambda k: mid_ys[k])
d_mid = 10 ** 18
for i in range(len(y_index)):
d_temp = 10 ** 18
j = i+1
while j in range(i+1,min(i+7,len(y_index))) and abs(mid_ys[y_index[i]]-mid_ys[y_index[j]])<d:
d_temp = min(math.hypot(mid_xs[y_index[i]]-mid_xs[y_index[j]],mid_ys[y_index[i]]-mid_ys[y_index[j]]),d_temp)
j +=1
d_mid =min(d_mid,d_temp)
return min(d_mid,min_left,min_right)
if __name__ == '__main__':
input = sys.stdin.read()
data = list(map(int, input.split()))
n = data[0]
x = data[1::2]
y = data[2::2]
print("{0:.9f}".format(minimum_distance(x, y)))
|
[
"jz2673@columbia.edu"
] |
jz2673@columbia.edu
|
e43acbf5310c6df4b2af938645f83609cc1ff220
|
c0232a6c46fcca05a2e4048ec2128dd960077c75
|
/weather_data_importer.py
|
1ca9ed82c50bbf507f5bed9364e39446f8ef6644
|
[] |
no_license
|
srdicmonika/moni-example-projects
|
7eaa0a9d7cbb72390b8f5f4ec5693c1e0bef85a1
|
9a17ba0d051efb3563b854c47b6875500605d583
|
refs/heads/main
| 2023-07-02T08:51:31.621431
| 2021-07-22T16:34:56
| 2021-07-22T16:34:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,835
|
py
|
import re
import time
import datetime
import math
import sys
import csv
import itertools
# Stations File Column Indexes
STATION_ID = 0
COUNTRY = 8
# Data File Column Indexes
STATION_ID = 0
DATE = 2
TEMPERATURE = 3
# Natural Distaster Data Column Indexes
ND_COUNTRY = 0
ND_START_DATE = 1
ND_END_DATE = 2
ND_TYPE = 3
ND_SUBTYPE = 4
##
start_year = 1980
end_year = 2017
test_limit = -1
# For example, in monthly data:
#
# January 2005 = 2005 + (1 - 0.5) / 12 = 2005.042
# June 2008 = 2008 + (6 - 0.5) / 12 = 2008.458
def convert_date(date):
value = math.modf(float(date))
year = int(value[1])
month = int(round(value[0] * 12 + 0.5))
return (year, month)
def date_range(start, end):
start = re.split('/', start)
start_month = start[1]
start_year = int(start[2])
end = re.split('/', end)
end_month = end[1]
end_year = int(end[2])
if (start_month == ''):
start_month = 1
else:
start_month = int(start_month)
if (end_month == ''):
end_month = 12
else:
end_month = int(end_month)
l = list()
if (end_year > start_year):
month = start_month
year = start_year
while (year < end_year or (year == end_year and month <= end_month)):
if (month > 12):
month = 1
year += 1
l.append((year, month))
month += 1
else:
for month in range(start_month, end_month + 1):
l.append((start_year, month))
return l
def import_stations_from_file(location):
countries = set()
station_countries = dict()
with open(location) as stations:
for line in stations:
# Skip preamble
if (line[0] != '%'):
splitted = re.split(r'\t+', line)
station_countries[int(splitted[STATION_ID])] = splitted[COUNTRY].strip()
countries.add(splitted[COUNTRY].strip())
return (countries, station_countries)
# Load Natural Disaster Data
def import_natural_disaster_data(location):
global test_limit
limit = test_limit
with open(location) as csvfile:
print ("Reading file: " + location)
reader = csv.reader(csvfile, delimiter=',', quotechar='|')
all_countries_natural_disasters = dict()
for line in csvfile:
limit -= 1
splitted = re.split(',' , line)
country = splitted[ND_COUNTRY]
months = date_range(splitted[ND_START_DATE], splitted[ND_END_DATE])
type = splitted[ND_TYPE]
subtype = splitted[ND_SUBTYPE]
try:
all_countries_natural_disasters[country]
except KeyError:
all_countries_natural_disasters[country] = dict()
for month in months:
try:
all_countries_natural_disasters[country][month].append(str(type) + " " + str(subtype))
except KeyError:
all_countries_natural_disasters[country][month] = []
all_countries_natural_disasters[country][month].append(str(type) + " " + str(subtype))
if (limit == 0):
break
return all_countries_natural_disasters
# Load Temperature data (AVG, MIN, MAX) and ....
def import_from_file(location, countries, station_countries):
global test_limit
limit = test_limit
with open(location) as data:
monthly_values_all_stations = dict()
print ("Reading file: " + location)
# Get values as tuples (year,month,value) in a dictionary of countries
for line in data:
limit -= 1
if (line[0] != '%'):
splitted = re.split(r'\t+', line)
country = station_countries[int(splitted[STATION_ID])]
year, month = convert_date(splitted[DATE])
temp = splitted[TEMPERATURE]
if (year >= start_year):
try:
monthly_values_all_stations[country].append((year, month, temp))
except KeyError:
monthly_values_all_stations[country] = [(year,month,temp)]
if (limit == 0):
break
country_monthly_average_values = dict()
# Get average temperature from all stations for each month
print ("Aggregating...")
i = 0
for country in countries:
i += 1
sys.stdout.write('\r' + str(i) + ' out of ' + str(len(countries)) + ' countries')
sys.stdout.flush()
try:
values = monthly_values_all_stations[country]
monthly_values = dict()
# All temperature values from all stations for a month
for value in values:
year = value[0]
month = value[1]
temp = value[2]
try:
monthly_values[(year,month)].append(float(temp))
except KeyError:
monthly_values[(year,month)] = [float(temp)]
for item in monthly_values.items():
year_month_tuple = item[0]
temperature_values = item[1]
try:
country_monthly_average_values[country][year_month_tuple]= sum(temperature_values) / len(temperature_values)
except KeyError:
country_monthly_average_values[country] = dict()
country_monthly_average_values[country][year_month_tuple]= sum(temperature_values) / len(temperature_values)
except KeyError:
pass
print ('\n')
return country_monthly_average_values
ts = time.time()
print ("Start: " + datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S'))
all_countries = set()
countries, station_countries = import_stations_from_file('../tavg/site_detail.txt')
for country in countries:
all_countries.add(country)
country_monthly_average_values = import_from_file('../tavg/data.txt', countries, station_countries)
countries, station_countries = import_stations_from_file('../tmax/site_detail.txt')
for country in countries:
all_countries.add(country)
country_monthly_max_values = import_from_file('../tmax/data.txt', countries, station_countries)
countries, station_countries = import_stations_from_file('../tmin/site_detail.txt')
for country in countries:
all_countries.add(country)
country_monthly_min_values = import_from_file('../tmin/data.txt', countries, station_countries)
country_natural_disasters = import_natural_disaster_data("../natural_disaster.csv")
with open('weather_data.csv', 'w', newline='') as csvfile:
for country in all_countries:
try:
average = country_monthly_average_values[country]
minimum = country_monthly_min_values[country]
maximum = country_monthly_max_values[country]
all_disasters = country_natural_disasters[country]
for year in range(start_year, end_year):
for month in range(1,12 + 1):
try:
monthly_average = average[(year, month)]
except KeyError:
monthly_average = "*"
try:
monthly_max = maximum[(year, month)]
except KeyError:
monthly_max = "*"
try:
monthly_min = minimum[(year, month)]
except KeyError:
monthly_min = "*"
try:
disasters = all_disasters[(year,month)]
disasters = str(list(disasters)).replace('[','').replace(']','')
except KeyError:
disasters = "*"
csvfile.write(country + ";")
csvfile.write(str(year) + ";")
csvfile.write(str(month) + ";")
csvfile.write(str(monthly_average) + ";")
csvfile.write(str(monthly_max) + ";")
csvfile.write(str(monthly_min) + ";")
csvfile.write(disasters + ";")
csvfile.write("\n")
except KeyError:
print("no data found for " + country)
continue
ts = time.time()
print ("Finish: " + datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S'))
|
[
"noreply@github.com"
] |
srdicmonika.noreply@github.com
|
adef156d59f43b57aaf80be71ca72c2847f7c39e
|
e0452a5aacba0b5d2a840dbd4fcd080d076c24b1
|
/postprocessor/zigzag.py
|
252f41becad0c1cfa6dc3ac34c614420227b1f92
|
[
"BSD-3-Clause"
] |
permissive
|
tapeguy/heekscnc
|
12d5d614c7866980204decea5e258369b2346c6a
|
bb245e8c451ff1c92df932b722b88e98976a6b67
|
refs/heads/master
| 2021-01-22T17:04:03.489080
| 2016-05-16T18:03:08
| 2016-05-16T18:03:08
| 14,959,023
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,589
|
py
|
import area
import math
curve_list_for_zigs = []
rightward_for_zigs = True
sin_angle_for_zigs = 0.0
cos_angle_for_zigs = 1.0
sin_minus_angle_for_zigs = 0.0
cos_minus_angle_for_zigs = 1.0
one_over_units = 1.0
def zigzag(a, stepover, zig_unidirectional, zig_angle):
if a.num_curves() == 0:
return
global rightward_for_zigs
global curve_list_for_zigs
global sin_angle_for_zigs
global cos_angle_for_zigs
global sin_minus_angle_for_zigs
global cos_minus_angle_for_zigs
global one_over_units
radians_angle = zig_angle * math.pi / 180
sin_angle_for_zigs = math.sin(-radians_angle)
cos_angle_for_zigs = math.cos(-radians_angle)
sin_minus_angle_for_zigs = math.sin(radians_angle)
cos_minus_angle_for_zigs = math.cos(radians_angle)
one_over_units = 1 / area.get_units()
a = rotated_area(a)
b = area.Box()
a.GetBox(b)
x0 = b.MinX() - 1.0
x1 = b.MaxX() + 1.0
height = b.MaxY() - b.MinY()
num_steps = int(height / stepover + 1)
y = b.MinY() + 0.1 * one_over_units
null_point = area.Point(0, 0)
rightward_for_zigs = True
curve_list_for_zigs = []
for i in range(0, num_steps):
y0 = y
y = y + stepover
p0 = area.Point(x0, y0)
p1 = area.Point(x0, y)
p2 = area.Point(x1, y)
p3 = area.Point(x1, y0)
c = area.Curve()
c.append(area.Vertex(0, p0, null_point, 0))
c.append(area.Vertex(0, p1, null_point, 0))
c.append(area.Vertex(0, p2, null_point, 1))
c.append(area.Vertex(0, p3, null_point, 0))
c.append(area.Vertex(0, p0, null_point, 1))
a2 = area.Area()
a2.append(c)
a2.Intersect(a)
make_zig(a2, y0, y, zig_unidirectional)
if zig_unidirectional == False:
rightward_for_zigs = (rightward_for_zigs == False)
reorder_zigs()
return curve_list_for_zigs
def make_zig_curve(curve, y0, y, zig_unidirectional):
if rightward_for_zigs:
curve.Reverse()
# find a high point to start looking from
high_point = None
for vertex in curve.getVertices():
if high_point == None:
high_point = vertex.p
elif vertex.p.y > high_point.y:
# use this as the new high point
high_point = vertex.p
elif math.fabs(vertex.p.y - high_point.y) < 0.002 * one_over_units:
# equal high point
if rightward_for_zigs:
# use the furthest left point
if vertex.p.x < high_point.x:
high_point = vertex.p
else:
# use the furthest right point
if vertex.p.x > high_point.x:
high_point = vertex.p
zig = area.Curve()
high_point_found = False
zig_started = False
zag_found = False
for i in range(0, 2): # process the curve twice because we don't know where it will start
prev_p = None
for vertex in curve.getVertices():
if zag_found: break
if prev_p != None:
if zig_started:
zig.append(unrotated_vertex(vertex))
if math.fabs(vertex.p.y - y) < 0.002 * one_over_units:
zag_found = True
break
elif high_point_found:
if math.fabs(vertex.p.y - y0) < 0.002 * one_over_units:
if zig_started:
zig.append(unrotated_vertex(vertex))
elif math.fabs(prev_p.y - y0) < 0.002 * one_over_units and vertex.type == 0:
zig.append(area.Vertex(0, unrotated_point(prev_p), area.Point(0, 0)))
zig.append(unrotated_vertex(vertex))
zig_started = True
elif vertex.p.x == high_point.x and vertex.p.y == high_point.y:
high_point_found = True
prev_p = vertex.p
if zig_started:
if zig_unidirectional == True:
# remove the last bit of zig
if math.fabs(zig.LastVertex().p.y - y) < 0.002 * one_over_units:
vertices = zig.getVertices()
while len(vertices) > 0:
v = vertices[len(vertices)-1]
if math.fabs(v.p.y - y0) < 0.002 * one_over_units:
break
else:
vertices.pop()
zig = area.Curve()
for v in vertices:
zig.append(v)
curve_list_for_zigs.append(zig)
def make_zig(a, y0, y, zig_unidirectional):
for curve in a.getCurves():
make_zig_curve(curve, y0, y, zig_unidirectional)
reorder_zig_list_list = []
def add_reorder_zig(curve):
global reorder_zig_list_list
# look in existing lists
s = curve.FirstVertex().p
for curve_list in reorder_zig_list_list:
last_curve = curve_list[len(curve_list) - 1]
e = last_curve.LastVertex().p
if math.fabs(s.x - e.x) < 0.002 * one_over_units and math.fabs(s.y - e.y) < 0.002 * one_over_units:
curve_list.append(curve)
return
# else add a new list
curve_list = []
curve_list.append(curve)
reorder_zig_list_list.append(curve_list)
def reorder_zigs():
global curve_list_for_zigs
global reorder_zig_list_list
reorder_zig_list_list = []
for curve in curve_list_for_zigs:
add_reorder_zig(curve)
curve_list_for_zigs = []
for curve_list in reorder_zig_list_list:
for curve in curve_list:
curve_list_for_zigs.append(curve)
def rotated_point(p):
return area.Point(p.x * cos_angle_for_zigs - p.y * sin_angle_for_zigs, p.x * sin_angle_for_zigs + p.y * cos_angle_for_zigs)
def unrotated_point(p):
return area.Point(p.x * cos_minus_angle_for_zigs - p.y * sin_minus_angle_for_zigs, p.x * sin_minus_angle_for_zigs + p.y * cos_minus_angle_for_zigs)
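# Editor's note: rotated_point/unrotated_point apply the standard 2D rotation
# matrix [[cos, -sin], [sin, cos]] with the module-level angle globals set in
# zigzag(); since they use opposite angles, composing them is the identity.
# A commented sketch (hypothetical coordinates):
#
#   p = area.Point(3.0, 4.0)
#   q = unrotated_point(rotated_point(p))   # q.x ~= 3.0, q.y ~= 4.0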
def rotated_vertex(v):
if v.type:
return area.Vertex(v.type, rotated_point(v.p), rotated_point(v.c))
return area.Vertex(v.type, rotated_point(v.p), area.Point(0, 0))
def unrotated_vertex(v):
if v.type:
return area.Vertex(v.type, unrotated_point(v.p), unrotated_point(v.c))
return area.Vertex(v.type, unrotated_point(v.p), area.Point(0, 0))
def rotated_area(a):
an = area.Area()
for curve in a.getCurves():
curve_new = area.Curve()
for v in curve.getVertices():
curve_new.append(rotated_vertex(v))
an.append(curve_new)
return an
|
[
"benjamin.shern@gmail.com"
] |
benjamin.shern@gmail.com
|
29417592e974bfeb3e7b88da3f6fcdc3e7ad00ab
|
9af65e8d462e9aa564d3a7770d5b45903dc90ff5
|
/2019/python/test_day4.py
|
2f12f83bf2830b3b7d6ffa575b542b3b4b4ea560
|
[] |
no_license
|
kohrongying/aoc
|
ae3e6a67e4a72573231355fcb3d058b175fc53df
|
3d47ab85402ac14e337d304cff909139967cb2c3
|
refs/heads/master
| 2020-09-24T04:01:28.938880
| 2019-12-17T06:40:39
| 2019-12-17T06:40:39
| 225,657,321
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 630
|
py
|
from unittest import TestCase, main
from day4 import is_increasing, has_adjacent, custom_rle
class TestJoke(TestCase):
def test_increasing(self):
self.assertEqual(is_increasing(123), True)
self.assertEqual(is_increasing(1223), True)
self.assertEqual(is_increasing(123123), False)
def test_adjacent(self):
self.assertEqual(has_adjacent(123), False)
self.assertEqual(has_adjacent(1223), True)
self.assertEqual(has_adjacent(121123), True)
def test_rle(self):
self.assertEqual(custom_rle(112233), [2, 2, 2])
self.assertEqual(custom_rle(113334), [2, 3, 1])
if __name__ == '__main__':
main()
|
[
"kohrongying@gmail.com"
] |
kohrongying@gmail.com
|
3ba3fc6b740595d6a0d0994b97374f1c09c62461
|
191c6ae5b450bf2e3f9d45fb79dc1b053f05d14f
|
/courses/migrations/0004_answer.py
|
03ab52fcc960a4f6ee3bbf5b64c1931eba5a057d
|
[] |
no_license
|
Kennedy-Njeri/Django-Quiz
|
ef45fc259c5b9591ec366d02268c49ec46e9608c
|
581e92381cb857f4768e03c0409c3fa7b3dbce9a
|
refs/heads/master
| 2020-07-15T09:21:01.386763
| 2019-09-04T14:39:50
| 2019-09-04T14:39:50
| 205,529,827
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 864
|
py
|
# Generated by Django 2.2.4 on 2019-08-31 18:16
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('courses', '0003_auto_20190831_1811'),
]
operations = [
migrations.CreateModel(
name='Answer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order', models.IntegerField(default=0)),
('text', models.CharField(max_length=255)),
('correct', models.BooleanField(default=False)),
('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='courses.Question')),
],
options={
'ordering': ['order'],
},
),
]
|
[
"mistakenz123@gmail.com"
] |
mistakenz123@gmail.com
|
49637073daf4756247f9b050cd704c69053e1881
|
649db74450df0cd59a862aae7041a5907d7df7cf
|
/Python/snake.py
|
f2ce6ee8410f07bfa07a07e794220c8f86cf9772
|
[] |
no_license
|
antoniochandra/26415171
|
53d65572a402890f44c0f090fba0e77c3c35e633
|
39e08a9b177c2e06533e2910629ae95361579b25
|
refs/heads/master
| 2020-07-13T12:52:02.529978
| 2016-12-04T12:58:40
| 2016-12-04T12:58:40
| 67,911,500
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,803
|
py
|
# SNAKES GAME
# Use ARROW KEYS to play, SPACE BAR for pausing/resuming and Esc Key for exiting
import curses
import os
from curses import KEY_RIGHT, KEY_LEFT, KEY_UP, KEY_DOWN
from random import randint
curses.initscr()
win = curses.newwin(20, 60, 0, 0)
win.keypad(1)
curses.noecho()
curses.curs_set(0)
win.border(0)
win.nodelay(1)
key = KEY_RIGHT # Initializing values
score = 0
time = 0
snake = [[4,10], [4,9], [4,8]] # Initial snake co-ordinates
food = [randint(1,18),randint(1,58)] # First food co-ordinates
tembok0=[randint(1,18),randint(1,58)]
tembok1=[randint(1,18),randint(1,58)]
tembok2=[randint(1,18),randint(1,58)]
tembok3=[randint(1,18),randint(1,58)]
tembok4=[randint(1,18),randint(1,58)]
win.addch(food[0], food[1], '*') # Prints the food
win.addch(tembok0[0],tembok0[1],'#')
win.addch(tembok1[0],tembok1[1],'#')
win.addch(tembok2[0],tembok2[1],'#')
win.addch(tembok3[0],tembok3[1],'#')
win.addch(tembok4[0],tembok4[1],'#')
while key != 27: # While Esc key is not pressed
time = time + 1
win.border(0)
win.addstr(0, 2, 'Score : ' + str(score) + ' ') # Printing 'Score' and
win.addstr(0, 20, ' GAME SNAKE ') # 'SNAKE' strings
    win.timeout(150 - (len(snake)//5 + len(snake)//10) % 120)    # Increases the speed of Snake as its length increases (integer division, so curses gets an int on Python 3)
if snake[0] == tembok0 or snake[0]==tembok1 or snake[0]==tembok2 or snake[0]==tembok3 or snake[0]==tembok4:break
prevKey = key # Previous key pressed
event = win.getch()
key = key if event == -1 else event
if key == ord(' '): # If SPACE BAR is pressed, wait for another
key = -1 # one (Pause/Resume)
while key != ord(' '):
key = win.getch()
key = prevKey
continue
if key not in [KEY_LEFT, KEY_RIGHT, KEY_UP, KEY_DOWN, 27]: # If an invalid key is pressed
key = prevKey
# Calculates the new coordinates of the head of the snake. NOTE: len(snake) increases.
# This is taken care of later at [1].
snake.insert(0, [snake[0][0] + (key == KEY_DOWN and 1) + (key == KEY_UP and -1), snake[0][1] + (key == KEY_LEFT and -1) + (key == KEY_RIGHT and 1)])
# If snake crosses the boundaries, make it enter from the other side
if snake[0][0] == 0: snake[0][0] = 18
if snake[0][1] == 0: snake[0][1] = 58
if snake[0][0] == 19: snake[0][0] = 1
if snake[0][1] == 59: snake[0][1] = 1
#enemy
# Exit if snake crosses the boundaries (Uncomment to enable)
#if snake[0][0] == 0 or snake[0][0] == 19 or snake[0][1] == 0 or snake[0][1] == 59: break
# If snake runs over itself
if snake[0] in snake[1:]: break
if food==tembok0 or food==tembok1 or food==tembok2 or food==tembok3 or food==tembok4 :
food = [randint(1,18),randint(1,58)]
win.addch(food[0],food[1],'*')
if snake[0] == food: # When snake eats the food
food = []
score += 1
while food == []:
food = [randint(1, 18), randint(1, 58)] # Calculating next food's coordinates
if food in snake: food = []
win.addch(food[0], food[1], '*')
else:
last = snake.pop() # [1] If it does not eat the food, length decreases
win.addch(last[0], last[1], ' ')
win.addch(snake[0][0], snake[0][1], 'O')
curses.endwin()
print("\nScore - " + str(score))
|
[
"m26415171@john.petra.ac.id"
] |
m26415171@john.petra.ac.id
|
f4686b30cfc930e86244665f24493a57d6e99eaf
|
ea447427e8d43f58b69fc74b315935ddfb0e8cda
|
/OpenMarketScrapper/venv/Scripts/tkconch-script.py
|
b6f32eaad71e31b04aac853f06709fed90d7d8dd
|
[] |
no_license
|
nearly-0/OpenMarketScrapper
|
072e7a6a35904dd0d3b0dafc41ca753c55c4a839
|
6cf58fe14c6ebfd2258befb84e734238b0e23fa7
|
refs/heads/master
| 2021-10-10T18:18:32.921888
| 2019-01-15T05:47:25
| 2019-01-15T05:47:25
| null | 0
| 0
| null | null | null | null |
WINDOWS-1252
|
Python
| false
| false
| 463
|
py
|
#!C:\Users\±èÁØ¿µ\Documents\GitHub\ProductScrapper\ProductScrapper\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'Twisted==18.9.0','console_scripts','tkconch'
__requires__ = 'Twisted==18.9.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('Twisted==18.9.0', 'console_scripts', 'tkconch')()
)
|
[
"skywhale616@gmail.com"
] |
skywhale616@gmail.com
|
a9bd31ce5254b977d52de49fc6a136c352b86f5e
|
afbe0e30e3572257c05b31f5c9b7fea747789e60
|
/src/com/jalasoft/ShoppingCart/controller/controller.py
|
a2e47964af3dc7c7ae55abf7d888c5fb5ceb33a9
|
[] |
no_license
|
magnethus/Test-AL
|
f9d6864595114124406e11fe94af0d176a822f88
|
3189a27f1eac6a44890e975a1f5c7073d87456ee
|
refs/heads/master
| 2020-04-25T16:42:31.670774
| 2019-04-09T14:42:12
| 2019-04-09T14:42:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,509
|
py
|
from PyQt5.QtCore import QRegExp
from PyQt5.QtGui import QRegExpValidator
from PyQt5.QtWidgets import QTableWidgetItem, QLineEdit
from src.com.jalasoft.ShoppingCart.model.product import Product
from src.com.jalasoft.ShoppingCart.view.product_insert_view import ProductInsertView
from src.com.jalasoft.ShoppingCart.view.product_show_view import ProductShowView
class Controller:
def __init__(self, mainView, cartModel):
# mainView.initUI()
self.mainView = mainView
self.cartModel = cartModel
self.mainView.initUI(self)
self.cartList = []
def addActionListener(self):
self.centralWidget = self.mainView.centralWidget()
if isinstance(self.centralWidget, ProductInsertView):
self.centralWidget.getSaveProductButton().clicked.connect(lambda: self.saveProduct())
if isinstance(self.centralWidget, ProductShowView):
self.centralWidget.getAddTocartButton().clicked.connect(lambda: self.addToCart())
def saveProduct(self):
self.centralWidget = self.mainView.centralWidget()
product_name = self.centralWidget.getProductName()
description = self.centralWidget.getProductDescription()
price = self.centralWidget.getPrice()
stock = self.centralWidget.getProductStock()
category_id = self.centralWidget.getProductCategory()
prod = Product()
prod.setProductName(product_name)
prod.setProductDescription(description)
prod.setProductPrice(price)
prod.setProductStock(stock)
prod.setProductCategory(category_id)
self.cartModel.saveProduct(prod)
def addToCart(self):
indexes = self.centralWidget.getTable().selectionModel().selectedIndexes()
        # read the selected row: id, name, description, price, stock, category
        id = indexes[0].sibling(indexes[0].row(), indexes[0].column()).data()
        product_name = indexes[1].sibling(indexes[1].row(), indexes[1].column()).data()
        description = indexes[2].sibling(indexes[2].row(), indexes[2].column()).data()
        price = indexes[3].sibling(indexes[3].row(), indexes[3].column()).data()
        stock = indexes[4].sibling(indexes[4].row(), indexes[4].column()).data()
        category_id = indexes[5].sibling(indexes[5].row(), indexes[5].column()).data()
#create product and add to cart
pro = Product()
pro.setProductId(id)
pro.setProductName(product_name)
pro.setProductDescription(description)
pro.setProductPrice(price)
        pro.setProductStock(stock)
        pro.setProductCategory(category_id)
self.cartList.append(pro)
self.loadCartTable()
def loadCartTable(self):
listSize = len(self.cartList)
self.centralWidget.getCartTable().setRowCount(listSize)
index = 0
for prod in self.cartList:
quantity = QLineEdit()
regex = QRegExp("[0-9_]+")
validator = QRegExpValidator(regex)
quantity.setValidator(validator)
self.centralWidget.getCartTable().setItem(index, 0, QTableWidgetItem(str(prod.getProductId())))
self.centralWidget.getCartTable().setItem(index, 1, QTableWidgetItem(prod.getProductName()))
self.centralWidget.getCartTable().setItem(index, 2, QTableWidgetItem(prod.getProductDescription()))
self.centralWidget.getCartTable().setItem(index, 3, QTableWidgetItem(str(prod.getProductPrice())))
self.centralWidget.getCartTable().setCellWidget(index, 4, quantity)
index = index + 1
|
[
"Alex.Alcala@jalasoft.com"
] |
Alex.Alcala@jalasoft.com
|
18b5385a281a808905e089fee99a4dcfcb07f437
|
8b2d385535eeb29d14d2f9a8ba643b576b59713d
|
/objects/Car.py
|
da2ce19a0f019af640ea728293eb10adf329dd6b
|
[] |
no_license
|
asdail/course
|
39571a928f1c6efa3ed17207d41c12baa16a376f
|
9786f4818cadb5a6fa85384221b9819de75d5146
|
refs/heads/master
| 2023-01-14T12:08:28.166959
| 2020-11-24T13:26:06
| 2020-11-24T13:26:06
| 314,238,658
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 316
|
py
|
class Car:
def beep(self):
print("I am a new car!", self.id, self.type, self.color, self.km, self.feul, self.tank)
def add_feul(self,feul):
self.feul+=feul
if self.feul>self.tank:
self.feul=self.tank
def need_feul(self):
if self.feul<5:
return True
|
[
"amitjonas1@gmail.com"
] |
amitjonas1@gmail.com
|
1d54b8861d49fbfe8a7fa56f56c7a3cae0178bec
|
3183f9826316dcafaaa202ba6af1927d0cd6b7b0
|
/advent-of-code-2020/12/main.py
|
48a8f0ceb4f22851170b8499ffef956f8bd24cbe
|
[] |
no_license
|
jensecj/code-challenges
|
26a877376c6ff76f710af4dbb33a37b3f908fa84
|
d5b577d246b660a8cdbca6be877b11438f0d890b
|
refs/heads/master
| 2022-12-11T08:11:34.468236
| 2022-12-07T20:27:59
| 2022-12-07T20:27:59
| 120,000,735
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,145
|
py
|
# tags: simulation, complex numbers
from typing import List, Tuple
from compytetive.algorithms import manhatten_dist
from compytetive.util import benchmark
Point = Tuple[int, int]
Command = Tuple[str, int]
Program = List[Command]
Direction = str
DIRECTIONS = ["N", "E", "S", "W"]
def read_input(filename):
with open(filename) as f:
data = f.readlines()
data = [(s[0], s[1:].strip()) for s in data]
data = [(a, int(b)) for a, b in data]
return data
def translate_move(heading: Direction, direction: Direction, value: int) -> Point:
assert heading in DIRECTIONS
assert direction in DIRECTIONS + ["F"]
if direction == "F":
direction = heading
if direction == "N":
return (0, value)
if direction == "E":
return (value, 0)
if direction == "S":
return (0, -value)
if direction == "W":
return (-value, 0)
raise Exception("Unable to translate move")
assert translate_move("N", "F", 1) == translate_move("N", "N", 1) == (0, 1)
assert translate_move("N", "E", 1) == translate_move("E", "F", 1) == (1, 0)
assert translate_move("N", "S", 1) == (0, -1)
assert translate_move("N", "W", 1) == (-1, 0)
def change_direction(heading: Direction, direction: Direction, value) -> Direction:
assert heading in DIRECTIONS
assert direction in ["L", "R"]
assert value % 90 == 0
idx = DIRECTIONS.index(heading)
turn = value // 90
if direction == "L":
turn = -turn
return DIRECTIONS[(idx + turn) % len(DIRECTIONS)]
assert change_direction("N", "L", 90) == "W"
assert change_direction("N", "L", 180) == "S"
assert change_direction("W", "L", 90) == "S"
assert change_direction("S", "L", 90) == "E"
assert change_direction("E", "L", 90) == "N"
assert change_direction("N", "R", 90) == "E"
assert change_direction("N", "R", 360) == "N"
assert change_direction("N", "R", 180) == "S"
assert change_direction("E", "R", 90) == "S"
assert change_direction("S", "R", 90) == "W"
assert change_direction("W", "R", 90) == "N"
def part1(program):
direction = "E"
x, y = (0, 0)
for cmd, value in program:
if cmd in ["L", "R"]:
direction = change_direction(direction, cmd, value)
elif cmd in ["F", "N", "E", "S", "W", "B"]:
nx, ny = translate_move(direction, cmd, value)
x, y = x + nx, y + ny
return manhatten_dist((0, 0), (x, y))
def rotate_point(point: Point, direction: Direction, value: int) -> Point:
x, y = point
if direction in "L":
for _ in range(value // 90):
x, y = -y, x
elif direction == "R":
for _ in range(value // 90):
x, y = y, -x
return x, y
assert (
rotate_point((10, 4), "R", 0)
== rotate_point((10, 4), "L", 0)
== rotate_point((10, 4), "R", 360)
== rotate_point((10, 4), "L", 360)
== (10, 4)
)
assert rotate_point((10, 4), "R", 90) == rotate_point((10, 4), "L", 270) == (4, -10)
assert rotate_point((10, 4), "R", 180) == rotate_point((10, 4), "L", 180) == (-10, -4)
assert rotate_point((10, 4), "R", 270) == rotate_point((10, 4), "L", 90) == (-4, 10)
def part2(program):
x, y = (0, 0)
wx, wy = (10, 1)
direction = "E" # never changes in part2
for cmd, value in program:
if cmd in ["L", "R"]:
wx, wy = rotate_point((wx, wy), cmd, value)
elif cmd in DIRECTIONS:
nx, ny = translate_move(direction, cmd, value)
wx, wy = wx + nx, wy + ny
elif cmd == "F":
x, y = x + value * wx, y + value * wy
return manhatten_dist((0, 0), (x, y))
def part2_complex(data):
"""
The problem can also be solved using real numbers.
A complex number multiplied with 1j (a complex number with an imaginary unit of 1),
corresponds to a counter-clockwise rotation of 90°,
a multiplication of -1j corresponds to a clockwise rotation of 90°.
"""
ship = complex(0, 0)
waypoint = complex(10, 1)
for cmd, value in data:
if cmd == "N":
waypoint += complex(0, value)
elif cmd == "S":
waypoint += complex(0, -value)
elif cmd == "E":
waypoint += complex(value, 0)
elif cmd == "W":
waypoint += complex(-value, 0)
elif cmd == "F":
ship += waypoint * value
elif cmd == "R":
turns = value // 90
rotation = complex(0, -1) ** turns
waypoint *= rotation
elif cmd == "L":
turns = value // 90
rotation = complex(0, 1) ** turns
waypoint *= rotation
return manhatten_dist((0, 0), (ship.real, ship.imag))
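# Editor's illustration (not part of the original solution): the 1j-rotation
# trick described in the docstring above, using only built-in complex numbers;
# compare with the tuple-based rotate_point asserts earlier in this file.
_w = complex(10, 4)
assert _w * 1j == complex(-4, 10)    # one 90° counter-clockwise ("L") turn
assert _w * -1j == complex(4, -10)   # one 90° clockwise ("R") turn
assert _w * 1j * 1j * 1j * 1j == _w  # four left turns return to the start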
def main():
data = read_input("input.in")
print(benchmark(part1)(data))
print(benchmark(part2)(data))
print(benchmark(part2_complex)(data))
if __name__ == "__main__":
main()
test1_input = read_input("test1.in")
assert part1(test1_input) == 25
assert part2(test1_input) == part2_complex(test1_input) == 286
real_input = read_input("input.in")
assert part1(real_input) == 1294
assert part2(real_input) == part2_complex(real_input) == 20592
|
[
"jens@subst.net"
] |
jens@subst.net
|
b00e38ba0f949506a9f7dbd0726fd6f32fd254d8
|
9229cdfbca4c4d56da94c6a8e0848185f9eb36bc
|
/client.py
|
c520126fe5d568fc24ce936d64aa48651d5f729e
|
[] |
no_license
|
huuhoa020899/NeuralNetwork
|
3028eb0b47d5212044b2d54c51907d976c6621b1
|
3ad14e4b67ea9b81d383adfb817b25c9625b2889
|
refs/heads/master
| 2022-07-31T20:51:37.497795
| 2020-05-26T13:03:16
| 2020-05-26T13:03:16
| 264,621,842
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,211
|
py
|
import time, socket, sys
print('Client Server...')
time.sleep(1)
#Get the hostname, IP Address from socket and set Port
soc = socket.socket()
shost = socket.gethostname()
ip = socket.gethostbyname(shost)
#get information to connect with the server
print(shost, '({})'.format(ip))
server_host = input('Enter server\'s IP address:')
name = input('Enter Client\'s name: ')
port = 1234
print('Trying to connect to the server: {}, ({})'.format(server_host, port))
time.sleep(1)
while True:
try:
soc.connect((server_host, port))
print("Connected...\n")
soc.send(name.encode())
server_name = soc.recv(1024)
server_name = server_name.decode()
print('{} has joined...'.format(server_name))
print('Enter [bye] to exit.')
while True:
message = soc.recv(1024)
message = message.decode()
print(server_name, ">", message)
message = input(str("Me > "))
if message == "bye":
message = "Leaving the Chat room"
soc.send(message.encode())
print("\n")
break
            soc.send(message.encode())
        break  # chat session ended normally; leave the reconnect loop
    except OSError:
        # connection failed: discard the old socket and ask for a new address
        soc = socket.socket()
        print("ERROR IP:")
        server_host = input('Enter server\'s IP address:')
|
[
"=phamhuuhoa020899@gmail.com"
] |
=phamhuuhoa020899@gmail.com
|
2ecfed614c5914948dd73bcc3a7796c56670047a
|
a309c3b042c4234410c2784996e6d7766c7d7c2e
|
/backend/rxrelease/rxbackend/rxdod/urls.py
|
75dbe7d1d3853419c4bd19cf2803ac684e7bddce
|
[] |
no_license
|
PatrickEkkel/RxRelease
|
aaa9915813541ecf0284dacee797e7f5a6a52bdd
|
7936fe00dace56a962cfc4f14a254e5f40c00325
|
refs/heads/develop
| 2021-07-15T05:26:20.225891
| 2021-06-21T22:49:27
| 2021-06-21T22:49:27
| 88,879,337
| 1
| 0
| null | 2020-08-31T22:59:18
| 2017-04-20T15:08:27
|
Python
|
UTF-8
|
Python
| false
| false
| 801
|
py
|
from django.conf.urls import url
from rest_framework.urlpatterns import format_suffix_patterns
from .views import demonondemanduserviews
urlpatterns = [
url(r'^users/$', demonondemanduserviews.CreateView.as_view(), name="create"),
url(r'^users/(?P<pk>[0-9]+)/$', demonondemanduserviews.DetailsView.as_view(), name="details"),
url(r'^users/vminfo/(?P<pk>[0-9]+)/$', demonondemanduserviews.DemoUserEnvironmentView.as_view(), name="vminfo"),
url(r'^users/vmstatus/(?P<pk>[0-9]+)/$', demonondemanduserviews.DemoUserEnvironmentView.as_view(), name="vmstatus"),
url(r'^users/vmreset/(?P<pk>[0-9]+)/$', demonondemanduserviews.PowerCycleVM.as_view(), name="vmreset"),
url(r'^users/createdemovm/(?P<pk>[0-9]+)/$', demonondemanduserviews.CreateHost.as_view(), name="createdemovm"),
]
|
[
"patrick.ekkel@topicus.nl"
] |
patrick.ekkel@topicus.nl
|
deaaa90cbb8570dcb3502fc256181ae4b0976e2d
|
5a2b87bdd44c0aef387fb3b6ec8ad17f018db13a
|
/accounts/views.py
|
e34ffbb84f74e8d2e18ae9465d4d5ac81402ac71
|
[] |
no_license
|
yasser-aboelgheit/Notification_system
|
f2da5e49b4c95f56aed6c334a38a14b6ad91dccf
|
3547ce47711b4e9463477b5c41b9ea7c8b7fae82
|
refs/heads/main
| 2023-03-03T02:36:23.180744
| 2021-02-07T13:42:14
| 2021-02-07T13:42:14
| 334,776,752
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,295
|
py
|
from rest_framework import generics
from .models import Passenger
from .serializers import PassengerSerializer, PassengerRegisterSerializer
from rest_framework.response import Response
from django.contrib.auth.models import User
from rest_framework.permissions import IsAuthenticated
from .celery_tasks import send_sms_on_register
import logging
logger = logging.getLogger('notify_logger')
class PassengerAPIView(generics.ListAPIView):
permission_classes = (IsAuthenticated,)
queryset = Passenger.objects.all()
serializer_class = PassengerSerializer
class SelfRegisterViewSet(generics.CreateAPIView):
serializer_class = PassengerRegisterSerializer
def create(self, *args, **kwargs):
"""
creates new user and new passenger with active=True
"""
request_body = self.request.data
serializer = PassengerRegisterSerializer(data=request_body)
if not serializer.is_valid():
return Response(serializer.errors, 400)
user = User.objects.create(
username=serializer.validated_data["username"], email=serializer.validated_data["username"])
user.set_password(request_body["password"])
user.save()
passengerProfile = Passenger.objects.create(user=user,
name=serializer.validated_data["name"],
username=serializer.validated_data["username"],
phone_number=serializer.validated_data["phone_number"],
home_address=serializer.validated_data["home_address"],
work_address=serializer.validated_data["work_address"],
notification_langauge=serializer.validated_data[
"notification_langauge"],
active=True,
)
send_sms_on_register.s(
passengerProfile.name, passengerProfile.phone_number).apply_async(queue="tasks")
return Response({"message": "Account registration successful"}, status=201)
|
[
"yasseraboelgheit@weaccept.co"
] |
yasseraboelgheit@weaccept.co
|
ebcb7b289614406cdaaf86fa2544d9d183c092e8
|
55a09afeb6335997f6c7daefb5007b3277a07384
|
/pyhanko/pdf_utils/font.py
|
9a55953c591dab7c8daf963ceb4bbbd53e74ad36
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
ooguz/pyHanko
|
c3d9acdc3b81eaea64c9cc57b954989c440efc57
|
f1490d9ecaa74ce600e464a27807290d486acc6f
|
refs/heads/master
| 2023-03-05T07:57:14.894643
| 2021-02-14T11:01:16
| 2021-02-14T11:01:16
| 320,052,372
| 0
| 0
|
MIT
| 2021-02-15T17:07:17
| 2020-12-09T19:01:08
| null |
UTF-8
|
Python
| false
| false
| 15,930
|
py
|
"""Basic support for font handling & subsetting.
This module relies on `fontTools <https://pypi.org/project/fonttools/>`_ for
OTF parsing and subsetting.
.. warning ::
If/when support is added for more advanced typographical features, the
general :class:`FontEngine` interface might change.
"""
import logging
from dataclasses import dataclass
from io import BytesIO
from pyhanko.pdf_utils import generic
from fontTools import ttLib, subset
from pyhanko.pdf_utils.misc import peek
__all__ = [
'FontEngine', 'SimpleFontEngine', 'GlyphAccumulator',
'GlyphAccumulatorFactory'
]
from pyhanko.pdf_utils.writer import BasePdfFileWriter
logger = logging.getLogger(__name__)
pdf_name = generic.NameObject
pdf_string = generic.pdf_string
ALPHABET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def generate_subset_prefix():
import random
return ''.join(ALPHABET[random.randint(0, 25)] for _ in range(6))
class FontEngine:
"""General interface for glyph lookups and font metrics."""
def measure(self, txt: str) -> float:
"""Measure the length of a string in em units.
:param txt:
String to measure.
:return:
A length in em units.
"""
raise NotImplementedError
# FIXME this should probably return bytes
def render(self, txt: str):
"""Render a string to a format suitable for inclusion in a content
stream.
:param txt:
String to render.
:return:
A string.
"""
raise NotImplementedError
def as_resource(self) -> generic.DictionaryObject:
"""Convert a :class:`.FontEngine` to a PDF object suitable for embedding
inside a resource dictionary.
:return:
A PDF dictionary.
"""
raise NotImplementedError
# FIXME replace with something that knows the metrics for the standard PDF fonts
class SimpleFontEngine(FontEngine):
"""
Simplistic font engine that only works with PDF standard fonts, and
does not care about font metrics. Best used with monospaced fonts such
as Courier.
"""
@staticmethod
def default_engine():
"""
:return:
A :class:`.FontEngine` instance representing the Courier
standard font.
"""
return SimpleFontEngine('Courier', 0.6)
def __init__(self, name, avg_width):
self.avg_width = avg_width
self.name = name
def render(self, txt):
return f'({txt})'
def measure(self, txt):
return len(txt) * self.avg_width
def as_resource(self):
# assume that self.font is the name of a PDF standard font
# TODO enforce that
font_dict = generic.DictionaryObject({
pdf_name('/Type'): pdf_name('/Font'),
pdf_name('/BaseFont'): pdf_name('/' + self.name),
pdf_name('/Subtype'): pdf_name('/Type1'),
pdf_name('/Encoding'): pdf_name('/WinAnsiEncoding')
})
return font_dict
class GlyphAccumulator(FontEngine):
"""
Utility to collect & measure glyphs from TrueType fonts.
.. warning::
    This utility class ignores all positioning & substitution information
in the font file, other than glyph width/height.
In particular, features such as kerning, ligatures, complex script
support and regional substitution will not work out of the box.
.. warning::
This functionality was only really tested with CID-keyed fonts
that have a CFF table. This is good enough to offer basic support
for CJK scripts, but as I am not an OTF expert, more testing is
necessary.
"""
def __init__(self, tt: ttLib.TTFont):
self.tt = tt
self.cmap = tt.getBestCmap()
self.glyph_set = self.tt.getGlyphSet(preferCFF=True)
self._glyphs = {}
self._font_ref = None
try:
self.units_per_em = tt['head'].unitsPerEm
except KeyError:
self.units_per_em = 1000
def _encode_char(self, ch):
try:
(cid, gid, glyph) = self._glyphs[ch]
except KeyError:
# NOTE: the glyph id as reported by getGlyphID is NOT what we want
# to encode in the string. In some fonts (I've seen this in a couple
# full CJK fonts), this happens to be the same as the CID of the
# glyph but not always.
# I'm not sure what the "officially sanctioned" way to do this in
# fontTools is, but we can derive the CID from the generated name
# of the glyph, which is of the form cidXXXXX
# We do want to save the glyph ID to pass it to the subsetter later.
# FIXME This obviously breaks with string-keyed fonts. How to deal
# with those?
try:
glyph_name = self.cmap[ord(ch)]
glyph = self.glyph_set[glyph_name]
gid = self.tt.getGlyphID(glyph_name)
try:
cid = int(glyph_name[3:])
except ValueError:
raise NotImplementedError(
f"Could not figure out CID for glyph with name "
f"{glyph_name}."
)
except KeyError:
glyph = self.glyph_set['.notdef']
gid = self.tt.getGlyphID('.notdef')
cid = 0
self._glyphs[ch] = (cid, gid, glyph)
return cid, glyph.width
def feed_string(self, txt):
"""
Feed a string to this glyph accumulator.
:param txt:
String to encode/measure.
The glyphs used to render the string are marked for inclusion in the
font subset associated with this glyph accumulator.
:return:
Returns the CID-encoded version of the string passed in, and
an estimate of the width in em units.
The width computation ignores kerning, but takes the width of all
characters into account.
"""
total_width = 0
def _gen():
nonlocal total_width
for ch in txt:
cid, width = self._encode_char(ch)
# ignore kerning
total_width += width
yield '%04x' % cid
hex_encoded = ''.join(_gen())
return hex_encoded, total_width / self.units_per_em
def render(self, txt):
hex_encoded, _ = self.feed_string(txt)
return f'<{hex_encoded}>'
def measure(self, txt):
return self.feed_string(txt)[1]
def _extract_subset(self, options=None):
options = options or subset.Options()
subsetter: subset.Subsetter = subset.Subsetter(options=options)
gids = map(lambda x: x[1], self._glyphs.values())
subsetter.populate(gids=list(gids))
subsetter.subset(self.tt)
def embed_subset(self, writer: BasePdfFileWriter, obj_stream=None):
"""
Embed a subset of this glyph accumulator's font into the provided PDF
writer. Said subset will include all glyphs necessary to render the
strings provided to the accumulator via :meth:`feed_string`.
.. danger::
Due to the way ``fontTools`` handles subsetting, this is a
destructive operation. The in-memory representation of the original
font will be overwritten by the generated subset.
:param writer:
A PDF writer.
:param obj_stream:
If provided, write all relevant objects to the provided
`obj_stream`. If ``None`` (the default), they will simply be written
to the file as top-level objects.
:return:
A reference to the embedded ``/Font`` object.
"""
if self._font_ref is not None:
return self._font_ref
self._extract_subset()
cidfont_obj = CIDFontType0(self.tt)
# TODO keep track of used subset prefixes in the writer!
cff_topdict = self.tt['CFF '].cff[0]
name = cidfont_obj.name
cff_topdict.rawDict['FullName'] = '%s+%s' % (
generate_subset_prefix(), name
)
cidfont_obj.embed(writer, obj_stream=obj_stream)
cidfont_ref = writer.add_object(cidfont_obj)
to_unicode = self._format_tounicode_cmap(*cidfont_obj.ros)
type0 = generic.DictionaryObject({
pdf_name('/Type'): pdf_name('/Font'),
pdf_name('/Subtype'): pdf_name('/Type0'),
pdf_name('/DescendantFonts'): generic.ArrayObject([cidfont_ref]),
# take the Identity-H encoding to inherit from the /Encoding
# entry specified in our CIDSystemInfo dict
pdf_name('/Encoding'): pdf_name('/Identity-H'),
pdf_name('/BaseFont'):
pdf_name('/%s-Identity-H' % cidfont_obj.name),
pdf_name('/ToUnicode'): writer.add_object(to_unicode)
})
to_unicode.compress()
# compute widths entry
# (easiest to do here, since it seems we need the original CIDs)
by_cid = iter(sorted(self._glyphs.values(), key=lambda t: t[0]))
def _widths():
current_chunk = []
prev_cid = None
(first_cid, _, _), itr = peek(by_cid)
for cid, _, g in itr:
if current_chunk and cid != prev_cid + 1:
yield generic.NumberObject(first_cid)
yield generic.ArrayObject(current_chunk)
current_chunk = []
first_cid = cid
current_chunk.append(generic.NumberObject(g.width))
prev_cid = cid
if current_chunk:
yield generic.NumberObject(first_cid)
yield generic.ArrayObject(current_chunk)
cidfont_obj[pdf_name('/W')] = generic.ArrayObject(list(_widths()))
self._font_ref = ref = writer.add_object(type0, obj_stream=obj_stream)
return ref
def as_resource(self):
if self._font_ref is not None:
return self._font_ref
else:
raise ValueError
def _format_tounicode_cmap(self, registry, ordering, supplement):
def _pairs():
for ch, (cid, _, _) in self._glyphs.items():
yield cid, ch
by_cid = iter(sorted(_pairs(), key=lambda t: t[0]))
header = (
'/CIDInit /ProcSet findresource begin\n'
'12 dict begin\n'
'begincmap\n'
'/CIDSystemInfo 3 dict dup begin\n'
f'/Registry ({registry}) def\n'
f'/Ordering ({ordering}) def\n'
            f'/Supplement {supplement} def\n'
'end def\n'
f'/CMapName {registry}-{ordering}-{supplement:03} def\n'
'/CMapType 2 def\n'
'1 begincodespacerange\n'
'<0000> <FFFF>\n'
'endcodespacerange\n'
)
# TODO make an effort to use ranges when appropriate, and at least
# group the glyphs
body = '\n'.join(
f'1 beginbfchar\n<{cid:04x}> <{ord(ch):04x}>\nendbfchar\n'
for cid, ch in by_cid
)
footer = (
'endcmap\n'
'CMapName currentdict /CMap\n'
'defineresource pop\n'
'end\nend'
)
stream = generic.StreamObject(
stream_data=(header + body + footer).encode('ascii')
)
return stream
class CIDFont(generic.DictionaryObject):
def __init__(self, tt: ttLib.TTFont, ps_name, subtype, registry,
ordering, supplement):
self.tt = tt
self.name = ps_name
self.ros = registry, ordering, supplement
super().__init__({
pdf_name('/Type'): pdf_name('/Font'),
pdf_name('/Subtype'): pdf_name(subtype),
pdf_name('/CIDSystemInfo'): generic.DictionaryObject({
pdf_name('/Registry'): pdf_string(registry),
pdf_name('/Ordering'): pdf_string(ordering),
pdf_name('/Supplement'): generic.NumberObject(supplement)
}),
pdf_name('/BaseFont'): pdf_name('/' + ps_name)
})
self._font_descriptor = FontDescriptor(self)
def embed(self, writer: BasePdfFileWriter, obj_stream=None):
fd = self._font_descriptor
self[pdf_name('/FontDescriptor')] = fd_ref = writer.add_object(
fd, obj_stream=obj_stream
)
font_stream_ref = self.set_font_file(writer)
return fd_ref, font_stream_ref
def set_font_file(self, writer: BasePdfFileWriter):
raise NotImplementedError
# TODO support type 2 fonts (i.e. with 'glyf' instead of 'CFF ')
class CIDFontType0(CIDFont):
def __init__(self, tt: ttLib.TTFont):
# We assume that this font set (in the CFF sense) contains
# only one font. This is fairly safe according to the fontTools docs.
self.cff = cff = tt['CFF '].cff
td = cff[0]
ps_name = td.rawDict['FullName'].replace(' ', '')
try:
registry, ordering, supplement = td.ROS
except (AttributeError, ValueError):
# XXX If these attributes aren't present, chances are that the
# font won't work regardless.
logger.warning("No ROS metadata. Is this really a CIDFont?")
registry = "Adobe"
ordering = "Identity"
supplement = 0
super().__init__(
tt, ps_name, '/CIDFontType0', registry, ordering, supplement
)
def set_font_file(self, writer: BasePdfFileWriter):
stream_buf = BytesIO()
# write the CFF table to the stream
self.cff.compile(stream_buf, self.tt)
stream_buf.seek(0)
font_stream = generic.StreamObject({
# this is a Type0 CFF font program (see Table 126 in ISO 32000)
pdf_name('/Subtype'): pdf_name('/CIDFontType0C'),
}, stream_data=stream_buf.read())
font_stream.compress()
font_stream_ref = writer.add_object(font_stream)
self._font_descriptor[pdf_name('/FontFile3')] = font_stream_ref
return font_stream_ref
class FontDescriptor(generic.DictionaryObject):
"""
Lazy way to embed a font descriptor. It assumes all sorts of metadata
to be present. If not, it'll probably fail with a gnarly error.
"""
def __init__(self, cf: CIDFont):
tt = cf.tt
# Some metrics
hhea = tt['hhea']
head = tt['head']
bbox = [head.xMin, head.yMin, head.xMax, head.yMax]
os2 = tt['OS/2']
weight = os2.usWeightClass
stemv = int(10 + 220 * (weight - 50) / 900)
super().__init__({
pdf_name('/Type'): pdf_name('/FontDescriptor'),
pdf_name('/FontName'): pdf_name('/' + cf.name),
pdf_name('/Ascent'): generic.NumberObject(hhea.ascent),
pdf_name('/Descent'): generic.NumberObject(hhea.descent),
pdf_name('/FontBBox'): generic.ArrayObject(
map(generic.NumberObject, bbox)
),
# FIXME I'm setting the Serif and Symbolic flags here, but
# is there any way we can read/infer those from the TTF metadata?
pdf_name('/Flags'): generic.NumberObject(0b110),
pdf_name('/StemV'): generic.NumberObject(stemv),
pdf_name('/ItalicAngle'): generic.FloatObject(
tt['post'].italicAngle
),
pdf_name('/CapHeight'): generic.NumberObject(os2.sCapHeight)
})
@dataclass(frozen=True)
class GlyphAccumulatorFactory:
"""
Stateless callable helper class to instantiate :class:`.GlyphAccumulator`
objects.
"""
font_file: str
"""
Path to the OTF/TTF font to load.
"""
def __call__(self) -> GlyphAccumulator:
return GlyphAccumulator(ttLib.TTFont(self.font_file))
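# Editor's sketch (hypothetical font path and writer, not part of pyHanko
# itself): the typical GlyphAccumulator flow implied by the docstrings above.
#
#   ga = GlyphAccumulatorFactory('NotoSansCJKsc-Regular.otf')()
#   shown, width_em = ga.feed_string('你好')   # mark glyphs, measure width
#   font_ref = ga.embed_subset(pdf_writer)     # pdf_writer: a BasePdfFileWriter
#   resource = ga.as_resource()                # only valid after embed_subset()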
|
[
"matthias@mvalvekens.be"
] |
matthias@mvalvekens.be
|
390402dcfce1cffb758905473cc6efe159b95d2e
|
13f5984be7be77852e4de29ab98d5494a7fc6767
|
/剑指Offer/41_和为s的两个数字.py
|
838c15c3b4f0ac68070ec900dc0466481ba70b9b
|
[] |
no_license
|
YuanXianguo/Python-Interview-Master
|
4252514763fc3f563d9b94e751aa873de1719f91
|
2f73786e8c51dbd248341559de171e18f67f9bf2
|
refs/heads/master
| 2020-11-26T18:14:50.190812
| 2019-12-20T02:18:03
| 2019-12-20T02:18:03
| 229,169,825
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 733
|
py
|
"""
题目:输入一个递增排序的数组和一个数字S,在数组中查找两个数,使得他们的和正好是S,
如果有多对数字的和等于S,输出两个数的乘积最小的。
"""
def find_s(array, s):
    if not array or len(array) < 2 or array[-1] + array[-2] < s:
return []
left = 0
right = len(array) - 1
while left <= right:
if array[left] + array[right] < s:
left += 1
elif array[left] + array[right] > s:
right -= 1
        else:
            # The first matching pair found by this scan also has the smallest
            # product: for a fixed sum, the farther apart the two numbers are,
            # the smaller their product, and the scan starts from the widest pair.
return [array[left], array[right]]
return []
if __name__ == '__main__':
print(find_s(list(range(10)), 11))
|
[
"736913978@qq.com"
] |
736913978@qq.com
|
10b2c1cb78ae1a926458812fc5718cf5094dd82a
|
7c74e5c0d6ceb2f1acbfbaa3ecab8bcd1a690d64
|
/erpnext/patches/index_patch.py
|
3ef8ec367aebdf4febf24285bb0692599e4fec9d
|
[] |
no_license
|
Vichagserp/cimworks
|
2b7ce96fa3218a472c77bc8fdd1af2954e0e8b0e
|
f09d5e51aad870f2f0aeee2a0a02bc12ce435ded
|
refs/heads/master
| 2021-01-15T11:08:45.080328
| 2012-10-29T15:02:23
| 2012-10-29T15:02:23
| 49,730,358
| 0
| 0
| null | 2016-01-15T16:20:37
| 2016-01-15T16:20:37
| null |
UTF-8
|
Python
| false
| false
| 12,749
|
py
|
"""
This patch removes wrong indexs and add proper indexes in tables
"""
import webnotes
sql = webnotes.conn.sql
from webnotes.utils import cint, cstr
def create_proper_index():
from webnotes.modules.export_module import export_to_files
dt_index_fields={
'Purchase Receipt Detail': ['prevdoc_docname', 'item_code', 'warehouse', 'prevdoc_detail_docname'],
'Period Closing Voucher': ['closing_account_head', 'fiscal_year'],
'Lead': ['lead_name', 'status', 'transaction_date'],
'Time Sheet Detail': ['app_name'],
'Item Specification Detail': [],
'Budget Detail': ['fiscal_year', 'account'],
'Grade': [],
'RV Tax Detail': ['parenttype', 'account_head'],
'TDS Category Account': ['account_head'],
'Role': [],
'Leave Allocation': ['leave_type', 'employee', 'fiscal_year'],
'Branch': [],
'Department': [],
'Contact Detail': [],
'Territory': ['lft', 'rgt', 'parent_territory'],
'Item Tax': ['tax_type'],
'Bin': ['warehouse', 'item_code'],
'PPW Detail': ['warehouse'],
'Sales Partner': ['partner_name'],
'Default Home Page': ['home_page', 'role'],
'Custom Field': ['dt'],
'DocFormat': ['format'],
'DocType Mapper': ['from_doctype', 'to_doctype'],
'Brand': [],
'Order Lost Reason': [],
'Journal Voucher': ['posting_date', 'voucher_type'],
'TDS Return Acknowledgement': ['date_of_receipt', 'acknowledgement'],
'BOM Report Detail': ['item_code'],
'Quotation Detail': ['item_code'],
'Update Delivery Date Detail': ['sales_order_no'],
'Advance Adjustment Detail': ['journal_voucher'],
'Authorization Rule': ['approving_user', 'system_user', 'system_role', 'approving_role'],
'DocPerm': ['permlevel', 'role'],
'Stock Entry Detail': ['item_code', 't_warehouse', 's_warehouse'],
'Stock Entry': ['posting_date', 'delivery_note_no', 'purchase_receipt_no', 'production_order'],
'Price List': [],
'KRA Sheet': [],
'Production Order': ['status', 'project_name', 'production_item'],
'Account': ['lft', 'rgt', 'parent_account'],
'Earn Deduction Detail': [],
'Indent': ['status', 'transaction_date'],
'Tag Detail': [],
'SS Deduction Detail': ['d_type'],
'Batch': ['item'],
'Deduction Type': [],
'Project': ['project_name', 'customer'],
'UserRole': ['role'],
'DocField': ['label', 'fieldtype', 'fieldname'],
'Property Setter': ['doc_type', 'doc_name', 'property'],
'Appraisal': ['status', 'employee'],
'Letter Head': [],
'Follow up': ['follow_up_by'],
'Project Cost Breakup': [],
'Table Mapper Detail': [],
'Campaign': [],
'Static Parameter Detail': [],
'Leave Type': [],
'Account Balance': ['period', 'start_date', 'end_date', 'account'],
'Absent Days Detail': [],
'Tag': [],
'Raw Materials Supplied': ['raw_material'],
'Project Activity Update': [],
'PR Raw Material Detail': [],
'Bank Reconciliation Detail': ['voucher_id'],
'Sales Order': ['quotation_no', 'project_name', 'customer', 'posting_date'],
'Chapter VI A Detail': [],
'Experience In Company Detail': [],
'Order Reconciliation Detail': ['sales_order_no'],
'Attendance': ['employee', 'att_date'],
'Previous Experience Detail': [],
'Earning Detail': ['e_type'],
'Sales Order Detail': ['item_code', 'prevdoc_docname', 'reserved_warehouse'],
'KRA Template': [],
'Budget Distribution': ['fiscal_year'],
'Workstation': ['warehouse'],
'Period': [],
'Training Session Details': [],
'Other Charges': [],
'State': [],
'Bulk Rename Tool': [],
'Landed Cost Master Detail': [],
'Employee': ['employee_name', 'designation', 'department'],
'Terms And Conditions': [],
'TC Detail': [],
'UOM': [],
'Supplier Type': [],
'Project Milestone': [],
'Landed Cost Master': [],
'Budget Distribution Detail': [],
'Form 16A Ack Detail': [],
'Campaign Expense': [],
'Time Sheet': ['employee_name', 'time_sheet_date'],
'File Group': ['parent_group'],
'Maintenance Visit Detail': ['item_code', 'service_person'],
'Support Ticket Response': [],
'PV Detail': ['item_code', 'purchase_order', 'po_detail', 'purchase_receipt', 'pr_detail', 'expense_head', 'cost_center'],
'Timesheet Detail': ['project_name', 'task_id', 'customer_name'],
'Holiday List Detail': [],
'Workflow Rule Detail': [],
'Module Def': ['module_seq', 'module_page'],
'Term': [],
'PF Detail': ['item_code'],
'POS Setting': ['user', 'territory'],
'QA Specification Detail': [],
'Support Ticket': ['customer', 'allocated_to', 'status'],
'Project Activity': ['project'],
'Customer Group': ['lft', 'rgt', 'parent_customer_group'],
'Return Detail': ['item_code'],
'Series Detail': [],
'Event Role': ['role'],
'Contact': ['employee_id'],
'BOM Material': ['item_code', 'bom_no'],
'Invest 80 Declaration Detail': [],
'PO Raw Material Detail': [],
'Industry Type': [],
'Declaration Detail': [],
'Holiday List': ['fiscal_year'],
'Sales Person': ['lft', 'rgt', 'parent_sales_person'],
'RV Detail': ['item_code', 'sales_order', 'so_detail', 'delivery_note', 'dn_detail', 'cost_center', 'income_account'],
'Module Def Item': [],
'TDS Category': [],
'DocTrigger': [],
'Print Format': ['standard'],
'Installed Item Details': ['prevdoc_docname', 'item_code'],
'Form 16A Tax Detail': [],
'Event': ['event_date', 'event_type'],
'Currency': [],
'Service Quotation Detail': ['item_code'],
'Warehouse Type': ['warehouse_type'],
'Sales BOM': ['item_group'],
'IT Checklist': ['employee'],
'Purchase Other Charges': [],
'Company': [],
'Call Log': [],
'Professional Training Details': [],
'Warehouse': ['warehouse_type'],
'Competitor': [],
'Mode of Payment': [],
'Training Session': ['customer'],
'Cost Center': ['lft', 'rgt', 'parent_cost_center'],
'Timesheet': ['status', 'timesheet_date'],
'Form 16A': ['party_no'],
'Sales BOM Detail': ['item_code'],
'Answer': ['question'],
'Supplier': [],
'Installation Note': ['delivery_note_no', 'customer', 'inst_date'],
'Expense Voucher': ['approval_status', 'employee'],
'Target Detail': ['from_date', 'to_date', 'fiscal_year'],
'Page Role': ['role'],
'Partner Target Detail': ['fiscal_year', 'item_group'],
'Shipping Address': ['customer'],
'Indent Detail': ['item_code', 'warehouse'],
'TDS Payment Detail': [],
'Market Segment': [],
'Comment Widget Record': [],
'Service Order Detail': ['item_code', 'prevdoc_docname'],
'TDS Payment': ['from_date', 'to_date', 'tds_category'],
'Lead Email CC Detail': [],
'User Setting-Role User': [],
'Salary Slip': ['month', 'year', 'employee'],
'Maintenance Schedule Detail': ['item_code', 'scheduled_date'],
'Employment Type': [],
'Advance Allocation Detail': ['journal_voucher'],
'Quotation': ['customer', 'transaction_date'],
'Deduction Detail': ['d_type'],
'Bill Of Materials': ['item', 'project_name'],
'Earning Type': [],
'Designation': [],
'BOM Replace Utility Detail': ['parent_bom'],
'Question': [],
'Stock Ledger Entry': ['item_code', 'warehouse', 'posting_date', 'posting_time'],
'Educational Qualifications Detail': [],
'BOM Operation': [],
'Item Group': ['lft', 'rgt', 'parent_item_group'],
'Workflow Action Detail': [],
'User Setting-Profile': [],
'Customer Issue': ['item_code', 'customer', 'complaint_date'],
'Feed': [],
'Purchase Tax Detail': ['account_head'],
'GL Mapper Detail': [],
'TDS Detail': [],
'PRO Detail': ['item_code', 'source_warehouse'],
'DocType Label': [],
'Receivable Voucher': ['posting_date', 'debit_to', 'project_name'],
'GL Entry': ['posting_date', 'account', 'voucher_no'],
'Serial No': ['status', 'warehouse'],
'Delivery Note': ['posting_date', 'project_name', 'customer'],
'UOM Conversion Detail': ['uom'],
'Search Criteria': ['criteria_name'],
'Salary Structure': [],
'Educational Qualifications': ['qualification'],
'TDS Rate Chart': ['applicable_from', 'applicable_to'],
'GL Mapper': [],
'Announcement': [],
'Call Log Details': [],
'Enquiry': ['lead', 'customer', 'transaction_date'],
'Flat BOM Detail': ['item_code'],
'Landed Cost Detail': ['account_head'],
'Field Mapper Detail': ['from_field', 'to_field'],
'File Data': [],
'Question Tag': [],
'QA Inspection Report': ['item_code', 'purchase_receipt_no', 'report_date'],
'Appraisal Detail': [],
'POS Settings': ['territory'],
'Delivery Note Detail': ['item_code', 'prevdoc_docname', 'warehouse', 'prevdoc_detail_docname'],
'Profile': [],
'Other Income Detail': [],
'Product': ['item_code', 'stock_warehouse'],
'PO Detail': ['prevdoc_docname', 'item_code', 'prevdoc_detail_docname', 'warehouse'],
'Module Def Role': ['role'],
'Sales Team': ['sales_person'],
'Enquiry Detail': ['item_code'],
'DocType': [],
'Compaint Note': ['nature_of_complaint', 'compliance_date'],
'Maintenance Schedule': ['customer', 'sales_order_no'],
'Event User': ['person'],
'Stock Reconciliation': ['reconciliation_date'],
'Purchase Receipt': ['posting_date', 'supplier', 'project_name'],
'Complaint Detail': ['item_name'],
'Address': ['customer', 'supplier'],
'Ticket': ['request_date', 'allocated_to', 'category', 'customer', 'project'],
'Territory Target Detail': ['month', 'fiscal_year'],
'LC PR Detail': ['purchase_receipt_no'],
'Customer': ['customer_name', 'customer_group'],
'PP SO Detail': [],
'PP Detail': ['document_date', 'item_code', 'parent_item'],
'User Setting-Role Permission': [],
'Custom Script': ['dt'],
'Country': [],
'DefaultValue': [],
'Ledger Detail': [],
'SS Earning Detail': ['e_type'],
'SMS Log': [],
'Expense Type': [],
'Item': ['item_group'],
'Fiscal Year': [],
'ToDo Item': ['role'],
'Payable Voucher': ['posting_date', 'credit_to', 'project_name', 'supplier'],
'Journal Voucher Detail': ['account', 'against_voucher', 'against_invoice', 'against_jv'],
'Online Contact': [],
'Page': ['module'],
'Leave Application': ['employee', 'leave_type', 'from_date', 'to_date'],
'Expense Voucher Detail': ['expense_type'],
'Maintenance Visit': ['customer', 'sales_order_no', 'customer_issue_no'],
'Ref Rate Detail': ['price_list_name', 'ref_currency'],
'Receiver Detail': [],
'Naming Series Options': ['doc_type'],
'Activity Type': [],
'PRO PP Detail': [],
'Delivery Note Packing Detail': ['item_code', 'parent_item', 'warehouse'],
'Workflow Rule': ['select_form'],
'File': ['file_group'],
'Item Maintenance Detail': ['item_code', 'start_date', 'end_date', 'prevdoc_docname'],
'Purchase Order': ['supplier', 'project_name', 'posting_date'],
'Print Heading': [],
'TDS Rate Detail': ['category']
}
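	# Editor's note: for illustration, syncing one doctype against the mapping
	# above issues SQL of the form (the current index state is hypothetical):
	#   ALTER TABLE `tabGL Entry` DROP INDEX voucher_type
	#   ALTER TABLE `tabGL Entry` ADD INDEX ( `posting_date` )
	# and mirrors each change into tabDocField.search_index.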
#sql("commit") # only required if run from login
exist_dt = [cstr(d[0]) for d in sql("select name from `tabDocType`")]
for dt in [d for d in dt_index_fields.keys() if d in exist_dt]:
try:
current_index = sql("show indexes from `tab%s`" % dt)
proper_index = dt_index_fields[dt]
for d in current_index:
if d[4] not in ['name', 'parent', 'parenttype']:
if d[4] not in proper_index:
sql("ALTER TABLE `tab%s` DROP INDEX %s" % (dt, d[4]))
sql("start transaction")
sql("UPDATE `tabDocField` SET search_index = 0 WHERE fieldname = '%s' AND parent = '%s'" % (d[4], dt))
sql("commit")
else:
proper_index.remove(d[4])
for d in proper_index:
sql("ALTER TABLE `tab%s` ADD INDEX ( `%s` ) " % (dt, d))
sql("start transaction")
sql("UPDATE `tabDocField` SET search_index = 1 WHERE fieldname = '%s' AND parent = '%s'" % (d, dt))
sql("commit")
		except Exception:
			# skip doctypes whose table could not be inspected or altered
			continue
|
[
"dalal.saumil@gmail.com"
] |
dalal.saumil@gmail.com
|
01e9f2deb1a6b8af8b0a1fd9d82d124d85aac811
|
fbc390bb6e0a6bfd93b7013218ced5593e370b7b
|
/stfu-and-eat-env/bin/easy_install-3.8
|
0a534e1c637b143a08719f6727000ed7c8555cf0
|
[] |
no_license
|
MariahLightfoot/stfu-and-eat
|
00017e22c520baf34af2f754ac51b3dad0b5b868
|
979db15b45ae2bc8d89a48cb5ce6240e669772b0
|
refs/heads/master
| 2022-07-10T05:39:44.687958
| 2020-05-19T22:34:35
| 2020-05-19T22:34:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 281
|
8
|
#!/Users/mariahlightfoot/stfu-and-eat/stfu-and-eat-env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"mariahlightfoot@Mariahs-MacBook-Pro.local"
] |
mariahlightfoot@Mariahs-MacBook-Pro.local
|
e042052d13443c18324deaccfa46e2cedbc3d873
|
3c01bd8ea1ac70b363c80eb78477f38961950278
|
/2016/10.11 - ProTalk - Pradeep Gowda/example.py
|
cf6ba1db95df0a753651affeeaa823516db9770c
|
[] |
no_license
|
indypy/IndyPy-Presentations
|
6f3ec40fc39f0f34b576abd302751c8839bb2e55
|
f444396fc5920d59dc0bfe27e2f122dbb4ef4f1b
|
refs/heads/master
| 2021-06-07T08:56:46.673577
| 2020-06-29T15:06:47
| 2020-06-29T15:06:47
| 1,842,363
| 13
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,040
|
py
|
#!/usr/bin/env python
import threading
import logging
import time
from kafka import KafkaConsumer, KafkaProducer
class Producer(threading.Thread):
daemon = True
def run(self):
producer = KafkaProducer(bootstrap_servers='localhost:9092')
while True:
producer.send('my-topic', b"test")
producer.send('my-topic', b"\xc2Hola, mundo!")
time.sleep(1)
class Consumer(threading.Thread):
daemon = True
def run(self):
consumer = KafkaConsumer(bootstrap_servers='localhost:9092',
auto_offset_reset='earliest')
consumer.subscribe(['my-topic', 'test2', 'test'])
for message in consumer:
print(message)
def main():
threads = [Producer(), Consumer()]
for t in threads:
t.start()
time.sleep(10)
if __name__ == "__main__":
logging.basicConfig(
        format='%(asctime)s.%(msecs)03d:%(name)s:%(thread)d:%(levelname)s:%(process)d:%(message)s',
level=logging.INFO)
main()
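# Editor's note (not in the original): this demo assumes a Kafka broker
# reachable at localhost:9092 and the kafka-python package installed.
# The producer publishes two messages to 'my-topic' every second, the
# consumer prints every record it sees, and after ~10 seconds main()
# returns and the daemon threads are torn down with the process.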
|
[
"pradeep@btbytes.com"
] |
pradeep@btbytes.com
|
211a514ae0cd16142c13273742be423c3623e820
|
dccf277c0168092a1c0786d75c3ae52f4aafd4e9
|
/Report/Final Project Report/Data/Plot/Eg/tips.py
|
7c8ea4c14ad3deeefd998c6937f94cf27c6afa84
|
[] |
no_license
|
qifan-sailboat/SI140_Final_Project
|
afa21e673a76b5d9d67fe1261f129f73a7b53718
|
a45b2925247facd768e69b97bb4b0249646bea82
|
refs/heads/master
| 2022-08-22T23:29:37.580371
| 2018-01-19T08:36:15
| 2018-01-19T08:36:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 354
|
py
|
# encoding:utf-8
import seaborn as sns
import pandas as pd
tips = pd.read_csv("tips.csv")
print(tips)
sns.jointplot("total_bill", "tip", tips, kind='reg')
sns.plt.show()
sns.plt.close()  # clear the previous figure; otherwise the next chart would keep the previous chart's points
sns.lmplot("total_bill", "tip", tips, col="smoker")
sns.plt.show()
|
[
"zhangqf@shanghaitech.edu.cn"
] |
zhangqf@shanghaitech.edu.cn
|
3c748798e87f381daf193b7b8b054c1a27964a88
|
f6593c43817dab2031bafa6cfbfce77406efc042
|
/openshift/pman-swift-publisher/put_data.py
|
f3c2a2e7b08b4a408f2e770fc467010155039489
|
[
"MIT"
] |
permissive
|
awalkaradi95/pman
|
b3a83c9e8b925695b74615047710442e6cf359f1
|
e30c76116214dec11b413c5ee5a0b428f52577dd
|
refs/heads/master
| 2021-08-24T04:50:53.789022
| 2017-12-08T03:35:15
| 2017-12-08T03:35:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,667
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Takes the output data in the share directory and pushes it into Swift
SWIFT_KEY environment variable to be passed by the template
"""
import os
import zipfile
import configparser
from keystoneauth1.identity import v3
from keystoneauth1 import session
from swiftclient import client as swift_client
from swift_handler import SwiftHandler
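# SwiftStore zips up the shared /tmp/share directory and uploads the archive to
# Swift as a single object; the container name is taken from the SWIFT_KEY
# environment variable (see __main__ below).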
class SwiftStore():
swiftConnection = None
def _putObject(self, containerName, key, value):
"""
Creates an object with the given key and value and puts the object in the specified container
"""
self.swiftConnection.put_object(containerName, key , contents=value, content_type='text/plain')
print('Object added with key %s' %key)
def zipdir(self, path, ziph, **kwargs):
"""
Zip up a directory.
:param path:
:param ziph:
:param kwargs:
:return:
"""
str_arcroot = ""
for k, v in kwargs.items():
if k == 'arcroot': str_arcroot = v
for root, dirs, files in os.walk(path):
for file in files:
str_arcfile = os.path.join(root, file)
if len(str_arcroot):
str_arcname = str_arcroot.split('/')[-1] + str_arcfile.split(str_arcroot)[1]
else:
str_arcname = str_arcfile
try:
ziph.write(str_arcfile, arcname = str_arcname)
            except Exception:
print("Skipping %s" % str_arcfile)
def storeData(self, **kwargs):
"""
Creates an object of the file and stores it into the container as key-value object
"""
key = ''
for k,v in kwargs.items():
if k == 'path': key = v
fileName = '/tmp/share/'
ziphandler = zipfile.ZipFile('/tmp/share/ziparchive.zip', 'w', zipfile.ZIP_DEFLATED)
self.zipdir(fileName, ziphandler, arcroot = fileName)
with open('/tmp/share/ziparchive.zip','rb') as f:
zippedFileContent = f.read()
os.remove('/tmp/share/ziparchive.zip')
swiftHandler = SwiftHandler()
self.swiftConnection = swiftHandler._initiateSwiftConnection()
containerName = key
key = os.path.join('output','data')
self._putObject(containerName, key, zippedFileContent)
#Delete temporary empty directory created by Swift
swiftHandler._deleteEmptyDirectory(key)
if __name__ == "__main__":
obj = SwiftStore()
obj.storeData(path = os.environ.get('SWIFT_KEY'))
|
[
"awalkaradi95@ccs.neu.edu"
] |
awalkaradi95@ccs.neu.edu
|
6a6428d77fcc374be4f72b9709f9312b9604fcfc
|
8e0c861a33e8dd50bbbec5ceef116659aebe7def
|
/main.py
|
07e3e896401c317495a0a9c6e6105240a81bddda
|
[] |
no_license
|
karthikm-0/Pong
|
46938bb5e87a304d4342b0ec80f883cf5f366a16
|
a155578a181d3843fd876e352bb21e7691f8f618
|
refs/heads/master
| 2022-12-03T04:47:19.123780
| 2020-08-22T06:45:50
| 2020-08-22T06:45:50
| 289,165,183
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,115
|
py
|
from turtle import *
from random import randint, choice
# Create a player with their paddle and listen for inputs
class Player:
# How much to move the paddle when user presses the corresponding key
move_offset = 15
def __init__(self, speed, shape, color, initial_pos_x, initial_pos_y, up_key, down_key):
# Create the turtle and set its characteristics
self.player = Turtle()
        self.player.shape('rectangle')  # uses the custom "rectangle" shape registered below; the shape argument is unused
self.player.color(color)
self.player.penup()
self.player.goto(initial_pos_x, initial_pos_y)
# Set up the screen and movement
self.screen = Screen()
self.screen.onkey(self.move_up, up_key)
self.screen.onkey(self.move_down, down_key)
self.screen.listen()
# Movement
def move_up(self):
print("Moving up")
self.player.sety(self.player.ycor() + self.move_offset)
def move_down(self):
print("Moving down")
self.player.sety(self.player.ycor() - self.move_offset)
# Start the main loop which runs infinitely
def main(self):
self.screen.mainloop()
# Encode information about the ball
class Ball:
# Constants that represent how fast the ball will move around the screen
def __init__(self, speed, shape, color, initial_pos_x, initial_pos_y, width, height, Pl_A, Pl_B):
self.ball = Turtle()
self.ball.shape(shape)
self.ball.color('white')
self.ball.penup()
self.ball.goto(initial_pos_x, initial_pos_y)
self.ball.speed = speed
self.ball.setheading(choice([0, 180]) + randint(-60, 60))
# Displacement of ball in x and y
#self.ball.dx = -self.BALL_DX
#self.ball.dy = -self.BALL_DY
# Screen
self.screen = Screen()
def move(self, Pl_A, Pl_B):
self.ball.forward(self.ball.speed)
#x_new = self.ball.xcor() + self.ball.dx
#y_new = self.ball.ycor() + self.ball.dy
#self.ball.setposition(x_new, y_new)
#self.screen.mainloop()
# Look for whether we have hit the boundaries and reset the ball and flip the direction the ball will move
# Left-Paddle
# Right-Paddle
def distance(self, Pl_X):
'''if self.ball.xcor() < - screen_width / 2:
self.ball.setx(-screen_width / 2)
#self.ball.dx = self.BALL_DX
self.ball.setheading(-self.ball.heading())
# X-right
if self.ball.xcor() > screen_width / 2:
self.ball.setx(-screen_width / 2)
#self.ball.dx = -self.BALL_DX
self.ball.setheading(-self.ball.heading())'''
# Y-top
if self.ball.ycor() > screen_height / 2:
self.ball.sety(screen_height / 2)
self.ball.setheading(-self.ball.heading())
#self.ball.dy = -self.BALL_DY
# Y-bottom
if self.ball.ycor() < - screen_height / 2:
self.ball.sety(-screen_height / 2)
self.ball.setheading(-self.ball.heading())
#self.ball.dy = self.BALL_DY
# Check collision with either paddle
# Paddle A
#if self.ball.xcor() == Player_A.player.xcor():
#print("Colliding with player A")
#self.ball.dx = self.BALL_DX
#elif self.ball.xcor() == Player_B.player.xcor():
#print("Colliding with player B")
#self.ball.dx = -self.BALL_DX
# Paddle information
paddle_height = 40
paddle_width = 10
# Screen
screen_width = 500
screen_height = 500
screen = Screen()
screen.setup(500, 500)
screen.bgcolor("black")
# Information about a custom shape
screen.register_shape("rectangle",[
(paddle_height, paddle_width),
(-paddle_height, paddle_width),
(-paddle_height, -paddle_width),
(paddle_height, -paddle_width)
])
# Create players
Player_A = Player(0, 'square', 'red', -250, 0, 'Up', 'Down')
Player_B = Player(0, 'square', 'blue', 250, 0, 'w', 's')  # turtle key names for letters are lowercase
Ball = Ball(5, 'circle', 'white', 0, 0, screen_width, screen_height, Player_A, Player_B)
# Calling Player_A.main() here would block on screen.mainloop(), so the
# game loop below would never run; the while loop itself drives the ball.
#screen.listen()
#Ball.move(Player_A, Player_B)
#screen.mainloop()
#screen.ontimer(Ball.move(Player_A, Player_B), 50)
#Ball.main(Player_A, Player_B)
#screen.listen()
#screen.mainloop()
while(True):
#print(Player_A.player.position())
# Figure out how the ball should move next passing in the paddle's positions
Ball.move(Player_A, Player_B)
|
[
"karthik.mahadevan@ucalgary.ca"
] |
karthik.mahadevan@ucalgary.ca
|
2ab1fbd83a55536dacf6ea2fa4eca99c270881fa
|
1d27decdb5207616837f03a36741947a46e8852d
|
/py/hscCoadd/batchSky.py
|
6b763392494d0af9d680a7c2a596d2b4578c694b
|
[] |
no_license
|
dr-guangtou/hs_hsc
|
12d7a83ee9898f6d9fb5cf5dc85fa682d50578e8
|
865abc0ba5337d3a085efa99b87ebfcfdd9710af
|
refs/heads/master
| 2021-01-17T03:20:03.900050
| 2019-06-19T05:26:34
| 2019-06-19T05:26:34
| 23,053,990
| 0
| 2
| null | 2015-12-02T15:52:47
| 2014-08-18T00:47:48
|
Python
|
UTF-8
|
Python
| false
| false
| 9,345
|
py
|
#!/usr/bin/env python
# encoding: utf-8
"""Estimate background for HSC cutouts."""
from __future__ import (division, print_function)
import os
import glob
import fcntl
import logging
import argparse
import warnings
import numpy as np
from astropy.io import fits
import coaddCutoutSky as ccs
COM = '#' * 100
SEP = '-' * 100
WAR = '!' * 100
def run(args):
"""
Run coaddCutoutSky in batch mode.
Parameters:
"""
if os.path.isfile(args.incat):
""" Basic information """
id = (args.id)
data = fits.open(args.incat)[1].data
rerun = (args.rerun).strip()
prefix = (args.prefix).strip()
filter = (args.filter).strip().upper()
""" Keep a log """
if args.sample is not None:
logPre = prefix + '_' + args.sample
else:
logPre = prefix
logFile = logPre + '_sky_' + filter + '.log'
if not os.path.isfile(logFile):
os.system('touch ' + logFile)
if args.verbose:
print("\n## Will deal with %d galaxies ! " % len(data))
for galaxy in data:
""" Galaxy ID and prefix """
galID = str(galaxy[id]).strip()
galPrefix = prefix + '_' + galID + '_' + filter + '_full'
"""Folder for the data"""
galRoot = os.path.join(galID, filter)
if not os.path.isdir(galRoot):
if args.verbose:
print('\n### Cannot find the folder: %s !' % galRoot)
with open(logFile, "a") as logMatch:
try:
logFormat = "%25s %5s NDIR %6.1f %7.4f %7.4f" + \
" %7.4f %7.4f %7.4f \n"
logMatch.write(logFormat %
(galPrefix, filter, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan))
fcntl.flock(logMatch, fcntl.LOCK_UN)
except IOError:
pass
continue
"""Collect the FITS file information"""
fitsList = glob.glob(os.path.join(galRoot, '*.fits'))
if len(fitsList) <= 3:
if args.verbose:
print("### Missing data under %s" % galRoot)
with open(logFile, "a") as logMatch:
try:
logFormat = "%25s %5s MISS %6.1f %7.4f %7.4f" + \
" %7.4f %7.4f %7.4f \n"
logMatch.write(logFormat %
(galPrefix, filter, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan))
fcntl.flock(logMatch, fcntl.LOCK_UN)
except IOError:
pass
continue
"""
Set up a rerun
"""
galRoot = os.path.join(galRoot, rerun.strip())
if not os.path.isdir(galRoot):
os.makedirs(galRoot)
""" Link the necessary files to the rerun folder """
for fitsFile in fitsList:
seg = fitsFile.split('/')
link = os.path.join(galRoot, seg[-1])
if (not os.path.islink(link)) and (not os.path.isfile(link)):
os.symlink(fitsFile, link)
"""
External mask
"""
if args.maskFilter is not None:
mskFilter = (args.maskFilter).strip().upper()
if args.verbose:
print("\n### Use %s filter for mask \n" % mskFilter)
mskPrefix = (prefix + '_' + galID + '_' + mskFilter + '_full')
mskRoot = os.path.join(galID, mskFilter, rerun)
galMsk = os.path.join(mskRoot, mskPrefix + '_mskall.fits')
if not os.path.isfile(galMsk):
if args.verbose:
print(
'\n### Can not find final mask : %s !' % galMsk)
with open(logFile, "a") as logMatch:
try:
logFormat = "%25s %5s NMSK %6.1f %7.4f " + \
"%7.4f %7.4f %7.4f %7.4f \n"
logMatch.write(logFormat %
(galPrefix, filter, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan))
fcntl.flock(logMatch, fcntl.LOCK_UN)
except IOError:
pass
continue
else:
galMsk = None
""" Start to estimate the sky background """
try:
skyGlobal = ccs.coaddCutoutSky(
galPrefix,
root=galRoot,
pix=args.pix,
zp=args.zp,
rebin=args.rebin,
skyClip=args.skyClip,
verbose=args.verbose,
visual=args.visual,
exMask=galMsk,
bkgSize=args.bkgSize,
bkgFilter=args.bkgFilter,
saveBkg=args.saveBkg,
nClip=args.nClip)
numSkyPix, skyMed, skyAvg, skyStd, skySkw, sbExpt = skyGlobal
with open(logFile, "a") as logMatch:
try:
logFormat = "%25s %5s %3d %6d %7.4f %7.4f" + \
" %7.4f %7.4f %7.4f \n"
logMatch.write(logFormat %
(galPrefix, filter, args.rebin,
numSkyPix, skyMed, skyAvg, skyStd,
skySkw, sbExpt))
fcntl.flock(logMatch, fcntl.LOCK_UN)
except IOError:
pass
        except Exception as errMsg:
print(WAR)
print(str(errMsg))
warnings.warn('### The sky estimate is failed ' +
'for %s in %s' % (galID, filter))
logging.warning('### The sky estimate is failed ' +
'for %s in %s' % (galID, filter))
with open(logFile, "a") as logMatch:
try:
logFormat = "%25s %5s %3d %6d %7.4f %7.4f" + \
" %7.4f %7.4f %7.4f \n"
logMatch.write(logFormat %
(galPrefix, filter, args.rebin, 0,
np.nan, np.nan, np.nan, np.nan,
np.nan))
fcntl.flock(logMatch, fcntl.LOCK_UN)
except IOError:
pass
else:
raise Exception("### Can not find the input catalog: %s" % args.incat)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("prefix", help="Prefix of the galaxy image files")
parser.add_argument("incat", help="The input catalog for cutout")
parser.add_argument(
'-i',
'--id',
dest='id',
help="Name of the column for galaxy ID",
default='index')
parser.add_argument(
'-f', '--filter', dest='filter', help="Filter", default='HSC-I')
parser.add_argument(
'-mf',
'--mFilter',
dest='maskFilter',
help="Filter for Mask",
default=None)
parser.add_argument(
'-r',
'--rerun',
dest='rerun',
help="Name of the rerun",
default='default')
parser.add_argument(
'--sample', dest='sample', help="Sample name", default=None)
""" Optional """
parser.add_argument(
'--skyclip',
dest='skyClip',
help='Sigma for pixel clipping',
type=float,
default=3.0)
parser.add_argument(
'--bkgSize',
dest='bkgSize',
help='Background size for SEP',
type=int,
default=60)
parser.add_argument(
'--bkgFilter',
dest='bkgFilter',
help='Background filter size for SEP',
type=int,
default=5)
parser.add_argument(
'--rebin',
dest='rebin',
help='Rebin the image by N x N pixels',
type=int,
default=6)
parser.add_argument(
'--pix',
dest='pix',
        help='Pixel scale of the image',
type=float,
default=0.168)
parser.add_argument(
'--zp',
dest='zp',
help='Photometric zeropoint of the image',
type=float,
default=27.0)
parser.add_argument(
'--verbose', dest='verbose', action="store_true", default=True)
parser.add_argument(
'--visual', dest='visual', action="store_true", default=True)
parser.add_argument(
'--nClip',
dest='nClip',
help='Number of iterations for clipping',
type=int,
default=2)
parser.add_argument(
'--saveBkg', dest='saveBkg', action="store_true", default=False)
args = parser.parse_args()
run(args)
|
[
"dr.guangtou@gmail.com"
] |
dr.guangtou@gmail.com
|
0717fda518f3d4d781e8f3d72de4c8c29f9b556a
|
3e4bd306ae5b9d7251010c1269c291ae131f109d
|
/mail.py
|
aa23bf2094c889658a1b3ef940334bc34125d072
|
[] |
no_license
|
szatkus/yask
|
425352fc67af8104f2292f3312d1f02fde753cdf
|
64d6429f6f430666ddcc0e1593bb27ceff53e7ca
|
refs/heads/master
| 2021-01-21T13:11:48.654401
| 2016-04-22T21:54:50
| 2016-04-22T21:54:50
| 55,621,552
| 1
| 2
| null | 2016-04-17T22:38:36
| 2016-04-06T16:18:05
|
CSS
|
UTF-8
|
Python
| false
| false
| 539
|
py
|
from email.mime.text import MIMEText
from flask import render_template
from smtplib import SMTP
import config
def send_mail(template_name, subject, recipient, **context):
    body = render_template(template_name, **context)
    message = MIMEText(body, 'html')
    message['Subject'] = subject
    message['To'] = recipient
    message['From'] = config.EMAIL_ADDRESS
    smtp_connection = SMTP(config.SMTP_HOST, config.SMTP_PORT)
    smtp_connection.sendmail(config.EMAIL_ADDRESS, recipient, message.as_string())
    smtp_connection.quit()
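# Example usage (hypothetical template and recipient, assuming a Flask
# application/request context so render_template works):
#   send_mail('emails/welcome.html', 'Welcome!', 'user@example.com', name='Alice')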
|
[
"szatkus@gmail.com"
] |
szatkus@gmail.com
|
352c0988abe8b7b6d3715ce5259f4901c44b007a
|
1b5ca01937b839301651cb3b4f7a8a46548f20d2
|
/Django Template/Template/Template/wsgi.py
|
b1f3eaef44379173f394c7c5a8286c83ce5db44b
|
[] |
no_license
|
kevalvc/Django-Tutorial
|
c0be4dc52f0f060fdc82c8ad50146b70f983699f
|
3881887d2a54108a77ecd192f00acedf8beb3681
|
refs/heads/master
| 2020-04-18T22:34:48.514536
| 2019-04-21T15:26:42
| 2019-04-21T15:26:42
| 167,797,833
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 393
|
py
|
"""
WSGI config for Template project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Template.settings')
application = get_wsgi_application()
|
[
"chavda.keval97@gmail.com"
] |
chavda.keval97@gmail.com
|
77c3594b66167d077b8340e5b8996120dbbc0751
|
6a4b20e303aa97c0ad1834e189d63c66e67f7c64
|
/action-alsa-volume.py
|
8c93c044e16a2119351f8c5769c00d383c17f997
|
[
"MIT"
] |
permissive
|
gidocarper/snips-volume-1
|
641544d9490292fc9f7c20532aabf219c526c49e
|
f71ba430463d591969eeb7ee8e4bd950579bcb4a
|
refs/heads/master
| 2020-08-22T09:00:56.908359
| 2019-10-05T11:24:21
| 2019-10-05T11:24:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,046
|
py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from snipsTools import SnipsConfigParser
from hermes_python.hermes import Hermes
from hermes_python.ontology import *
import io
import socket
import random
from subprocess import call
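# Action code for a Snips voice-assistant skill: subscribes to intents over
# MQTT and, on a SetVolume intent, sets the ALSA mixer level with `amixer`.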
CONFIG_INI = "config.ini"
# If this skill is supposed to run on the satellite,
# please get this mqtt connection info from <config.ini>
# Hint: MQTT server is always running on the master device
MQTT_IP_ADDR = "localhost"
MQTT_PORT = 1883
MQTT_ADDR = "{}:{}".format(MQTT_IP_ADDR, str(MQTT_PORT))
success_tts = ['Got it', 'Sure', 'Done', 'Ok']
fail_tts = ["Sorry, I can't do that", "Sorry, that doesn't work"]
no_slot_tts = ["What do you mean?", "Don't waste my time", "I can't do anything with that", "Please stop bothering me", "No", "I'd rather not", "No, I don't think I will"]
class ALSAVolume(object):
"""Class used to wrap action code with mqtt connection
Please change the name refering to your application
"""
def __init__(self):
# get the configuration if needed
try:
self.config = SnipsConfigParser.read_configuration_file(CONFIG_INI)
        except Exception:
self.config = None
# start listening to MQTT
self.start_blocking()
# --> Sub callback function, one per intent
def setVolumeCallback(self, hermes, intent_message):
# terminate the session first if not continue
#hermes.publish_end_session(intent_message.session_id, "")
# action code goes here...
print '[Received] intent: {}'.format(intent_message.intent.intent_name)
volumeSet = False
for (slot_value, slot) in intent_message.slots.items():
if slot_value == "Volume":
self.Volume = slot.first().value.encode("utf8")
volumeSet = True
if volumeSet:
            try:
                volume = int(self.Volume)
                deviceName = self.config['secret']['device_name']
                call(["amixer", "set", deviceName, str(volume)+"%"])
                tts = random.choice(success_tts) + ", " + str(volume) + " percent"
            except ValueError:
                # a non-numeric slot value would otherwise leave `volume` undefined below
                tts = random.choice(fail_tts)
else:
if(self.config['secret']['snarky_response']) == "y":
tts = random.choice(no_slot_tts)
else:
tts = random.choice(fail_tts)
hermes.publish_end_session(intent_message.session_id, tts)
# --> Master callback function, triggered everytime an intent is recognized
def master_intent_callback(self,hermes, intent_message):
coming_intent = intent_message.intent.intent_name
if coming_intent == 'thejonnyd:SetVolume':
self.setVolumeCallback(hermes, intent_message)
# more callback and if condition goes here...
# --> Register callback function and start MQTT
def start_blocking(self):
with Hermes(MQTT_ADDR) as h:
h.subscribe_intents(self.master_intent_callback).start()
if __name__ == "__main__":
ALSAVolume()
|
[
"jonathandavies97@gmail.com"
] |
jonathandavies97@gmail.com
|
c5d07fd2aa4808c4d1c07db78de1188eb130b079
|
093f0e76fbec79741b21c4ae6a2c426c8d1be5c5
|
/2_ sheet.py
|
5ee4db11dbf5833b2ff8912a414d44fec2b59865
|
[] |
no_license
|
sc2bat/rpa-excel
|
e3192a888afc3728a0127c3f87f014b3167aa84d
|
ef5d8c1c93dcf8d806b45e9b1f4a474bd1d392e0
|
refs/heads/main
| 2023-07-03T10:20:43.885572
| 2021-08-15T11:42:37
| 2021-08-15T11:42:37
| 394,073,311
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 745
|
py
|
# Managing a workbook sheet by sheet
from openpyxl import Workbook
wb = Workbook()
# wb.active
ws = wb.create_sheet() # create a new sheet with the default name
ws.title = "MySheet" # rename the sheet
ws.sheet_properties.tabColor = "0000cc" # tab color is given as an RGB hex value
# Google search 'RGB'
# https://www.w3schools.com/colors/colors_rgb.asp
ws1 = wb.create_sheet("YSheet") # create and name the sheet in one step
ws2 = wb.create_sheet("NSheet", 2) # create the sheet at index 2
new_ws = wb["NSheet"] # access a sheet like a dict entry
print(wb.sheetnames) # list all sheet names
# Copy a sheet
new_ws["A1"] = "Test"
target = wb.copy_worksheet(new_ws)
target.title = "Copied Sheet"
wb.save("sample2.xlsx")
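# The saved workbook can be inspected afterwards, e.g.:
#   from openpyxl import load_workbook
#   print(load_workbook("sample2.xlsx").sheetnames)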
|
[
"noreply@github.com"
] |
sc2bat.noreply@github.com
|
86a469d1d858774538e63e7f77ac05194420fe71
|
ea8df164fbd945a8ff7d545f51a54d7102bf2afc
|
/app/auth/forms.py
|
543fbeae3591e7b48fc5f99c6d3367b87a5ffd43
|
[] |
no_license
|
Grievi/upgraded-train
|
310a9153debeff8fe49ed0c6d3de0f73e37eaeb8
|
b6c22ee57c000daf616bafc523f7d4e97b60fd99
|
refs/heads/master
| 2023-08-11T05:18:15.791221
| 2021-09-22T10:52:18
| 2021-09-22T10:52:18
| 407,543,699
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,340
|
py
|
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField, ValidationError
from wtforms.validators import Required, Email, EqualTo
from ..models import User
class RegistrationForm(FlaskForm):
email = StringField('Your Email Address',validators=[Required(),Email()])
username = StringField('Enter your username',validators = [Required()])
password = PasswordField('Password',validators = [Required(), EqualTo('password_confirm',message = 'Passwords must match')])
password_confirm = PasswordField('Confirm Passwords',validators = [Required()])
submit = SubmitField('Sign Up')
def validate_email(self,data_field):
if User.query.filter_by(email =data_field.data).first():
raise ValidationError('There is an existing account with that email')
def validate_username(self,data_field):
if User.query.filter_by(username = data_field.data).first():
raise ValidationError('That username is taken')
class LoginForm(FlaskForm):
email = StringField('Your Email Address',validators=[Required(),Email()])
password = PasswordField('Password',validators =[Required()])
remember = BooleanField('Remember me')
submit = SubmitField('Sign In')
|
[
"grievin.okumu@student.moringaschool.com"
] |
grievin.okumu@student.moringaschool.com
|
a88606b2fb5d2e237d8dbca360bcdf1e252ee533
|
82bc8f17bf77ff80d50b8018f75f9bb5baf4319a
|
/Kyphosis Dataset/kyphosis.py
|
76c7e7443fc9cbc1e7aa8ef677f0a84c002920f1
|
[] |
no_license
|
kartikJ-9/Kaggle-Submissions
|
8669207ba630ab981822b79317c2a31034efb0f7
|
6bc87453dddb4dd0c53e80926d1f7af3b21c2ed0
|
refs/heads/master
| 2020-06-06T10:55:01.079516
| 2019-06-19T12:54:17
| 2019-06-19T12:54:17
| 192,721,119
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 954
|
py
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
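# Assumes the Kaggle kyphosis dataset (Kyphosis, Age, Number, Start columns)
# is available at ../input/kyphosis.csv, as on Kaggle kernels.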
df = pd.read_csv("../input/kyphosis.csv")
df.head()
df.info()
sns.pairplot(df,hue = 'Kyphosis')
from sklearn.model_selection import train_test_split
X = df.drop('Kyphosis',axis = 1)
y = df['Kyphosis']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
from sklearn.tree import DecisionTreeClassifier
dtree = DecisionTreeClassifier()
dtree.fit(X_train,y_train)
predictions = dtree.predict(X_test)
from sklearn.metrics import classification_report, confusion_matrix
print(confusion_matrix(y_test,predictions))
print(classification_report(y_test,predictions))
from sklearn.ensemble import RandomForestClassifier
rfc = RandomForestClassifier(n_estimators=200)
rfc.fit(X_train,y_train)
rfc_pred = rfc.predict(X_test)
print(confusion_matrix(y_test,rfc_pred))
print(classification_report(y_test,rfc_pred))
|
[
"noreply@github.com"
] |
kartikJ-9.noreply@github.com
|
0dc1b58633995ce70c987ad0742efd277a281104
|
bee303a11857c62be71d2ced72e2622e3af6f5b4
|
/practice/kansu1.py
|
5cf2297e6604e395ae3df54da18796247a149ada
|
[] |
no_license
|
MinenoLab/ai-iotSeminar3
|
c5e455f200866b8aef6a957c87e9ff82019acff9
|
485331cd41a50bed5a1e2a191d5d03f99925fc44
|
refs/heads/master
| 2020-04-22T00:33:46.032527
| 2019-03-01T10:40:48
| 2019-03-01T10:40:48
| 169,984,344
| 0
| 0
| null | 2019-02-22T08:28:05
| 2019-02-10T14:03:07
|
Python
|
UTF-8
|
Python
| false
| false
| 74
|
py
|
def sample(a,b):
c = a + b
return c
d = sample(1,2)
print(d)
|
[
"supo.nyiyan437@gmail.com"
] |
supo.nyiyan437@gmail.com
|
40c17a94c04fa69086b016caaa0cf22d7d962c0f
|
a94f779b762463d80a775db7efc08b47ff60aac1
|
/days/5/testing/test2.py
|
bb4afc3bfcfd1149cf04aaea37a6f59ac04b17c5
|
[] |
no_license
|
sivajipr/python-course
|
602a99d941dc6df1dabb17dfc284dcffd140e003
|
176c04426f0cbef1c4beb888300dd911eb708b97
|
refs/heads/master
| 2016-09-05T09:29:30.649858
| 2014-09-19T08:18:36
| 2014-09-19T08:18:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 263
|
py
|
from nose.tools import *
def fact(n):
if (n == 0): return 1
else: return n * fact(n - 1)
def test_fact():
assert_equal(fact(0), 1)
assert_equal(fact(1), 1)
assert_equal(fact(2), 2)
assert_equal(fact(5), 120)
def fun():
assert True
|
[
"sivajipoonithara@gmail.com"
] |
sivajipoonithara@gmail.com
|
69168380efb9d8171f25fff746ebc97b45061216
|
d8466eb8b2c214e887f306fc9744d87cad4f98d9
|
/functional_tests/base.py
|
a70d9e0c4ad9322ab8475d4248e3bdec4e22e45a
|
[] |
no_license
|
evgnysmirnov/TDDPython
|
fabbb4fcc272cef5c055ad91612cd4682e7dcd0c
|
5afdb5d2635c7610b1dd52126d7800d1aae31d46
|
refs/heads/master
| 2020-08-31T15:51:48.558097
| 2019-10-04T10:25:56
| 2019-10-04T10:25:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,342
|
py
|
import os
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from selenium import webdriver
from selenium.common.exceptions import WebDriverException
import time
MAX_WAIT = 10
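# Maximum number of seconds wait_for/wait_for_row_in_list_table will keep
# retrying before letting the underlying assertion or WebDriver error propagate.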
class FunctionalTest(StaticLiveServerTestCase):
def setUp(self):
self.browser = webdriver.Chrome()
staging_server = os.environ.get('STAGING_SERVER')
if staging_server:
self.live_server_url = 'http://' + staging_server
def tearDown(self):
self.browser.quit()
def wait_for(self, fn):
start_time = time.time()
while True:
try:
return fn()
except (AssertionError, WebDriverException) as e:
if time.time() - start_time > MAX_WAIT:
raise e
time.sleep(0.5)
def wait_for_row_in_list_table(self, row_text):
start_time = time.time()
while True:
try:
table = self.browser.find_element_by_id('id_list_table')
rows = table.find_elements_by_tag_name('tr')
self.assertIn(row_text, [row.text for row in rows])
return
except (AssertionError, WebDriverException) as e:
if time.time() - start_time > MAX_WAIT:
raise e
time.sleep(0.5)
|
[
"objectisliper@gmail.com"
] |
objectisliper@gmail.com
|
e62c2b7a3fa4fa4ac700ab6e41ae2805309a90d3
|
6e49e2237979ba8b38f8384501878cd8488e6601
|
/celery/tests/test_datastructures.py
|
9aefc3be376ee63b982ce084fb26dbd873f369b8
|
[
"BSD-3-Clause"
] |
permissive
|
fberger/celery
|
a39ba061781c9861a89fa2d669e001e478e5e226
|
6715848bfd470ded836c2b54b5c135b2b14c05c5
|
refs/heads/master
| 2021-01-17T23:28:28.007753
| 2010-03-07T00:09:46
| 2010-03-07T00:09:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,758
|
py
|
import sys
import unittest
from Queue import Queue
from celery.datastructures import PositionQueue, ExceptionInfo, LocalCache
from celery.datastructures import LimitedSet, SharedCounter, consume_queue
class TestPositionQueue(unittest.TestCase):
def test_position_queue_unfilled(self):
q = PositionQueue(length=10)
for position in q.data:
self.assertTrue(isinstance(position, q.UnfilledPosition))
self.assertEquals(q.filled, [])
self.assertEquals(len(q), 0)
self.assertFalse(q.full())
def test_position_queue_almost(self):
q = PositionQueue(length=10)
q[3] = 3
q[6] = 6
q[9] = 9
self.assertEquals(q.filled, [3, 6, 9])
self.assertEquals(len(q), 3)
self.assertFalse(q.full())
def test_position_queue_full(self):
q = PositionQueue(length=10)
for i in xrange(10):
q[i] = i
self.assertEquals(q.filled, list(xrange(10)))
self.assertEquals(len(q), 10)
self.assertTrue(q.full())
class TestExceptionInfo(unittest.TestCase):
def test_exception_info(self):
try:
raise LookupError("The quick brown fox jumps...")
except LookupError:
exc_info = sys.exc_info()
einfo = ExceptionInfo(exc_info)
self.assertEquals(str(einfo), einfo.traceback)
self.assertTrue(isinstance(einfo.exception, LookupError))
self.assertEquals(einfo.exception.args,
("The quick brown fox jumps...", ))
self.assertTrue(einfo.traceback)
r = repr(einfo)
self.assertTrue(r)
class TestUtilities(unittest.TestCase):
def test_consume_queue(self):
x = Queue()
it = consume_queue(x)
self.assertRaises(StopIteration, it.next)
x.put("foo")
it = consume_queue(x)
self.assertEquals(it.next(), "foo")
self.assertRaises(StopIteration, it.next)
class TestSharedCounter(unittest.TestCase):
def test_initial_value(self):
self.assertEquals(int(SharedCounter(10)), 10)
def test_increment(self):
c = SharedCounter(10)
c.increment()
self.assertEquals(int(c), 11)
c.increment(2)
self.assertEquals(int(c), 13)
def test_decrement(self):
c = SharedCounter(10)
c.decrement()
self.assertEquals(int(c), 9)
c.decrement(2)
self.assertEquals(int(c), 7)
def test_iadd(self):
c = SharedCounter(10)
c += 10
self.assertEquals(int(c), 20)
def test_isub(self):
c = SharedCounter(10)
c -= 20
self.assertEquals(int(c), -10)
def test_repr(self):
self.assertTrue(repr(SharedCounter(10)).startswith("<SharedCounter:"))
class TestLimitedSet(unittest.TestCase):
def test_add(self):
s = LimitedSet(maxlen=2)
s.add("foo")
s.add("bar")
for n in "foo", "bar":
self.assertTrue(n in s)
s.add("baz")
for n in "bar", "baz":
self.assertTrue(n in s)
self.assertTrue("foo" not in s)
def test_iter(self):
s = LimitedSet(maxlen=2)
items = "foo", "bar"
map(s.add, items)
        l = list(iter(s))  # iterate the LimitedSet itself, not the source tuple
for item in items:
self.assertTrue(item in l)
def test_repr(self):
s = LimitedSet(maxlen=2)
items = "foo", "bar"
map(s.add, items)
self.assertTrue(repr(s).startswith("LimitedSet("))
class TestLocalCache(unittest.TestCase):
def test_expires(self):
limit = 100
x = LocalCache(limit=limit)
slots = list(range(limit * 2))
for i in slots:
x[i] = i
self.assertEquals(x.keys(), slots[limit:])
|
[
"askh@opera.com"
] |
askh@opera.com
|
11778df6a7b9803d75dcda64e640e9ecd565a1d9
|
763baffec3ffda51a92a93704ff8e7485be1e576
|
/trainers/bayes_uts_classification_trainer_2.py
|
41ea8fd2b189dfec42482efb5ace6f40fe3eceb3
|
[
"ICU",
"Apache-2.0"
] |
permissive
|
Neronjust2017/DL-TSC
|
300d30f58858313749d49575b74d3a139abbe5e0
|
919e67e10b0bf518eb9cc63df68c79fe2bb71b36
|
refs/heads/master
| 2023-02-02T14:23:17.862623
| 2020-12-15T08:29:00
| 2020-12-15T08:29:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,029
|
py
|
from base.base_trainer import BaseTrain
import os
import time
# import keras_contrib
import numpy as np
from utils.uts_classification.utils import save_training_logs
from utils.uts_classification.metric import precision, recall, f1
from comet_ml import Optimizer
class UtsClassificationTrainer(BaseTrain):
def __init__(self, model, data, config):
super(UtsClassificationTrainer, self).__init__(model, data, config)
self.callbacks = []
self.loss = []
self.acc = []
self.val_loss = []
self.val_acc = []
self.precision = []
self.recall = []
self.f1 = []
self.val_precision = []
self.val_recall = []
self.val_f1 = []
self.init_callbacks()
def init_callbacks(self):
if (self.config.model.name == "encoder"):
import keras
else:
import tensorflow.keras as keras
from keras.callbacks import ModelCheckpoint, TensorBoard, ReduceLROnPlateau
self.callbacks.append(
ModelCheckpoint(
filepath=os.path.join(self.config.callbacks.checkpoint_dir, '%s-{epoch:02d}-{val_loss:.2f}.hdf5' % self.config.exp.name),
monitor=self.config.callbacks.checkpoint_monitor,
mode=self.config.callbacks.checkpoint_mode,
save_best_only=self.config.callbacks.checkpoint_save_best_only,
save_weights_only=self.config.callbacks.checkpoint_save_weights_only,
verbose=self.config.callbacks.checkpoint_verbose,
)
)
self.callbacks.append(
ModelCheckpoint(
filepath=os.path.join(self.config.callbacks.checkpoint_dir,'best_model-%s.hdf5'%self.config.callbacks.checkpoint_monitor),
monitor=self.config.callbacks.checkpoint_monitor,
mode=self.config.callbacks.checkpoint_mode,
save_best_only=self.config.callbacks.checkpoint_save_best_only,
)
)
self.callbacks.append(
ReduceLROnPlateau(
monitor='val_loss',
factor=0.5,
patience=50,
min_lr=0.0001
)
)
self.callbacks.append(
TensorBoard(
log_dir=self.config.callbacks.tensorboard_log_dir,
write_graph=self.config.callbacks.tensorboard_write_graph,
histogram_freq=1,
)
)
# if hasattr(self.config,"comet_api_key"):
if ("comet_api_key" in self.config):
from comet_ml import Experiment
experiment = Experiment(api_key=self.config.comet_api_key, project_name=self.config.exp_name)
experiment.disable_mp()
experiment.log_parameters(self.config["trainer"])
self.callbacks.append(experiment.get_callback('keras'))
def train(self):
if (self.config.model.name == "encoder"):
import keras
else:
import tensorflow.keras as keras
start_time = time.time()
## bayes optimization, refer to https://www.comet.ml/docs/python-sdk/introduction-optimizer/
        config = {
            "algorithm": "bayes",
            "parameters": {
                # the type was left blank in the original; "float" with this range is an assumed completion
                "learning_rate": {"type": "float", "min": 1e-4, "max": 1e-2},
                "batch_size": {"type": "integer", "min": 16, "max": 32},
                "num_epochs": {"type": "integer", "min": 50, "max": 100},
            },
            "spec": {
                "metric": "val_loss",
                "objective": "minimize"
            },
        }
opt = Optimizer(config, api_key=self.config.comet_api_key, project_name=self.config.exp_name)
for exp in opt.get_experiments():
            history = self.model.fit(
                self.data[0], self.data[1],
                epochs=exp.get_parameter('num_epochs'),  # use the sampled value from the search space
                verbose=self.config.trainer.verbose_training,
                batch_size=exp.get_parameter('batch_size'),  # the parameter is declared as 'batch_size' above
                validation_split=self.config.trainer.validation_split,
                callbacks=self.callbacks,
            )
val_loss = min(history.history['val_loss'])
print(val_loss)
exp.log_metric("val_loss", val_loss)
# history = self.model.fit(
# self.data[0], self.data[1],
# epochs=self.config.trainer.num_epochs,
# verbose=self.config.trainer.verbose_training,
# batch_size=self.config.trainer.batch_size,
# validation_split=self.config.trainer.validation_split,
# callbacks=self.callbacks,
# )
self.duration = time.time()-start_time
self.history = history
# if(self.config.model.name == "encoder"):
#
# self.best_model = keras.models.load_model(os.path.join(self.config.callbacks.checkpoint_dir,'best_model-%s.hdf5'%self.config.callbacks.checkpoint_monitor),
# custom_objects={'precision': precision, 'recall': recall,'f1': f1,
# 'InstanceNormalization': keras_contrib.layers.InstanceNormalization()})
# else:
self.best_model = keras.models.load_model(os.path.join(self.config.callbacks.checkpoint_dir,'best_model-%s.hdf5'%self.config.callbacks.checkpoint_monitor),
custom_objects={'precision': precision, 'recall': recall, 'f1': f1})
self.loss.extend(history.history['loss'])
self.acc.extend(history.history['accuracy'])
self.val_loss.extend(history.history['val_loss'])
self.val_acc.extend(history.history['val_accuracy'])
self.precision.extend(history.history['precision'])
self.recall.extend(history.history['recall'])
self.f1.extend(history.history['f1'])
self.val_precision.extend(history.history['val_precision'])
self.val_recall.extend(history.history['val_recall'])
self.val_f1.extend(history.history['val_f1'])
best_model = save_training_logs(self.config.log_dir,history)
self.best_model_train_loss = best_model.loc[0, 'best_model_train_loss']
self.best_model_val_loss = best_model.loc[0, 'best_model_val_loss']
self.best_model_train_acc = best_model.loc[0, 'best_model_train_acc']
self.best_model_val_acc = best_model.loc[0, 'best_model_val_acc']
self.best_model_train_precision = best_model.loc[0, 'best_model_train_precision']
self.best_model_val_precision = best_model.loc[0, 'best_model_val_precision']
self.best_model_train_recall = best_model.loc[0, 'best_model_train_recall']
self.best_model_val_recall = best_model.loc[0, 'best_model_val_recall']
self.best_model_train_f1 = best_model.loc[0, 'best_model_train_f1']
self.best_model_val_f1 = best_model.loc[0, 'best_model_val_f1']
self.best_model_learning_rate = best_model.loc[0, 'best_model_learning_rate']
self.best_model_nb_epoch = best_model.loc[0, 'best_model_nb_epoch']
|
[
"1036758468@qq.com"
] |
1036758468@qq.com
|
a9c097455eea55b0000645d76b287ac5dd604c19
|
765b558714acf20438ff717e57beadd9890fe1be
|
/galcon/groups/migrations/0002_add_superadmins.py
|
9a38e2054a96f7a31b96f2bc6bb1820a9f01808c
|
[] |
no_license
|
marky1991/galcon_clone
|
279cf4ec6adb266f5afabc0a0a61435a52a60119
|
12923b001d593c75934e99ed201627d8767462c2
|
refs/heads/master
| 2020-06-06T20:24:03.684856
| 2013-09-06T15:52:45
| 2013-09-06T15:52:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,824
|
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding M2M table for field superadmins on 'Group'
m2m_table_name = db.shorten_name('groups_group_superadmins')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('group', models.ForeignKey(orm['groups.group'], null=False)),
('player', models.ForeignKey(orm['galcon.player'], null=False))
))
db.create_unique(m2m_table_name, ['group_id', 'player_id'])
def backwards(self, orm):
# Removing M2M table for field superadmins on 'Group'
db.delete_table(db.shorten_name('groups_group_superadmins'))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'galcon.player': {
'Meta': {'object_name': 'Player'},
'avatar': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'friends': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'friends_rel_+'", 'blank': 'True', 'to': "orm['galcon.Player']"}),
'get_newsletter': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['groups.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'post_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'rank': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['galcon.Rank']", 'unique': 'True'}),
'registration_code': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'registration_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'default': "''", 'max_length': '100'}),
'trophies': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'players'", 'blank': 'True', 'to': "orm['galcon.Trophy']"}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'galcon.rank': {
'Meta': {'object_name': 'Rank'},
'classic_rank': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'flash_rank': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'fusion_rank': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'iphone_rank': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'galcon.trophy': {
'Meta': {'object_name': 'Trophy'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'text': ('django.db.models.fields.TextField', [], {'max_length': '100'})
},
'groups.group': {
'Meta': {'object_name': 'Group'},
'admins': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'admined_groups'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['galcon.Player']"}),
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '65000'}),
'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'join_requires_approval': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '100', 'blank': 'True'}),
'superadmins': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'superadmined_groups'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['galcon.Player']"})
}
}
complete_apps = ['groups']
|
[
"marky1991@gmail.com"
] |
marky1991@gmail.com
|
b54174c4926024c13e88b2eee8c899feaf71d99a
|
cec670ddc7119595183d6c42cecd6bc8e67c7a8c
|
/edumanage/migrations/0002_chg_field_instserver_instid.py
|
0b453925e4067e81d379a34b54b8186c6f2fef57
|
[
"LicenseRef-scancode-mit-taylor-variant",
"ISC"
] |
permissive
|
itminedu/djnro
|
7dd5c5e1c20d613d4d7ba84f33c0f40e7317f3d8
|
06e5ceb38a2d81750e2bbcc1240217befa74abdd
|
refs/heads/master
| 2023-05-28T10:18:45.149917
| 2022-10-10T11:36:35
| 2022-10-10T11:36:35
| 63,783,642
| 0
| 0
|
NOASSERTION
| 2023-05-22T21:34:50
| 2016-07-20T13:27:22
|
Python
|
UTF-8
|
Python
| false
| false
| 452
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('edumanage', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='instserver',
name='instid',
field=models.ManyToManyField(related_name='servers', to='edumanage.Institution', blank=True),
),
]
|
[
"zmousm@noc.grnet.gr"
] |
zmousm@noc.grnet.gr
|
bc333a1b121f009462583b86bf3a3a8f5f195612
|
484c38ba0fe2f77bbb5b7d97f44b370bda3fd950
|
/keras/model_test.py
|
fb77b79960824a62c5748b82d799cf2d079883b1
|
[] |
no_license
|
AirFishWang/DeepLearning
|
0bb0467e0246438300d38ed20bd15e592ccc0b9f
|
b5a8964c2c0a04046e358237e25cc0f06700db3a
|
refs/heads/master
| 2021-06-17T01:20:54.895160
| 2021-01-21T08:18:18
| 2021-01-21T08:18:18
| 144,096,334
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,067
|
py
|
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: model_test
Description :
Author : wangchun
date: 19-3-2
-------------------------------------------------
Change Activity:
19-3-2:
-------------------------------------------------
"""
import math
import numpy as np
import tensorflow as tf
class PriorProbability(tf.keras.initializers.Initializer):
""" Apply a prior probability to the weights.
"""
def __init__(self, probability=0.01):
self.probability = probability
def get_config(self):
return {
'probability': self.probability
}
def __call__(self, shape, dtype=None, partition_info=None):
# set bias to -log((1 - p)/p) for foreground
        dtype = None  # ignore the requested dtype so np.ones falls back to NumPy's default float
result = np.ones(shape, dtype=dtype) * -math.log((1 - self.probability) / self.probability)
return result
def default_regression_model(num_anchors, pyramid_feature_size=256, regression_feature_size=256, name='regression_submodel'):
options = {
'kernel_size': 3,
'strides': 1,
'padding': 'same',
'kernel_initializer': tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.01, seed=None),
'bias_initializer': 'zeros'
}
inputs = tf.keras.layers.Input(shape=(512, 512, pyramid_feature_size))
outputs = inputs
for i in range(4):
outputs = tf.keras.layers.Conv2D(
filters=regression_feature_size,
activation='relu',
name='pyramid_regression_{}'.format(i),
**options
)(outputs)
outputs = tf.keras.layers.Conv2D(num_anchors * 4, name='pyramid_regression', **options)(outputs)
outputs = tf.keras.layers.Reshape((-1, 4), name='pyramid_regression_reshape')(outputs)
return tf.keras.models.Model(inputs=inputs, outputs=outputs, name=name)
def default_classification_model(
num_classes,
num_anchors,
pyramid_feature_size=256,
prior_probability=0.01,
classification_feature_size=256,
name='classification_submodel'
):
""" Creates the default regression submodel.
Args
num_classes : Number of classes to predict a score for at each feature level.
num_anchors : Number of anchors to predict classification scores for at each feature level.
pyramid_feature_size : The number of filters to expect from the feature pyramid levels.
classification_feature_size : The number of filters to use in the layers in the classification submodel.
name : The name of the submodel.
Returns
A keras.models.Model that predicts classes for each anchor.
"""
options = {
'kernel_size' : 3,
'strides' : 1,
'padding' : 'same',
}
inputs = tf.keras.layers.Input(shape=(512, 512, pyramid_feature_size))
outputs = inputs
for i in range(4):
outputs = tf.keras.layers.Conv2D(
filters=classification_feature_size,
activation='relu',
name='pyramid_classification_{}'.format(i),
kernel_initializer=tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.01, seed=None),
bias_initializer='zeros',
**options
)(outputs)
outputs = tf.keras.layers.Conv2D(
filters=num_classes * num_anchors,
kernel_initializer=tf.keras.initializers.Zeros(),
bias_initializer=PriorProbability(probability=prior_probability),
name='pyramid_classification',
**options
)(outputs)
# reshape output and apply sigmoid
outputs = tf.keras.layers.Reshape((-1, num_classes), name='pyramid_classification_reshape')(outputs)
outputs = tf.keras.layers.Activation('sigmoid', name='pyramid_classification_sigmoid')(outputs)
return tf.keras.models.Model(inputs=inputs, outputs=outputs, name=name)
if __name__ == "__main__":
model = default_regression_model(9)
model = default_classification_model(1, 9)
|
[
"1013812915@qq.com"
] |
1013812915@qq.com
|
9f9a6bd87a6ee3346f60a3f4b5ef8a1bb49673d5
|
d88acb7dc568463f39bbc1a633fdc5ec84d33bda
|
/Others/HK/photos.py
|
550450167f3de92b07835844fec3a64726a5501f
|
[] |
no_license
|
sarathm09/Hobby_Projects
|
aff0207b382db236725dc6fa8d48907d458774d5
|
375c6a95abf9a4b4b663f7d4d20ec5404237bf11
|
refs/heads/master
| 2020-04-22T03:19:37.445820
| 2015-03-09T17:46:37
| 2015-03-09T17:46:37
| 26,056,072
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 417
|
py
|
__author__ = 'T90'
__version__ = '1.0.0'
from operator import methodcaller
import urllib
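# Reads student IDs from fin.csv (one "id, ..." row per line) and downloads
# each photo with Python 2's urllib into the files/ directory.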
def download():
f = open('fin.csv', 'r').read().split("\n")
ids = map(methodcaller('split',', '), f)
url = "http://www.rajagiritech.ac.in/stud/Photo/"
for i in ids:
urllib.urlretrieve(url + i[0] + ".jpg", "files/" + i[0] + ".jpg")
print "Completed : " + i[0]
print 'Success!!!'
if __name__ == "__main__":
download()
|
[
"sarathm09@gmail.com"
] |
sarathm09@gmail.com
|
4efb213edffd3e7e2785c1cea7974573fdece200
|
cc6e54bf04f664a579ae3a1db0753e3041ba842c
|
/main.py
|
891402d52c00183d207334cd473b0680062e13d1
|
[] |
no_license
|
sabbirDIU-222/python-OOP
|
18e33734bdcdbe0df923251c96bcb65a2dda1549
|
398449c7088e889f3d86eeac7ae2bf4a505e5833
|
refs/heads/main
| 2023-01-06T02:33:16.121261
| 2020-10-26T15:05:53
| 2020-10-26T15:05:53
| 304,537,063
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 384
|
py
|
# This program downloads an image from the internet
import random
import urllib.request
# A function that takes an image URL and downloads it under a random numeric filename
def download_from_web(url):
name = random.randrange(1,1000)
full_name = str(name)+".jpg"
urllib.request.urlretrieve(url,full_name)
download_from_web("http://tinyurl.com/y47elp8p")
|
[
"noreply@github.com"
] |
sabbirDIU-222.noreply@github.com
|
0a2d80bbd9d259005f0beeb8fb68f618e87486c3
|
c66e2b22b4ed179b53012b2847ca9f7bfe5fada4
|
/ComicGetter/pipelines.py
|
6ecc3192ae566bbb4977427e90dfd87a6f5c7638
|
[] |
no_license
|
zaneisdyy/Comic
|
6587b50013dd768a56913f0dd5f72be281112c5e
|
ca6b240c1ef5cba42c652ac9588f0b8fb71452c3
|
refs/heads/master
| 2021-01-23T02:00:17.609308
| 2017-05-31T13:15:53
| 2017-05-31T13:15:53
| 92,903,572
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 291
|
py
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class ComicgetterPipeline(object):
def process_item(self, item, spider):
return item
|
[
"zaneisdyy@qq.com"
] |
zaneisdyy@qq.com
|
9ab7444ce0ac325bec9b3674b19595013263df40
|
fb04bf32412d470fd642b164086607daa929462c
|
/synthetic/visualization.py
|
f0a9f5ec2f53c482f6fb174ce7358a45d15b3a0c
|
[
"MIT"
] |
permissive
|
irfan-gh/MO-PaDGAN-Optimization
|
348d52e4260a721e699c0829b2f02abc6934d618
|
101d389049e16574dac0b3dbae3a2578d600560c
|
refs/heads/master
| 2023-08-10T06:42:31.372810
| 2021-05-18T04:52:08
| 2021-05-18T04:52:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,276
|
py
|
import numpy as np
from scipy.stats import kde
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
plt.rcParams.update({'font.size': 18})
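# Plot helpers for 2-D synthetic experiments: scatter of data vs. generated
# samples, objective-function contours on a grid, and a Gaussian KDE of the
# generated samples.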
def plot_data(ax, data=None, gen_data=None, axis_off=True, xlim=None, ylim=None):
if data is not None:
if xlim is None:
xlim = (np.min(data[:,0]), np.max(data[:,0]))
if ylim is None:
ylim = (np.min(data[:,1]), np.max(data[:,1]))
if data is not None:
plt.scatter(data[:,0], data[:,1], marker='o', s=10, c='g', alpha=0.7, edgecolor='none', label='data')
if gen_data is not None:
assert gen_data.shape[1] == 2
plt.scatter(gen_data[:,0], gen_data[:,1], marker='+', s=10, c='b', alpha=0.7, edgecolor='none', label='generated')
plt.legend()
plt.axis('equal')
if axis_off:
plt.axis('off')
if xlim is not None:
plt.xlim(xlim)
if ylim is not None:
plt.ylim(ylim)
def plot_density(ax, data, func=None, gen_data=None, scatter=False, axis_off=True, xlim=None, ylim=None):
if xlim is None:
xlim = (np.min(data[:,0]), np.max(data[:,0]))
if ylim is None:
ylim = (np.min(data[:,1]), np.max(data[:,1]))
if func is not None:
n = 100
xx, yy = np.meshgrid(np.linspace(xlim[0], xlim[1], n),
np.linspace(ylim[0], ylim[1], n))
grid = np.vstack((xx.ravel(), yy.ravel())).T
val = func(grid)
if func is not None:
if gen_data is not None:
plt.contour(xx, yy, val.reshape(xx.shape), 15, linewidths=0.3, alpha=0.5)
else:
plt.contourf(xx, yy, val.reshape(xx.shape), 15)
plt.colorbar()
if gen_data is not None:
assert gen_data.shape[1] == 2
if scatter:
plt.scatter(gen_data[:,0], gen_data[:,1], marker='o', s=10, c='b', alpha=0.7, edgecolor='none')
else:
# Evaluate a gaussian kde on a regular grid of nbins x nbins over data extents
k = kde.gaussian_kde(gen_data.T)
nbins = 20
xi, yi = np.mgrid[xlim[0]:xlim[1]:nbins*1j, ylim[0]:ylim[1]:nbins*1j]
zi = k(np.vstack([xi.flatten(), yi.flatten()]))
plt.pcolormesh(xi, yi, zi.reshape(xi.shape), shading='gouraud', cmap='Blues')
plt.colorbar(label='Density')
plt.axis('equal')
if axis_off:
plt.axis('off')
if xlim is not None:
plt.xlim(xlim)
if ylim is not None:
plt.ylim(ylim)
def visualize_2d(data, func_obj, gen_data=None, save_path=None, axis_off=True, xlim=None, ylim=None):
assert data.shape[1] == 2
n_obj = func_obj.n_obj
n_subplots = n_obj + 1
fig = plt.figure(figsize=(5, 5*n_subplots))
# Subplot 1
ax = fig.add_subplot(n_subplots, 1, 1)
plot_data(ax, data, gen_data, axis_off, xlim, ylim)
    # Remaining subplots: one density plot per objective
for i in range(n_obj):
func = lambda x: func_obj.evaluate(x)[:,i]
ax = fig.add_subplot(n_subplots, 1, i+2)
plot_density(ax, data, func, gen_data, False, axis_off, xlim, ylim)
plt.tight_layout()
if save_path is not None:
plt.savefig(save_path)
plt.close()
else:
plt.show()
|
[
"wchen459@gmail.com"
] |
wchen459@gmail.com
|
92401b9446b5c618b34eb47f9bb3233834103b16
|
b144c5142226de4e6254e0044a1ca0fcd4c8bbc6
|
/ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/l1config/ethernet/txlane/txlane.py
|
0f268662a24c9427a25b1a41aba34180128d4784
|
[
"MIT"
] |
permissive
|
iwanb/ixnetwork_restpy
|
fa8b885ea7a4179048ef2636c37ef7d3f6692e31
|
c2cb68fee9f2cc2f86660760e9e07bd06c0013c2
|
refs/heads/master
| 2021-01-02T17:27:37.096268
| 2020-02-11T09:28:15
| 2020-02-11T09:28:15
| 239,721,780
| 0
| 0
|
NOASSERTION
| 2020-02-11T09:20:22
| 2020-02-11T09:20:21
| null |
UTF-8
|
Python
| false
| false
| 4,268
|
py
|
# MIT LICENSE
#
# Copyright 1997 - 2019 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class TxLane(Base):
"""
The TxLane class encapsulates a required txLane resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = 'txLane'
def __init__(self, parent):
super(TxLane, self).__init__(parent)
@property
def IsSkewSynchronized(self):
"""
Returns:
bool
"""
return self._get_attribute('isSkewSynchronized')
@IsSkewSynchronized.setter
def IsSkewSynchronized(self, value):
self._set_attribute('isSkewSynchronized', value)
@property
def LaneMappingType(self):
"""
Returns:
str(custom|decrement|default|increment|random)
"""
return self._get_attribute('laneMappingType')
@LaneMappingType.setter
def LaneMappingType(self, value):
self._set_attribute('laneMappingType', value)
@property
def MaxSkewVal(self):
"""
Returns:
number
"""
return self._get_attribute('maxSkewVal')
@property
def MinSkewVal(self):
"""
Returns:
number
"""
return self._get_attribute('minSkewVal')
@property
def NoOfLanes(self):
"""
Returns:
number
"""
return self._get_attribute('noOfLanes')
@property
def PcsLane(self):
"""
Returns:
list(number)
"""
return self._get_attribute('pcsLane')
@PcsLane.setter
def PcsLane(self, value):
self._set_attribute('pcsLane', value)
@property
def PhysicalLanes(self):
"""
Returns:
list(str)
"""
return self._get_attribute('physicalLanes')
@property
def Resolution(self):
"""
Returns:
number
"""
return self._get_attribute('resolution')
@property
def SkewValues(self):
"""
Returns:
list(number)
"""
return self._get_attribute('skewValues')
@SkewValues.setter
def SkewValues(self, value):
self._set_attribute('skewValues', value)
@property
def SynchronizedSkewVal(self):
"""
Returns:
number
"""
return self._get_attribute('synchronizedSkewVal')
@SynchronizedSkewVal.setter
def SynchronizedSkewVal(self, value):
self._set_attribute('synchronizedSkewVal', value)
def update(self, IsSkewSynchronized=None, LaneMappingType=None, PcsLane=None, SkewValues=None, SynchronizedSkewVal=None):
"""Updates a child instance of txLane on the server.
Args:
IsSkewSynchronized (bool):
LaneMappingType (str(custom|decrement|default|increment|random)):
PcsLane (list(number)):
SkewValues (list(number)):
SynchronizedSkewVal (number):
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
self._update(locals())
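# --- Hedged usage sketch (not part of the generated module) ---
# The session/vport access path below is an assumption for illustration; the
# exact hierarchy depends on how the ixnetwork_restpy TestPlatform is set up.
#
# tx_lane = ixnetwork.Vport.find()[0].L1Config.Ethernet.TxLane
# tx_lane.update(IsSkewSynchronized=True, LaneMappingType='increment')
# print(tx_lane.MinSkewVal, tx_lane.MaxSkewVal)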
|
[
"srvc_cm_packages@keysight.com"
] |
srvc_cm_packages@keysight.com
|
d2635763148ba5c437dbcc1d1a648147347ebd43
|
4e12169939cef99076d8088ddb6f53066291aaaa
|
/Haldane_model/squarelattice/nn/Edge_nn.py
|
4467b7a1940ced38b23cf0b9e9e87af23cb602ee
|
[] |
no_license
|
saunter999/Simplemodel
|
0c01a088965098bed5d1c9f1d1f7f9b08596b684
|
a8bceee8432d09011effd1d6b85395a70633ac98
|
refs/heads/master
| 2020-04-05T04:33:11.834433
| 2018-11-07T14:07:32
| 2018-11-07T14:07:32
| 156,555,851
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,676
|
py
|
#!/usr/bin/env python
from scipy import *
from pylab import *
from numpy import linalg as LA
def Hinc_matrix(k):
Hinc[0,0]=0.0
Hinc[1,1]=0.0
Hinc[0,1]=-2.0*t*cos(k)*exp(-1j*phi)
Hinc[1,0]=conjugate(Hinc[0,1])
def Hacc_matrix(k):
Hacc[0,0]=0.0
Hacc[1,1]=0.0
Hacc[0,1]=-2.0*t*exp(1j*phi)/2.0
Hacc[1,0]=conjugate(Hacc[0,1])
if __name__=='__main__':
N=100
kp=linspace(-pi,pi,N)
    Nj=100 # number of chains in the y direction
    ham=zeros((2*Nj,2*Nj),dtype=complex) # kernel of the Hamiltonian
    kxedge=zeros(2*Nj)
    Hinc=zeros((2,2),dtype=complex) # Hinc: in-chain hopping block of the Hamiltonian
    Hacc=zeros((2,2),dtype=complex) # Hacc: adjacent-chain hopping block of the Hamiltonian
t=1.0
phi=pi/4.
Eigk=zeros((N,2*Nj))
for indk,kx in enumerate(kp):
Hinc_matrix(kx)
Hacc_matrix(kx)
for i in range(Nj):
ham[2*i,2*i]=Hinc[0,0]
ham[2*i,2*i+1]=Hinc[0,1]
ham[2*i+1,2*i]=Hinc[1,0]
ham[2*i+1,2*i+1]=Hinc[1,1]
if (i !=0) and (i !=(Nj-1)):
ham[2*i,2*i-2]=Hacc[0,0]
ham[2*i,2*i-1]=Hacc[0,1]
ham[2*i+1,2*i-2]=Hacc[1,0]
ham[2*i+1,2*i-1]=Hacc[1,1]
ham[2*i,2*i+2]=Hacc[0,0]
ham[2*i,2*i+3]=Hacc[0,1]
ham[2*i+1,2*i+2]=Hacc[1,0]
ham[2*i+1,2*i+3]=Hacc[1,1]
if i==0:
ham[0,2]=Hacc[0,0]
ham[0,3]=Hacc[0,1]
ham[1,2]=Hacc[1,0]
ham[1,3]=Hacc[1,1]
if i==Nj-1:
ham[2*i,2*i-2]=Hacc[0,0]
ham[2*i,2*i-1]=Hacc[0,1]
ham[2*i+1,2*i-2]=Hacc[1,0]
ham[2*i+1,2*i-1]=Hacc[1,1]
Eig,v=LA.eig(ham)
idx=Eig.argsort()
Eig=Eig[idx]
v=v[:,idx]
for i in range(2*Nj):
Eigk[indk,i]=Eig[i].real
for i in range(2*Nj):
plot(kp,Eigk[:,i])
grid()
savefig("edge.png")
show()
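# Hedged aside (not part of the original script): `ham` is Hermitian, so
# numpy's Hermitian eigensolver returns real eigenvalues already sorted in
# ascending order, removing the need for the manual argsort/.real above:
#
# Eig, v = LA.eigh(ham)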
|
[
"qhan555@163.com"
] |
qhan555@163.com
|
7b0665c9b189e1806ae352de291937e6dc6eb4d1
|
23270cfeaa5b4bc794594721e1b6ffe4e2507a0e
|
/thread/07threading.py
|
1c6df0b0156be48b13f620cc88b817300f04fe1e
|
[] |
no_license
|
kun0769/studypy
|
d69bd1688378784b22885a9eb8f950f47fd94b65
|
290fc768d6040ccfad5e92e5f7b4d0ced3d54e7c
|
refs/heads/master
| 2021-08-07T20:21:00.975960
| 2020-04-19T14:51:45
| 2020-04-19T14:51:45
| 157,394,658
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 645
|
py
|
#!/usr/bin/env python
#coding:utf8
#
#program:
#   Use multiple threads to update the same shared variable, incrementing it by 1
#
#history:
#2019/12/02 kun V1.0
import threading
import time
count=0
class MyThread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
def run(self):
global count
time.sleep(0.5)
lock.acquire()
count +=1
        print 'I am %s, set count: %s' % (self.name, count)
lock.release()
if __name__=='__main__':
    # Define a lock object so that threads update the counter one at a time
lock=threading.Lock()
for i in xrange(200):
t=MyThread()
t.start()
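# Hedged Python 3 sketch of the same lock-protected increment (the file above
# is Python 2; names here are illustrative only):
#
# import threading
#
# count = 0
# lock = threading.Lock()
#
# def worker():
#     global count
#     with lock:  # the context manager acquires and releases the lock
#         count += 1
#
# threads = [threading.Thread(target=worker) for _ in range(200)]
# for t in threads:
#     t.start()
# for t in threads:
#     t.join()
# print(count)  # 200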
|
[
"591619648@qq.com"
] |
591619648@qq.com
|
2fbbff568376cb9da76e1a1ff78a5662c22c7c28
|
2378ee16470fa91fe23fc3b6f6040d7e54735e94
|
/script.py
|
219014760cf4252af387dab62b43e629a64eb4e4
|
[] |
no_license
|
mousaayoubi/gradebook
|
d735115fd89f6fd7d651c9dbca3b67c2be56fe84
|
b6648f81d8256f285ac64b7e65ad6d292ffe5ab9
|
refs/heads/master
| 2020-06-14T02:43:10.646669
| 2019-07-02T13:54:57
| 2019-07-02T13:54:57
| 194,871,919
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 581
|
py
|
last_semester_gradebook = [("politics", 80), ("latin", 96), ("dance", 97), ("architecture", 65)]
print(last_semester_gradebook)
#Create a subjects list
subjects = ["physics", "calculus", "poetry", "history"]
subjects.append("computer science")
print(subjects)
#Create a grades list
grades = [98, 97, 85, 88]
grades.append(100)
print(grades)
#Combine the subjects and grades lists using zip
gradebook = list(zip(subjects, grades))
gradebook.append(("visual arts", 93))
print(gradebook)
#Print Full Gradebook
full_gradebook = last_semester_gradebook + gradebook
print(full_gradebook)
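# Hedged extension (not in the original exercise): converting the pairs to a
# dict allows grade lookups by subject, assuming subject names are unique:
# grade_lookup = dict(full_gradebook)
# print(grade_lookup["poetry"])  # 85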
|
[
"mousaayoubi@gmail.com"
] |
mousaayoubi@gmail.com
|
6ee08ae35fc342bfd50bcac10dc9f98a817a191d
|
6f98ca1f44fb7ed02b31d80e1a2e3913291bbe3a
|
/Study/com/codemao/study/state 1/day11 文件和异常/day11.6.py
|
595e0a05b60bca47c366965246f84f2c129b7a95
|
[] |
no_license
|
yyq2012/yushihu
|
982ef7fd1c455213b13b3082d33b23c91ac01f30
|
9e8c85220b6176dc05c595e96e197ab5cefe6b20
|
refs/heads/master
| 2020-05-31T14:24:59.040191
| 2019-06-05T10:39:05
| 2019-06-05T10:39:05
| 190,326,935
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 642
|
py
|
# Reading and writing binary files
# Once you know how to read and write text files, handling binary files is
# just as simple; the code below copies an image file.
def main():
try:
with open('image/1.jpg', 'rb') as fs1:
data = fs1.read()
print(type(data)) # <class 'bytes'>
with open('image/1复制来的.jpg', 'wb') as fs2:
fs2.write(data)
    except FileNotFoundError as e:
        print('The specified file could not be opened.')
    except IOError as e:
        print('An error occurred while reading or writing the file.')
    print('Program finished.')
if __name__ == '__main__':
main()
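# Hedged aside (not part of the original lesson): the standard library can do
# the same copy in one call, reusing the paths from the example above:
#
# import shutil
# shutil.copyfile('image/1.jpg', 'image/1复制来的.jpg')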
|
[
"noreply@github.com"
] |
yyq2012.noreply@github.com
|
8f4cfa7569f2a68bdaa03df4ee02267642253997
|
e872b85b3e0897cf1f3f2c5a27d545569f25b8a2
|
/esphome/components/tuya/__init__.py
|
541f10f862d27c230eba7daa0d0f9759ea9a818c
|
[
"GPL-1.0-or-later",
"MIT",
"GPL-3.0-only"
] |
permissive
|
s00500/esphome
|
3b8bab3506c51c5b6b6a6c69f17d29d04877644f
|
51ab0f0b78762a804a567cfb54bdefa15d2ef29d
|
refs/heads/master
| 2021-07-12T05:02:42.732274
| 2020-06-24T01:39:24
| 2020-06-24T01:39:24
| 172,741,405
| 0
| 0
|
MIT
| 2019-02-26T15:47:51
| 2019-02-26T15:47:51
| null |
UTF-8
|
Python
| false
| false
| 587
|
py
|
import esphome.codegen as cg
import esphome.config_validation as cv
from esphome.components import uart
from esphome.const import CONF_ID
DEPENDENCIES = ['uart']
tuya_ns = cg.esphome_ns.namespace('tuya')
Tuya = tuya_ns.class_('Tuya', cg.Component, uart.UARTDevice)
CONF_TUYA_ID = 'tuya_id'
CONFIG_SCHEMA = cv.Schema({
cv.GenerateID(): cv.declare_id(Tuya),
}).extend(cv.COMPONENT_SCHEMA).extend(uart.UART_DEVICE_SCHEMA)
def to_code(config):
var = cg.new_Pvariable(config[CONF_ID])
yield cg.register_component(var, config)
yield uart.register_uart_device(var, config)
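# Hedged example of YAML this schema would accept (pin numbers and baud rate
# are illustrative assumptions, not taken from the component docs):
#
# uart:
#   rx_pin: GPIO16
#   tx_pin: GPIO17
#   baud_rate: 9600
# tuya: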
|
[
"otto@otto-winter.com"
] |
otto@otto-winter.com
|
9d7e54738f49cfe013cf1eaeabc8dde21e3d27c1
|
98c6ea9c884152e8340605a706efefbea6170be5
|
/examples/data/Assignment_3/tshkgo003/question4.py
|
fa5415ba70ba561ca688fdcd9b4a1abbd9a12026
|
[] |
no_license
|
MrHamdulay/csc3-capstone
|
479d659e1dcd28040e83ebd9e3374d0ccc0c6817
|
6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
|
refs/heads/master
| 2021-03-12T21:55:57.781339
| 2014-09-22T02:22:22
| 2014-09-22T02:22:22
| 22,372,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 493
|
py
|
N=eval(input('Enter the starting point N:\n'))
M=eval(input('Enter the ending point M:\n'))
i=N+1
print("The palindromic primes are:")
while i<M:
j=str(i)
jreverse = j[::-1]
i=i+1
    if j == jreverse:
        f = int(j)
        if f > 1:
            for k in range(2, f):
                if f % k == 0:
                    break
            else:
                print(f)
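# Hedged aside (not part of the assignment code): trial division only needs
# to test divisors up to sqrt(f), so the inner loop above could become:
#
# for k in range(2, int(f ** 0.5) + 1):
#     if f % k == 0:
#         break
# else:
#     print(f)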
|
[
"jarr2000@gmail.com"
] |
jarr2000@gmail.com
|
75fa5d6024a5577bb3a53206ef19ef912b1575cb
|
6fa0d5d3b61fbce01fad5a7dd50258c09298ee00
|
/Python/FLASK_EX/4th_CHATBOT/set_webhook.py
|
d7a824491adf17f4320106274b83906bc9459375
|
[] |
no_license
|
athletejuan/TIL
|
c8e6bd9f7e2c6f999dbac759adcdb6b2959de384
|
16b854928af2f27d91ba140ebc1aec0007e5eb04
|
refs/heads/master
| 2023-02-19T13:59:06.495110
| 2022-03-23T15:08:04
| 2022-03-23T15:08:04
| 188,750,527
| 1
| 0
| null | 2023-02-15T22:54:50
| 2019-05-27T01:27:09
|
Python
|
UTF-8
|
Python
| false
| false
| 392
|
py
|
import requests
from decouple import config
token = config('TOKEN')
app_url = f'https://api.telegram.org/bot{token}'
ngrok_url = '###'
python_anywhere_url = 'https://juan.pythonanywhere.com/'
ngrok_setting = f'{app_url}/setWebhook?url={ngrok_url}/telegram'
anywhere_setting = f'{app_url}/setWebhook?url={python_anywhere_url}/telegram'
res = requests.get(anywhere_setting)
# print(res.text)
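# Hedged aside: the Bot API also exposes getWebhookInfo, useful to verify the
# URL registered above (same token/app_url as in this script):
#
# info = requests.get(f'{app_url}/getWebhookInfo')
# print(info.json())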
|
[
"vanillasky84.0627@gmail.com"
] |
vanillasky84.0627@gmail.com
|
101287850920a2d6fd015698c178a5b7808a398f
|
c928aad72f4a9334ed141843ba6918d1ab0435e5
|
/jenkins_event.py
|
5bbb331a0550efddad9a47fcc3517cb169a34d65
|
[] |
no_license
|
jordant/jenkins-nagios-event-handler
|
3cccf6031987a0b45c4d65ab338d9173f92451c1
|
98a6577fedc6ab04e5cf53fe6a80e1c7feaf6896
|
refs/heads/master
| 2020-12-24T17:54:59.429706
| 2015-05-11T22:05:16
| 2015-05-11T22:05:16
| 35,451,828
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,858
|
py
|
import json
import optparse
import sys
from jenkinsapi.jenkins import Jenkins
def parse_options():
parser = optparse.OptionParser()
parser.add_option(
"-j", "--job_name", metavar="jobname", action="store",
type="string", dest="job_name", default=False,
help="name of jenkins job"
)
parser.add_option(
"-b", "--buildarams", metavar="BUILDPARAMS", action="store",
default=False, dest="build_params",
help="job build params in json"
)
parser.add_option(
"--url", metavar="URL", action="store",
default="http://localhost:8080", dest="jenkins_url",
help="Jenkins server URL"
)
parser.add_option(
"--username", metavar="USERNAME", action="store",
default="jenkins", dest="username",
help="Jenkins server username"
)
parser.add_option(
"--password", metavar="PASSWORD", action="store",
default="jenkins", dest="password",
help="Jenkins server password"
)
options = parser.parse_args(sys.argv)[0]
if not options.job_name:
parser.error('job_name not defined')
return options
def build_param_dict():
    options = parse_options()
    params = json.loads(options.build_params)  # avoid shadowing the built-in `dict`
    return params
def get_jenkins():
options = parse_options()
return Jenkins(options.jenkins_url,
username=options.username,
password=options.password)
def invoke_jenkins(job_name, params):
leeroy = get_jenkins()
if (leeroy.has_job(job_name)):
job = leeroy.get_job(job_name)
return job.invoke(build_params=params)
    else:
        raise ValueError("Cannot find job %s" % job_name)
def main():
options = parse_options()
invoke_jenkins(options.job_name, build_param_dict())
if __name__ == "__main__":
main()
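# Hedged example invocation (job name, build params, and server details are
# illustrative assumptions, not taken from any real setup):
#
# python jenkins_event.py -j deploy-app \
#     -b '{"BRANCH": "master"}' \
#     --url http://jenkins.example.com:8080 \
#     --username jenkins --password jenkins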
|
[
"jordan@dreamhost.com"
] |
jordan@dreamhost.com
|
c91e350fa02a1173d2effbf6fd7881fbba5ee7ba
|
904c691e8b973ae9b88974c132125852b82a24b7
|
/aminoacid.py
|
7499fa54f083689aeb5cdf2706eea67ca14d3156
|
[] |
no_license
|
jorisdhondt/GliadinScattering
|
cf3c239662f22a6a9cbaff8415c580e937b34625
|
d9ceb58b6e2077e83a34b734352f6dca05179d35
|
refs/heads/master
| 2020-04-21T23:21:00.245077
| 2019-11-13T06:02:07
| 2019-11-13T06:02:07
| 169,944,330
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,167
|
py
|
import math
import random
# Define an aminoacid class
class Aminoacid:
    def __init__(self, label, abbreviation, coor, radius):
        self.label = label
        self.abbreviation = abbreviation
self.coor = coor
self.radius = radius #3 different sizes
        self.bridgedAcides = set()  # acids linked to this one by disulphide bridges
self.previousCoor = None
#angles between bonds 120 degrees
#angle needs to be maintained, only doable via rotation
#bonds have a certain length
#sphere are the amino-acid
#disulphide bridge
        #bond-stretch model
#hydrophobic energy
#maximum: (r1+r2)*1.2
#minimum: (r1+r2)
#two options:
# - 120 degrees
# - necklace shaped
#50 enzymes
self.epsilon = 1
self.sigma = 2
def getBridgedAcides(self):
return self.bridgedAcides
    def setBridgeAcid(self, acid):
        self.bridgedAcides.add(acid)  # set.add mutates in place and returns None
def revertPosition(self):
self.coor = self.previousCoor
self.previousCoor = None
def getPosition(self):
return self.coor
def getRadius(self):
return self.radius
def getLennartJonesPotential(self,atom):
coor1 = atom.getPosition()
coor2 = self.getPosition()
r = math.sqrt((coor1[0] - coor2[0])**2 + (coor1[1] - coor2[1])**2 + (coor1[2] - coor2[2])**2)
        # note: these local values override self.epsilon/self.sigma set in __init__
        epsilon = 0.5
        sigma = 0.2
power = 4*epsilon*((sigma/r)**12 - ((sigma/r))**6)
return power
#lattice new.
def getElectricCharge(self):
if self.label == 'K' or self.label == "R" or self.label == "H":
return 1
elif self.label == "D" or self.label == "E":
return -1
else:
return 0
def translate(self):
self.previousCoor = self.coor
x_shift = random.uniform(-0.1, 0.1)
y_shift = random.uniform(-0.1, 0.1)
z_shift = random.uniform(-0.1, 0.1)
delta = (x_shift,y_shift,z_shift)
self.coor = tuple(sum(t) for t in zip(self.coor, delta))
def rotate(self):
#not_necessary
print("irrelevant")
|
[
"joris.dhondt@gmail.com"
] |
joris.dhondt@gmail.com
|
ac316ea28bb65fe24b53d37558562520af9a4b68
|
06a8947680a4d61e82ef36d2141ab9519ab8c7df
|
/framework/report/header.py
|
3737630f9a563abb23f7ecd584e24256dda413e1
|
[] |
no_license
|
koto/owtf
|
fac846db6508407826027a29f5f98475153f3671
|
53111f07150ee52bab09f4513413cb1131d85820
|
refs/heads/master
| 2021-01-18T06:45:34.753800
| 2012-01-19T06:37:29
| 2012-01-19T06:37:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,905
|
py
|
#!/usr/bin/env python
'''
owtf is an OWASP+PTES-focused try to unite great tools and facilitate pen testing
Copyright (c) 2011, Abraham Aranguren <name.surname@gmail.com> Twitter: @7a_ http://7-a.org
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright owner nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The reporter module is in charge of producing the HTML Report, as well as providing plugins with common HTML rendering functions
'''
import os, re, cgi, sys
from framework.lib.general import *
from collections import defaultdict
class Header:
def __init__(self, CoreObj):
self.Core = CoreObj # Keep Reference to Core Object
self.Init = False
def CopyAccessoryFiles(self):
cprint("Copying report images ..")
self.Core.Shell.shell_exec("cp -r "+self.FrameworkDir+"/images/ "+self.TargetOutputDir)
cprint("Copying report includes (stylesheet + javascript files)..")
self.Core.Shell.shell_exec("cp -r "+self.FrameworkDir+"/includes/ "+self.TargetOutputDir)
def DrawRunDetailsTable(self):
Table = self.Core.Reporter.Render.CreateTable({'class' : 'run_log'})
Table.CreateCustomRow('<tr><th colspan="5">Run Log</th></tr>')
Table.CreateRow(['Start', 'End', 'Runtime', 'Command', 'Status'], True)
for line in self.Core.DB.GetData('RUN_DB'):
Start, End, Runtime, Command, Status = line
Table.CreateRow(Table.EscapeCells([Start, End, Runtime, Command, Status]))
return Table.Render()
def GetDBButtonLabel(self, LabelStart, RedFound, NormalNotFound, DBName):
DBLabel = LabelStart
if self.Core.DB.GetLength(DBName) > 0:
DBLabel += RedFound
DBLabel = "<font color='red'>"+DBLabel+"</font>"
else:
DBLabel += NormalNotFound
return DBLabel
def DrawReviewDeleteOptions(self):
return self.Core.Reporter.Render.DrawLinkPairsAsHTMLList([
['Delete THIS Review', 'ClearReview()']
, ['Delete ALL Reviews', "DeleteStorage()"]
], 'DrawButtonJSLink', {})
def DrawReviewMiscOptions(self):
return self.Core.Reporter.Render.DrawLinkPairsAsHTMLList([
['Show Used Memory (KB)', "ShowUsedMem()"]
, ['Show Used Memory (%)', 'ShowUsedMemPercentage()']
, ['Show Debug Window', 'ShowDebugWindow()']
, ['Hide Debug Window', 'HideDebugWindow()']
], 'DrawButtonJSLink', {})
def DrawReviewImportExportOptions(self):
return self.Core.Reporter.Render.DrawLinkPairsAsHTMLList([
['Import Review', "ImportReview()"]
, ['Export Review as text', 'ExportReviewAsText()']
#, ['Export Review as file', 'ExportReviewAsFile()']
#<- Component returned failure code: 0x80004005 (NS_ERROR_FAILURE) [nsIDOMHTMLDocument.execCommand]
#[Break On This Error] dlg = execCommand('SaveAs', false, filename+'.txt');
], 'DrawButtonJSLink', {})+'<textarea rows="20" cols="100" id="import_export_box"></textarea>'
def DrawGeneralLogs(self):
return self.Core.Reporter.Render.DrawLinkPairsAsHTMLList([
[ self.GetDBButtonLabel('Errors: ', 'Found, please report!', 'Not found', 'ERROR_DB'), str(self.Core.Config.GetAsPartialPath('ERROR_DB')) ]
, [ self.GetDBButtonLabel('Unreachable targets: ', 'Yes!', 'No', 'UNREACHABLE_DB'), str(self.Core.Config.GetAsPartialPath('UNREACHABLE_DB')) ]
, [ 'Transaction Log (HTML)', self.Core.Config.GetAsPartialPath('TRANSACTION_LOG_HTML') ]
#, [ 'All Downloaded Files', self.Core.Config.GetAsPartialPath('TRANSACTION_LOG_FILES') ]
, [ 'All Downloaded Files - To be implemented', '#' ]
, [ 'All Transactions', self.Core.Config.GetAsPartialPath('TRANSACTION_LOG_TRANSACTIONS') ]
, [ 'All Requests', self.Core.Config.GetAsPartialPath('TRANSACTION_LOG_REQUESTS') ]
, [ 'All Response Headers', self.Core.Config.GetAsPartialPath('TRANSACTION_LOG_RESPONSE_HEADERS') ]
, [ 'All Response Bodies', self.Core.Config.GetAsPartialPath('TRANSACTION_LOG_RESPONSE_BODIES') ]
], 'DrawButtonLink', {}) # {} avoid nasty python issue where it keeps a reference to the latest attributes before
def DrawURLDBs(self, DBPrefix = ""):
return self.Core.Reporter.Render.DrawLinkPairsAsHTMLList([
['All URLs', self.Core.Config.GetAsPartialPath(DBPrefix+'ALL_URLS_DB')]
, ['File URLs', self.Core.Config.GetAsPartialPath(DBPrefix+'FILE_URLS_DB')]
, ['Fuzzable URLs', self.Core.Config.GetAsPartialPath(DBPrefix+'FUZZABLE_URLS_DB')]
, ['Image URLs', self.Core.Config.GetAsPartialPath(DBPrefix+'IMAGE_URLS_DB')]
, ['Error URLs', self.Core.Config.GetAsPartialPath(DBPrefix+'ERROR_URLS_DB')]
, ['External URLs', self.Core.Config.GetAsPartialPath(DBPrefix+'EXTERNAL_URLS_DB')]
], 'DrawButtonLink', {})
def DrawFilters(self):
return self.Core.Reporter.DrawCounters(True)
    def AddMiscellaneousTabs(self, Tabs):
        Tabs.AddCustomDiv('Miscellaneous:') # First create custom tab, without javascript
Tabs.AddDiv('exploit', 'Exploitation', self.Core.Reporter.Render.DrawLinkPairsAsHTMLList([ ['Hackvertor', 'http://hackvertor.co.uk/public'] , [ 'Hackarmoury', 'http://hackarmoury.com/' ], ['ExploitDB', 'http://www.exploit-db.com/'] , ['ExploitSearch', 'http://www.exploitsearch.net'], [ 'hackipedia', 'http://www.hakipedia.com/index.php/Hakipedia' ] ], 'DrawButtonLink'))
Tabs.AddDiv('methodology', 'Methodology', self.Core.Reporter.Render.DrawLinkPairsAsHTMLList([ ['OWASP', 'https://www.owasp.org/index.php/OWASP_Testing_Guide_v3_Table_of_Contents'] , ['Pentest Standard', 'http://www.pentest-standard.org/index.php/Main_Page'], ['OSSTMM', 'http://www.isecom.org/osstmm/'] ], 'DrawButtonLink'))
Tabs.AddDiv('calculators', 'Calculators', self.Core.Reporter.Render.DrawLinkPairsAsHTMLList([ ['CVSS Advanced', 'http://nvd.nist.gov/cvss.cfm?adv&calculator&version=2'] , ['CVSS Normal', 'http://nvd.nist.gov/cvss.cfm?calculator&version=2'] ], 'DrawButtonLink'))
Tabs.AddDiv('learn', 'Test/Learn', self.Core.Reporter.Render.DrawLinkPairsAsHTMLList([ [ 'taddong', 'http://blog.taddong.com/2011/10/hacking-vulnerable-web-applications.html' ], [ 'securitythoughts', 'http://securitythoughts.wordpress.com/2010/03/22/vulnerable-web-applications-for-learning/' ] , [ 'danielmiessler', 'http://danielmiessler.com/projects/webappsec_testing_resources/'] ], 'DrawButtonLink'))
def DrawOWTFBox(self):
OWTF = self.Core.Reporter.Render.CreateTable( { 'class' : 'report_intro' } )
OWTF.CreateRow( [ 'Seed', 'Review Size', 'Total Size', 'Version', 'Site' ], True)
OWTF.CreateRow( [ '<span id="seed">'+self.Core.GetSeed()+'</span>', '<div id="js_db_size"></div>', '<div id="total_js_db_size"></div>', self.Version, self.Core.Reporter.Render.DrawButtonLink('owtf.org', 'http://owtf.org') ] )
return '<div style="position: absolute; top: 6px; right: 6px; float: right">'+OWTF.Render()+'</div>'
def DrawBackToSummaryIcon(self):
return self.Core.Reporter.Render.DrawLink(self.Core.Reporter.DrawImageFromConfigPair( [ 'FIXED_ICON_TO_SUMMARY', 'NAV_TOOLTIP_TO_SUMMARY' ] ), self.Core.Config.Get('HTML_REPORT'), { 'target' : '' } )
def ExpandDetailedReportJS(self):
return "window.parent.document.getElementById('iframe_"+self.Core.Config.Get('REVIEW_OFFSET')+"').style.height = '100%';"
def CollapseDetailedReportJS(self):
return "var iframe = window.parent.document.getElementById('iframe_"+self.Core.Config.Get('REVIEW_OFFSET')+"'); iframe.style.height = '"+self.Core.Config.Get('COLLAPSED_REPORT_SIZE')+"'; window.location.hash = ''; window.parent.location.hash = '';"
def AnalyseDetailedReportJS(self):
return self.ExpandDetailedReportJS() + "window.parent.location.hash = 'anchor_"+self.Core.Config.Get('REVIEW_OFFSET')+"';"
def DrawURLTop(self, Embed = ''):
AlternativeIPsStr = ""
if len(self.AlternativeIPs) > 0:
AlternativeIPsStr = " [Alternative IPs: "+", ".join(self.AlternativeIPs)+"]"
Target = self.Core.Reporter.Render.CreateTable({'class' : 'report_intro'})
# Target.CreateRow( [ 'Target URL', 'Target IP(s)', ' ' ], True)
# Target.CreateRow( [ self.Core.Reporter.Render.DrawButtonLink(self.TargetURL, self.TargetURL, {'id' : 'target_url'}), self.HostIP+AlternativeIPsStr, self.DrawBackToSummaryIcon() ] )
Icons = " " * 1 + (" " * 1).join(self.Core.Reporter.Render.DrawLinkPairs( [
[ self.Core.Reporter.DrawImageFromConfigPair( [ 'FIXED_ICON_ANALYSE_REPORT', 'NAV_TOOLTIP_ANALYSE_REPORT' ]), self.AnalyseDetailedReportJS() ],
[ self.Core.Reporter.DrawImageFromConfigPair( [ 'FIXED_ICON_EXPAND_REPORT', 'NAV_TOOLTIP_EXPAND_REPORT' ]), self.ExpandDetailedReportJS() ],[ self.Core.Reporter.DrawImageFromConfigPair( [ 'FIXED_ICON_CLOSE_REPORT', 'NAV_TOOLTIP_CLOSE_REPORT' ]), self.CollapseDetailedReportJS() ] ], 'DrawButtonJSLink', { 'class' : 'icon' }))
Target.CreateCustomRow('<tr><th>'+self.Core.Reporter.Render.DrawButtonLink(self.TargetURL, self.TargetURL, {'id' : 'target_url'})+'</th><td>'+self.HostIP+AlternativeIPsStr+'</td><td class="disguise">'+Icons+'</td></tr>')
#<td class="disguise">'+Embed+'</td>
#return '<div style="display:inline; align:left; position:fixed; top:0px; z-index:0; opacity:1; background-color:red; width:100%;">'+self.WrapTop(Target.Render())
Content = Target.Render() + '<div style="position: absolute; top: 6px; right: 6px; float: right;">'+Embed+'</div>'
return '<div class="detailed_report">'+self.WrapTop(Content)+'</div>' + '<br />' * 2
#return self.WrapTop(Target.Render())
def WrapTop(self, LeftBoxStr):
Output = '<div style="display: inline; align: left">'+LeftBoxStr+'</div>'
PrePad = PostPad = ''
if self.ReportType != 'NetMap':
PrePad = "<div style='display:none;'>"
PostPad = "</div>"
Output += PrePad+self.DrawOWTFBox()+PostPad
return Output
def DrawTop(self, Embed = ''):
if self.ReportType == 'URL':
return self.DrawURLTop(Embed)
elif self.ReportType == 'NetMap':
return self.WrapTop('<h2>Summary Report</h2><br />')
elif self.ReportType == 'AUX':
return self.WrapTop('<h2>Auxiliary Plugins '+self.DrawBackToSummaryIcon()+'</h2>')
def GetJavaScriptStorage(self): # Loads the appropriate JavaScript library files depending on the configured JavaScript Storage
Libraries = []
for StorageLibrary in self.Core.Config.Get('JAVASCRIPT_STORAGE').split(','):
Libraries.append('<script type="text/javascript" src="includes/'+StorageLibrary+'"></script>')
return "\n".join(Libraries)
def Save(self, Report, Options):
self.TargetOutputDir, self.FrameworkDir, self.Version, self.TargetURL, self.HostIP, self.TransactionLogHTML, self.AlternativeIPs = self.Core.Config.GetAsList(['OUTPUT_PATH', 'FRAMEWORK_DIR', 'VERSION', 'TARGET_URL', 'HOST_IP', 'TRANSACTION_LOG_HTML', 'ALTERNATIVE_IPS'])
self.ReportType = Options['ReportType']
if not self.Init:
self.CopyAccessoryFiles()
self.Init = True # The report is re-generated several times, this ensures images, stylesheets, etc are only copied once at the start
with open(self.Core.Config.Get(Report), 'w') as file:
ReviewTabs = self.Core.Reporter.Render.CreateTabs()
ReviewTabs.AddDiv('review_import_export', 'Import/Export', self.DrawReviewImportExportOptions())
ReviewTabs.AddDiv('review_delete', 'Delete', self.DrawReviewDeleteOptions())
            ReviewTabs.AddDiv('review_miscelaneous', 'Miscellaneous', self.DrawReviewMiscOptions())
ReviewTabs.CreateTabs()
ReviewTabs.CreateTabButtons()
Tabs = self.Core.Reporter.Render.CreateTabs()
Tabs.AddDiv('filter', 'Filter', self.DrawFilters())
Tabs.AddDiv('review', 'Review', ReviewTabs.Render())
Tabs.AddDiv('runlog', 'History', self.DrawRunDetailsTable())
LogTable = self.Core.Reporter.Render.CreateTable({ 'class' : 'run_log' })
LogTable.CreateRow(['General', 'Verified URLs', 'Potential URLs'], True)
LogTable.CreateRow([self.DrawGeneralLogs(), self.DrawURLDBs(), self.DrawURLDBs("POTENTIAL_")])
Tabs.AddDiv('logs', 'Logs', LogTable.Render())
BodyAttribsStr = ""
if self.ReportType == 'NetMap':
                self.AddMiscellaneousTabs(Tabs)
BodyAttribsStr = ' style="overflow-x:hidden;"'
Tabs.CreateTabs() # Now create the tabs from Divs Above
Tabs.CreateTabButtons() # Add navigation buttons to the right
if self.ReportType != 'NetMap': # Embed tabs in detailed report header
RenderTopStr = self.DrawTop(Tabs.RenderTabs()) # Embed Tabs in Top div
TabsStr = Tabs.RenderDivs() # Render Divs below
else: # Normal tab render
RenderTopStr = self.DrawTop()
TabsStr = Tabs.Render()
file.write("""<html>
<head>
<title>"""+Options['Title']+"""</title>
<link rel="stylesheet" href="includes/stylesheet.css" type="text/css">
<link rel="stylesheet" href="includes/jquery-ui-1.9m6/themes/base/jquery.ui.all.css">
</head>
<body"""+BodyAttribsStr+""">\n
"""+RenderTopStr+"""
"""+TabsStr+"""
<script type="text/javascript" src="includes/jquery-1.6.4.js"></script>\n
<script type="text/javascript" src="includes/owtf.js"></script>\n
<script type="text/javascript" src="includes/jsonStringify.js"></script>\n
"""+self.GetJavaScriptStorage()+"""
""") # Init HTML Report
|
[
"abraham.aranguren@gmail.com"
] |
abraham.aranguren@gmail.com
|
11b329a87220afc8ca33490ff4030ba5f721225c
|
6a924239453663349e32c52632e780cc4bd4b7ba
|
/project_1/homework_1_1.py
|
9c157fd57f98642ce8b8dd5fc62280f9231e57f9
|
[] |
no_license
|
Czzzzzzzz/ML_Projects
|
18ba0d57c651d0ff2d60aa054312cea2e33c7d83
|
bfe79a9af1e8f15143ace094f3ccbb0530c5b8e6
|
refs/heads/master
| 2021-10-23T21:22:51.338483
| 2019-03-20T06:21:59
| 2019-03-20T06:21:59
| 176,668,081
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,418
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun May 27 21:30:24 2018
@author: zhengcao
"""
import random
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from DrawingTool import DrawingTool
from FileHandler import FileHandler
from DataHandler import DataHandler
def draw_scatter_matrix(data, label):
drawingTool = DrawingTool()
    label_name = ['var of WT img', 'skew of WT img', 'curtosis of WT img', 'entropy of img']
fig = drawingTool.scatterplot_matrix_multiclass(np.transpose(data), label, ['r', 'b'], label_name,
marker='+')
fig.suptitle('Simple Scatterplot Matrix')
plt.show()
def draw_box_plots(data, labels):
data_handler = DataHandler()
separated_data = data_handler.separateDataByLabel(data, labels)
label_name = [ "class " + str(int(label)) for label in np.unique(labels)]
colors = ['lightblue', 'lightgreen']
    variable_name = ['var of WT img', 'skew of WT img', 'curtosis of WT img', 'entropy of img']
for i_variables, var_name in zip(range(data.shape[1]), variable_name):
data_for_i_var = [separated_data[i_labels][:, i_variables] for i_labels in range(np.unique(labels).shape[0])]
plt.figure(i_variables, figsize=(8, 8))
plt.ylabel(var_name)
bplot = plt.boxplot(data_for_i_var, labels=label_name, patch_artist=True)
for patch, color in zip(bplot['boxes'], colors):
patch.set_facecolor(color)
def knn_plot_accuracy(X, y_train_accuracy, y_test_accuracy):
plt.figure(figsize=(8, 7))
# plt.plot(ks_inverse, train_accuracy)
# plt.plot(ks_inverse, test_accuracy)
plt.plot(X, y_train_accuracy, label='train error')
plt.plot(X, y_test_accuracy, label='test error')
plt.legend(loc=2, bbox_to_anchor=(1.05, 1))
plt.ylabel("error rate")
plt.xlabel("k")
def knn_plot_learn_curve(X, y):
plt.figure(figsize=(8, 7))
plt.plot(X, y)
plt.ylabel('test error')
plt.xlabel("N")
plt.show()
def knn_train(train_data, test_data, train_label, test_label):
"""
Train the model by KNN. Gain the optimal k ranging from the interval [1, train_data.shape[0]]
and lowest test error
Parameters
----------
train_data
test_data
train_label
test_label
Returns
-------
the lowest test error
"""
ks = np.arange(1, train_data.shape[0], 40)
# ks = np.arange(1, 20, 3)
train_accuracy = np.array([])
test_accuracy = np.array([])
for k in ks:
knn = KNeighborsClassifier(n_neighbors=k, metric='euclidean')
knn.fit(train_data, train_label)
train_acc = 1 - accuracy_score(train_label, knn.predict(train_data))
test_acc = 1 - accuracy_score(test_label, knn.predict(test_data))
train_accuracy = np.append(train_accuracy, train_acc)
test_accuracy = np.append(test_accuracy, test_acc)
        # print(train_accuracy)
# print(test_accuracy)
print("k={0} : train error {1}, test error {2}".format(k, train_acc, test_acc))
# ks_inverse = np.reciprocal(ks, dtype='float')
# print(ks_inverse)
# knn_plot_accuracy(k, train_accuracy, test_accuracy)
optimal_k = ks[np.argmin(test_accuracy)]
# return optimal_k
return np.min(test_accuracy), optimal_k
def knn_compute_confusion(train_data, test_data, train_label, test_label, optimal_k):
knn = KNeighborsClassifier(n_neighbors=optimal_k, metric='euclidean')
knn.fit(train_data, train_label)
predicted_test_label = knn.predict(test_data)
confusion_mat = confusion_matrix(test_label, predicted_test_label)
tn, fp, fn, tp = confusion_mat.ravel()
print('Confusion Matrix:\n')
print(confusion_mat)
tp_rate = float(tp) / (tp + fn)
tn_rate = float(tn) / (tn + fp)
precision = float(tp) / (tp + fp)
f1 = f1_score(test_label, predicted_test_label)
print(' true positive rate: {0}\n true negative rate: {1}\n precision: {2}\n f1-score: {3}'.format(tp_rate, tn_rate, precision, f1))
def argmin_last(np_array):
min_val = np_array[0]
index = 0
for i, v in enumerate(np_array):
if v <= min_val:
index = i
return index
def knn_train_metrics(train_data, test_data, train_label, test_label):
metrics = ['manhattan', 'mahalanobis']
    metric_parameters = [None, {'V': np.cov(train_data)}]  # note: sklearn's mahalanobis metric expects the inverse covariance under key 'VI'
ks = np.arange(1, 902, 10)
manhattan_test_error = np.array([])
# mahalanobis_test_error = np.array([])
for k in ks:
knn = KNeighborsClassifier(n_neighbors=k, metric='manhattan')
knn.fit(train_data, train_label)
predicted_label = knn.predict(test_data)
test_error = 1 - accuracy_score(test_label, predicted_label)
manhattan_test_error = np.append(manhattan_test_error, test_error)
# for metric, metric_para in zip(metrics, metric_parameters):
# knn = KNeighborsClassifier(n_neighbors=k, metric=metric, metric_params=metric_para)
# knn.fit(train_data, train_label)
# predicted_label = knn.predict(test_data)
# test_error = 1 - accuracy_score(test_label, predicted_label)
# if test_error == metrics[0]:
# manhattan_test_error = np.append(manhattan_test_error, test_error)
# else:
# mahalanobis_test_error = np.append(mahalanobis_test_error, test_error)
plt.figure()
plt.plot(ks, manhattan_test_error, label='manhattan')
plt.legend(loc=2, bbox_to_anchor=(1.05, 1))
# plt.plot(ks, mahalanobis_test_error, label='mahalanobis')
plt.xlabel('K')
plt.ylabel('test error')
optimal_k = ks[argmin_last(manhattan_test_error)]
ps = np.power([10]*10, np.arange(0.1, 1.1, 0.1))
minkowski_test_error = np.array([])
for p in ps:
knn = KNeighborsClassifier(n_neighbors=optimal_k, metric='minkowski', metric_params={'p': p})
knn.fit(train_data, train_label)
predicted_label = knn.predict(test_data)
test_error = 1 - accuracy_score(test_label, predicted_label)
minkowski_test_error = np.append(minkowski_test_error, test_error)
plt.figure()
plt.plot(ps, minkowski_test_error, label='minkowski')
plt.legend(loc=2, bbox_to_anchor=(1.05, 1))
plt.show()
    optimal_p = ps[np.argmin(minkowski_test_error)]
    print("optimal p: {0}".format(optimal_p))
def main():
fileHandler = FileHandler()
data_and_label = fileHandler.readData("data_banknote_authentication.txt")
data, labels = data_and_label[:, :-1], np.array(data_and_label[:, -1], dtype=int)
# draw_scatter_matrix(data, label)
# draw_box_plots(data, labels)
data_handler = DataHandler()
train_data, test_data, train_label, test_label = data_handler.split_train_test(data, labels, 200)
'''
To draw learning curve
'''
# N = np.arange(50, 901, 50)
# test_errors = np.array([])
# for n in N:
# print("N: {0}".format(n / 2))
# _, train_data_small, _, train_label_small = data_handler.split_train_test(train_data, train_label, int(n / 2))
# print(train_data_small.shape, test_data.shape, train_label_small.shape, test_label.shape)
#
# lowest_test_error, _ = knn_train(train_data_small, test_data, train_label_small, test_label)
# test_errors = np.append(test_errors, lowest_test_error)
#
# print(test_errors)
# knn_plot_learn_curve(N, test_errors)
# Given a optimal k and corresponding data, compute the confusion matrix.
# optimal_k = 1
# knn_compute_confusion(train_data, test_data, train_label, test_label, optimal_k)
'''
To try out different metrics
'''
print(train_data.shape, train_label.shape)
knn_train_metrics(train_data, test_data, train_label, test_label)
def test():
X = np.array([[1., 2, 3],[ 2., 5, 6], [2., 12, 333], [12., 23, 34],[ 2., 55, 6], [2., 12, 333]])
label = np.array([1, 1, 5, 0, 0, 0])
# plt.figure()
# plt.boxplot(a)
# dic = {label: a[]}
# data = a[:, 1:]
# labels = a[:, 0]
# print(labels.shape)
# m = DataHandler.separateDataByLabel(data, labels)
# a = np.random.permutation(6)
# class_counts = np.bincount(label)
# print(np.cumsum(class_counts)[0:-1])
# shuffled_label = label.take(a)
# print(shuffled_label)
# print(np.random.permutation(label))
# X_train, X_test, y_train, y_test = train_test_split(X, label, stratify=label, test_size=2, shuffle=False)
# print(X_train, X_test, y_train, y_test)
# a = []
# a.extend([[1, 2], [2, 3]])
# a.extend([[2, 3], [5, 6]])
# print(confusion_matrix([0, 1, 0, 1], [1, 1, 1, 0]))
# print(a)
x = np.arange(0, 5, 1)
y = np.arange(2, 6, 0.5)
xx, yy = np.meshgrid(x, y)
# print(np.c_[xx.ravel(), yy.ravel()])
# m = [None, ]
# a = {'a': 3}
# print(a)
if __name__ == "__main__":
# test()
main()
# print(argmin_last([0, 0, 0, 3]))
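# Hedged alternative (not part of the assignment) to the manual k sweep in
# knn_train, using scikit-learn's cross-validated grid search; `train_data`
# and `train_label` reuse the split produced in main():
#
# from sklearn.model_selection import GridSearchCV
# search = GridSearchCV(KNeighborsClassifier(metric='euclidean'),
#                       {'n_neighbors': list(range(1, 30, 2))}, cv=5)
# search.fit(train_data, train_label)
# print(search.best_params_, 1 - search.best_score_)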
|
[
"czfightinglucky@gmail.com"
] |
czfightinglucky@gmail.com
|