hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
889ac7ffd9a005060bb43a97aad62ce181ad364f | 1,608 | py | Python | lang/python/bottle/todo/todoone.py | liuyang1/test | a4560e0c9ffd0bc054d55bbcf12a894ab5b7d417 | [
"MIT"
] | 8 | 2015-06-07T13:25:48.000Z | 2022-03-22T23:14:50.000Z | lang/python/bottle/todo/todoone.py | liuyang1/test | a4560e0c9ffd0bc054d55bbcf12a894ab5b7d417 | [
"MIT"
] | 30 | 2016-01-29T01:36:41.000Z | 2018-09-19T07:01:22.000Z | lang/python/bottle/todo/todoone.py | liuyang1/test | a4560e0c9ffd0bc054d55bbcf12a894ab5b7d417 | [
"MIT"
] | null | null | null | from bottle import route, run, debug, template, request
import sqlite3
def pretty_print_POST(req):
"""
At this point it is completely built and ready
to be fired; it is "prepared".
However pay attention at the formatting used in
this function because it is programmed to be pretty
printed and may differ from the actual request.
"""
print('{}\n{}\n{}\n\n{}'.format(
'-----------START-----------',
req.method + ' ' + req.url,
'\n'.join('{}: {}'.format(k, v) for k, v in req.headers.items()),
req.body,
))
print ("----")
print(req.body.getvalue())
print ("----")
items = {1: 'first item', 2: 'second item'}
@route('/new', method="GET")
recent10 = "SELECT * FROM todo ORDER BY id DESC LIMIT 10;"
@route('/')
debug(True)
run(host='0.0.0.0', port=8080, reloader=True)
| 27.724138 | 74 | 0.574627 | from bottle import route, run, debug, template, request
import sqlite3
def pretty_print_POST(req):
"""
At this point it is completely built and ready
to be fired; it is "prepared".
However pay attention at the formatting used in
this function because it is programmed to be pretty
printed and may differ from the actual request.
"""
print('{}\n{}\n{}\n\n{}'.format(
'-----------START-----------',
req.method + ' ' + req.url,
'\n'.join('{}: {}'.format(k, v) for k, v in req.headers.items()),
req.body,
))
print ("----")
print(req.body.getvalue())
print ("----")
items = {1: 'first item', 2: 'second item'}
@route('/new', method="GET")
def new_item():
print request
# req = request.prepare()
pretty_print_POST(request)
    if request.GET.get('task', '').strip():  # default '' avoids AttributeError when 'task' is absent
new = request.GET.get('task', '').strip()
conn = sqlite3.connect('todo.db')
c = conn.cursor()
c.execute('INSERT INTO todo (task,status) VALUES (?,?)', (new, 1))
newid = c.lastrowid
conn.commit()
c.close()
return '{"id": %s, "task": "%s"}' % (newid, new)
else:
return "nothing"
recent10 = "SELECT * FROM todo ORDER BY id DESC LIMIT 10;"
@route('/')
def index():
conn = sqlite3.connect('todo.db')
c = conn.cursor()
# c.execute("SELECT id, task FROM todo WHERE status LIKE '1'")
c.execute(recent10)
result = c.fetchall()
c.close()
output = template('todone', rows=result)
return output
debug(True)
run(host='0.0.0.0', port=8080, reloader=True)
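# A minimal client sketch for the two routes above -- run from a separate
# Python 2 shell while this app is serving. Host/port come from run(); the
# task text and the returned id are illustrative assumptions:
#
#   import urllib2
#   print urllib2.urlopen('http://127.0.0.1:8080/new?task=buy%20milk').read()
#   # -> {"id": <new row id>, "task": "buy milk"}
#   print urllib2.urlopen('http://127.0.0.1:8080/').read()  # HTML from the 'todone' template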
| 711 | 0 | 44 |
919d3ccaca0399f9adb321600f7cfa64da99a4b2 | 6,262 | py | Python | python/deal_10_fq_list.py | FireflyTang/WoQu | 9f1763b1971e8fce99d123584e803fac36821756 | [
"MIT"
] | null | null | null | python/deal_10_fq_list.py | FireflyTang/WoQu | 9f1763b1971e8fce99d123584e803fac36821756 | [
"MIT"
] | null | null | null | python/deal_10_fq_list.py | FireflyTang/WoQu | 9f1763b1971e8fce99d123584e803fac36821756 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#!/usr/bin/python
from dealctrl import *
 | 33.666667 | 97 | 0.454168 | # -*- coding: utf-8 -*-
#!/usr/bin/python
from dealctrl import *
class deal_10_pb_list(dealctrl):
def __init__(self,con):
dealctrl.__init__(self,con)
def run(self):
userid=int(self.recvdic['userid'])
time=int(self.recvdic['time'])
timelimit=''
if(time):
timelimit=('AND fq_dateline<=%d' % (time))
sql=('''SELECT * FROM
(SELECT * FROM `member_pb` WHERE pb_userid=%d %s LIMIT 10) AS a
LEFT JOIN
`activity_list` AS b
ON a.pb_aid=b.aid
LEFT JOIN
`member_userinfo` AS c
ON b.fqruserid=c.userid
LEFT JOIN
(SELECT * FROM `member_sc` WHERE sc_userid=%d) AS d
ON a.pb_aid=d.sc_aid
LEFT JOIN
(SELECT * FROM `activity_dz` WHERE dz_userid=%d) AS e
ON a.pb_userid=e.dz_userid
LEFT JOIN
(SELECT * FROM `activity_apply` WHERE userid=%d) as f
ON a.pb_aid=f.userid
ORDER BY pb_dateline DESC
LIMIT 10''' % (userid,timelimit,userid,userid,userid))
self.log.write("sql: %s\n" % sql)
self.db.execute(sql)
db_re_rowcount=self.db.rowcount
db_re=self.db.fetchall()
amodifytime=[]
umodifytime=[]
aid=[]
userid=[]
for i in db_re:
amodifytime.append(str(i['amodifytime']))
umodifytime.append(str(i['amodifytime']))
aid.append(str(i['aid']))
userid.append(str(i['userid']))
amodifytime=','.join(amodifytime)
umodifytime=','.join(umodifytime)
aid=','.join(aid)
userid=','.join(userid)
senddic={
'type':'10_pb_list_r',
'count':db_re_rowcount,
'aid':aid,
'userid':userid,
'amodifytime':amodifytime,
'umodifytime':umodifytime
}
self.sendmessage(senddic)
dic=self.getmessage()
uorder=dic['uorder']
aorder=dic['aorder']
if(aorder):
aorder=aorder.split(',')
for i in aorder:
i=int(i)
ainfo=db_re[i]
form=ainfo['form']
title=ainfo['title']
starttime=ainfo['starttime']
lasttime=ainfo['lasttime']
publishtime=ainfo['inserttime']
aid=ainfo['aid']
place=ainfo['place']
description=ainfo['description']
fqruserid=ainfo['fqruserid']
dz=ainfo['dz']
peopleneed=ainfo['peopleneed']
peopleapply=ainfo['peopleapply']
peoplein=ainfo['peoplein']
ispb=1
if(ainfo['sc_dateline']):
issc=1
else:
issc=0
if(ainfo['dz_dateline']):
isdz=1
else:
isdz=0
if(ainfo['applytime']):
isapply=1
else:
isapply=0
if(ainfo['isin']=='1'):
isapply=0
isin=1
else:
isin=0
pbtime=ainfo['pb_dateline']
applymd5=-1
inmd5=-1
applyid=-1
inid=-1
if(peopleapply):
sql=('''SELECT group_concat(portraitmd5),group_concat(userid) FROM
(SELECT * FROM (SELECT * FROM `activity_apply` WHERE aid=%d AND isin=0) LIMIT 3) AS c''' % aid)
self.log.write("sql: %s\n" % sql)
self.db.execute(sql)
db_re=self.db.fetchone()
applymd5=db_re['group_concat(portraitmd5)']
applyid=db_re['group_concat(userid)']
if(peoplein):
sql=('''SELECT group_concat(portraitmd5),group_concat(userid) FROM
( SELECT * FROM (SELECT * FROM `activity_apply` WHERE aid=%d AND isin=1) LIMIT 3 ) AS c''' % aid)
self.log.write("sql: %s\n" % sql)
self.db.execute(sql)
db_re=self.db.fetchone()
inmd5=db_re['group_concat(portraitmd5)']
inid=db_re['group_concat(userid)']
senddic={
'type':'10_pb_list_a',
'form':form,
'title':title,
'starttime':starttime,
'lasttime':lasttime,
'publishtime':publishtime,
'aid':aid,
'place':place,
'des':description,
'userid':fqruserid,
'dz':dz,
'neednum':peopleneed,
'applynum':peopleapply,
'innum':peoplein,
'ispb':ispb,
'issc':issc,
'isdz':isdz,
'applymd5':applymd5,
'applyid':applyid,
'inmd5':inmd5,
'inid':inid,
'isin':isin,
'isapply':isapply,
'pbtime':pbtime
}
self.sendmessage(senddic)
if(uorder):
for i in uorder:
i=int(i)
uinfo=db_re[i]
fqruserid=uinfo['fqruserid']
username=uinfo['username']
portraitmd5=uinfo['portraitmd5']
gender=uinfo['gender']
department=uinfo['department']
year=uinfo['year']
edutype=uinfo['edutype']
mobile=uinfo['mobile']
mail=uinfo['mail']
personalsign=uinfo['personalsign']
senddic={
'type':'type=10_pb_list_u',
'userid':fqruserid,
'username':username,
'portraitmd5':portraitmd5,
'gender':gender,
'department':department,
'year':year,
'edutype':edutype,
'mobile':mobile,
'mail':mail,
'personalsign':personalsign
}
self.sendmessage(senddic)
return 1
| 6,111 | 11 | 75 |
63cb909a668dcccf26c627d74673ec61d29d34cb | 714 | py | Python | bot.py | Snowcola/simbot | 9d7b04bb46a85d5ffdb57a1c725ca0def92442e8 | [
"MIT"
] | null | null | null | bot.py | Snowcola/simbot | 9d7b04bb46a85d5ffdb57a1c725ca0def92442e8 | [
"MIT"
] | null | null | null | bot.py | Snowcola/simbot | 9d7b04bb46a85d5ffdb57a1c725ca0def92442e8 | [
"MIT"
] | null | null | null | import os
import discord
import asyncio
import logging
from discord.ext import commands
from simc import SimC
logger = logging.getLogger('discord')
logger.setLevel(logging.DEBUG)
handler = logging.FileHandler(
filename='discord.log', encoding='utf-8', mode='w')
handler.setFormatter(
logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
logger.addHandler(handler)
TOKEN = os.environ.get("DISCORD_TOKEN")
bot = commands.Bot(
command_prefix=commands.when_mentioned_or('!'),
description='Quick sims in discord')
bot.add_cog(SimC(bot, r"C:\Simulationcraft(x64)\simc"))  # raw string avoids the invalid '\s' escape
@bot.event
bot.run(TOKEN)
| 22.3125 | 73 | 0.726891 | import os
import discord
import asyncio
import logging
from discord.ext import commands
from simc import SimC
logger = logging.getLogger('discord')
logger.setLevel(logging.DEBUG)
handler = logging.FileHandler(
filename='discord.log', encoding='utf-8', mode='w')
handler.setFormatter(
logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
logger.addHandler(handler)
TOKEN = os.environ.get("DISCORD_TOKEN")
bot = commands.Bot(
command_prefix=commands.when_mentioned_or('!'),
description='Quick sims in discord')
bot.add_cog(SimC(bot, r"C:\Simulationcraft(x64)\simc"))  # raw string avoids the invalid '\s' escape
@bot.event
async def on_ready():
print(f'Logged in as:\n{bot.user} (ID: {bot.user.id})')
bot.run(TOKEN)
| 60 | 0 | 22 |
ad60524a29920db31667f14d371b0c8aa8f3674f | 756 | py | Python | polytester/parsers/protractor.py | skoczen/polytester | c32c99aa61eb4dcfd2b3f6860b5d9d342a7ecfa8 | [
"MIT"
] | 115 | 2015-01-23T13:37:37.000Z | 2020-11-16T09:40:53.000Z | polytester/parsers/protractor.py | skoczen/polytester | c32c99aa61eb4dcfd2b3f6860b5d9d342a7ecfa8 | [
"MIT"
] | 18 | 2015-01-21T14:13:14.000Z | 2021-03-25T21:38:07.000Z | polytester/parsers/protractor.py | skoczen/polytester | c32c99aa61eb4dcfd2b3f6860b5d9d342a7ecfa8 | [
"MIT"
] | 11 | 2015-01-28T19:43:37.000Z | 2017-06-30T13:20:24.000Z | import re
from .default import DefaultParser
| 27 | 97 | 0.599206 | import re
from .default import DefaultParser
class ProtractorParser(DefaultParser):
name = "protractor"
def command_matches(self, command):
return "protractor" in command
def num_passed(self, result):
return self.num_total(result) - self.num_failed(result)
def num_total(self, result):
# 2 tests, 3 assertions, 1 failure
m = re.findall('(\d+) tests?, (\d+) assertions?, (\d+) failures?', result.cleaned_output)
if len(m) > 0:
return int(m[-1][1])
def num_failed(self, result):
# 2 tests, 3 assertions, 1 failure
m = re.findall('(\d+) tests?, (\d+) assertions?, (\d+) failures?', result.cleaned_output)
if len(m) > 0:
return int(m[-1][-1])
| 537 | 149 | 23 |
5eff4f616e1bf74af785abd760f50d2a582846ad | 829 | py | Python | mark_blocks.py | m0t/ida-scripts | 17124a4dfc869064a2b44ba89047d03ab4157230 | [
"MIT"
] | 5 | 2015-03-21T05:48:22.000Z | 2016-12-04T13:35:48.000Z | mark_blocks.py | m0t/ida-scripts | 17124a4dfc869064a2b44ba89047d03ab4157230 | [
"MIT"
] | null | null | null | mark_blocks.py | m0t/ida-scripts | 17124a4dfc869064a2b44ba89047d03ab4157230 | [
"MIT"
] | null | null | null | '''
@author: m0t
'''
#search for blocks colored purple(0x9933cc) and creates a disabled breakpoint at the start of each.
#To be used with process stalker to immediately see "interesting" blocks
from idc import *
from idautils import *
purple = 0x9933cc #our definition of purple...
#get start address of each function, scan it for purple, setbreakpoint()
funit = Functions()
prevFlag = False
while True:
try:
faddr = funit.next()
except StopIteration:
break
itemsit = FuncItems(faddr)
while True:
try:
item = itemsit.next()
except StopIteration:
break
if GetColor(item, 1) == purple and prevFlag == False:
AddBpt(item)
EnableBpt(item, False)
prevFlag = True
#resetting the flag when we go out of "interesting" block
if GetColor(item, 1) != purple and prevFlag == True:
prevFlag = False
| 23.027778 | 99 | 0.714113 | '''
@author: m0t
'''
#search for blocks colored purple(0x9933cc) and creates a disabled breakpoint at the start of each.
#To be used with process stalker to immediately see "interesting" blocks
from idc import *
from idautils import *
purple = 0x9933cc #our definition of purple...
#get start address of each function, scan it for purple, setbreakpoint()
funit = Functions()
prevFlag = False
while True:
try:
faddr = funit.next()
except StopIteration:
break
itemsit = FuncItems(faddr)
while True:
try:
item = itemsit.next()
except StopIteration:
break
if GetColor(item, 1) == purple and prevFlag == False:
AddBpt(item)
EnableBpt(item, False)
prevFlag = True
#resetting the flag when we go out of "interesting" block
if GetColor(item, 1) != purple and prevFlag == True:
prevFlag = False
| 0 | 0 | 0 |
0c4e4f3219b1763eefe5c44b1aa31a75eea348c6 | 2,971 | py | Python | sliding_window/lesson_4.py | Adorism/grok-practice | 62576ef8cce1f9e4289366d9f733618f50c9b648 | [
"MIT"
] | null | null | null | sliding_window/lesson_4.py | Adorism/grok-practice | 62576ef8cce1f9e4289366d9f733618f50c9b648 | [
"MIT"
] | null | null | null | sliding_window/lesson_4.py | Adorism/grok-practice | 62576ef8cce1f9e4289366d9f733618f50c9b648 | [
"MIT"
] | null | null | null | '''
Problem Statement
Given a string with lowercase letters only, if you are allowed to replace no more than ‘k’ letters with any letter, find the length of the longest substring having the same letters after replacement.
Example 1:
Input: String="aabccbb", k=2
Output: 5
Explanation: Replace the two 'c' with 'b' to have a longest repeating substring "bbbbb".
Example 2:
Input: String="abbcb", k=1
Output: 4
Explanation: Replace the 'c' with 'b' to have a longest repeating substring "bbbb".
Example 3:
Input: String="abccde", k=1
Output: 3
Explanation: Replace the 'b' or 'd' with 'c' to have the longest repeating substring "ccc".
'''
# mycode
# answer
main()
'''
Time Complexity
The time complexity of the above algorithm will be O(N) where ‘N’ is the number of letters in the input string.
Space Complexity
As we are expecting only the lower case letters in the input string, we can conclude that the space complexity will be O(26), to store each letter’s frequency in the HashMap, which is asymptotically equal to O(1).
'''
| 34.952941 | 213 | 0.684618 | '''
Problem Statement
Given a string with lowercase letters only, if you are allowed to replace no more than ‘k’ letters with any letter, find the length of the longest substring having the same letters after replacement.
Example 1:
Input: String="aabccbb", k=2
Output: 5
Explanation: Replace the two 'c' with 'b' to have a longest repeating substring "bbbbb".
Example 2:
Input: String="abbcb", k=1
Output: 4
Explanation: Replace the 'c' with 'b' to have a longest repeating substring "bbbb".
Example 3:
Input: String="abccde", k=1
Output: 3
Explanation: Replace the 'b' or 'd' with 'c' to have the longest repeating substring "ccc".
'''
# mycode
def length_of_longest_substring(str, k):
# TODO: Write your code here
win_start, max_len, cnt = 0, 0, 0
dict_str = {}
for win_end in range(len(str)):
if str[win_end] not in dict_str:
dict_str[str[win_end]] = 1
else:
dict_str[str[win_end]] += 1
cnt = max(dict_str.values())
while win_end - win_start + 1 - cnt > k:
dict_str[str[win_start]] -= 1
win_start += 1
max_len = max(max_len, win_end - win_start + 1)
return max_len
# answer
def length_of_longest_substring(str, k):
window_start, max_length, max_repeat_letter_count = 0, 0, 0
frequency_map = {}
# Try to extend the range [window_start, window_end]
for window_end in range(len(str)):
right_char = str[window_end]
if right_char not in frequency_map:
frequency_map[right_char] = 0
frequency_map[right_char] += 1
max_repeat_letter_count = max(
max_repeat_letter_count, frequency_map[right_char])
# Current window size is from window_start to window_end, overall we have a letter which is
# repeating 'max_repeat_letter_count' times, this means we can have a window which has one letter
# repeating 'max_repeat_letter_count' times and the remaining letters we should replace.
# if the remaining letters are more than 'k', it is the time to shrink the window as we
# are not allowed to replace more than 'k' letters
if (window_end - window_start + 1 - max_repeat_letter_count) > k:
left_char = str[window_start]
frequency_map[left_char] -= 1
window_start += 1
max_length = max(max_length, window_end - window_start + 1)
return max_length
def main():
print(length_of_longest_substring("aabccbb", 2))
print(length_of_longest_substring("abbcb", 1))
print(length_of_longest_substring("abccde", 1))
main()
'''
Time Complexity
The time complexity of the above algorithm will be O(N) where ‘N’ is the number of letters in the input string.
Space Complexity
As we are expecting only the lower case letters in the input string, we can conclude that the space complexity will be O(26), to store each letter’s frequency in the HashMap, which is asymptotically equal to O(1).
'''
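# To make the O(N) bound above concrete: window_start never moves backward, so
# across the whole loop each pointer advances at most N times (~2N steps total).
# Quick sanity-check sketch against the expected outputs from the problem statement:
assert length_of_longest_substring("aabccbb", 2) == 5
assert length_of_longest_substring("abbcb", 1) == 4
assert length_of_longest_substring("abccde", 1) == 3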
| 1,864 | 0 | 68 |
1452ea9c83f0c6e864e6bb0c90c52cc4eb4cfc1e | 3,292 | py | Python | dependencies/src/4Suite-XML-1.0.2/test/Xml/Xslt/Borrowed/ce_20000819.py | aleasims/Peach | bb56841e943d719d5101fee0a503ed34308eda04 | [
"MIT"
] | null | null | null | dependencies/src/4Suite-XML-1.0.2/test/Xml/Xslt/Borrowed/ce_20000819.py | aleasims/Peach | bb56841e943d719d5101fee0a503ed34308eda04 | [
"MIT"
] | null | null | null | dependencies/src/4Suite-XML-1.0.2/test/Xml/Xslt/Borrowed/ce_20000819.py | aleasims/Peach | bb56841e943d719d5101fee0a503ed34308eda04 | [
"MIT"
] | 1 | 2020-07-26T03:57:45.000Z | 2020-07-26T03:57:45.000Z |
'''Contributed by Carey Evans'''
import sys
from Ft.Xml.Xslt import Processor
"""outenc.py
Test whether 4DOM and 4XSLT produce correct output given different
input strings, using different output encodings. The general testing
procedure goes:
Read document into DOM from string <A>.
Extract text into Unicode string <B>.
Write DOM to another string <X> using specified output encoding.
Read <X> into a DOM, and extract text into Unicode string <Y>.
Check whether <B> == <Y>.
An exception at any stage is also an error. Any Unicode character can
be encoded in any output encoding, e.g. LATIN CAPITAL LETTER C WITH
CARON as the character reference &#268;.
"""
# All the following strings are in UTF-8;
# I'm not trying to test the parser.
input_88591 = '0x0041 is A, 0x00C0 is \303\200.'
input_88592 = '0x0041 is A, 0x010C is \304\214.'
input_both = '0x0041 is A, 0x00C0 is \303\200, 0x010C is \304\214.'
inputs = [('ISO-8859-1', input_88591),
# ('ISO-8859-2', input_88592),
# ('Unicode', input_both)
]
#out_encodings = ['UTF-8', 'ISO-8859-1', 'ISO-8859-2']
out_encodings = ['UTF-8', 'ISO-8859-1']
xslt_input_fmt = '''<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE text [ <!ELEMENT text (#PCDATA)> ]>
<text>%s</text>'''
xslt_identity = '''<?xml version="1.0"?>
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:output method="xml" indent="no" encoding="%s"/>
<xsl:template match="/">
<text><xsl:value-of select="text"/></text>
</xsl:template>
</xsl:stylesheet>'''
#'
try:
from xml.dom.ext.reader import Sax2
import xml.unicode.iso8859
from xml.sax import saxexts
except ImportError:
Sax2 = None
pass
| 25.92126 | 93 | 0.655832 |
'''Contributed by Carey Evans'''
import sys
from Ft.Xml.Xslt import Processor
"""outenc.py
Test whether 4DOM and 4XSLT produce correct output given different
input strings, using different output encodings. The general testing
procedure goes:
Read document into DOM from string <A>.
Extract text into Unicode string <B>.
Write DOM to another string <X> using specified output encoding.
Read <X> into a DOM, and extract text into Unicode string <Y>.
Check whether <B> == <Y>.
An exception at any stage is also an error. Any Unicode character can
be encoded in any output encoding, e.g. LATIN CAPITAL LETTER C WITH
CARON as the character reference &#268;.
"""
# All the following strings are in UTF-8;
# I'm not trying to test the parser.
input_88591 = '0x0041 is A, 0x00C0 is \303\200.'
input_88592 = '0x0041 is A, 0x010C is \304\214.'
input_both = '0x0041 is A, 0x00C0 is \303\200, 0x010C is \304\214.'
inputs = [('ISO-8859-1', input_88591),
# ('ISO-8859-2', input_88592),
# ('Unicode', input_both)
]
#out_encodings = ['UTF-8', 'ISO-8859-1', 'ISO-8859-2']
out_encodings = ['UTF-8', 'ISO-8859-1']
xslt_input_fmt = '''<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE text [ <!ELEMENT text (#PCDATA)> ]>
<text>%s</text>'''
xslt_identity = '''<?xml version="1.0"?>
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:output method="xml" indent="no" encoding="%s"/>
<xsl:template match="/">
<text><xsl:value-of select="text"/></text>
</xsl:template>
</xsl:stylesheet>'''
#'
def get_text(doc):
doc.normalize()
elem = doc.documentElement
child = elem.firstChild
text = child.nodeValue
return text
def process(doc, out_enc):
proc = Processor.Processor()
stylesheet = xslt_identity % (out_enc,)
proc.appendStylesheetString(stylesheet)
return proc.runNode(doc)
def results(input, out_enc):
indoc = None
outdoc = None
indoc = Sax2.FromXml(input)
intext = get_text(indoc)
outstring = process(indoc, out_enc)
outdoc = Sax2.FromXml(outstring)
outtext = get_text(outdoc)
return intext, outtext
def test(tester, inp, out_enc):
tester.startTest(inp[0]+" to "+out_enc)
input = inp[1]
try:
intext, outtext = results(xslt_input_fmt % (input,), out_enc)
except Exception, e:
tester.testError("Exception %s"%e)
return
tester.compare(input, intext)
tester.compare(input, outtext)
tester.testDone()
try:
from xml.dom.ext.reader import Sax2
import xml.unicode.iso8859
from xml.sax import saxexts
except ImportError:
Sax2 = None
pass
def Test(tester):
tester.startTest('Checking Unicode support')
skipped = 0
if sys.version[0] == '2':
tester.message("Test skipped (version >= 2.0)")
skipped = 1
if Sax2 is None:
tester.message("Test skipped (Rquires PyXML)")
skipped = 1
tester.testDone()
if not skipped:
parser = saxexts.XMLParserFactory.make_parser()
if parser.__class__.__name__ != "SAX_expat":
tester.message("Using", parser.__class__, "parser, results are unpredictable.\n")
for out_enc in out_encodings:
for inp in inputs:
test(tester,inp, out_enc)
return
| 1,477 | 0 | 115 |
758d57783fdc7bdff4fbe8436f421b519abc52a6 | 2,584 | py | Python | h2o-py/tests/testdir_algos/glm/pyunit_PUBDEV_5008_5386_glm_ordinal_large.py | vishalbelsare/h2o-3 | 9322fb0f4c0e2358449e339a434f607d524c69fa | [
"Apache-2.0"
] | 1 | 2022-03-15T06:08:14.000Z | 2022-03-15T06:08:14.000Z | h2o-py/tests/testdir_algos/glm/pyunit_PUBDEV_5008_5386_glm_ordinal_large.py | vishalbelsare/h2o-3 | 9322fb0f4c0e2358449e339a434f607d524c69fa | [
"Apache-2.0"
] | 58 | 2021-10-01T12:43:37.000Z | 2021-12-08T22:58:43.000Z | h2o-py/tests/testdir_algos/glm/pyunit_PUBDEV_5008_5386_glm_ordinal_large.py | vishalbelsare/h2o-3 | 9322fb0f4c0e2358449e339a434f607d524c69fa | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import print_function
import h2o
from tests import pyunit_utils
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
if __name__ == "__main__":
pyunit_utils.standalone_test(testOrdinalLogit)
else:
testOrdinalLogit()
| 45.333333 | 141 | 0.629644 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import print_function
import h2o
from tests import pyunit_utils
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
def testOrdinalLogit():
Dtrain = h2o.import_file(pyunit_utils.locate("bigdata/laptop/glm_ordinal_logit/ordinal_ordinal_20_training_set.csv"))
Dtrain["C21"] = Dtrain["C21"].asfactor()
Dtest = h2o.import_file(pyunit_utils.locate("bigdata/laptop/glm_ordinal_logit/ordinal_ordinal_20_test_set.csv"))
Dtest["C21"] = Dtest["C21"].asfactor()
print("Fit model on dataset")
regL = [1.0/Dtrain.nrow, 1.0/(10*Dtrain.nrow), 1.0/(100*Dtrain.nrow)]
lambdaL = regL
alphaL = [0.8]
bestAccLH = 0.0
bestAccSQERR = 0.0
for reg in regL:
for lAmbda in lambdaL:
for alpha in alphaL:
model = H2OGeneralizedLinearEstimator(family="ordinal", alpha=alpha, lambda_=lAmbda, obj_reg = reg,
max_iterations=1000, beta_epsilon=1e-8, objective_epsilon=1e-8, seed=12345)
model.train(x=list(range(0,20)), y="C21", training_frame=Dtrain)
predH2O = model.predict(Dtest)
acc = calAcc(predH2O["predict"].as_data_frame(use_pandas=False), Dtest["C21"].as_data_frame(use_pandas=False))
if (acc > bestAccLH):
bestAccLH = acc
model2 = H2OGeneralizedLinearEstimator(family="ordinal", alpha=alpha, lambda_=lAmbda,
obj_reg = reg, max_iterations=1000, beta_epsilon=1e-8,
solver="GRADIENT_DESCENT_SQERR", objective_epsilon=1e-8, seed=12345)
model2.train(x=list(range(0,20)), y="C21", training_frame=Dtrain)
predH2O2 = model2.predict(Dtest)
acc = calAcc(Dtest["C21"].as_data_frame(use_pandas=False), predH2O2['predict'].as_data_frame(use_pandas=False))
if (bestAccSQERR < acc):
bestAccSQERR = acc
print("Best accuracy for GRADIENT_DESCENT_LH is {0} and best accuracy for GRADIENT_DESCENT_SQERR is {1}".format(bestAccLH, bestAccSQERR))
assert bestAccSQERR >= bestAccLH, "Ordinal regression default solver performs better than new solver."
def calAcc(f1, f2):
acc = 0
for index in range(1,len(f1)):
if (f1[index][0]==f2[index][0]):
acc=acc+1.0
return (acc*1.0/(len(f1)-1.0))
if __name__ == "__main__":
pyunit_utils.standalone_test(testOrdinalLogit)
else:
testOrdinalLogit()
| 2,240 | 0 | 46 |
d90ef5da98b8340392ecfb3fada8d9714ad70733 | 537 | py | Python | urlabridge/app/models.py | kaitlinlogie/url-abridge | dc50f1862de3303edeb4e90d3b75f336e808117b | [
"MIT"
] | null | null | null | urlabridge/app/models.py | kaitlinlogie/url-abridge | dc50f1862de3303edeb4e90d3b75f336e808117b | [
"MIT"
] | null | null | null | urlabridge/app/models.py | kaitlinlogie/url-abridge | dc50f1862de3303edeb4e90d3b75f336e808117b | [
"MIT"
] | null | null | null | from django.db import models
| 25.571429 | 86 | 0.6946 | from django.db import models
class Domain(models.Model):
domain = models.CharField(max_length=200)
def __str__(self):
return self.domain
class Path(models.Model):
domain = models.ForeignKey(Domain, on_delete=models.CASCADE)
redirect_from = models.CharField(max_length=5000)
redirect_to = models.CharField(max_length=1000)
class Meta:
unique_together = ('domain', 'redirect_from')
def __str__(self):
return '{}/{} -> {}'.format(self.domain, self.redirect_from, self.redirect_to)
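# A minimal usage sketch (made-up values, hypothetical helper -- not part of
# the original models.py); shows how the two models and __str__ fit together.
def _demo_redirect_rule():
    """Hedged sketch: run inside a Django shell or test, not at import time."""
    d = Domain.objects.create(domain='example.com')
    p = Path.objects.create(domain=d, redirect_from='old-page',
                            redirect_to='https://example.com/new-page')
    return str(p)  # 'example.com/old-page -> https://example.com/new-page'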
| 108 | 352 | 46 |
a23bb494afe62c84ae4f93b0840f0a82740884cd | 421 | py | Python | ectf'14/exploit/250/exploit.py | anarcheuz/CTF | beaccbfe036d90c7d7018978bad288c831d3f8f5 | [
"MIT"
] | 2 | 2015-03-24T22:20:08.000Z | 2018-05-12T16:41:13.000Z | ectf'14/exploit/250/exploit.py | anarcheuz/CTF | beaccbfe036d90c7d7018978bad288c831d3f8f5 | [
"MIT"
] | null | null | null | ectf'14/exploit/250/exploit.py | anarcheuz/CTF | beaccbfe036d90c7d7018978bad288c831d3f8f5 | [
"MIT"
] | null | null | null | import socket
import struct
from binascii import hexlify
system_plt = 0x080483a0
sh = 0x80485c0 # /bin/bash -c 'cat flag.txt'
payload = "A"*140
payload += struct.pack("<I", system_plt)
payload += "AAAA"
payload += struct.pack("<I", sh)
#open('payload', 'w').write(payload)
s=socket.create_connection(('212.71.235.214', 5000))
print s.recv(1024)
s.send(payload+'\n')
print s.recv(1024)
#flag{assembly_is_awesome!!}
| 18.304348 | 52 | 0.700713 | import socket
import struct
from binascii import hexlify
system_plt = 0x080483a0
sh = 0x80485c0 # /bin/bash -c 'cat flag.txt'
payload = "A"*140
payload += struct.pack("<I", system_plt)
payload += "AAAA"
payload += struct.pack("<I", sh)
#open('payload', 'w').write(payload)
s=socket.create_connection(('212.71.235.214', 5000))
print s.recv(1024)
s.send(payload+'\n')
print s.recv(1024)
#flag{assembly_is_awesome!!}
| 0 | 0 | 0 |
1bb4c5f165814f6f8bae4f8a473bb65e176ed0bb | 1,428 | py | Python | flydata.py | KitesForFuture/visualizer | 2ed1e7741f0998cba7a782f936822b1d5b539d47 | [
"MIT"
] | null | null | null | flydata.py | KitesForFuture/visualizer | 2ed1e7741f0998cba7a782f936822b1d5b539d47 | [
"MIT"
] | null | null | null | flydata.py | KitesForFuture/visualizer | 2ed1e7741f0998cba7a782f936822b1d5b539d47 | [
"MIT"
] | null | null | null | import struct
| 25.5 | 160 | 0.607143 | import struct
class Flydata():
def __init__(self, bytes):
# Cycle-Time (1), Height (1), Gyro-Vector (3), Accel-Vector (3), Rotation-Matrix (9), G-Correction-Axis (3), G-Correction-Angle (1), Position-Matrix (9)
        data = struct.unpack('31f', bytes)  # 31 native-order floats (124 bytes)
self.cycle_seconds = data[0]
self.height = data[1]
self.height_derivative = data[2]
self.x_rotation = (data[22], data[25], data[28])
self.y_rotation = (data[23], data[26], data[29])
self.z_rotation = (data[24], data[27], data[30])
print("Accel: " + str(data[6:9]))
#print("Axis / Angle:" + str(data[17:21]))
@property
def cycle_seconds(self):
return self.__cycle_seconds
@cycle_seconds.setter
def cycle_seconds(self, cycle_seconds):
self.__cycle_seconds = cycle_seconds
@property
def x_rotation(self):
return self.__x
@x_rotation.setter
def x_rotation(self, x):
self.__x = x
@property
def y_rotation(self):
return self.__y
@y_rotation.setter
def y_rotation(self, y):
self.__y = y
@property
def z_rotation(self):
return self.__z
@z_rotation.setter
def z_rotation(self, z):
self.__z = z
@property
def height(self):
return self.__height
@height.setter
def height(self, height):
self.__height = height
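# A minimal construction sketch (hypothetical, zero-filled payload matching the
# 31-float unpack format in __init__); runs only when executed directly.
if __name__ == '__main__':
    payload = struct.pack('31f', *([0.0] * 31))  # 124 bytes, the layout __init__ expects
    fd = Flydata(payload)
    print(fd.height, fd.x_rotation)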
| 915 | 476 | 23 |
9883d0fce6658908ad9144bec50e2e9e4127d938 | 727 | py | Python | kaijigame.py | theortsac/kaiji-e-card | 3eca9df6ff6cb1890619f7b5593090aa531a7718 | [
"MIT"
] | null | null | null | kaijigame.py | theortsac/kaiji-e-card | 3eca9df6ff6cb1890619f7b5593090aa531a7718 | [
"MIT"
] | null | null | null | kaijigame.py | theortsac/kaiji-e-card | 3eca9df6ff6cb1890619f7b5593090aa531a7718 | [
"MIT"
] | null | null | null | from random import randrange
kingCards = ['C', 'C', 'C', 'C', 'K']
slaveCards = ['C', 'C', 'C', 'C', 'S']
print("""- C = Citizen
- S = Slave
- K = King""")
for i in range(5):
print('Your cards:', slaveCards)
cardIPlay = input('Which card will you play? ')
slaveCards.remove(cardIPlay)
cardKPlay = kingCards[randrange(len(kingCards))]
kingCards.remove(cardKPlay)
print('The enemy played', cardKPlay + '!')
if cardIPlay == 'S' and cardKPlay == 'C':
print('Defeated!')
break
elif cardIPlay == 'C' and cardKPlay == 'K':
print('Defeated!')
break
elif cardIPlay == 'S' and cardKPlay == 'K':
print('Victory!')
break
else:
print('Draw!')
| 27.961538 | 52 | 0.562586 | from random import randrange
kingCards = ['C', 'C', 'C', 'C', 'K']
slaveCards = ['C', 'C', 'C', 'C', 'S']
print("""- C = Citizen
- S = Slave
- K = King""")
for i in range(5):
print('Your cards:', slaveCards)
cardIPlay = input('Which card will you play? ')
slaveCards.remove(cardIPlay)
cardKPlay = kingCards[randrange(len(kingCards))]
kingCards.remove(cardKPlay)
print('The enemy played', cardKPlay + '!')
if cardIPlay == 'S' and cardKPlay == 'C':
print('Defeated!')
break
elif cardIPlay == 'C' and cardKPlay == 'K':
print('Defeated!')
break
elif cardIPlay == 'S' and cardKPlay == 'K':
print('Victory!')
break
else:
print('Draw!')
| 0 | 0 | 0 |
558c916ef8163e6662f8789905d357318c3bed90 | 361 | py | Python | tests/base_testcase.py | projectshift/shift-user | ccab014378d60cd372419bcbf63ae6b4b3559d18 | [
"MIT"
] | null | null | null | tests/base_testcase.py | projectshift/shift-user | ccab014378d60cd372419bcbf63ae6b4b3559d18 | [
"MIT"
] | 16 | 2020-05-05T10:17:40.000Z | 2021-06-06T09:01:23.000Z | tests/base_testcase.py | projectshift/shift-user | ccab014378d60cd372419bcbf63ae6b4b3559d18 | [
"MIT"
] | null | null | null | from boiler.testing.testcase import ViewTestCase
from tests.test_app.app import app as test_app
class BaseTestCase(ViewTestCase):
"""
Base test case
Uses test case from shiftboiler to provide flask-integrated testing
facilities.
"""
| 22.5625 | 71 | 0.67036 | from boiler.testing.testcase import ViewTestCase
from tests.test_app.app import app as test_app
class BaseTestCase(ViewTestCase):
"""
Base test case
Uses test case from shiftboiler to provide flask-integrated testing
facilities.
"""
def setUp(self, app=None):
if not app:
app = test_app
super().setUp(app)
| 79 | 0 | 26 |
0bafb6537a70fec8df830c76f640e26f36eafc3c | 162 | py | Python | random/main.py | alexander-schilling/fintual_test | 3a8a3cd17dea4a7a1203eb1cd58a2af411700207 | [
"MIT"
] | null | null | null | random/main.py | alexander-schilling/fintual_test | 3a8a3cd17dea4a7a1203eb1cd58a2af411700207 | [
"MIT"
] | null | null | null | random/main.py | alexander-schilling/fintual_test | 3a8a3cd17dea4a7a1203eb1cd58a2af411700207 | [
"MIT"
] | null | null | null | from classes.portfolio import Portfolio
from classes.menu import Menu
main()
| 14.727273 | 39 | 0.697531 | from classes.portfolio import Portfolio
from classes.menu import Menu
def main():
portfolio = Portfolio()
menu = Menu(portfolio)
menu.run()
main()
| 61 | 0 | 23 |
e73dabd3fa271e007962498bfd2e13f26651cc6f | 1,184 | py | Python | scripts/dblp/wordcases.py | sandeepsoni/semantic-progressiveness | 824079b388d0eebc92b2197805b27ed320353f8f | [
"MIT"
] | 2 | 2021-04-11T16:28:44.000Z | 2021-07-31T03:22:07.000Z | scripts/dblp/wordcases.py | sandeepsoni/semantic-progressiveness | 824079b388d0eebc92b2197805b27ed320353f8f | [
"MIT"
] | null | null | null | scripts/dblp/wordcases.py | sandeepsoni/semantic-progressiveness | 824079b388d0eebc92b2197805b27ed320353f8f | [
"MIT"
] | 1 | 2021-09-01T22:45:25.000Z | 2021-09-01T22:45:25.000Z | import plac
import os
from collections import defaultdict
import logging
logging.basicConfig (format="%(asctime)s : %(levelname)s : %(message)s", level=logging.INFO)
@plac.annotations(
dirname = ("path of the directory", "positional"),
srcfile = ("source filename", "positional"),
tgtfile = ("target filename", "positional")
)
if __name__ == "__main__":
plac.call (main)
| 30.358974 | 104 | 0.654561 | import plac
import os
from collections import defaultdict
import logging
logging.basicConfig (format="%(asctime)s : %(levelname)s : %(message)s", level=logging.INFO)
@plac.annotations(
dirname = ("path of the directory", "positional"),
srcfile = ("source filename", "positional"),
tgtfile = ("target filename", "positional")
)
def main (dirname, srcfile, tgtfile):
V = defaultdict (int)
Vlower = defaultdict (int)
VUPPER = defaultdict (int)
VTitle = defaultdict (int)
with open (os.path.join (dirname, srcfile)) as fin:
for i, line in enumerate (fin):
tokens = line.strip().split(" ")
for token in tokens:
if token.isalpha():
lcaseToken = token.lower()
V[lcaseToken] += 1
Vlower[lcaseToken] += int (token.islower())
VUPPER[lcaseToken] += int (token.isupper())
VTitle[lcaseToken] += int (token.istitle())
if (i+1) % 10000 == 0:
logging.info ("{0} lines processed: {1}".format (srcfile, i+1))
with open (os.path.join (dirname, tgtfile), "w") as fout:
for word in sorted (V):
fout.write ("{0},{1},{2},{3},{4}\n".format (word, Vlower[word], VUPPER[word], VTitle[word], V[word]))
if __name__ == "__main__":
plac.call (main)
| 784 | 0 | 22 |
9984656faf22657ca1d0eb854e78379b3f16d39e | 514 | py | Python | example80.py | augustone/100examples | 94b593b5690a7403e1bf7424047f9a67822d2fd7 | [
"Unlicense"
] | 21 | 2017-05-01T10:23:42.000Z | 2021-09-27T17:11:43.000Z | example80.py | augustone/100examples | 94b593b5690a7403e1bf7424047f9a67822d2fd7 | [
"Unlicense"
] | null | null | null | example80.py | augustone/100examples | 94b593b5690a7403e1bf7424047f9a67822d2fd7 | [
"Unlicense"
] | 6 | 2017-05-26T12:23:26.000Z | 2020-06-30T01:57:36.000Z | #!/usr/bin/python3
__author__ = "yang.dd"
"""
example 080
"""
if __name__ == '__main__':
'''
    Start counting from the fifth monkey, who takes 1 peach.
    If any monkey's share fails the condition, restart the count from the beginning until all are satisfied.
'''
monkey = 5
peach5th = 1
peach = 1
while monkey > 1:
total = peach * 5 + 1
if total % 4 == 0:
monkey -= 1
peach = total / 4
else:
            # start over from the 5th monkey
peach5th += 1
peach = peach5th
monkey = 5
print("沙滩上最少有:%d个桃子。" % (int(peach * 5 + 1)))
| 17.133333 | 49 | 0.451362 | #!/usr/bin/python3
__author__ = "yang.dd"
"""
example 080
"""
if __name__ == '__main__':
'''
    Start counting from the fifth monkey, who takes 1 peach.
    If any monkey's share fails the condition, restart the count from the beginning until all are satisfied.
'''
monkey = 5
peach5th = 1
peach = 1
while monkey > 1:
total = peach * 5 + 1
if total % 4 == 0:
monkey -= 1
peach = total / 4
else:
            # start over from the 5th monkey
peach5th += 1
peach = peach5th
monkey = 5
print("沙滩上最少有:%d个桃子。" % (int(peach * 5 + 1)))
| 0 | 0 | 0 |
d3cf679068c141105ec4bb19427693ae79759075 | 3,129 | py | Python | s0ngbrew/codec.py | RhythmLunatic/s0ngbrew | a23c96971a1a447bf90f15851e35d1a1ed54b7fb | [
"0BSD"
] | 1 | 2021-04-09T23:43:08.000Z | 2021-04-09T23:43:08.000Z | s0ngbrew/codec.py | RhythmLunatic/s0ngbrew | a23c96971a1a447bf90f15851e35d1a1ed54b7fb | [
"0BSD"
] | null | null | null | s0ngbrew/codec.py | RhythmLunatic/s0ngbrew | a23c96971a1a447bf90f15851e35d1a1ed54b7fb | [
"0BSD"
] | 1 | 2019-12-15T15:18:05.000Z | 2019-12-15T15:18:05.000Z | #!/usr/bin/env python3
import os
import zlib
from struct import pack, unpack
class Codec(object):
"""\
Main codec for DRP.
"""
def run(self):
"""\
Run the codec and write the output file.
"""
with open(self.ifname, 'rb') as f:
self.iofunc(f)
def encode(self, f):
"""\
        Encode DRP: boilerplate header and XML compression
"""
if os.path.basename(self.ofname) == "musicInfo.drp":
type = 0
elif os.path.basename(self.ofname) == "katsu_theme.drp":
type = 1
else:
print("Please name your output file correctly. It should be musicInfo.drp or katsu_theme.drp.")
sys.exit()
rxml_data = f.read()
bxml_data = zlib.compress(rxml_data)
bxmls = (len(bxml_data) + 12) if type == 0 else (len(bxml_data) + 8) # 12 for Taiko 3, 4 for Taiko 1.. And 8 for katsu_theme
checksum = len(rxml_data)
#Margin is different for katsu
unknown_margin = (0x20000001, 0x0310, 0x00010001, 0) if type == 0 else (0x20000001, 0x01B0, 0x00010001, 0)
quadup = lambda x: (x, x, x, x)
align = lambda x: x * b'\x00'
with open(self.ofname, 'wb') as of:
unknown, filecount = 2, 1
of.seek(0x14)
of.write(pack('>HH', unknown, filecount))
of.seek(0x60)
# Notice: the original musicInfo.drp stores the filename
# `musicinfo_db`, which might be game-specific
if type == 0:
of.write(bytes("musicinfo_db".encode('ascii')))
if type == 1:
of.write(bytes("katsu_theme_db".encode('ascii')))
of.seek(0xa0) #Jump to A0 (Where the unknown string is written and the rest of it)
of.write(pack('>9I',
*unknown_margin,
*quadup(bxmls), #???
checksum))
of.write(bxml_data)
remain = of.tell() % 0x10
if remain: of.write(align(0x10 - remain))
def decode(self, f):
"""\
Decode DRP: Decompress XML data
"""
f.seek(0x14)
unknown, filecount = unpack('>HH', f.read(4))
if filecount != 1:
#TODO...
print('Not a single XML compressed file, internal names will be used instead.')
f.seek(0x60)
for i in range(filecount):
fname = f.read(0x40).split(b'\x00')[0].decode("utf-8")
print(fname)
#No idea what this line is.
f.read(0x10)
# bxmls: binary XML size (zlib compressed), rxmls: Raw XML size
# the 4 bxmls are duplicate, and rxmls is for checksum
bxmls, bxmls2, bxmls3, bxmls4, rxmls = unpack('>5I', f.read(4 * 5))
bxml_data = f.read(bxmls - 4) # rxmls is an unsigned integer
if bxmls > 80:
bxml_data = zlib.decompress(bxml_data) # no Unix EOF (\n)
if len(bxml_data) != rxmls:
raise ChecksumError('Checksum failed, file might be broken')
if filecount == 1:
with open(self.ofname, 'wb') as of:
of.write(bxml_data)
else:
with open(fname+".xml", 'wb') as of:
of.write(bxml_data)
| 27.447368 | 126 | 0.652605 | #!/usr/bin/env python3
import os
import sys  # needed by encode() below for sys.exit()
import zlib
from struct import pack, unpack
class FileCountError(Exception):
pass
class ChecksumError(Exception):
pass
class Codec(object):
"""\
Main codec for DRP.
"""
def __init__(self, ifname='', ofname='', is_bin=True):
self.ifname = ifname
#Currently automatic ofname doesn't work due to cli.py
if ofname == "":
            self.ofname = os.path.splitext(ifname)[0]+".xml"
else:
self.ofname = ofname
self.is_bin = is_bin
self.iofunc = (self.encode, self.decode)[self.is_bin]
def run(self):
"""\
Run the codec and write the output file.
"""
with open(self.ifname, 'rb') as f:
self.iofunc(f)
def encode(self, f):
"""\
        Encode DRP: boilerplate header and XML compression
"""
if os.path.basename(self.ofname) == "musicInfo.drp":
type = 0
elif os.path.basename(self.ofname) == "katsu_theme.drp":
type = 1
else:
print("Please name your output file correctly. It should be musicInfo.drp or katsu_theme.drp.")
sys.exit()
rxml_data = f.read()
bxml_data = zlib.compress(rxml_data)
bxmls = (len(bxml_data) + 12) if type == 0 else (len(bxml_data) + 8) # 12 for Taiko 3, 4 for Taiko 1.. And 8 for katsu_theme
checksum = len(rxml_data)
#Margin is different for katsu
unknown_margin = (0x20000001, 0x0310, 0x00010001, 0) if type == 0 else (0x20000001, 0x01B0, 0x00010001, 0)
quadup = lambda x: (x, x, x, x)
align = lambda x: x * b'\x00'
with open(self.ofname, 'wb') as of:
unknown, filecount = 2, 1
of.seek(0x14)
of.write(pack('>HH', unknown, filecount))
of.seek(0x60)
# Notice: the original musicInfo.drp stores the filename
# `musicinfo_db`, which might be game-specific
if type == 0:
of.write(bytes("musicinfo_db".encode('ascii')))
if type == 1:
of.write(bytes("katsu_theme_db".encode('ascii')))
of.seek(0xa0) #Jump to A0 (Where the unknown string is written and the rest of it)
of.write(pack('>9I',
*unknown_margin,
*quadup(bxmls), #???
checksum))
of.write(bxml_data)
remain = of.tell() % 0x10
if remain: of.write(align(0x10 - remain))
def decode(self, f):
"""\
Decode DRP: Decompress XML data
"""
f.seek(0x14)
unknown, filecount = unpack('>HH', f.read(4))
if filecount != 1:
#TODO...
print('Not a single XML compressed file, internal names will be used instead.')
f.seek(0x60)
for i in range(filecount):
fname = f.read(0x40).split(b'\x00')[0].decode("utf-8")
print(fname)
#No idea what this line is.
f.read(0x10)
# bxmls: binary XML size (zlib compressed), rxmls: Raw XML size
# the 4 bxmls are duplicate, and rxmls is for checksum
bxmls, bxmls2, bxmls3, bxmls4, rxmls = unpack('>5I', f.read(4 * 5))
bxml_data = f.read(bxmls - 4) # rxmls is an unsigned integer
if bxmls > 80:
bxml_data = zlib.decompress(bxml_data) # no Unix EOF (\n)
if len(bxml_data) != rxmls:
raise ChecksumError('Checksum failed, file might be broken')
if filecount == 1:
with open(self.ofname, 'wb') as of:
of.write(bxml_data)
else:
with open(fname+".xml", 'wb') as of:
of.write(bxml_data)
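# A minimal usage sketch; the file names are assumptions, and is_bin=True picks
# decode via the (encode, decode)[is_bin] dispatch in __init__ above.
if __name__ == '__main__':
    Codec('musicInfo.drp', 'musicinfo_db.xml', is_bin=True).run()    # DRP -> XML
    # Re-encoding requires the output name musicInfo.drp or katsu_theme.drp:
    # Codec('musicinfo_db.xml', 'musicInfo.drp', is_bin=False).run()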
| 296 | 33 | 69 |
e5bb9891de1f56d0ec84f6d31b0fb41b00aa32ff | 142 | py | Python | 03 - Strings/Capitalize!.py | LynX-gh/HackerRank-python | 52705f423dd564463c67de1b8a2ded49bbef565e | [
"MIT"
] | null | null | null | 03 - Strings/Capitalize!.py | LynX-gh/HackerRank-python | 52705f423dd564463c67de1b8a2ded49bbef565e | [
"MIT"
] | null | null | null | 03 - Strings/Capitalize!.py | LynX-gh/HackerRank-python | 52705f423dd564463c67de1b8a2ded49bbef565e | [
"MIT"
] | null | null | null | # Complete the solve function below. | 28.4 | 50 | 0.626761 | # Complete the solve function below.
def solve(s):
names = s.split(' ')
name = ' '.join(t.capitalize() for t in names)
return name | 84 | 0 | 22 |
8dbb343f5b36e77896d6fa0de9e2829800a05cda | 3,216 | py | Python | src/osmo/make_tx.py | johnny-wang/staketaxcsv | 5e6ce5f17db780737192947008efb3d2f03b769e | [
"MIT"
] | null | null | null | src/osmo/make_tx.py | johnny-wang/staketaxcsv | 5e6ce5f17db780737192947008efb3d2f03b769e | [
"MIT"
] | null | null | null | src/osmo/make_tx.py | johnny-wang/staketaxcsv | 5e6ce5f17db780737192947008efb3d2f03b769e | [
"MIT"
] | null | null | null |
from common.make_tx import (
make_swap_tx, make_reward_tx, make_transfer_in_tx, make_transfer_out_tx,
make_unknown_tx, make_unknown_tx_with_transfer, _make_tx_exchange
)
from osmo import util_osmo
| 35.733333 | 114 | 0.731965 |
from common.make_tx import (
make_swap_tx, make_reward_tx, make_transfer_in_tx, make_transfer_out_tx,
make_unknown_tx, make_unknown_tx_with_transfer, _make_tx_exchange
)
from osmo import util_osmo
def _edit_row(row, txinfo, msginfo):
row.txid = txinfo.txid + "-" + str(msginfo.msg_index)
if msginfo.msg_index > 0:
row.fee = ""
row.fee_currency = ""
def make_osmo_tx(txinfo, msginfo, sent_amount, sent_currency, received_amount, received_currency,
txid=None, empty_fee=False):
tx_type = util_osmo._make_tx_type(msginfo)
row = _make_tx_exchange(
txinfo, sent_amount, sent_currency, received_amount, received_currency, tx_type,
txid=txid, empty_fee=empty_fee)
_edit_row(row, txinfo, msginfo)
return row
def make_osmo_simple_tx(txinfo, msginfo):
row = make_osmo_tx(txinfo, msginfo, "", "", "", "")
return row
def make_osmo_swap_tx(txinfo, msginfo, sent_amount, sent_currency, received_amount, received_currency):
row = make_swap_tx(txinfo, sent_amount, sent_currency, received_amount, received_currency)
_edit_row(row, txinfo, msginfo)
return row
def make_osmo_reward_tx(txinfo, msginfo, reward_amount, reward_currency):
row = make_reward_tx(txinfo, reward_amount, reward_currency)
_edit_row(row, txinfo, msginfo)
return row
def make_osmo_transfer_out_tx(txinfo, msginfo, sent_amount, sent_currency, dest_address=None):
row = make_transfer_out_tx(txinfo, sent_amount, sent_currency, dest_address)
_edit_row(row, txinfo, msginfo)
return row
def make_osmo_transfer_in_tx(txinfo, msginfo, received_amount, received_currency):
row = make_transfer_in_tx(txinfo, received_amount, received_currency)
_edit_row(row, txinfo, msginfo)
return row
def make_osmo_unknown_tx(txinfo, msginfo):
row = make_unknown_tx(txinfo)
_edit_row(row, txinfo, msginfo)
return row
def make_osmo_unknown_tx_with_transfer(txinfo, msginfo, sent_amount, sent_currency, received_amount,
received_currency, empty_fee=False, z_index=0):
row = make_unknown_tx_with_transfer(
txinfo, sent_amount, sent_currency, received_amount, received_currency, empty_fee, z_index)
_edit_row(row, txinfo, msginfo)
return row
def make_osmo_lp_deposit_tx(txinfo, msginfo, sent_amount, sent_currency, lp_amount, lp_currency, empty_fee=False):
row = make_osmo_tx(txinfo, msginfo, sent_amount, sent_currency, lp_amount, lp_currency,
txid=None, empty_fee=empty_fee)
return row
def make_osmo_lp_withdraw_tx(txinfo, msginfo, lp_amount, lp_currency, received_amount, received_currency,
empty_fee=False):
row = make_osmo_tx(txinfo, msginfo, lp_amount, lp_currency, received_amount, received_currency,
txid=None, empty_fee=empty_fee)
return row
def make_osmo_lp_stake_tx(txinfo, msginfo, lp_amount, lp_currency):
row = make_osmo_tx(txinfo, msginfo, lp_amount, lp_currency, "", "")
return row
def make_osmo_lp_unstake_tx(txinfo, msginfo, lp_amount, lp_currency):
row = make_osmo_tx(txinfo, msginfo, "", "", lp_amount, lp_currency)
return row
| 2,698 | 0 | 299 |
13245723e2dda68ad0f4118a95b1a23aac9dde6a | 25,424 | py | Python | tests/tools/cpp/find_warnings.py | susundberg/arduino-aquarium-feeder | 3f243b35e8e27eb4fb551d19fae0b45175a4e23c | [
"MIT"
] | 2 | 2017-11-04T00:09:39.000Z | 2020-04-12T08:28:25.000Z | tests/tools/cpp/find_warnings.py | susundberg/esp8266-waterpump | f78a364c4dfec4ebd8fd2a744a190b5e57941479 | [
"MIT"
] | 1 | 2020-05-27T10:26:57.000Z | 2020-05-27T10:26:57.000Z | tests/tools/cpp/find_warnings.py | susundberg/esp8266-waterpump | f78a364c4dfec4ebd8fd2a744a190b5e57941479 | [
"MIT"
] | null | null | null | # Copyright 2007 Neal Norwitz
# Portions Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Find warnings for C++ code.
TODO(nnorwitz): provide a mechanism to configure which warnings should
be generated and which should be suppressed. Currently, all possible
warnings will always be displayed. There is no way to suppress any.
There also needs to be a way to use annotations in the source code to
suppress warnings.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
from . import ast
from . import headers
from . import keywords
from . import metrics
from . import symbols
from . import tokenize
from . import utils
try:
basestring
except NameError:
basestring = str
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
HEADER_EXTENSIONS = frozenset(['.h', '.hh', '.hpp', '.h++', '.hxx', '.cuh'])
CPP_EXTENSIONS = frozenset(['.cc', '.cpp', '.c++', '.cxx', '.cu'])
# These enumerations are used to determine how a symbol/#include file is used.
UNUSED = 0
USES_REFERENCE = 1
USES_DECLARATION = 2
DECLARATION_TYPES = (ast.Class, ast.Struct, ast.Enum, ast.Union)
class Module(object):
"""Data container representing a single source file."""
| 42.373333 | 79 | 0.55542 | # Copyright 2007 Neal Norwitz
# Portions Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Find warnings for C++ code.
TODO(nnorwitz): provide a mechanism to configure which warnings should
be generated and which should be suppressed. Currently, all possible
warnings will always be displayed. There is no way to suppress any.
There also needs to be a way to use annotations in the source code to
suppress warnings.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
from . import ast
from . import headers
from . import keywords
from . import metrics
from . import symbols
from . import tokenize
from . import utils
try:
basestring
except NameError:
basestring = str
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
HEADER_EXTENSIONS = frozenset(['.h', '.hh', '.hpp', '.h++', '.hxx', '.cuh'])
CPP_EXTENSIONS = frozenset(['.cc', '.cpp', '.c++', '.cxx', '.cu'])
# These enumerations are used to determine how a symbol/#include file is used.
UNUSED = 0
USES_REFERENCE = 1
USES_DECLARATION = 2
DECLARATION_TYPES = (ast.Class, ast.Struct, ast.Enum, ast.Union)
class Module(object):
"""Data container representing a single source file."""
def __init__(self, filename, ast_list):
self.filename = filename
self.ast_list = ast_list
self.public_symbols = self._get_exported_symbols()
def _get_exported_symbols(self):
if not self.ast_list:
return {}
return dict([(n.name, n) for n in self.ast_list if n.is_exportable()])
def is_header_file(filename):
_, ext = os.path.splitext(filename)
return ext.lower() in HEADER_EXTENSIONS
def is_cpp_file(filename):
_, ext = os.path.splitext(filename)
return ext.lower() in CPP_EXTENSIONS
class WarningHunter(object):
# Cache filename: ast_list
_module_cache = {}
def __init__(self, filename, source, ast_list, include_paths, quiet=False):
self.filename = filename
self.source = source
self.ast_list = ast_list
self.include_paths = include_paths[:]
self.quiet = quiet
self.symbol_table = symbols.SymbolTable()
self.metrics = metrics.Metrics(source)
self.warnings = set()
if filename not in self._module_cache:
self._module_cache[filename] = Module(filename, ast_list)
def _add_warning(self, msg, node, filename=None):
if filename is not None:
contents = utils.read_file(filename)
src_metrics = metrics.Metrics(contents)
else:
filename = self.filename
src_metrics = self.metrics
line_number = get_line_number(src_metrics, node)
self.warnings.add((filename, line_number, msg))
def show_warnings(self):
for filename, line_num, msg in sorted(self.warnings):
if line_num == 0:
print('{}: {}'.format(filename, msg))
else:
print('{}:{}: {}'.format(filename, line_num, msg))
def find_warnings(self):
if is_header_file(self.filename):
self._find_header_warnings()
elif is_cpp_file(self.filename):
self._find_source_warnings()
def _update_symbol_table(self, module):
for name, node in module.public_symbols.items():
self.symbol_table.add_symbol(name, node.namespace, node, module)
def _get_module(self, node):
include_paths = [os.path.dirname(self.filename)] + self.include_paths
source, filename = headers.read_source(node.filename, include_paths)
if source is None:
module = Module(filename, None)
msg = "unable to find '{}'".format(filename)
self._add_warning(msg, node)
elif filename in self._module_cache:
# The cache survives across all instances, but the symbol table
# is per instance, so we need to make sure the symbol table
# is updated even if the module was in the cache.
module = self._module_cache[filename]
self._update_symbol_table(module)
else:
ast_list = None
try:
builder = ast.builder_from_source(source, filename,
quiet=self.quiet)
ast_list = [_f for _f in builder.generate() if _f]
except tokenize.TokenError:
pass
except ast.ParseError as error:
if not self.quiet:
print(
"Exception while processing '{}': {}".format(
filename,
error),
file=sys.stderr)
module = Module(filename, ast_list)
self._module_cache[filename] = module
self._update_symbol_table(module)
return module
def _read_and_parse_includes(self):
# Map header-filename: (#include AST node, module).
included_files = {}
# Map declaration-name: AST node.
forward_declarations = {}
files_seen = {}
for node in self.ast_list:
if isinstance(node, ast.Include):
if node.system:
filename = node.filename
else:
module = self._get_module(node)
filename = module.filename
_, ext = os.path.splitext(filename)
if ext.lower() != '.hxx':
included_files[filename] = node, module
if is_cpp_file(filename):
self._add_warning(
"should not #include C++ source file '{}'".format(
node.filename),
node)
if filename == self.filename:
self._add_warning(
"'{}' #includes itself".format(node.filename),
node)
if filename in files_seen:
include_node = files_seen[filename]
line_num = get_line_number(self.metrics, include_node)
self._add_warning(
"'{}' already #included on line {}".format(
node.filename,
line_num),
node)
else:
files_seen[filename] = node
if isinstance(node, DECLARATION_TYPES) and node.is_declaration():
forward_declarations[node.full_name()] = node
return included_files, forward_declarations
def _verify_include_files_used(self, file_uses, included_files):
"""Find all #include files that are unnecessary."""
for include_file, use in file_uses.items():
if not use & USES_DECLARATION:
node, module = included_files[include_file]
if module.ast_list is not None:
msg = "'{}' does not need to be #included".format(
node.filename)
if use & USES_REFERENCE:
msg += '; use a forward declaration instead'
self._add_warning(msg, node)
def _verify_forward_declarations_used(self, forward_declarations,
decl_uses, file_uses):
"""Find all the forward declarations that are not used."""
for cls in forward_declarations:
if cls in file_uses:
if not decl_uses[cls] & USES_DECLARATION:
node = forward_declarations[cls]
msg = ("'{}' forward declared, "
'but needs to be #included'.format(cls))
self._add_warning(msg, node)
else:
if decl_uses[cls] == UNUSED:
node = forward_declarations[cls]
msg = "'{}' not used".format(cls)
self._add_warning(msg, node)
def _determine_uses(self, included_files, forward_declarations):
"""Set up the use type of each symbol."""
file_uses = dict.fromkeys(included_files, UNUSED)
decl_uses = dict.fromkeys(forward_declarations, UNUSED)
symbol_table = self.symbol_table
for name, node in forward_declarations.items():
try:
symbol_table.lookup_symbol(node.name, node.namespace)
decl_uses[name] |= USES_REFERENCE
except symbols.Error:
module = Module(name, None)
self.symbol_table.add_symbol(node.name, node.namespace, node,
module)
def _add_declaration(name, namespace):
if not name:
# Ignore anonymous struct. It is not standard, but we might as
# well avoid crashing if it is easy.
return
names = [n for n in namespace if n is not None]
if names:
name = '::'.join(names) + '::' + name
if name in decl_uses:
decl_uses[name] |= USES_DECLARATION
def _add_reference(name, namespace):
try:
file_use_node = symbol_table.lookup_symbol(name, namespace)
except symbols.Error:
return
name = file_use_node[1].filename
if file_use_node[1].ast_list is None:
decl_uses[name] |= USES_REFERENCE
elif name in file_uses:
# enum and typedef can't be forward declared
if (
isinstance(file_use_node[0], ast.Enum) or
isinstance(file_use_node[0], ast.Typedef)
):
file_uses[name] |= USES_DECLARATION
else:
file_uses[name] |= USES_REFERENCE
def _add_use(name, namespace):
if isinstance(name, list):
# name contains a list of tokens.
name = '::'.join([n.name for n in name])
            elif not isinstance(name, basestring):  # assumes the usual py2/py3 shim ('basestring = str') earlier in the module
# Happens when variables are defined with inlined types, e.g.:
# enum {...} variable;
return
try:
file_use_node = symbol_table.lookup_symbol(name, namespace)
except symbols.Error:
return
name = file_use_node[1].filename
file_uses[name] = file_uses.get(name, 0) | USES_DECLARATION
def _add_variable(node, namespace, reference=False):
if node.reference or node.pointer or reference:
_add_reference(node.name, namespace)
else:
_add_use(node.name, namespace)
# This needs to recurse when the node is a templated type.
_add_template_use(node.name,
node.templated_types,
namespace,
reference)
def _process_function(function, namespace):
reference = function.body is None
if function.return_type:
return_type = function.return_type
_add_variable(return_type, namespace, reference)
for s in function.specializations:
_add_variable(s, namespace, not function.body)
templated_types = function.templated_types or ()
for p in function.parameters:
node = p.type
if node.name not in templated_types:
if function.body and p.name:
# Assume that if the function has a body and a name
# the parameter type is really used.
# NOTE(nnorwitz): this is over-aggressive. It would be
# better to iterate through the body and determine
# actual uses based on local vars and data members
# used.
_add_use(node.name, namespace)
elif (
p.default and
p.default[0].name != '0' and
p.default[0].name != 'NULL' and
p.default[0].name != 'nullptr'
):
_add_use(node.name, namespace)
elif node.reference or node.pointer or reference:
_add_reference(node.name, namespace)
else:
_add_use(node.name, namespace)
_add_template_use(node.name,
node.templated_types,
namespace,
reference)
def _process_function_body(function, namespace):
previous = None
save = namespace[:]
for t in function.body:
if t.token_type == tokenize.NAME:
previous = t
if not keywords.is_keyword(t.name):
# TODO(nnorwitz): handle static function calls.
# TODO(nnorwitz): handle using statements in file.
# TODO(nnorwitz): handle using statements in function.
# TODO(nnorwitz): handle namespace assignment in file.
_add_use(t.name, namespace)
elif t.name == '::' and previous is not None:
namespace.append(previous.name)
elif t.name in (':', ';'):
namespace = save[:]
def _add_template_use(name, types, namespace, reference=False):
for cls in types or ():
if cls.pointer or cls.reference or reference:
_add_reference(cls.name, namespace)
elif name.endswith('_ptr'):
# Special case templated classes that end w/_ptr.
# These are things like auto_ptr which do
# not require the class definition, only decl.
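                    # e.g. (C++): 'std::auto_ptr<Foo> p;' compiles with just 'class Foo;'.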
_add_reference(cls.name, namespace)
else:
_add_use(cls.name, namespace)
_add_template_use(cls.name, cls.templated_types,
namespace, reference)
def _process_types(nodes, namespace):
for node in nodes:
if isinstance(node, ast.Type):
_add_variable(node, namespace)
# Iterate through the source AST/tokens, marking each symbols use.
ast_seq = [self.ast_list]
namespace_stack = []
while ast_seq:
for node in ast_seq.pop():
if isinstance(node, ast.VariableDeclaration):
namespace = namespace_stack + node.namespace
_add_variable(node.type, namespace)
elif isinstance(node, ast.Function):
namespace = namespace_stack + node.namespace
_process_function(node, namespace)
if node.body:
_process_function_body(node, namespace)
elif isinstance(node, ast.Typedef):
namespace = namespace_stack + node.namespace
_process_types(node.alias, namespace)
elif isinstance(node, ast.Friend):
expr = node.expr
namespace = namespace_stack + node.namespace
if isinstance(expr, ast.Type):
_add_reference(expr.name, namespace)
elif isinstance(expr, ast.Function):
_process_function(expr, namespace)
elif isinstance(node, ast.Union) and node.body is not None:
ast_seq.append(node.body)
elif isinstance(node, ast.Class) and node.body is not None:
_add_declaration(node.name, node.namespace)
namespace = namespace_stack + node.namespace
_add_template_use('', node.bases, namespace)
ast_seq.append(node.body)
elif isinstance(node, ast.Using):
if node.names[0].name == 'namespace':
namespace_stack.append(node.names[1].name)
return file_uses, decl_uses
def _find_unused_warnings(self, included_files, forward_declarations,
primary_header=None):
file_uses, decl_uses = self._determine_uses(included_files,
forward_declarations)
if primary_header and primary_header.filename in file_uses:
file_uses[primary_header.filename] |= USES_DECLARATION
self._verify_include_files_used(file_uses, included_files)
self._verify_forward_declarations_used(forward_declarations, decl_uses,
file_uses)
for node in forward_declarations.values():
try:
file_use_node = self.symbol_table.lookup_symbol(node.name,
node.namespace)
except symbols.Error:
continue
name = file_use_node[1].filename
if (
file_use_node[1].ast_list is not None and
name in file_uses and
file_uses[name] & USES_DECLARATION
):
msg = ("'{}' forward declared, "
"but already #included in '{}'".format(node.name, name))
self._add_warning(msg, node)
def _find_incorrect_case(self, included_files):
for (filename, node_and_module) in included_files.items():
base_name = os.path.basename(filename)
try:
candidates = os.listdir(os.path.dirname(filename))
except OSError:
continue
correct_filename = get_correct_include_filename(base_name,
candidates)
if correct_filename:
self._add_warning(
"'{}' should be '{}'".format(base_name, correct_filename),
node_and_module[0])
def _find_header_warnings(self):
included_files, forward_declarations = self._read_and_parse_includes()
self._find_unused_warnings(included_files, forward_declarations)
self._find_incorrect_case(included_files)
def _find_public_function_warnings(self, node, name, primary_header,
all_headers):
# Not found in the primary header, search all other headers.
for _, header in all_headers.values():
if name in header.public_symbols:
# If the primary.filename == header.filename, it probably
# indicates an error elsewhere. It sucks to mask it,
# but false positives are worse.
if primary_header:
msg = ("expected to find '{}' in '{}', "
"but found in '{}'".format(name,
primary_header.filename,
header.filename))
self._add_warning(msg, node)
break
else:
where = 'in any directly #included header'
if primary_header:
where = (
"in expected header '{}' "
'or any other directly #included header'.format(
primary_header.filename))
if name != 'main' and name != name.upper():
self._add_warning("'{}' not found {}".format(name, where),
node)
def _check_public_functions(self, primary_header, all_headers):
"""Verify all the public functions are also declared in a header
file."""
public_symbols = {}
declared_only_symbols = {}
if primary_header:
for name, symbol in primary_header.public_symbols.items():
if isinstance(symbol, ast.Function):
public_symbols[name] = symbol
declared_only_symbols = dict.fromkeys(public_symbols, True)
for node in self.ast_list:
# Make sure we have a function that should be exported.
if not isinstance(node, ast.Function):
continue
if isinstance(node, ast.Method):
# Ensure that for Foo::Bar, Foo is *not* a namespace.
# If Foo is a namespace, we have a function and not a method.
names = [n.name for n in node.in_class]
if names != self.symbol_table.get_namespace(names):
continue
if not (node.is_definition() and node.is_exportable()):
continue
# This function should be declared in a header file.
name = node.name
if name in public_symbols:
declared_only_symbols[name] = False
else:
self._find_public_function_warnings(node,
name,
primary_header,
all_headers)
for name, declared_only in declared_only_symbols.items():
if declared_only:
node = public_symbols[name]
if node.templated_types is None:
msg = "'{}' declared but not defined".format(name)
self._add_warning(msg, node, primary_header.filename)
def _get_primary_header(self, included_files):
basename = os.path.basename(os.path.splitext(self.filename)[0])
include_paths = [os.path.dirname(self.filename)] + self.include_paths
source, filename = headers.read_source(basename + '.h', include_paths)
primary_header = included_files.get(filename)
if primary_header:
return primary_header[1]
if source is not None:
msg = "should #include header file '{}'".format(filename)
self.warnings.add((self.filename, 0, msg))
return None
def _find_source_warnings(self):
included_files, forward_declarations = self._read_and_parse_includes()
self._find_incorrect_case(included_files)
for node in forward_declarations.values():
# TODO(nnorwitz): This really isn't a problem, but might
# be something to warn against. I expect this will either
# be configurable or removed in the future. But it's easy
# to check for now.
msg = (
"'{}' forward declaration not expected in source file".format(
node.name))
self._add_warning(msg, node)
# A primary header is optional. However, when looking up
# defined methods in the source, always look in the
# primary_header first. Expect that is the most likely location.
# Use of primary_header is primarily an optimization.
primary_header = self._get_primary_header(included_files)
self._check_public_functions(primary_header, included_files)
if primary_header and primary_header.ast_list is not None:
includes = [
node.filename
for node in primary_header.ast_list
if isinstance(node, ast.Include)
]
for (node, _) in included_files.values():
if node.filename in includes:
msg = "'{}' already #included in '{}'".format(
node.filename, primary_header.filename)
self._add_warning(msg, node)
# TODO(nnorwitz): other warnings to add:
# * unused forward decls for variables (globals)/classes
# * Functions that are too large/complex
# * Variables declared far from first use
# * primitive member variables not initialized in ctor
def get_line_number(metrics_instance, node):
return metrics_instance.get_line_number(node.start)
def get_correct_include_filename(filename, candidate_filenames):
if filename not in candidate_filenames:
for candidate in candidate_filenames:
if filename.lower() == candidate.lower():
return candidate
return None
def run(filename, source, entire_ast, include_paths, quiet):
hunter = WarningHunter(filename, source, entire_ast,
include_paths=include_paths,
quiet=quiet)
hunter.find_warnings()
hunter.show_warnings()
return len(hunter.warnings)
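# Minimal driver sketch (hypothetical file names; reuses the headers/ast
# helpers referenced above):
#
#     source, _ = headers.read_source('widget.cc', ['include'])
#     builder = ast.builder_from_source(source, 'widget.cc', quiet=True)
#     entire_ast = [node for node in builder.generate() if node]
#     num_warnings = run('widget.cc', source, entire_ast,
#                        include_paths=['include'], quiet=True)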
| 16,951 | 6,493 | 192 |
2266f8b954a983ed3c58cd87ec4a30bca2c1d340 | 1,750 | py | Python | sims/s296/mkreconflux.py | ammarhakim/ammar-simjournal | 85b64ddc9556f01a4fab37977864a7d878eac637 | [
"MIT",
"Unlicense"
] | 1 | 2019-12-19T16:21:13.000Z | 2019-12-19T16:21:13.000Z | sims/s296/mkreconflux.py | ammarhakim/ammar-simjournal | 85b64ddc9556f01a4fab37977864a7d878eac637 | [
"MIT",
"Unlicense"
] | null | null | null | sims/s296/mkreconflux.py | ammarhakim/ammar-simjournal | 85b64ddc9556f01a4fab37977864a7d878eac637 | [
"MIT",
"Unlicense"
] | 2 | 2020-01-08T06:23:33.000Z | 2020-01-08T07:06:50.000Z | import numpy
from pylab import *
import tables
rc('text', usetex=True)
Lx = 100.0
Ly = 50.0
B0 = 1/15.0
n0 = 1.0
mu0 = 1.0
elcCharge = -1.0
ionCharge = 1.0
ionMass = 1.0
elcMass = ionMass/25
ionCycl = ionCharge*B0/ionMass
start = 0
end = 100
nFrame = end-start+1
tm = zeros((nFrame,), float)
flx = zeros((nFrame,), float)
count = 0
for i in range(start, end+1):
print ("Working on %d ..." % i)
    fh = tables.openFile("s296-harris-tenmom_q_%d.h5" % i)  # legacy PyTables name; open_file() from PyTables 3.0 on
q = fh.root.StructGridField
nx, ny = q.shape[0], q.shape[1]
    YI = ny//4  # integer row index; plain / yields a float index under Python 3
X = linspace(0, Lx, nx)
Y = linspace(0, Ly, ny)
dx = X[1]-X[0]
dy = Y[1]-Y[0]
tm[count] = fh.root.timeData._v_attrs['vsTime']
    flx[count] = dx*sum(abs(q[0:nx,YI,24]))
    fh.close()  # release the HDF5 handle before opening the next frame
count = count+1
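# Centered finite difference of func(T): returns midpoint times and d(func)/dT.
def calcDeriv(T, func):
    nt = T.shape[0]-1
    tm = numpy.zeros((nt,), float)
    vx = numpy.zeros((nt,), float)
    for i in range(nt):
        tm[i] = 0.5*(T[i+1]+T[i])
        vx[i] = (func[i+1]-func[i])/(T[i+1]-T[i])
    return tm, vx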
tmDiff, flxDiff = calcDeriv(tm, flx)
figure(1)
plot(ionCycl*tm, flx/flx[0]*0.2, '-k', label='$\psi$')
legend(loc='best')
title('$\psi$')
xlabel('Time')
ylabel('$\psi$')
fp = open("s296-byFlux.txt", "w")
for i in range(flx.shape[0]):
fp.writelines("%g %g\n" % (ionCycl*tm[i], flx[i]))
fp.close()
#figure(2)
#plot(ionCycl*tmDiff, flxDiff, '-ko')
#legend(loc='best')
#title('$d\psi/dt$')
#xlabel('Time')
#ylabel('$d\psi/dt$')
show()
| 20.348837 | 58 | 0.562857 | import numpy
from pylab import *
import tables
rc('text', usetex=True)
Lx = 100.0
Ly = 50.0
B0 = 1/15.0
n0 = 1.0
mu0 = 1.0
elcCharge = -1.0
ionCharge = 1.0
ionMass = 1.0
elcMass = ionMass/25
ionCycl = ionCharge*B0/ionMass
start = 0
end = 100
nFrame = end-start+1
tm = zeros((nFrame,), float)
flx = zeros((nFrame,), float)
count = 0
for i in range(start, end+1):
print ("Working on %d ..." % i)
    fh = tables.openFile("s296-harris-tenmom_q_%d.h5" % i)  # legacy PyTables name; open_file() from PyTables 3.0 on
q = fh.root.StructGridField
nx, ny = q.shape[0], q.shape[1]
    YI = ny//4  # integer row index; plain / yields a float index under Python 3
X = linspace(0, Lx, nx)
Y = linspace(0, Ly, ny)
dx = X[1]-X[0]
dy = Y[1]-Y[0]
tm[count] = fh.root.timeData._v_attrs['vsTime']
    flx[count] = dx*sum(abs(q[0:nx,YI,24]))
    fh.close()  # release the HDF5 handle before opening the next frame
count = count+1
def calcDeriv(T, func):
nt = T.shape[0]-1
    tm = numpy.zeros((nt,), float)  # numpy.float was a deprecated alias for the builtin; removed in NumPy 1.24
    vx = numpy.zeros((nt,), float)
for i in range(nt):
tm[i] = 0.5*(T[i+1]+T[i])
vx[i] = (func[i+1]-func[i])/(T[i+1]-T[i])
return tm, vx
tmDiff, flxDiff = calcDeriv(tm, flx)
def findXloc(X, fx, val):
cross = []
for i in range(X.shape[0]-1):
if val>fx[i] and val<fx[i+1]:
cross.append(0.5*(X[i]+X[i+1]))
elif val<fx[i] and val>fx[i+1]:
cross.append(0.5*(X[i]+X[i+1]))
return cross
figure(1)
plot(ionCycl*tm, flx/flx[0]*0.2, '-k', label='$\psi$')
legend(loc='best')
title('$\psi$')
xlabel('Time')
ylabel('$\psi$')
fp = open("s296-byFlux.txt", "w")
for i in range(flx.shape[0]):
fp.writelines("%g %g\n" % (ionCycl*tm[i], flx[i]))
fp.close()
#figure(2)
#plot(ionCycl*tmDiff, flxDiff, '-ko')
#legend(loc='best')
#title('$d\psi/dt$')
#xlabel('Time')
#ylabel('$d\psi/dt$')
show()
| 482 | 0 | 46 |
e8d016a809ef62359bb058fb37178516edf1ec1c | 4,798 | py | Python | main.py | NairVish/rat-gene-annotation | e9b9f0668b38730cb47c302a30d257f3854e7f6a | [
"MIT"
] | null | null | null | main.py | NairVish/rat-gene-annotation | e9b9f0668b38730cb47c302a30d257f3854e7f6a | [
"MIT"
] | null | null | null | main.py | NairVish/rat-gene-annotation | e9b9f0668b38730cb47c302a30d257f3854e7f6a | [
"MIT"
] | null | null | null | import csv
import time
import requests
import argparse
from sys import exit
from typing import List, Optional, Tuple, Any
from bs4 import BeautifulSoup
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input', action='store', type=str, required=True)
parser.add_argument('-o', '--output', action='store', type=str, required=True)
parser.add_argument('-k', '--api_key', action='store', type=str, required=True)
parser.add_argument('-c', '--max_count', action='store', type=int, default=-1)
parser.add_argument('-s', '--start', action='store', type=int, default=-1)
args = parser.parse_args()
# read input file and get header
input_csv_file = open(args.input, 'r')
input_csv_data_reader = csv.reader(input_csv_file, delimiter=",")
csv_header = next(input_csv_data_reader) + ["Gene name", "Gene description", "Strand", "Gene type"]
# get output file ready
output_csv_file = open(args.output, "w", newline='')
output_csv_data_writer = csv.writer(output_csv_file, delimiter=',')
output_csv_data_writer.writerow(csv_header)
rows = []
curr_count = 0
for row in input_csv_data_reader:
curr_count += 1
if args.start != -1 and curr_count < args.start:
print(f"Skipping row #{curr_count}.")
continue
(ret_status, gene_name, ret_row) = get_data_using_row(row)
        if ret_status is False:
print(f"Processing FAILED for #{curr_count}.")
elif ret_status is None:
print("KeyboardInterrupt called.")
exit(0)
else: # ret_status == True
print(f"Processing (#{curr_count}) -- {row[18]} / {gene_name}")
output_csv_data_writer.writerow(ret_row)
# output_csv_file.flush()
if args.max_count != -1 and curr_count >= args.max_count:
break
# close the input and output files
input_csv_file.close()
output_csv_file.close()
| 35.540741 | 160 | 0.647561 | import csv
import time
import requests
import argparse
from sys import exit
from typing import List, Optional, Tuple, Any
from bs4 import BeautifulSoup
def get_ensembl_data(transcript_id: str) -> Tuple[Optional[str], Optional[str], Optional[str], Optional[str]]:
gene_name, gene_desc, gene_type, strand = None, None, None, None
transcript_id = transcript_id.split(".")[0]
FETCH_URL = f"https://rest.ensembl.org/lookup/id/{transcript_id}"
r = requests.get(FETCH_URL, headers={ "Content-Type" : "application/json"})
try:
parent_id = r.json()["Parent"]
except KeyError:
return gene_name, gene_desc, gene_type, strand
FETCH_URL = f"https://rest.ensembl.org/lookup/id/{parent_id}"
r = requests.get(FETCH_URL, headers={ "Content-Type" : "application/json"})
j = r.json()
try:
gene_name = j["display_name"]
gene_desc = j["description"]
gene_type = j["biotype"]
strand = j["strand"]
except KeyError as e:
pass
return gene_name, gene_desc, gene_type, strand
def get_genbank_data(transcript_id: str) -> Tuple[Optional[str], Optional[str], Optional[str], Optional[str]]:
gene_name, gene_desc, gene_type, strand = None, None, None, None
ID_FETCH_URL = f"https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?dbfrom=nuccore&db=gene&term={transcript_id}&retmode=json&api_key={args.api_key}"
id_json = requests.get(ID_FETCH_URL).json()
try:
gid = id_json["esearchresult"]["idlist"][0]
except (KeyError, IndexError):
return gene_name, gene_desc, gene_type, strand
time.sleep(0.04) # rate limit
GENE_FETCH_URL = f"https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=gene&id={gid}&retmode=xml&api_key={args.api_key}"
gene_fetch_results = requests.get(GENE_FETCH_URL).text
soup = BeautifulSoup(gene_fetch_results, features='xml')
try:
gene_name = soup.findAll("Gene-ref_locus")[0].get_text()
gene_desc = soup.findAll("Gene-ref_desc")[0].get_text()
gene_type = soup.findAll("Entrezgene_type")[0]["value"]
strand = soup.findAll("Na-strand")[0]["value"]
except (KeyError, IndexError) as e:
pass
return gene_name, gene_desc, gene_type, strand
def get_data_using_row(row: List[Any]) -> Tuple[Optional[bool], str, List[Any]]:
try:
if " of " in row[12]:
row[11] = row[11] + row[12]
del row[12]
transcript_id = row[18]
func = get_ensembl_data if transcript_id.startswith("ENST") else get_genbank_data
gene_name, gene_desc, gene_type, strand = func(transcript_id)
time.sleep(0.04) # rate limit
row += [gene_name, gene_desc, strand, gene_type]
return (True, gene_name, row)
except KeyboardInterrupt:
return (None, "", row)
except Exception as e:
# row.append(f"No result.")
return (False, "", row)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input', action='store', type=str, required=True)
parser.add_argument('-o', '--output', action='store', type=str, required=True)
parser.add_argument('-k', '--api_key', action='store', type=str, required=True)
parser.add_argument('-c', '--max_count', action='store', type=int, default=-1)
parser.add_argument('-s', '--start', action='store', type=int, default=-1)
args = parser.parse_args()
# read input file and get header
input_csv_file = open(args.input, 'r')
input_csv_data_reader = csv.reader(input_csv_file, delimiter=",")
csv_header = next(input_csv_data_reader) + ["Gene name", "Gene description", "Strand", "Gene type"]
# get output file ready
output_csv_file = open(args.output, "w", newline='')
output_csv_data_writer = csv.writer(output_csv_file, delimiter=',')
output_csv_data_writer.writerow(csv_header)
rows = []
curr_count = 0
for row in input_csv_data_reader:
curr_count += 1
if args.start != -1 and curr_count < args.start:
print(f"Skipping row #{curr_count}.")
continue
(ret_status, gene_name, ret_row) = get_data_using_row(row)
        if ret_status is False:
print(f"Processing FAILED for #{curr_count}.")
elif ret_status is None:
print("KeyboardInterrupt called.")
exit(0)
else: # ret_status == True
print(f"Processing (#{curr_count}) -- {row[18]} / {gene_name}")
output_csv_data_writer.writerow(ret_row)
# output_csv_file.flush()
if args.max_count != -1 and curr_count >= args.max_count:
break
# close the input and output files
input_csv_file.close()
output_csv_file.close()
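# Example invocations (hypothetical file names; -k takes an NCBI E-utilities
# API key, -s skips to a starting row, -c caps how many rows are processed):
#   python main.py -i transcripts.csv -o annotated.csv -k <API_KEY>
#   python main.py -i transcripts.csv -o annotated.csv -k <API_KEY> -s 500 -c 1000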
| 2,709 | 0 | 69 |
a053594c48b4b655f20976e125386814a1da178f | 1,944 | py | Python | tests/test_cell.py | billwright/GridPuzzles | 9660880748c656d1a9ac6205eff8cfd22555e069 | [
"MIT"
] | 1 | 2021-01-14T00:29:52.000Z | 2021-01-14T00:29:52.000Z | tests/test_cell.py | billwright/GridPuzzles | 9660880748c656d1a9ac6205eff8cfd22555e069 | [
"MIT"
] | null | null | null | tests/test_cell.py | billwright/GridPuzzles | 9660880748c656d1a9ac6205eff8cfd22555e069 | [
"MIT"
] | null | null | null | import unittest
from Cell import Cell
from Blanking_Cell_Exception import Blanking_Cell_Exception
if __name__ == '__main__':
unittest.main()
| 32.4 | 82 | 0.633745 | import unittest
from Cell import Cell
from Blanking_Cell_Exception import Blanking_Cell_Exception
class TestCell(unittest.TestCase):
def test_cell_creation(self):
cell = Cell('A1', '1234')
self.assertIsNotNone(cell)
self.assertEqual('A1', cell.address)
self.assertEqual('1234', cell.candidates)
def test_remove_candidates(self):
cell = Cell('A1', '1234')
cell.remove_candidates('4')
self.assertEqual('123', cell.candidates)
cell.remove_candidates('13')
self.assertEqual('2', cell.candidates)
def test_size(self):
cell = Cell('A1', '1234')
self.assertEqual(4, cell.get_size())
cell = Cell('A1', '3')
self.assertEqual(1, cell.get_size())
def test_protection_against_no_candidates(self):
with self.assertRaises(Blanking_Cell_Exception):
Cell('A1', '')
cell = Cell('A1', '1234')
with self.assertRaises(AttributeError):
cell.candidates = ''
with self.assertRaises(Blanking_Cell_Exception):
cell.set_candidates('')
with self.assertRaises(Blanking_Cell_Exception):
cell.remove_candidates('1234')
cell = Cell('A1', '14')
with self.assertRaises(Blanking_Cell_Exception):
cell.remove_candidates('1234')
def test_cell_distance(self):
# The Cell candidates are irrelevant here, but something must be passed in
cell = Cell('A1', '1')
self.assertEqual(0, cell.distance_to_cell(cell))
self.assertEqual(1, cell.distance_to_cell(Cell('A2', '1')))
self.assertEqual(1, cell.distance_to_cell(Cell('B1', '1')))
self.assertEqual(8, cell.distance_to_cell(Cell('A9', '1')))
self.assertEqual(8, cell.distance_to_cell(Cell('I1', '1')))
self.assertEqual(8, cell.distance_to_cell(Cell('E5', '1')))
if __name__ == '__main__':
unittest.main()
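# Runs the suite directly; equivalently: python -m unittest tests.test_cell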
| 1,616 | 13 | 166 |
c11f5d238f7a33c8a3ecbcbde814b2502bb65588 | 3,790 | py | Python | tearing/graph_modules.py | DeVriesMatt/pointMLP-pytorch | e9c09a2038551e83b072353f3fd7e3294463e892 | [
"Apache-2.0"
] | null | null | null | tearing/graph_modules.py | DeVriesMatt/pointMLP-pytorch | e9c09a2038551e83b072353f3fd7e3294463e892 | [
"Apache-2.0"
] | null | null | null | tearing/graph_modules.py | DeVriesMatt/pointMLP-pytorch | e9c09a2038551e83b072353f3fd7e3294463e892 | [
"Apache-2.0"
] | null | null | null | import torch
from torch import nn
| 36.095238 | 88 | 0.473879 | import torch
from torch import nn
class GraphFilter(nn.Module):
def __init__(self, grid_dims, graph_r, graph_eps, graph_lam):
super(GraphFilter, self).__init__()
self.grid_dims = grid_dims
self.graph_r = graph_r
self.graph_eps_sqr = graph_eps * graph_eps
self.graph_lam = graph_lam
def forward(self, grid, pc):
# Data preparation
bs_cur = pc.shape[0]
grid_exp = grid.contiguous().view(
bs_cur, self.grid_dims[0], self.grid_dims[1], 2
) # batch_size X dim0 X dim1 X 2
pc_exp = pc.contiguous().view(
bs_cur, self.grid_dims[0], self.grid_dims[1], 3
) # batch_size X dim0 X dim1 X 3
graph_feature = torch.cat((grid_exp, pc_exp), dim=3).permute([0, 3, 1, 2])
# Compute the graph weights
wght_hori = (
graph_feature[:, :, :-1, :] - graph_feature[:, :, 1:, :]
) # horizontal weights
wght_vert = (
graph_feature[:, :, :, :-1] - graph_feature[:, :, :, 1:]
) # vertical weights
wght_hori = torch.exp(
-torch.sum(wght_hori * wght_hori, dim=1) / self.graph_eps_sqr
) # Gaussian weight
wght_vert = torch.exp(
-torch.sum(wght_vert * wght_vert, dim=1) / self.graph_eps_sqr
)
wght_hori = (wght_hori > self.graph_r) * wght_hori
wght_vert = (wght_vert > self.graph_r) * wght_vert
wght_lft = torch.cat(
(torch.zeros([bs_cur, 1, self.grid_dims[1]]).cuda(), wght_hori), 1
) # add left
wght_rgh = torch.cat(
(wght_hori, torch.zeros([bs_cur, 1, self.grid_dims[1]]).cuda()), 1
) # add right
wght_top = torch.cat(
(torch.zeros([bs_cur, self.grid_dims[0], 1]).cuda(), wght_vert), 2
) # add top
wght_bot = torch.cat(
(wght_vert, torch.zeros([bs_cur, self.grid_dims[0], 1]).cuda()), 2
) # add bottom
wght_all = torch.cat(
(
wght_lft.unsqueeze(1),
wght_rgh.unsqueeze(1),
wght_top.unsqueeze(1),
wght_bot.unsqueeze(1),
),
1,
)
        # Perform the actual graph filtering: x = (I - \lambda L) * x
wght_hori = wght_hori.unsqueeze(1).expand(-1, 3, -1, -1) # dimension expansion
wght_vert = wght_vert.unsqueeze(1).expand(-1, 3, -1, -1)
pc = (
pc.permute([0, 2, 1])
.contiguous()
.view(bs_cur, 3, self.grid_dims[0], self.grid_dims[1])
)
pc_filt = (
torch.cat(
(
torch.zeros([bs_cur, 3, 1, self.grid_dims[1]]).cuda(),
pc[:, :, :-1, :] * wght_hori,
),
2,
)
+ torch.cat(
(
pc[:, :, 1:, :] * wght_hori,
torch.zeros([bs_cur, 3, 1, self.grid_dims[1]]).cuda(),
),
2,
)
+ torch.cat(
(
torch.zeros([bs_cur, 3, self.grid_dims[0], 1]).cuda(),
pc[:, :, :, :-1] * wght_vert,
),
3,
)
+ torch.cat(
(
pc[:, :, :, 1:] * wght_vert,
torch.zeros([bs_cur, 3, self.grid_dims[0], 1]).cuda(),
),
3,
)
) # left, right, top, bottom
pc_filt = pc + self.graph_lam * (
pc_filt - torch.sum(wght_all, dim=1).unsqueeze(1).expand(-1, 3, -1, -1) * pc
) # equivalent to ( I - \lambda L) * x
pc_filt = pc_filt.view(bs_cur, 3, -1).permute([0, 2, 1])
return pc_filt, wght_all
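# Usage sketch (assumed sizes; a CUDA device is required because the internal
# buffers are created with .cuda()):
#
#     gf = GraphFilter(grid_dims=(45, 45), graph_r=1e-12,
#                      graph_eps=0.02, graph_lam=0.5)
#     grid = torch.rand(8, 45 * 45, 2).cuda()  # 2-D grid coordinates
#     pc = torch.rand(8, 45 * 45, 3).cuda()    # one 3-D point per grid node
#     pc_filt, wght_all = gf(grid, pc)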
| 3,671 | 8 | 76 |
ebaf6c62d58d48f5c9b9b30a6c3488240ae105a8 | 7,090 | py | Python | machine_learning/ml_ch4.py | ivanlevsky/cowabunga-potato | ab317582b7b8f99d7be3ea4f5edbe9829fc398fb | [
"MIT"
] | null | null | null | machine_learning/ml_ch4.py | ivanlevsky/cowabunga-potato | ab317582b7b8f99d7be3ea4f5edbe9829fc398fb | [
"MIT"
] | null | null | null | machine_learning/ml_ch4.py | ivanlevsky/cowabunga-potato | ab317582b7b8f99d7be3ea4f5edbe9829fc398fb | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression, Ridge
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures, StandardScaler
# ---------------------------The Normal Equation---------------------
# X = 2 * np.random.rand(100, 1)
# y = 4 + 3 * X + np.random.randn(100, 1)
#
# X_b = np.c_[np.ones((100, 1)), X] # add x0 = 1 to each instance
# theta_best = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y)
# X_new = np.array([[0], [2]])
# X_new_b = np.c_[np.ones((2, 1)), X_new] # add x0 = 1 to each instance
# y_predict = X_new_b.dot(theta_best)
# plt.plot(X_new, y_predict, "r-", linewidth=2, label="Predictions")
# plt.plot(X, y, "b.")
# plt.xlabel("$x_1$", fontsize=18)
# plt.ylabel("$y$", rotation=0, fontsize=18)
# plt.legend(loc="upper left", fontsize=14)
# plt.axis([0, 2, 0, 15])
# plt.show()
# lin_reg = LinearRegression()
# lin_reg.fit(X, y)
# print(lin_reg.intercept_, lin_reg.coef_)
# print(lin_reg.predict(X_new))
# -------------------------Gradient Descent---------------------
# def plot_gradient_descent(theta, eta, theta_path=None):
# m = len(X_b)
# plt.plot(X, y, "b.")
# n_iterations = 1000
# for iteration in range(n_iterations):
# if iteration < 10:
# y_predict = X_new_b.dot(theta)
# style = "b-" if iteration > 0 else "r--"
# plt.plot(X_new, y_predict, style)
# gradients = 2 / m * X_b.T.dot(X_b.dot(theta) - y)
# theta = theta - eta * gradients
# if theta_path is not None:
# theta_path.append(theta)
# plt.xlabel("$x_1$", fontsize=18)
# plt.axis([0, 2, 0, 15])
# plt.title(r"$\eta = {}$".format(eta), fontsize=16)
# theta_path_bgd = []
# np.random.seed(42)
# theta = np.random.randn(2, 1) # random initialization
# plt.figure(figsize=(10, 4))
# plt.subplot(131);plot_gradient_descent(theta, eta=0.02)
# plt.ylabel("$y$", rotation=0, fontsize=18)
# plt.subplot(132);plot_gradient_descent(theta, eta=0.1, theta_path=theta_path_bgd)
# plt.subplot(133);plot_gradient_descent(theta, eta=0.5)
# plt.show()
# -------------------------Stochastic Gradient Descent-------------------------
# theta_path_sgd = []
# m = len(X_b)
# np.random.seed(42)
# n_epochs = 50
# t0, t1 = 5, 50 # learning schedule hyperparameters
# def learning_schedule(t):
# return t0 / (t + t1)
# theta = np.random.randn(2,1) # random initialization
# for epoch in range(n_epochs):
# for i in range(m):
# if epoch == 0 and i < 20: # not shown in the book
# y_predict = X_new_b.dot(theta) # not shown
# style = "b-" if i > 0 else "r--" # not shown
# plt.plot(X_new, y_predict, style) # not shown
# random_index = np.random.randint(m)
# xi = X_b[random_index:random_index+1]
# yi = y[random_index:random_index+1]
# gradients = 2 * xi.T.dot(xi.dot(theta) - yi)
# eta = learning_schedule(epoch * m + i)
# theta = theta - eta * gradients
# theta_path_sgd.append(theta) # not shown
# plt.plot(X, y, "b.") # not shown
# plt.xlabel("$x_1$", fontsize=18) # not shown
# plt.ylabel("$y$", rotation=0, fontsize=18) # not shown
# plt.axis([0, 2, 0, 15]) # not shown
# plt.show()
# print(theta)
# -------------------------Polynomial Regression-------------------------
# np.random.seed(42)
# m = 100
# X = 6 * np.random.rand(m, 1) - 3
# y = 0.5 * X**2 + X + 2 + np.random.randn(m, 1)
#
# poly_features = PolynomialFeatures(degree=2, include_bias=False)
# X_poly = poly_features.fit_transform(X)
# lin_reg = LinearRegression()
# lin_reg.fit(X_poly, y)
#
# X_new=np.linspace(-3, 3, 100).reshape(100, 1)
# X_new_poly = poly_features.transform(X_new)
# y_new = lin_reg.predict(X_new_poly)
# for style, width, degree in (("g-", 1, 300), ("b--", 2, 2), ("r-+", 2, 1)):
# polybig_features = PolynomialFeatures(degree=degree, include_bias=False)
# std_scaler = StandardScaler()
# lin_reg = LinearRegression()
# polynomial_regression = Pipeline([
# ("poly_features", polybig_features),
# ("std_scaler", std_scaler),
# ("lin_reg", lin_reg),
# ])
# polynomial_regression.fit(X, y)
# y_newbig = polynomial_regression.predict(X_new)
# plt.plot(X_new, y_newbig, style, label=str(degree), linewidth=width)
#
# plt.plot(X, y, "b.", linewidth=3)
# plt.legend(loc="upper left")
# plt.xlabel("$x_1$", fontsize=18)
# plt.ylabel("$y$", rotation=0, fontsize=18)
# plt.axis([-3, 3, 0, 10])
# plt.show()
# -------------------------Learning Curves-------------------------
# def plot_learning_curves(model, X, y):
# X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2)
# train_errors, val_errors = [], []
# for m in range(1, len(X_train)):
# model.fit(X_train[:m], y_train[:m])
# y_train_predict = model.predict(X_train[:m])
# y_val_predict = model.predict(X_val)
# train_errors.append(mean_squared_error(y_train_predict, y_train[:m]))
# val_errors.append(mean_squared_error(y_val_predict, y_val))
# plt.plot(np.sqrt(train_errors), "r-+", linewidth=2, label="train")
# plt.plot(np.sqrt(val_errors), "b-", linewidth=3, label="val")
#
#
# polynomial_regression = Pipeline([
# ("poly_features", PolynomialFeatures(degree=10, include_bias=False)),
# ("lin_reg", LinearRegression()),
# ])
# plot_learning_curves(polynomial_regression, X, y)
# plt.axis([0, 80, 0, 3]) # not shown in the book
# plt.show()
# -------------------------Regularized Linear Model-------------------------
np.random.seed(42)
m = 20
X = 3 * np.random.rand(m, 1)
y = 1 + 0.5 * X + np.random.randn(m, 1) / 1.5
X_new = np.linspace(0, 3, 100).reshape(100, 1)
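def plot_model(model_class, polynomial, alphas, **model_kargs):
    # Fit model_class for each alpha (alpha=0 falls back to plain
    # LinearRegression), optionally inside a degree-10 polynomial pipeline,
    # and draw its predictions over [0, 3].
    for alpha, style in zip(alphas, ("b-", "g--", "r:")):
        model = model_class(alpha, **model_kargs) if alpha > 0 else LinearRegression()
        if polynomial:
            model = Pipeline([
                ("poly_features", PolynomialFeatures(degree=10, include_bias=False)),
                ("std_scaler", StandardScaler()),
                ("regul_reg", model),
                ])
        model.fit(X, y)
        y_new_regul = model.predict(X_new)
        lw = 2 if alpha > 0 else 1
        plt.plot(X_new, y_new_regul, style, linewidth=lw, label=r"$\alpha = {}$".format(alpha))
    plt.plot(X, y, "b.", linewidth=3)
    plt.legend(loc="upper left", fontsize=15)
    plt.xlabel("$x_1$", fontsize=18)
    plt.axis([0, 3, 0, 4])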
plt.figure(figsize=(8,4))
plt.subplot(121)
plot_model(Ridge, polynomial=False, alphas=(0, 10, 100), random_state=42)
plt.ylabel("$y$", rotation=0, fontsize=18)
plt.subplot(122)
plot_model(Ridge, polynomial=True, alphas=(0, 10**-5, 1), random_state=42)
plt.show()
| 37.315789 | 95 | 0.591255 | import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression, Ridge
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures, StandardScaler
# ---------------------------The Normal Equation---------------------
# X = 2 * np.random.rand(100, 1)
# y = 4 + 3 * X + np.random.randn(100, 1)
#
# X_b = np.c_[np.ones((100, 1)), X] # add x0 = 1 to each instance
# theta_best = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y)
# X_new = np.array([[0], [2]])
# X_new_b = np.c_[np.ones((2, 1)), X_new] # add x0 = 1 to each instance
# y_predict = X_new_b.dot(theta_best)
# plt.plot(X_new, y_predict, "r-", linewidth=2, label="Predictions")
# plt.plot(X, y, "b.")
# plt.xlabel("$x_1$", fontsize=18)
# plt.ylabel("$y$", rotation=0, fontsize=18)
# plt.legend(loc="upper left", fontsize=14)
# plt.axis([0, 2, 0, 15])
# plt.show()
# lin_reg = LinearRegression()
# lin_reg.fit(X, y)
# print(lin_reg.intercept_, lin_reg.coef_)
# print(lin_reg.predict(X_new))
# -------------------------Gradient Descent---------------------
# def plot_gradient_descent(theta, eta, theta_path=None):
# m = len(X_b)
# plt.plot(X, y, "b.")
# n_iterations = 1000
# for iteration in range(n_iterations):
# if iteration < 10:
# y_predict = X_new_b.dot(theta)
# style = "b-" if iteration > 0 else "r--"
# plt.plot(X_new, y_predict, style)
# gradients = 2 / m * X_b.T.dot(X_b.dot(theta) - y)
# theta = theta - eta * gradients
# if theta_path is not None:
# theta_path.append(theta)
# plt.xlabel("$x_1$", fontsize=18)
# plt.axis([0, 2, 0, 15])
# plt.title(r"$\eta = {}$".format(eta), fontsize=16)
# theta_path_bgd = []
# np.random.seed(42)
# theta = np.random.randn(2, 1) # random initialization
# plt.figure(figsize=(10, 4))
# plt.subplot(131);plot_gradient_descent(theta, eta=0.02)
# plt.ylabel("$y$", rotation=0, fontsize=18)
# plt.subplot(132);plot_gradient_descent(theta, eta=0.1, theta_path=theta_path_bgd)
# plt.subplot(133);plot_gradient_descent(theta, eta=0.5)
# plt.show()
# -------------------------Stochastic Gradient Descent-------------------------
# theta_path_sgd = []
# m = len(X_b)
# np.random.seed(42)
# n_epochs = 50
# t0, t1 = 5, 50 # learning schedule hyperparameters
# def learning_schedule(t):
# return t0 / (t + t1)
# theta = np.random.randn(2,1) # random initialization
# for epoch in range(n_epochs):
# for i in range(m):
# if epoch == 0 and i < 20: # not shown in the book
# y_predict = X_new_b.dot(theta) # not shown
# style = "b-" if i > 0 else "r--" # not shown
# plt.plot(X_new, y_predict, style) # not shown
# random_index = np.random.randint(m)
# xi = X_b[random_index:random_index+1]
# yi = y[random_index:random_index+1]
# gradients = 2 * xi.T.dot(xi.dot(theta) - yi)
# eta = learning_schedule(epoch * m + i)
# theta = theta - eta * gradients
# theta_path_sgd.append(theta) # not shown
# plt.plot(X, y, "b.") # not shown
# plt.xlabel("$x_1$", fontsize=18) # not shown
# plt.ylabel("$y$", rotation=0, fontsize=18) # not shown
# plt.axis([0, 2, 0, 15]) # not shown
# plt.show()
# print(theta)
# -------------------------Polynomial Regression-------------------------
# np.random.seed(42)
# m = 100
# X = 6 * np.random.rand(m, 1) - 3
# y = 0.5 * X**2 + X + 2 + np.random.randn(m, 1)
#
# poly_features = PolynomialFeatures(degree=2, include_bias=False)
# X_poly = poly_features.fit_transform(X)
# lin_reg = LinearRegression()
# lin_reg.fit(X_poly, y)
#
# X_new=np.linspace(-3, 3, 100).reshape(100, 1)
# X_new_poly = poly_features.transform(X_new)
# y_new = lin_reg.predict(X_new_poly)
# for style, width, degree in (("g-", 1, 300), ("b--", 2, 2), ("r-+", 2, 1)):
# polybig_features = PolynomialFeatures(degree=degree, include_bias=False)
# std_scaler = StandardScaler()
# lin_reg = LinearRegression()
# polynomial_regression = Pipeline([
# ("poly_features", polybig_features),
# ("std_scaler", std_scaler),
# ("lin_reg", lin_reg),
# ])
# polynomial_regression.fit(X, y)
# y_newbig = polynomial_regression.predict(X_new)
# plt.plot(X_new, y_newbig, style, label=str(degree), linewidth=width)
#
# plt.plot(X, y, "b.", linewidth=3)
# plt.legend(loc="upper left")
# plt.xlabel("$x_1$", fontsize=18)
# plt.ylabel("$y$", rotation=0, fontsize=18)
# plt.axis([-3, 3, 0, 10])
# plt.show()
# -------------------------Learning Curves-------------------------
# def plot_learning_curves(model, X, y):
# X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2)
# train_errors, val_errors = [], []
# for m in range(1, len(X_train)):
# model.fit(X_train[:m], y_train[:m])
# y_train_predict = model.predict(X_train[:m])
# y_val_predict = model.predict(X_val)
# train_errors.append(mean_squared_error(y_train_predict, y_train[:m]))
# val_errors.append(mean_squared_error(y_val_predict, y_val))
# plt.plot(np.sqrt(train_errors), "r-+", linewidth=2, label="train")
# plt.plot(np.sqrt(val_errors), "b-", linewidth=3, label="val")
#
#
# polynomial_regression = Pipeline([
# ("poly_features", PolynomialFeatures(degree=10, include_bias=False)),
# ("lin_reg", LinearRegression()),
# ])
# plot_learning_curves(polynomial_regression, X, y)
# plt.axis([0, 80, 0, 3]) # not shown in the book
# plt.show()
# -------------------------Regularized Linear Model-------------------------
np.random.seed(42)
m = 20
X = 3 * np.random.rand(m, 1)
y = 1 + 0.5 * X + np.random.randn(m, 1) / 1.5
X_new = np.linspace(0, 3, 100).reshape(100, 1)
def plot_model(model_class, polynomial, alphas, **model_kargs):
for alpha, style in zip(alphas, ("b-", "g--", "r:")):
model = model_class(alpha, **model_kargs) if alpha > 0 else LinearRegression()
if polynomial:
model = Pipeline([
("poly_features", PolynomialFeatures(degree=10, include_bias=False)),
("std_scaler", StandardScaler()),
("regul_reg", model),
])
model.fit(X, y)
y_new_regul = model.predict(X_new)
lw = 2 if alpha > 0 else 1
plt.plot(X_new, y_new_regul, style, linewidth=lw, label=r"$\alpha = {}$".format(alpha))
plt.plot(X, y, "b.", linewidth=3)
plt.legend(loc="upper left", fontsize=15)
plt.xlabel("$x_1$", fontsize=18)
plt.axis([0, 3, 0, 4])
plt.figure(figsize=(8,4))
plt.subplot(121)
plot_model(Ridge, polynomial=False, alphas=(0, 10, 100), random_state=42)
plt.ylabel("$y$", rotation=0, fontsize=18)
plt.subplot(122)
plot_model(Ridge, polynomial=True, alphas=(0, 10**-5, 1), random_state=42)
plt.show()
| 776 | 0 | 23 |
922c6d09b01b7f8d5feb3572846faf5f1552685f | 577 | py | Python | vendor/haproxy-1.9.1/tests/test-sockpair.py | junsulee/c-goof | 240c979dd014ed3bb9c8dddf5d0e66afb6a8c2f2 | [
"Apache-2.0"
] | 2 | 2021-11-25T13:42:35.000Z | 2022-02-05T07:58:14.000Z | vendor/haproxy-1.9.1/tests/test-sockpair.py | junsulee/c-goof | 240c979dd014ed3bb9c8dddf5d0e66afb6a8c2f2 | [
"Apache-2.0"
] | null | null | null | vendor/haproxy-1.9.1/tests/test-sockpair.py | junsulee/c-goof | 240c979dd014ed3bb9c8dddf5d0e66afb6a8c2f2 | [
"Apache-2.0"
] | 15 | 2021-11-24T15:40:54.000Z | 2022-03-02T09:17:03.000Z | #!/usr/bin/python
"""
Python wrapper example to test socketpair protocol
./test-socketpair.py test.cfg
use sockpair@${FD1} and sockpair@${FD2} in your configuration file
"""
import socket, os, sys
s = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
os.set_inheritable(s[0].fileno(), 1)
os.set_inheritable(s[1].fileno(), 1)
FD1 = s[0].fileno()
FD2 = s[1].fileno()
print("FD1={} FD2={}".format(FD1, FD2))
os.environ["FD1"] = str(FD1)
os.environ["FD2"] = str(FD2)
cmd = ["./haproxy",
"-f",
"{}".format(sys.argv[1])
]
os.execve(cmd[0], cmd, os.environ)
| 19.896552 | 66 | 0.651646 | #!/usr/bin/python
"""
Python wrapper example to test socketpair protocol
./test-socketpair.py test.cfg
use sockpair@${FD1} and sockpair@${FD2} in your configuration file
"""
import socket, os, sys
s = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
os.set_inheritable(s[0].fileno(), 1)
os.set_inheritable(s[1].fileno(), 1)
FD1 = s[0].fileno()
FD2 = s[1].fileno()
print("FD1={} FD2={}".format(FD1, FD2))
os.environ["FD1"] = str(FD1)
os.environ["FD2"] = str(FD2)
cmd = ["./haproxy",
"-f",
"{}".format(sys.argv[1])
]
os.execve(cmd[0], cmd, os.environ)
| 0 | 0 | 0 |
24f33a3e9b503bf6b6318334720a5eb6ff38c001 | 224 | py | Python | output/models/sun_data/elem_decl/abstract/abstract00101m/abstract00101m_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 1 | 2021-08-14T17:59:21.000Z | 2021-08-14T17:59:21.000Z | output/models/sun_data/elem_decl/abstract/abstract00101m/abstract00101m_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 4 | 2020-02-12T21:30:44.000Z | 2020-04-15T20:06:46.000Z | output/models/sun_data/elem_decl/abstract/abstract00101m/abstract00101m_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | null | null | null | from output.models.sun_data.elem_decl.abstract.abstract00101m.abstract00101m_xsd.abstract00101m import (
Head,
HeadType,
Member1,
Root,
)
__all__ = [
"Head",
"HeadType",
"Member1",
"Root",
]
| 16 | 104 | 0.651786 | from output.models.sun_data.elem_decl.abstract.abstract00101m.abstract00101m_xsd.abstract00101m import (
Head,
HeadType,
Member1,
Root,
)
__all__ = [
"Head",
"HeadType",
"Member1",
"Root",
]
| 0 | 0 | 0 |
55bc93f1f28a4d3cfdbf2ceae0689c9e65f3b7d4 | 10,760 | py | Python | cmdb/models.py | proffalken/edison | 5bfa941f8876cb8698cd8009c4514bc03d24c109 | [
"BSD-3-Clause"
] | 3 | 2015-11-05T07:29:00.000Z | 2021-06-17T23:44:17.000Z | cmdb/models.py | proffalken/edison | 5bfa941f8876cb8698cd8009c4514bc03d24c109 | [
"BSD-3-Clause"
] | 1 | 2016-05-04T10:54:48.000Z | 2016-05-04T10:54:56.000Z | cmdb/models.py | proffalken/edison | 5bfa941f8876cb8698cd8009c4514bc03d24c109 | [
"BSD-3-Clause"
] | null | null | null | # This file is part of the Edison Project.
# Please refer to the LICENSE document that was supplied with this software for information on how it can be used.
from django.db import models
from django.contrib.auth.models import User
# These are the models required for the basic CMDB
# First, Define our list of countries
# Now define the counties/States that we can use
# Where do people/things live?
# What companies are there that we might want to talk to?
# A list of all our contacts both within and external to the company we work for
# Our Datacentres
# The rooms in the datacentres
# The suites in the datacentres
# The racks in the suites in the rooms in the datacentres....
# The different classes of configuration items
# The network interfaces that are assigned to configuration items
# the following classes are based on the libvirt xml standard, although they do not contain all the possible options
# Configuration Item Profiles
# The configuration items (servers/switches etc)
| 34.15873 | 205 | 0.683922 | # This file is part of the Edison Project.
# Please refer to the LICENSE document that was supplied with this software for information on how it can be used.
from django.db import models
from django.contrib.auth.models import User
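# NOTE: this module appears to target Django 1.x on Python 2 (__unicode__
# methods, IPAddressField); on modern Django use __str__ and
# GenericIPAddressField instead.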
# These are the models required for the basic CMDB
# First, Define our list of countries
class Country(models.Model):
Name = models.CharField(max_length=255)
Code = models.CharField(max_length=3)
def __unicode__(self):
return self.Code
class Meta:
#permissions = ()
verbose_name = 'Country'
verbose_name_plural = 'Countries'
ordering = ['Name']
# Now define the counties/States that we can use
class County(models.Model):
Name = models.CharField(max_length=128)
Country = models.ForeignKey('Country')
def __unicode__(self):
return self.Name
class Meta:
#permissions = ()
verbose_name = 'County'
verbose_name_plural = 'Counties'
ordering = ['Name']
# Where do people/things live?
class Address(models.Model):
LineOne = models.CharField(max_length=128)
LineTwo = models.CharField(max_length=128,blank=True)
LineThree = models.CharField(max_length=128,blank=True)
Postcode = models.CharField(max_length=15)
County = models.ForeignKey('County')
Country = models.ForeignKey('Country')
def __unicode__(self):
return u'%s, %s, %s' % (self.LineOne, self.County, self.Postcode)
class Meta:
#permissions = ()
verbose_name = 'Address'
verbose_name_plural = 'Addresses'
ordering = ['LineOne']
# What companies are there that we might want to talk to?
class Company(models.Model):
Name = models.CharField(max_length=255)
HeadOffice = models.ForeignKey('Address')
SupportNumber = models.CharField(max_length=50)
SupportEmail = models.EmailField()
def __unicode__(self):
return self.Name
class Meta:
#permissions = ()
verbose_name = 'Company'
verbose_name_plural = 'Companies'
ordering = ['Name']
# A list of all our contacts both within and external to the company we work for
class Contact(models.Model):
TITLE_CHOICES = (
('Mr','Mr'),
('Mrs','Mrs'),
('Miss','Miss'),
('Ms','Ms')
)
Title = models.CharField(max_length=6,choices=TITLE_CHOICES)
FirstName = models.CharField(max_length=128)
LastName = models.CharField(max_length=128)
PrimaryPhone = models.CharField(max_length=50)
EmailAddress = models.EmailField()
Company = models.ForeignKey('Company')
def __unicode__(self):
return u'%s %s %s' % (self.Title, self.FirstName, self.LastName)
class Meta:
#permissions = ()
verbose_name = 'Contact'
verbose_name_plural = 'Contacts'
ordering = ['FirstName']
# Our Datacentres
class DataCentre(models.Model):
Name = models.CharField(max_length=255)
ShortCode = models.CharField(max_length=10)
Address = models.ForeignKey('Address')
PrincipleContact = models.ForeignKey('Contact')
def __unicode__(self):
return self.ShortCode
class Meta:
#permissions = ()
verbose_name = 'Data Centre'
verbose_name_plural = 'Data Centres'
ordering = ['Name']
# The rooms in the datacentres
class DataCentreRoom(models.Model):
RoomName = models.CharField(max_length=25)
DataCentre = models.ForeignKey('DataCentre')
def __unicode__(self):
return u'%s in %s' % (self.RoomName, self.DataCentre)
class Meta:
#permissions = ()
verbose_name = 'Data Centre Room'
verbose_name_plural = 'Data Centre Rooms'
ordering = ['RoomName']
# The suites in the datacentres
class DataCentreSuite(models.Model):
SuiteName = models.CharField(max_length=128)
Room = models.ForeignKey('DataCentreRoom')
def __unicode__(self):
return u'%s -> %s' % (self.SuiteName, self.Room)
class Meta:
#permissions = ()
verbose_name = 'Data Centre Suite'
verbose_name_plural = 'Data Centre Suites'
ordering = ['SuiteName']
# The racks in the suites in the rooms in the datacentres....
class DataCentreRack(models.Model):
RackName = models.CharField(max_length=25)
Room = models.ForeignKey('DataCentreRoom',blank=True)
Suite= models.ForeignKey('DataCentreSuite',blank=True)
def __unicode__(self):
return u'%s -> %s (%s)' % (self.RackName, self.Suite, self.Room)
class Meta:
#permissions = ()
verbose_name = 'Data Centre Rack'
verbose_name_plural = 'Data Centre Racks'
ordering = ['RackName']
# The different classes of configuration items
class ConfigurationItemClass(models.Model):
Name = models.CharField(max_length=100)
def __unicode__(self):
return self.Name
class Meta:
#permissions = ()
verbose_name = 'Configuration Item Class'
verbose_name_plural = 'Configuration Item Classes'
ordering = ['Name']
# The network interfaces that are assigned to configuration items
class NetworkInterface(models.Model):
Name = models.CharField(max_length=5)
MacAddress = models.CharField(max_length=30)
Gateway = models.IPAddressField(blank=True, null=True)
SubnetMask = models.IPAddressField(blank=True, null=True)
IPAddress = models.IPAddressField(blank=True, null=True)
UseDHCP = models.BooleanField()
def __unicode__(self):
return u'%s (%s -> %s)' % (self.Name, self.IPAddress, self.MacAddress)
class Meta:
#permissions = ()
verbose_name = 'Network Interface'
verbose_name_plural = 'Network Interfaces'
ordering = ['Name']
class PackageProvider(models.Model):
Name = models.CharField(max_length=255)
ExecutableName = models.CharField(max_length=255)
def __unicode__(self):
return self.Name
class PackageFormat(models.Model):
Name = models.CharField(max_length=255)
Provider = models.ForeignKey(PackageProvider)
def __unicode__(self):
return self.Name
class Repo(models.Model):
Name = models.CharField(max_length=255)
PackageProvider = models.ForeignKey(PackageProvider)
url = models.CharField(max_length=255)
def __unicode__(self):
return self.Name
class OperatingSystemBreed(models.Model):
Name = models.CharField(max_length=255)
PackageFormat = models.ForeignKey(PackageFormat)
def __unicode__(self):
return self.Name
class OperatingSystemName(models.Model):
Name = models.CharField(max_length=200)
SupportCompany = models.ForeignKey(Company)
Breed = models.ForeignKey(OperatingSystemBreed)
def __unicode__(self):
return u'%s supported by %s' % (self.Name, self.SupportCompany)
class OperatingSystemVersion(models.Model):
Name = models.ForeignKey(OperatingSystemName)
Version = models.CharField(max_length=128)
EOLDate = models.DateField(blank=True, null=True, verbose_name='End of Life Date')
EOSDate = models.DateField(blank=True, null=True, verbose_name='End of Support Date')
def __unicode__(self):
return u'%s %s' % (self.Name,self.Version)
# the following classes are based on the libvirt xml standard, although they do not contain all the possible options
class VirtualisationType(models.Model):
Name = models.CharField(max_length=128)
Description = models.CharField(max_length=255)
def __unicode__(self):
return self.Name
class Meta:
verbose_name = 'Virtualisation Type'
verbose_name_plural = 'Virtualisation Types'
ordering = ['Name']
class VirtualServerDefinition(models.Model):
Name = models.CharField(max_length=255)
NumCPU = models.IntegerField(max_length=4)
RamMB = models.IntegerField(max_length=7)
DeployTo = models.ForeignKey('ConfigurationItem',null=True,blank=True)
DiskSizeGB = models.IntegerField(default=8,max_length=7)
POWER_CHOICES = (
('reboot','Reboot'),
('destroy','Destroy'),
('preserve','Preserve'),
('coredump-destroy','Core Dump & Destroy'),
('coredump-restart','Core Dump & Restart'),
)
OnReboot = models.CharField(max_length=25,choices=POWER_CHOICES)
OnCrash = models.CharField(max_length=25,choices=POWER_CHOICES)
OnPowerOff = models.CharField(max_length=25,choices=POWER_CHOICES)
Acpi = models.BooleanField()
Pae = models.BooleanField()
NETWORK_CHOICES = (
('network','Virtual Network'),
('bridge','LAN Bridge'),
('user','Userspace SLIRP Stack'),
)
NetworkType = models.CharField(max_length=10,choices=NETWORK_CHOICES)
BridgeNetworkInterface = models.CharField(max_length=10)
VMType = models.ForeignKey(VirtualisationType)
def __unicode__(self):
return u'%s (%s cpus, %s MB RAM, %s GB Storage, %s Network using %s and powered by %s)' % (self.Name,self.NumCPU,self.RamMB,self.DiskSizeGB,self.NetworkType,self.BridgeNetworkInterface,self.VMType)
# Configuration Item Profiles
class ConfigurationItemProfile(models.Model):
Name = models.CharField(max_length=255)
VirtualServerDefinition = models.ForeignKey(VirtualServerDefinition,blank=True,null=True)
OperatingSystem = models.ForeignKey(OperatingSystemVersion)
AutoInstallFile = models.TextField(help_text="Paste your Kickstart/Debian a-i/Windows unattend.txt in here")
repos = models.ManyToManyField(Repo,blank=True,null=True)
def __unicode__(self):
return self.Name
# The configuration items (servers/switches etc)
class ConfigurationItem(models.Model):
Hostname = models.CharField(max_length=255)
Rack = models.ForeignKey('DataCentreRack')
Asset = models.CharField(max_length=128)
SupportTag = models.CharField(max_length=128)
Class = models.ForeignKey(ConfigurationItemClass)
Owner = models.ForeignKey(User)
NetworkInterface = models.ManyToManyField(NetworkInterface)
Profile = models.ForeignKey(ConfigurationItemProfile)
VMImagePath = models.CharField(max_length=255,blank=True,null=True,verbose_name='Path for Virtual Images')
IsVirtual = models.BooleanField()
BuildOnNextBoot = models.BooleanField(verbose_name="PXE Build",help_text="Should this box be rebuilt the next time it is booted?")
IsVMHost = models.BooleanField()
rootpwhash = models.CharField(max_length=255)
def __unicode__(self):
return self.Hostname
class Meta:
#permissions = ()
verbose_name = 'Configuration Item'
verbose_name_plural = 'Configuration Items'
ordering = ['Hostname']
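# Example ORM queries once migrations are applied (hypothetical data):
#   ConfigurationItem.objects.filter(IsVirtual=True).select_related('Rack', 'Profile')
#   DataCentreRack.objects.filter(Room__DataCentre__ShortCode='LDN')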
| 1,077 | 8,174 | 469 |
2f2714142396728fd927af9cf97aff6dece18541 | 1,318 | py | Python | brightermonday.py | Simonwafula/Kenyan-Jobsites-Scraper | 05589adc2a2253e4bd61de2338ce7c3061afd697 | [
"Apache-2.0"
] | null | null | null | brightermonday.py | Simonwafula/Kenyan-Jobsites-Scraper | 05589adc2a2253e4bd61de2338ce7c3061afd697 | [
"Apache-2.0"
] | null | null | null | brightermonday.py | Simonwafula/Kenyan-Jobsites-Scraper | 05589adc2a2253e4bd61de2338ce7c3061afd697 | [
"Apache-2.0"
] | null | null | null | from bs4 import BeautifulSoup
import requests
import sqlite3
conn = sqlite3.connect("output.db")
cur = conn.cursor()
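# Create the table on first run (assumed minimal schema; adjust if output.db
# is provisioned elsewhere):
cur.execute("CREATE TABLE IF NOT EXISTS scraped_data (title TEXT, link TEXT, employer TEXT)")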
headers = {
'Access-Control-Allow-Origin': '*',
'Access-Control-Allow-Methods': 'GET',
'Access-Control-Allow-Headers': 'Content-Type',
'Access-Control-Max-Age': '3600',
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/84.0'
}
url = "https://www.brightermonday.co.ke/jobs"
response = requests.get(url, headers=headers, timeout=5)  # headers must be passed by keyword; the second positional argument of requests.get is params
content = BeautifulSoup(response.content, "html.parser")
# article = content.find('article', attrs={"class": "search-result"})
# employer = article.find('div', attrs={"class": "search-result__job-meta"})
# print(article.prettify())
# print(employer.text)
job_posting = []
for posting in content.findAll('article', attrs={"class": "search-result"}):
job_post = {
"title": posting.find('h3').text,
"link": posting.find('a').get('href'),
"employer": posting.find('div', attrs={"class": "search-result__job-meta"}).text,
}
job_posting.append(job_post)
# writing to database
for job_post in job_posting:
cur.execute("INSERT INTO scraped_data (title, link, employer) values (?, ?, ?)",
(job_post["title"], job_post["link"], job_post["employer"])
                )
conn.commit()  # sqlite3 does not autocommit; without this the inserted rows are lost
conn.close()
| 32.146341 | 96 | 0.657056 | from bs4 import BeautifulSoup
import requests
import sqlite3
conn = sqlite3.connect("output.db")
cur = conn.cursor()
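# Create the table on first run (assumed minimal schema; adjust if output.db
# is provisioned elsewhere):
cur.execute("CREATE TABLE IF NOT EXISTS scraped_data (title TEXT, link TEXT, employer TEXT)")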
headers = {
'Access-Control-Allow-Origin': '*',
'Access-Control-Allow-Methods': 'GET',
'Access-Control-Allow-Headers': 'Content-Type',
'Access-Control-Max-Age': '3600',
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/84.0'
}
url = "https://www.brightermonday.co.ke/jobs"
response = requests.get(url, headers=headers, timeout=5)  # headers must be passed by keyword; the second positional argument of requests.get is params
content = BeautifulSoup(response.content, "html.parser")
# article = content.find('article', attrs={"class": "search-result"})
# employer = article.find('div', attrs={"class": "search-result__job-meta"})
# print(article.prettify())
# print(employer.text)
job_posting = []
for posting in content.findAll('article', attrs={"class": "search-result"}):
job_post = {
"title": posting.find('h3').text,
"link": posting.find('a').get('href'),
"employer": posting.find('div', attrs={"class": "search-result__job-meta"}).text,
}
job_posting.append(job_post)
# writing to database
for job_post in job_posting:
cur.execute("INSERT INTO scraped_data (title, link, employer) values (?, ?, ?)",
(job_post["title"], job_post["link"], job_post["employer"])
                )
conn.commit()  # sqlite3 does not autocommit; without this the inserted rows are lost
conn.close()
| 0 | 0 | 0 |
70cfb9a2c6a526447cb9611d46444aadca43a0be | 1,791 | py | Python | MyWord2Vec.py | hakimkt/SAIVS | c310bd7c9426f0d21efeea8866cf6b881b7e8530 | [
"Apache-2.0"
] | 40 | 2018-10-29T02:29:13.000Z | 2021-11-23T13:14:50.000Z | MyWord2Vec.py | 5l1v3r1/SAIVS | aa62451665b6398ba329d68592bf4313be60a886 | [
"Apache-2.0"
] | 1 | 2021-02-23T12:27:28.000Z | 2021-02-23T12:27:28.000Z | MyWord2Vec.py | 5l1v3r1/SAIVS | aa62451665b6398ba329d68592bf4313be60a886 | [
"Apache-2.0"
] | 29 | 2018-10-29T02:29:17.000Z | 2022-03-17T06:31:35.000Z | # -*- coding: utf-8 -*-
from gensim.models import word2vec
import os
import logging
MODEL_NAME = 'text8'
DATA_PATH = 'data\\text8'
| 37.3125 | 114 | 0.537688 | # -*- coding: utf-8 -*-
from gensim.models import word2vec
import os
import logging
MODEL_NAME = 'text8'
DATA_PATH = 'data\\text8'
class Word2Vec:
def __init__(self, int_count=10):
self.int_word_count = int_count
def learn_sentense(self):
if os.path.exists(MODEL_NAME):
# print('Using Word2Vec :', MODEL_NAME)
return
else:
            print('Learning sentences...')
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
obj_sentense = word2vec.Text8Corpus(DATA_PATH)
obj_model = word2vec.Word2Vec(obj_sentense, size=200, min_count=20, window=15)
obj_model.save(MODEL_NAME)
return
def cal_similarity(self, lst_posi, lst_nega, obj_model):
int_idx = 1
obj_result = None
try:
obj_result = obj_model.most_similar(positive=lst_posi, negative=lst_nega, topn = self.int_word_count)
print("\nAnalogize the '%s'." % lst_posi[0])
print("#######################candidate#############################")
print("No.", " ", "word", " ", "cos distance")
for r in obj_result:
print(int_idx,' ', r[0],' ', r[1])
int_idx += 1
print("#############################################################")
return obj_result
        except KeyError:  # gensim raises KeyError for out-of-vocabulary words
            obj_result = False
            return obj_result
def get_candidate_word(self, str_target_word):
self.learn_sentense()
obj_model = word2vec.Word2Vec.load(MODEL_NAME)
str_word = str_target_word
lst_nega = []
        return self.cal_similarity([str_word.encode()], lst_nega, obj_model)
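# Usage sketch (hypothetical driver, not part of the original module; assumes the
# text8 corpus sits at data\text8 as configured above):
#   w2v = Word2Vec(int_count=10)
#   w2v.get_candidate_word(u'network')   # trains/loads the 'text8' model, prints top-10 candidates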
# ===== file: siftOnePixel.py | repo: hakimhassani97/SIFT | license: MIT =====
import cv2 as cv2
import numpy as np
from collections import Counter
import math
import matplotlib.pyplot as plt
#test images
# s='add.png'
s='lenna.jpg'
s='ttt.jpg'
#pixel used for SIFT
pixelX=200
pixelY=200
#functions
def drawContours(img,contours,color):
contours=np.array(contours)
for i in range(contours.shape[0]):
for j in range(contours[i].shape[0]):
for k in range(contours[i][j].shape[0]):
img[contours[i][j][k][1]][contours[i][j][k][0]]=color
def getContours(img,seuil=30):
contours=[]
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
h,w=np.shape(img)
imgContours=np.zeros((h,w),np.double)
imgContoursX=np.zeros((h,w),np.double)
imgContoursY=np.zeros((h,w),np.double)
for i in range(0,h):
for j in range (0,w):
if(j==0 or j==w-1 or i==0 or i==h-1):
imgContoursX[i][j]=0
imgContoursY[i][j]=0
else:
imgContoursX[i][j] = (np.multiply( convX,img[i-1:i+2,j-1:j+2]).sum(axis=1).sum(axis=0))
imgContoursY[i][j] = (np.multiply( convY,img[i-1:i+2,j-1:j+2]).sum(axis=1).sum(axis=0))
a=math.sqrt(math.pow(imgContoursX[i][j],2)+math.pow(imgContoursY[i][j],2))
a=min(a,255)
a=max(a,0)
if(a>seuil):
imgContours[i][j]=a
contours.append([i,j])
return imgContours,contours,imgContoursX,imgContoursY
def getOrientation(img,x,y):
global imgContoursX,imgContoursY
if(x<0 or x>w-1 or y<0 or y>h-1):
d=0
else:
d=math.atan2(imgContoursY[y][x],imgContoursX[y][x])
d+=math.pi
return d
def getBlock(img,x,y):
a=[]
block=np.zeros((16,16),np.double)
for i in range(x-8,x+8):
for j in range(y-8,y+8):
angle=roundAngle(getOrientation(img,i,j))
block[j-y+8][i-x+8]=angle
# block=np.zeros((4,4),np.double)
# for i in range(x-8,x+8):
# for j in range(y-8,y+8):
# xb=(i-x+8)//4
# yb=(j-y+8)//4
# angle=roundAngle(getOrientation(img,i,j))
# block[yb][xb]=max(block[yb][xb],angle)
# a.append(angle)
return block
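# Note: getBlock() returns one 16x16 array of binned orientations for the window
# centred on (x, y); the main section iterates it row by row, so showHist() gets
# 16 per-row histograms to fill its 4x4 grid.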
def anglesArray():
angles = []
i=0
while True:
angles.append(math.pi*i)
i+=1/4
if(i==2):
break
return angles
def anglesStringsArray():
return ["0","π/4","π/2","3π/4","π","5π/4","3π/2","7π/4"]
def roundAngle(angle):
    angles = anglesArray()
    index = np.argmin(np.abs(np.subtract(angles, angle)))
    return angles[index]
def roundAngleTitle(angle):
anglesStrings = anglesStringsArray()
angles = anglesArray()
a=np.argmin(np.abs(np.subtract(angles,angle)))
return anglesStrings[a]
def roundAngleIndex(angle):
angles = anglesArray()
a=np.argmin(np.abs(np.subtract(angles,angle)))
return a
def showHist(tempdicArray):
fig, ax = plt.subplots(4,4,figsize=(14,8))
for i in range (0,len(tempdicArray)):
x=(int)(i/4)
ax[x][i%4].bar(list(tempdicArray[i].keys()), tempdicArray[i].values(), color='b')
fig.tight_layout()
plt.show()
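# getOrientation() shifts atan2's (-pi, pi] output by +pi so every gradient
# direction lies in [0, 2*pi) before roundAngle() snaps it to one of the 8 bin
# centres from anglesArray(). Quick self-check (assertion invented for illustration):
#   assert roundAngle(0.1) == 0.0 and roundAngleTitle(math.pi) == "π"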
#main
img=cv2.imread(s)
h,w,d = np.shape(img)
#convolution matrix
c=1
convX=np.zeros((3,3),np.double)
convX[0,0]=0;convX[0,1]=0;convX[0,2]=0;convX[1,0]=-c;convX[1,1]=0
convX[1,2]= c;convX[2,0]= -0;convX[2,1]=0;convX[2,2]=0
convY=np.zeros((3,3),np.double)
convY[0,0]=-0;convY[0,1]=-c;convY[0,2]=-0;convY[1,0]=0;convY[1,1]=0
convY[1,2]= 0;convY[2,0]= 0;convY[2,1]=c;convY[2,2]=0
#threshold for contours
seuil=30
img,contours,imgContoursX,imgContoursY=getContours(img,seuil)
blocks=getBlock(img,pixelX,pixelY)
dic={}
histogrammes=[]
for block in blocks:
#count orientations for histogramme
array=np.matrix.flatten(block)
count=Counter(array)
for c in count:
dic[roundAngleTitle(c)]=count[c]
histogrammes.append(dic.copy())
dic={}
showHist(histogrammes)
cv2.imshow('image : '+s,img)
cv2.waitKey(0)
# ===== file: carbon/setup.py | repo: katzj/graphite | license: Apache-2.0 =====
#!/usr/bin/env python
import os
from glob import glob
if os.environ.get('USE_SETUPTOOLS'):
from setuptools import setup
setup_kwargs = dict(zip_safe=0)
else:
from distutils.core import setup
setup_kwargs = dict()
storage_dirs = [ ('storage/whisper',[]), ('storage/lists',[]),
('storage/log',[]), ('storage/rrd',[]) ]
conf_files = [ ('conf', glob('conf/*.example')) ]
setup(
name='carbon',
version='0.9.8',
url='https://launchpad.net/graphite',
author='Chris Davis',
author_email='chrismd@gmail.com',
license='Apache Software License 2.0',
description='Backend data caching and persistence daemon for Graphite',
packages=['carbon', 'carbon.aggregator'],
package_dir={'' : 'lib'},
scripts=glob('bin/*'),
package_data={ 'carbon' : ['*.xml'] },
data_files=storage_dirs + conf_files,
install_requires=['twisted', 'txamqp'],
**setup_kwargs
)
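# Typical invocation for this distutils/setuptools-era layout (a sketch, not taken
# from the project's docs; the prefix path is an assumption):
#   USE_SETUPTOOLS=1 python setup.py install --prefix=/opt/graphite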
# ===== file: Scripts/netCDF_splitter2var_2D.py | repo: wolfiex/AC_tools | license: Unlicense =====
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Split off 2D variable from file with other variables
Notes
----
- based on software carpentary example.
http://damienirving.github.io/capstone-oceanography/03-data-provenance.html
"""
# Modules to import
from netCDF4 import Dataset
import numpy as np
import pylab as pl
import calendar
# add extra's for copied function...
import os
import sys
import argparse
import datetime
# --- verbose and debug settings for script main call
VERBOSE = False
DEBUG = False
def main(filename=None, VarName='OLSON', verbose=False, debug=False):
"""
Driver to split off variables
"""
# Get the file name and location
wd, fn = get_file_loc_and_name()
# name output file if name not given
if isinstance(filename, type(None)):
filename = wd.split('/')[-2]
if debug:
print((wd, fn, filename))
inFile = wd+'/'+fn
# Set output name
outfile_name = inFile+'.out'
# Read input data
VarData, input_DATA = read_data(inFile, VarName=VarName)
# Set values?
# print type(VarData)
# print [ (i.shape, i.mean(), i.min(), i.max()) for i in VarData]
# VarData[VarData>1] = 1
# print [ (i.shape, i.mean(), i.min(), i.max()) for i in VarData]
# --- Write the output file
outfile = Dataset(outfile_name, 'w', format='NETCDF4')
set_global_atts(input_DATA, outfile)
copy_dimensions(input_DATA, outfile)
copy_variables(input_DATA, outfile, VarName=VarName)
    # overwrite data
outfile[VarName][:] = VarData
# Close file
outfile.close()
def get_file_loc_and_name():
""" Get file location and name """
# Use command line grab function
import sys
# Get arguments from command line
wd = sys.argv[1]
fn = sys.argv[2]
return wd, fn
def copy_dimensions(infile, outfile):
"""
Copy the dimensions of the infile to the outfile
"""
for dimName, dimData in iter(list(infile.dimensions.items())):
outfile.createDimension(dimName, len(dimData))
def copy_variables(infile, outfile, VarName='OLSON'):
"""
Create variables corresponding to the file dimensions
by copying from infile
"""
# Get vars
var_list = ['lon', 'lat', 'time']
# Also consider LANDMAP value
var_list += [VarName]
# Now loop
for var_name in var_list:
varin = infile.variables[var_name]
outVar = outfile.createVariable(var_name, varin.datatype,
varin.dimensions,
)
outVar[:] = varin[:]
var_atts = {}
for att in varin.ncattrs():
if not att == '_FillValue':
var_atts[att] = eval('varin.'+att)
outVar.setncatts(var_atts)
def read_data(ifile, VarName='OLSON'):
"""
Read data from ifile corresponding to the VarName
"""
input_DATA = Dataset(ifile)
VarData = input_DATA.variables[VarName][:]
return VarData, input_DATA
def set_global_atts(infile, outfile):
"""Set the global attributes for outfile.
Note that the global attributes are simply copied from infile.
"""
global_atts = {}
for att in infile.ncattrs():
global_atts[att] = eval('infile.'+att)
# set attributes
outfile.setncatts(global_atts)
if __name__ == "__main__":
main(verbose=VERBOSE, debug=DEBUG)
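# Command-line usage implied by get_file_loc_and_name() (the .nc file name below is
# an invented example):
#   python netCDF_splitter2var_2D.py /path/to/data LANDMAP_file.nc
# which writes the 'OLSON' variable (plus lon/lat/time) to /path/to/data/LANDMAP_file.nc.out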
# ===== file: Ball Handle/base.py | repo: maxbot5/Ball-Handle-Software | license: MIT =====
import time
import numpy as np
import Adafruit_BBIO.PWM as PWM
from PixyCam import PixyCam
from imu import Imu
from servo import Servo
from wheel import Wheel
from classes import State
import constants as cons
import threading
#import logging
import sys
from queue import LifoQueue
count =0
ball_status_new = 0
ball_status_old = 0
stop_threads = False
BUF_SIZE = 5
imuQueue = LifoQueue(BUF_SIZE)
camQueue = LifoQueue(BUF_SIZE)
wheelQueue = LifoQueue(BUF_SIZE)
servoQueue = LifoQueue(BUF_SIZE)
'''
wheel_leftQueue = LifoQueue(BUF_SIZE)
wheel_rightQueue = LifoQueue(BUF_SIZE)
servo_leftQueue = LifoQueue(BUF_SIZE)
servo_rightQueue = LifoQueue(BUF_SIZE)
'''
class Input(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
# create Sensor objects
self.imu = Imu('P5_4')
self.cam = PixyCam(sim_mode=False)
def run(self):
#global dead
print("Start Input...")
while(not killpill):
            if not imuQueue.full() : # and read the IMU interrupt pin
                imuQueue.put(self.imu.process()) # still to be done!! add the queue to the imu class and fill it there directly in process()
#imuQueue.put((1000, 0, 0)) #TEST DATA
if not camQueue.full():
camQueue.put(self.cam.process())
#camQueue.put((2,400,0,100,0)) #TEST DATA
class Processing(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
# Objects
self.ball_measure = State(p_x=0, p_y=0, phi_z=0, v_x=0, v_y=0, w_z=0)
self.ball_set = State(p_x=0, p_y=0, phi_z=0, v_x=0, v_y=0, w_z=0)
self.robot = State(p_x=0, p_y=0, phi_z=0, v_x=0, v_y=0, w_z=0)
self.ang_set_left = 0
self.ang_set_right = 0
self.V_set_left = 0
self.V_set_right = 0
self.impact_point = (0,0)
self.ang2impact_left = 0
self.ang2impact_right = 0
def cart2pol(self, beg, end): # transform cartesian coordiantes to polar
rel = (end[0] - beg[0], end[1] - beg[1])
mag = np.hypot(rel[0], rel[1])
# print("mag= ", mag, beg, end)
# print("rel= ", rel)
# proof of cases
        if rel[0] < 0:
            if rel[1] < 0:
                # third quadrant: the -pi quadrant correction belongs outside arctan
                return mag, np.arctan(rel[1] / rel[0]) - np.pi
            elif rel[1] >= 0:
                # second quadrant: +pi correction, also outside arctan
                return mag, np.arctan(rel[1] / rel[0]) + np.pi
elif rel[0] == 0:
if rel[1] < 0:
return mag, -(np.pi * 0.5)
elif rel[1] > 0:
return mag, (np.pi * 0.5)
elif rel[0] > 0:
return mag, np.arctan(rel[1] * (1 / rel[0]))
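        # The case analysis above reproduces np.arctan2(rel[1], rel[0]); an
        # equivalent one-liner would be:
        #     return mag, np.arctan2(rel[1], rel[0])
        # Note rel == (0, 0) falls through and returns None implicitly.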
# estimate real ball motion and important information about it in relation to the ground
def observer(self):
print("Start observer...")
#1. calculating impact point and the dribbel position
def impact_Y(): # cutting point between ball motion and y-axle through ideal dribbel point
# return -(((ball_measure.P_X - impact_point[0]) * -(ball_measure.V_Y / ball_measure.V_X)) + ball_measure.P_Y)
if self.ball_measure.V_Y == 0 or self.ball_measure.V_X == 0:
return -(self.ball_measure.P_Y)
else:
return -(-(self.ball_measure.P_X - self.impact_point[0]) * self.ball_measure.V_Y / self.ball_measure.V_X + self.ball_measure.P_Y)
def tangent_point(a, M_X, M_Y, P_X, P_Y): # calculate the wheel position on servos motion circle
            # a = servo radius
            # b = half of the distance d between ball centre and servo centre
c, tangent_ang = self.cart2pol((M_X, M_Y), (P_X, P_Y))
b = (c * 0.5)
# print("a,b,c, ang: ", a, b, c, np.rad2deg(tangent_ang))
x = np.sqrt(((c * c) + (a * a) - (b * b)) / (2 * c))
y = (np.sqrt(np.absolute((a * a) - (x * x))))
dx, dy = P_X - M_X, P_Y - M_Y
Wheel_X1 = M_X + x * (dx / c) - y * (dy / c)
Wheel_Y1 = M_Y + x * (dy / c) + y * (dx / c)
Wheel_X2 = M_X + x * (dx / c) + y * (dy / c)
Wheel_Y2 = M_Y + x * (dy / c) - y * (dx / c)
# print("Schnittpunkte x1, y1, x2, y2", S_X1, S_Y1, S_X2, S_Y2)
# der ball darf nicht weiter als der servodrehpunkt kommen, also ist M_Y das seitliche Maximum
# der ball kann niemals hinter dem servo sein, also ist P_X der äußerste Punkt auf dieser achse
# print("tangent_ang:",tangent_ang)
left_abs, ang_set_left= self.cart2pol((M_X,M_Y), (Wheel_X1,Wheel_Y1))
left_right, ang_set_right = self.cart2pol((M_X,M_Y), (Wheel_X2,Wheel_Y2))
return ang_set_left, ang_set_right
self.impact_point = (cons.DRIBBEL_POINT_X, impact_Y())
p_x_left, p_y_left = cons.SERVO_POS_LEFT
p_x_right, p_y_right = cons.SERVO_POS_RIGHT
imp_x, imp_y = self.impact_point
self.ang2impact_left, dump = tangent_point(cons.SERVO_RADIUS, p_x_left, p_y_left, imp_x, imp_y)
dump, self.ang2impact_right = tangent_point(cons.SERVO_RADIUS, p_x_right, p_y_right, imp_x, imp_y)
print("Observer: impact_point", self.impact_point, "tangent_angle", self.ang2impact_left, self.ang2impact_right)
#2. Ball motion in relation to the ground based on relativ motion to the robot and robots own motion
def setPoint_ball(): # setpoint for ball movement (include model from robot motion and relativ ball motion)
            '''
            Computes the ball's velocity magnitude in Cartesian coordinates V(X,Y).
            At the moment the ideal dribble point lies in front of the robot in the
            X direction, with no offset in the Y direction.
            Idea: use the current dribble point instead!
            (current implementation) To dribble the ball at the ideal dribble point,
            the following ball velocities are needed:
            '''
# required motion
'''
# with robots angular velocity from imu
V_X = robot.V_X + np.cos(np.deg2rad(robot.w_Z)) * np.sqrt(
ball_measure.P_X * ball_measure.P_X + ball_measure.P_Y * ball_measure.P_Y) - ball_measure.V_X
V_Y = robot.V_Y + np.sin(np.deg2rad(robot.w_Z)) * np.sqrt(
ball_measure.P_X * ball_measure.P_X + ball_measure.P_Y * ball_measure.P_Y) - ball_measure.V_Y
'''
            '''
            Current ball motion relative to the ground: we can only influence the
            motion relative to the robot directly, which is why we need the robot's
            own motion. In front, the robot pushes the ball, so the wheel should
            turn slower than the ball's total motion, because the relative motion
            to the ground comes from the robot. Backwards, however, the ball has to
            spin faster to make up for the missing push from the robot. All of this
            is captured by the signs in the following equations.
            '''
#without measured angular velocity from imu
vy = self.robot.V_Y
vx = self.robot.V_X
if abs(self.ball_measure.V_X) > 30:
vx = self.robot.V_X - self.ball_measure.V_X
if abs(self.ball_measure.V_Y) > 30:
vy = self.robot.V_Y - self.ball_measure.V_Y
return vx, vy
self.ball_set.V_X, self.ball_set.V_Y = setPoint_ball()
print("Observer: self.ball_set.V_X, self.ball_set.V_Y", self.ball_set.V_X, self.ball_set.V_Y)
# decide how to handle the ball
def controller(self):
global ball_status_new
global ball_status_old
print("Start controller...")
def wheel_velocity(ball_mag, ball_ang):
print("controller: wheel velocity: ball_mag, ball_ang", ball_mag, np.rad2deg(ball_ang))
'''
            # original version (kept for reference)
v_left = -(ball_mag * (
np.cos(-self.ang_set_left + ball_ang) + np.sin(self.ang_set_left + ball_ang)))
v_right = (ball_mag * (
np.cos(-self.ang_set_right + ball_ang) + np.sin(-self.ang_set_right + ball_ang)))
'''
v_left = -(ball_mag * ((1 +
np.cos(-self.ang_set_left + ball_ang)) + np.sin(self.ang_set_left + ball_ang)))
v_right = (ball_mag * ((1 +
np.cos(-self.ang_set_right + ball_ang)) + np.sin(-self.ang_set_right + ball_ang)))
#print("wheel velocity x|Y", v_left, v_right)
# print("Ball |V|:",ball_mag, "Ball Ang:", ball_ang)
return v_left, v_right
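            # Worked example (numbers invented): with ang_set_left = -pi/4 and a
            # ball moving straight ahead (ball_ang = 0, ball_mag = 100):
            #   v_left = -(100 * ((1 + cos(pi/4)) + sin(-pi/4))) = -100.0
            # Caution: accept_ball()/dribbel_ball() store ang_set_* in degrees while
            # ball_ang is in radians; np.cos/np.sin expect radians throughout.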
def accept_ball():
print("accept_ball")
# 1. set servos to the position that the ball hit the ball-handle in front
self.ang_set_left = np.rad2deg(self.ang2impact_left)
self.ang_set_right = np.rad2deg(self.ang2impact_right)
# 2. set the wheels to spin in robots motion
V_Ball_mag, ang = self.cart2pol((self.ball_set.V_X, self.ball_set.V_Y), self.impact_point)
mag, Ball_ang = self.cart2pol((self.ball_set.P_X, self.ball_set.P_Y), self.impact_point)
self.V_set_left, self.V_set_right = wheel_velocity(V_Ball_mag, Ball_ang)
def dribbel_ball():
print("dribbel_ball")
# 1. fix the servos to ideal ball handle position
self.ang_set_left = cons.SERVO_ANG_DRIBBEL_LEFT
self.ang_set_right = cons.SERVO_ANG_DRIBBEL_RIGHT
# 2. balance the ball in front of the ball handle by setting wheels spin to the mirror ang of the current ball motion
#self.V_set_left, self.V_set_right = wheel_velocity(
# self.cart2pol((self.ball_set.V_X, self.ball_set.V_Y), self.impact_point))
print("V_ballset:",self.ball_set.V_X,self.ball_set.V_Y,"V_robot",self.robot.V_X, self.robot.V_Y)
self.impact_point = (350,0)
V_Ball_mag, Ball_ang = self.cart2pol((self.ball_set.V_X, self.ball_set.V_Y), (self.robot.V_X,self.robot.V_Y))
#mag, Ball_ang = self.cart2pol((self.ball_set.P_X, self.ball_set.P_Y), self.impact_point)
self.V_set_left, self.V_set_right = wheel_velocity(V_Ball_mag, Ball_ang)
if ball_status_new is cons.FAR_BALL:
ball_status_old = cons.FAR_BALL
print("FAR_BALL")
return
ball_status_new = cons.HAVE_BALL
if ball_status_new is cons.NEAR_BALL:
accept_ball()
print("NEAR_BALL")
ball_status_old = cons.NEAR_BALL
return
if ball_status_new is cons.HAVE_BALL:
if ball_status_old is cons.NEAR_BALL:
return
dribbel_ball()
ball_status_old = cons.HAVE_BALL
print("HAVE_BALL")
return
def run(self):
global dead, ball_status_new
print("Start Processing...")
while(not killpill):
#read sensordata
            if not imuQueue.empty() : # and read the IMU interrupt pin
self.robot.V_X, self.robot.V_Y, self.robot.w_Z = imuQueue.get()
print("PROCESS: self.robot.V_X, self.robot.V_Y, self.robot.w_Z",self.robot.V_X, self.robot.V_Y, self.robot.w_Z)
if not camQueue.empty():
ball_status_new, self.ball_measure.P_X, self.ball_measure.P_Y, self.ball_measure.V_X, self.ball_measure.V_Y = camQueue.get()
print("PROCESS: self.ball_measure.V_X, self.ball_measure.V_Y", self.ball_measure.V_X, self.ball_measure.V_Y)
#Execution:
if not wheelQueue.full():
self.observer()
self.controller()
print("PROCESS: (self.ang_set_left, self.ang_set_right, self.V_set_left, self.V_set_right)", (self.ang_set_left, self.ang_set_right, self.V_set_left, self.V_set_right))
wheelQueue.put((self.V_set_left, self.V_set_right))
#if not servoQueue.full():
servoQueue.put((self.ang_set_left, self.ang_set_right))
class Servos(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.servo_left = Servo(sim_mode=False, radius=65, name='left', port="P9_14", ang_min=-10, ang_max=85,
ang_crit= cons.SERVO_ANG_CRIT_LEFT,
ang_start=cons.SERVO_ANG_START, ang_dribbel=cons.SERVO_ANG_DRIBBEL_LEFT, pwm_min=5.4, pwm_max=9.5,
start_duty=8, pwm_crit_min=5.5, pwm_crit_max=9,
ang_offset=cons.SERVO_ANG_OFFSET_LEFT, p_x=194, p_y=-63.8)
self.servo_right = Servo(sim_mode=False, radius=65, name='right', port="P9_16", ang_min=-85, ang_max=10,
ang_crit=cons.SERVO_ANG_OFFSET_RIGHT,
ang_start=cons.SERVO_ANG_START, ang_dribbel=cons.SERVO_ANG_DRIBBEL_RIGHT, pwm_min=6, pwm_max=9.5,
pwm_crit_min=6.5, pwm_crit_max=10,ang_offset=cons.SERVO_ANG_OFFSET_RIGHT, p_x=194, p_y=63.8)
def run(self):
print("set servos..")
while(not killpill):
if not servoQueue.empty():
# self.servo_left.ang_set, self.servo_right.ang_set, self.wheel_left.V_set, self.wheel_right.V_set = outputQueue.get()
#servo_left_ang_set, servo_right_ang_set = servoQueue.get()
self.servo_left.ang_set, self.servo_right.ang_set = servoQueue.get()
print("servo_left_ang_set, servo_right_ang_set", self.servo_left.ang_set, self.servo_right.ang_set)
self.servo_left.process()
self.servo_right.process()
#time.sleep(0.2)
class Wheels(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
# Actuators
self.wheel_left = Wheel(pin_en=cons.PIN_EN_WHEEL_LEFT, pin_dir=cons.PIN_DIR_WHEEL_LEFT, pin_pwm=cons.PIN_PWM_WHEEL_LEFT)
self.wheel_right = Wheel(pin_en=cons.PIN_EN_WHEEL_RIGHT, pin_dir=cons.PIN_DIR_WHEEL_RIGHT, pin_pwm=cons.PIN_PWM_WHEEL_RIGHT)
def run(self):
print("set wheels")
while(not killpill):
if not wheelQueue.empty():
#self.servo_left.ang_set, self.servo_right.ang_set, self.wheel_left.V_set, self.wheel_right.V_set = outputQueue.get()
self.wheel_left.V_set, self.wheel_right.V_set = wheelQueue.get()
print("wheel_left_V_set, wheel_right_V_set", self.wheel_left.V_set, self.wheel_right.V_set)
self.wheel_left.process()
self.wheel_right.process()
if __name__ == '__main__':
try:
killpill = False
#start_thread_2()
inputThread = Input()
inputThread.daemon = True
inputThread.start()
processingThread = Processing()
processingThread.daemon = True
processingThread.start()
servoThread = Servos()
servoThread.daemon = True
servoThread.start()
wheelThread = Wheels()
wheelThread.daemon = True
wheelThread.start()
input("killpill activ with enter: ")
killpill = True
inputThread.join()
processingThread.join()
servoThread.join()
wheelThread.join()
#stop_threads()
except KeyboardInterrupt:
exit(0)
# ===== file: nailgun/nailgun/network/neutron.py | repo: dnikishov/fuel-web | license: Apache-2.0 =====
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import netaddr
import six
from nailgun import consts
from nailgun.db import db
from nailgun.db.sqlalchemy import models
from nailgun.logger import logger
from nailgun.network.manager import AllocateVIPs70Mixin
from nailgun.network.manager import AllocateVIPs80Mixin
from nailgun.network.manager import AssignIPs61Mixin
from nailgun.network.manager import AssignIPs70Mixin
from nailgun.network.manager import AssignIPsLegacyMixin
from nailgun.network.manager import NetworkManager
from nailgun import objects
from nailgun.orchestrator.neutron_serializers import \
NeutronNetworkTemplateSerializer70
class NeutronManager(NetworkManager):
@classmethod
def create_neutron_config(
cls, cluster, segmentation_type=None,
net_l23_provider=consts.NEUTRON_L23_PROVIDERS.ovs):
neutron_config = models.NeutronConfig(
cluster_id=cluster.id,
net_l23_provider=net_l23_provider)
if segmentation_type is not None:
neutron_config.segmentation_type = segmentation_type
meta = cluster.release.networks_metadata["neutron"]["config"]
for key, value in meta.iteritems():
if hasattr(neutron_config, key):
setattr(neutron_config, key, value)
db().add(neutron_config)
db().flush()
return neutron_config
@classmethod
def generate_vlan_ids_list(cls, data, cluster, ng):
if ng.get("name") == consts.NETWORKS.private and \
cluster.network_config.segmentation_type == \
consts.NEUTRON_SEGMENT_TYPES.vlan:
if data.get("networking_parameters", {}).get("vlan_range"):
vlan_range = data["networking_parameters"]["vlan_range"]
else:
vlan_range = cluster.network_config.vlan_range
return range(vlan_range[0], vlan_range[1] + 1)
return [int(ng.get("vlan_start"))] if ng.get("vlan_start") else []
@classmethod
def get_ovs_bond_properties(cls, bond):
props = []
if 'lacp' in bond.mode:
props.append('lacp=active')
props.append('bond_mode=balance-tcp')
else:
props.append('bond_mode=%s' % bond.mode)
return props
class NeutronManagerLegacy(AssignIPsLegacyMixin, NeutronManager):
pass
class NeutronManager61(AssignIPs61Mixin, NeutronManager):
pass
class NeutronManager70(
AllocateVIPs70Mixin, AssignIPs70Mixin, NeutronManager
):
@classmethod
def build_role_to_network_group_mapping(cls, cluster, node_group_name):
"""Build network role to network map according to template data
If template is not loaded, empty map is returned.
:param cluster: Cluster instance
:type cluster: Cluster model
:param node_group_name: Node group name
:type node_group_name: string
:return: Network role to network map
:rtype: dict
"""
template = cluster.network_config.configuration_template
if template is None:
return {}
node_group = template['adv_net_template'][node_group_name]
endpoint_to_net_group = {}
for net_group, value in six.iteritems(
node_group['network_assignments']):
endpoint_to_net_group[value['ep']] = net_group
result = {}
for scheme in six.itervalues(node_group['network_scheme']):
for role, endpoint in six.iteritems(scheme['roles']):
if endpoint in endpoint_to_net_group:
result[role] = endpoint_to_net_group[endpoint]
return result
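        # Illustrative template fragment (key names follow the lookups above,
        # concrete values are invented):
        #   'network_assignments': {'storage': {'ep': 'br-storage'}}
        #   'network_scheme': {'custom': {'roles': {'cinder/iscsi': 'br-storage'}}}
        # build_role_to_network_group_mapping(cluster, 'default') would then
        # return {'cinder/iscsi': 'storage'}.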
@classmethod
def get_network_group_for_role(cls, network_role, net_group_mapping):
"""Returns network group to which network role is associated
If networking template is set first lookup happens in the
template. Otherwise the default network group from
the network role is returned.
:param network_role: Network role dict
:type network_role: dict
:param net_group_mapping: Network role to network group mapping
:type net_group_mapping: dict
:return: Network group name
:rtype: str
"""
return net_group_mapping.get(
network_role['id'], network_role['default_mapping'])
@classmethod
def get_node_networks_with_ips(cls, node):
"""Returns IP, CIDR, meta, gateway for each network on given node."""
if not node.group_id:
return {}
ngs = db().query(models.NetworkGroup, models.IPAddr.ip_addr).\
filter(models.NetworkGroup.group_id == node.group_id). \
filter(models.IPAddr.network == models.NetworkGroup.id). \
filter(models.IPAddr.node == node.id)
if not ngs:
return {}
networks = {}
for ng, ip in ngs:
networks[ng.name] = {
'ip': cls.get_ip_w_cidr_prefix_len(ip, ng),
'cidr': ng.cidr,
'meta': ng.meta,
'gateway': ng.gateway
}
admin_ng = cls.get_admin_network_group(node.id)
if admin_ng:
networks[admin_ng.name] = {
'ip': cls.get_ip_w_cidr_prefix_len(
cls.get_admin_ip_for_node(node.id), admin_ng),
'cidr': admin_ng.cidr,
'meta': admin_ng.meta,
'gateway': admin_ng.gateway
}
return networks
@classmethod
def get_node_endpoints(cls, node):
"""Get a set of endpoints for node for the case when template is loaded
Endpoints are taken from 'endpoints' field
of templates for every node role.
"""
endpoints = set()
template = node.network_template
for role in node.all_roles:
role_templates = template['templates_for_node_role'][role]
for role_template in role_templates:
endpoints.update(
template['templates'][role_template]['endpoints'])
return endpoints
@classmethod
def get_node_network_mapping(cls, node):
"""Get (network, endpoint) mappings for node with loaded template
Returns a list of pairs (network, endpoint) for particular node
for the case when template is loaded. Networks are aggregated for all
node roles assigned to node. Endpoints are taken from 'endpoints' field
of templates for every node role and they are mapped to networks from
'network_assignments' field.
"""
output = []
endpoints = cls.get_node_endpoints(node)
mappings = node.network_template['network_assignments']
for netgroup, endpoint in six.iteritems(mappings):
if endpoint['ep'] in endpoints:
output.append((netgroup, endpoint['ep']))
return output
@classmethod
def get_network_name_to_endpoint_mappings(cls, cluster):
"""Returns endpoint-to-network mappings for node groups in cluster
{
"node_group1": {
"endpoint1": "network_name1",
"endpoint2": "network_name2",
...
},
...
}
"""
output = {}
template = cluster.network_config.configuration_template[
'adv_net_template']
for ng in cluster.node_groups:
output[ng.id] = {}
mappings = template[ng.name]['network_assignments']
for network, endpoint in six.iteritems(mappings):
output[ng.id][endpoint['ep']] = network
return output
@classmethod
def assign_ips_in_node_group(cls, net_id, net_name, node_ids, ip_ranges):
"""Assigns IP addresses for nodes in given network."""
ips_by_node_id = db().query(
models.IPAddr.ip_addr,
models.IPAddr.node
).filter_by(
network=net_id
)
nodes_dont_need_ip = set()
ips_in_use = set()
for ip_str, node_id in ips_by_node_id:
ip_addr = netaddr.IPAddress(ip_str)
for ip_range in ip_ranges:
if ip_addr in ip_range:
nodes_dont_need_ip.add(node_id)
ips_in_use.add(ip_str)
nodes_need_ip = node_ids - nodes_dont_need_ip
free_ips = cls.get_free_ips_from_ranges(
net_name, ip_ranges, ips_in_use, len(nodes_need_ip))
for ip, node_id in zip(free_ips, nodes_need_ip):
logger.info(
"Assigning IP for node '{0}' in network '{1}'".format(
node_id,
net_name
)
)
ip_db = models.IPAddr(node=node_id,
ip_addr=ip,
network=net_id)
db().add(ip_db)
db().flush()
@classmethod
def assign_ips_for_nodes_w_template(cls, cluster, nodes):
"""Assign IPs for the case when network template is applied.
IPs for every node are allocated only for networks which are mapped
to the particular node according to the template.
"""
network_by_group = db().query(
models.NetworkGroup.id,
models.NetworkGroup.name,
models.NetworkGroup.meta,
).join(
models.NetworkGroup.nodegroup
).filter(
models.NodeGroup.cluster_id == cluster.id,
models.NetworkGroup.name != consts.NETWORKS.fuelweb_admin
)
ip_ranges_by_network = db().query(
models.IPAddrRange.first,
models.IPAddrRange.last,
).join(
models.NetworkGroup.ip_ranges,
models.NetworkGroup.nodegroup
).filter(
models.NodeGroup.cluster_id == cluster.id
)
for group_id, nodes_in_group in itertools.groupby(
nodes, lambda n: n.group_id):
net_names_by_node = {}
for node in nodes_in_group:
net_names_by_node[node.id] = \
set(x[0] for x in cls.get_node_network_mapping(node))
networks = network_by_group.filter(
models.NetworkGroup.group_id == group_id)
for net_id, net_name, net_meta in networks:
if not net_meta.get('notation'):
continue
node_ids = set(node_id
for node_id, net_names
in six.iteritems(net_names_by_node)
if net_name in net_names)
ip_ranges_ng = ip_ranges_by_network.filter(
models.IPAddrRange.network_group_id == net_id
)
ip_ranges = [netaddr.IPRange(r.first, r.last)
for r in ip_ranges_ng]
cls.assign_ips_in_node_group(
net_id, net_name, node_ids, ip_ranges
)
cls.assign_admin_ips(nodes)
@classmethod
def _split_iface_name(cls, iface):
try:
iface, vlan = iface.split('.')
vlan = int(vlan)
except ValueError:
vlan = None
return (iface, vlan)
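        # e.g. _split_iface_name('bond0.302') -> ('bond0', 302)
        #      _split_iface_name('eth0')      -> ('eth0', None)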
@classmethod
def get_interfaces_from_template(cls, node):
"""Parse transformations for all node role templates.
Returns a list of bare interfaces and bonds.
"""
transformations = \
NeutronNetworkTemplateSerializer70.generate_transformations(node)
interfaces = {}
for tx in transformations:
if tx['action'] == 'add-port':
key = tx.get('bridge', tx['name'])
interfaces[key] = {
'name': tx['name'],
'type': consts.NETWORK_INTERFACE_TYPES.ether
}
if tx['action'] == 'add-bond':
key = tx.get('bridge', tx['name'])
interfaces[key] = {
'name': tx['name'],
'slaves': [{'name': cls._split_iface_name(i)[0]}
for i in tx['interfaces']],
'type': consts.NETWORK_INTERFACE_TYPES.bond,
'bond_properties': tx.get('bond_properties', {})
}
return interfaces
@classmethod
def assign_networks_by_template(cls, node):
"""Configures a node's network-to-nic mapping based on its template.
This also creates bonds in the database and ensures network
groups are assigned to the correct interface or bond.
"""
cls.clear_assigned_networks(node)
interfaces = cls.get_interfaces_from_template(node)
# This maps interface names to bridge names, the opposite of the
# interfaces dictionary.
bridges_by_iface = {v['name']: k for k, v in interfaces.items()}
endpoint_mapping = cls.get_node_network_mapping(node)
em = dict((reversed(ep) for ep in endpoint_mapping))
node_ifaces = {}
for bridge, values in interfaces.items():
network = em.get(bridge)
# There is no network associated with this bridge (e.g. br-aux)
if not network:
continue
iface, vlan = cls._split_iface_name(values['name'])
# A parent interface can be associated with a bridge so looking it
# up by iface won't always work. For example, if bond0 is
# associated with br-aux then the key in interfaces will be br-aux,
# not bond0. If that lookup fails while processing a
# sub-interface then is_sub_iface will have an incorrect value
# resulting in an error. This attempts to find a bridge name
# associated with an interface. In the case of the bond0 example
# iface_key will be 'br-aux' here. A sub-interface will then
# correctly find the parent information by looking up
# interfaces['br-aux'] instead of failing to find
# interfaces['bond0'] resulting in is_sub_iface being False.
if iface not in interfaces:
iface_key = bridges_by_iface.get(iface)
else:
iface_key = iface
is_sub_iface = (vlan is not None) and (iface_key in interfaces)
# If the current interface is a sub-interface (e.g bond0.302) then
# node_ifaces should be populated with the values of the parent
# interface. If a sub-interface is processed first the entry for
# the parent interface will be missing any data defined in its
# transformation (e.g. bond_properties). The only thing the
# sub-interface actually needs to do is update assigned_networks so
# populating node_ifaces with the parent data is correct.
default = interfaces[iface_key] if is_sub_iface else values
node_ifaces.setdefault(iface, default)
node_ifaces[iface].setdefault('assigned_networks', [])
# Default admin network has no node group
if network == consts.NETWORKS.fuelweb_admin:
net_db = cls.get_admin_network_group(node.id)
else:
net_db = objects.NetworkGroup.get_from_node_group_by_name(
node.group_id, network)
if not net_db:
logger.warning(
("Failed to assign network {0} on node {1}"
" because it does not exist.").format(network, node.id))
else:
# Ensure network_group configuration is consistent
# with the template
if vlan != net_db.vlan_start:
net_db.vlan_start = vlan
db().add(net_db)
db().flush()
ng = {'id': net_db.id}
node_ifaces[iface]['assigned_networks'].append(ng)
# The parent interface NIC ID does not need to be updated for each
# sub-interface as it will have the same value every time. This
# also avoids issues caused by the assumption that all add-port
# actions are for ethernet interfaces. A bond sub-interface added
# via add-port will NOT exist in the database at this point and is
# not an ethernet interface so no NIC will be found.
if values['type'] == consts.NETWORK_INTERFACE_TYPES.ether \
and not is_sub_iface:
nic = objects.Node.get_nic_by_name(node, iface)
node_ifaces[iface]['id'] = nic.id
node_data = {
'id': node.id,
'interfaces': node_ifaces.values()
}
cls._update_attrs(node_data)
class NeutronManager80(AllocateVIPs80Mixin, NeutronManager70):
pass
| 1,642 | 14,546 | 115 |
f1eeece7c0f18999599212275f25573bdf1f2fc4 | 9,267 | py | Python | phns/graph.py | DeepLenin/phns | 80fb48d032cd159782a5d96724e91540a55271ef | [
"MIT"
] | 5 | 2020-04-03T20:59:46.000Z | 2020-07-08T17:40:40.000Z | phns/graph.py | DeepLenin/phns | 80fb48d032cd159782a5d96724e91540a55271ef | [
"MIT"
] | null | null | null | phns/graph.py | DeepLenin/phns | 80fb48d032cd159782a5d96724e91540a55271ef | [
"MIT"
] | null | null | null | import itertools
import numpy as np
from scipy.sparse.csgraph import shortest_path
| 33.334532 | 87 | 0.573109 | import itertools
import numpy as np
from scipy.sparse.csgraph import shortest_path
class Node:
    def __init__(self, value, index, meta=None):
        self.in_edges = []
        self.out_edges = []
        self.value = value
        self.index = index
        # Copy to avoid sharing one mutable default dict across nodes.
        self.meta = dict(meta) if meta else {}
def __repr__(self):
return f'Node("{self.value}")'
@property
def in_nodes(self):
return [edge.from_node for edge in self.in_edges]
@property
def out_nodes(self):
return [edge.to_node for edge in self.out_edges]
class Edge:
    def __init__(self, from_node, to_node, meta=None):
        self.from_node = from_node
        self.to_node = to_node
        # Copy to avoid sharing one mutable default dict across edges.
        self.meta = dict(meta) if meta else {}
from_node.out_edges.append(self)
to_node.in_edges.append(self)
def __repr__(self):
return f"Edge({self.from_node}->{self.to_node})"
class Graph:
def __init__(self):
self.roots = []
self.tails = []
self.nodes = []
self.max_length = 0
self._shortest_paths = None
self._distance_matrix = None
self._transition_matrix = None
self._final_transitions = None
self._initial_transitions = None
@property
def distance_matrix(self):
if self._distance_matrix is None:
mat = np.zeros((len(self.nodes), len(self.nodes)))
for node in self.nodes:
for out in node.out_nodes:
mat[node.index, out.index] = 1
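            # All-pairs shortest paths (Floyd-Warshall, method="FW") over the
            # unit-weight adjacency matrix; the predecessors matrix allows
            # path reconstruction later.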
self._distance_matrix, self._shortest_paths = shortest_path(
mat, method="FW", return_predecessors=True
)
self._distance_matrix[self._distance_matrix == np.inf] = 0
return self._distance_matrix
@property
def shortest_paths(self):
if self._shortest_paths is None:
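            # Accessing distance_matrix computes and caches _shortest_paths.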
self.distance_matrix
return self._shortest_paths
@property
def transition_matrix(self):
if self._transition_matrix is None:
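            # Transition weight 2^(1 - d) halves with each extra hop: 1 for
            # adjacent nodes, while unreachable pairs stay at 0.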
mat = np.exp2(-self.distance_matrix + 1)
mat[self.distance_matrix == 0] = 0
np.fill_diagonal(mat, 1)
self._transition_matrix = mat
return self._transition_matrix
@property
def initial_transitions(self):
if self._initial_transitions is None:
idxs = [it.index for it in self.roots]
transitions = self.transition_matrix[idxs].max(axis=0) / 2
transitions[idxs] = 1
self._initial_transitions = transitions
return self._initial_transitions
@property
def final_transitions(self):
if self._final_transitions is None:
idxs = [it.index for it in self.tails]
transitions = self.transition_matrix[:, idxs].max(axis=1) / 2
transitions[idxs] = 1
self._final_transitions = transitions
return self._final_transitions
def attach(self, pronunciations, word=None):
self.max_length += max([len(p) for p in pronunciations])
first_pronunciation = list(pronunciations)[0]
is_dict = isinstance(pronunciations, dict)
if len(pronunciations) > 1:
# h e l l o
# h e w l o
# h a l o
            # 1. zip forward and find the first differing element - from that
            # element on, the nodes diverge
            # 2. zip from the end, counting indices negatively (-1, -2, ...),
            # to find the first differing element from the end - this is where
            # the branches merge back together
            # 3. Create the initial shared branch
            # 4. Create all the differing middle branches
            # 5. Join all branches into one, even if that is just the word-end
            # node.
i_diff_forward = __find_index_of_first_diff__(pronunciations)
reversed_pronunciations = [list(reversed(p)) for p in pronunciations]
i_diff_reverse = -__find_index_of_first_diff__(reversed_pronunciations) - 1
for i in range(i_diff_forward):
self.tails = [
self.__add_phn__(first_pronunciation[i], meta={"word": word})
]
new_tails = []
if not self.roots and not i_diff_forward:
least_len = min([len(pr) for pr in pronunciations])
if least_len - i_diff_forward < -i_diff_reverse:
i_diff_reverse += 1
for pronunciation in pronunciations:
prev_nodes = self.tails
meta = {"word": word}
if is_dict:
meta["variant"] = pronunciations[pronunciation]
for phn in pronunciation[i_diff_forward:i_diff_reverse]:
node = self.__add_phn__(phn, prev_nodes, meta=meta)
prev_nodes = [node]
if len(pronunciation) - i_diff_forward >= -i_diff_reverse:
phn = pronunciation[i_diff_reverse]
node = self.__add_phn__(phn, prev_nodes, meta=meta)
prev_nodes = [node]
new_tails.extend(prev_nodes)
self.tails = new_tails
for i in range(i_diff_reverse + 1, 0):
self.tails = [
self.__add_phn__(first_pronunciation[i], meta={"word": word})
]
else:
for phn in first_pronunciation:
self.tails = [self.__add_phn__(phn, meta={"word": word})]
return self
def __create_node__(self, phn, meta):
node = Node(phn, len(self.nodes), meta=meta)
self.nodes.append(node)
return node
def __add_phn__(self, phn, prev_nodes=None, meta={}):
node = self.__create_node__(phn, meta=meta)
if not self.tails and not prev_nodes:
self.roots.append(node)
if prev_nodes is None:
prev_nodes = self.tails
for prev_node in prev_nodes:
Edge(from_node=prev_node, to_node=node)
return node
def to_graphviz(self):
import graphviz
dot = graphviz.Digraph()
for node in self.nodes:
if "heuristic" in node.meta:
dot.attr("node", shape="doubleoctagon", color="lightblue2")
                dot.node(str(id(node)), str(node.value))  # optionally append f"\n{node.meta}"
else:
dot.attr("node", shape="ellipse")
dot.node(str(id(node)), str(node.value))
for node in self.nodes:
for edge in node.out_edges:
if edge.meta:
dot.edge(
str(id(node)),
str(id(edge.to_node)),
label=edge.meta["heuristic"],
)
else:
dot.edge(str(id(node)), str(id(edge.to_node)))
return dot
def to_list(self):
result = []
for root in self.roots:
for node in self.__traverse__(root, []):
if node not in result:
result.append(node)
return result
def __traverse__(self, node, prefix):
result = []
new_prefix = prefix.copy()
new_prefix.append(node.value)
for next_node in node.out_nodes:
result.extend(self.__traverse__(next_node, new_prefix))
return result or [new_prefix]
def triples(self):
result = []
for node in self.nodes:
result += self.__fetch_triples__(node)
return result
def __fetch_triples__(self, node):
return itertools.product(
node.in_nodes or [None], [node], node.out_nodes or [None]
)
def create_edge(self, from_node, to_node, meta={}):
if to_node in from_node.out_nodes:
return []
if from_node.value == to_node.value:
triples = []
if to_node.out_nodes:
for node in to_node.out_nodes:
triples += self.create_edge(from_node, node, meta)
elif from_node.in_nodes:
for node in from_node.in_nodes:
triples += self.create_edge(node, to_node, meta)
return triples
Edge(from_node, to_node, meta=meta)
new_triples_before_edge = itertools.product(
from_node.in_nodes or [None], [from_node], [to_node]
)
new_triples_after_edge = itertools.product(
[from_node], [to_node], to_node.out_nodes or [None]
)
return list(new_triples_before_edge) + list(new_triples_after_edge)
def create_node_between(self, phn, from_node, to_node, meta={}):
if to_node and to_node.value == phn:
return self.create_edge(from_node, to_node)
node = self.__create_node__(phn, meta=meta)
new_triples = self.create_edge(from_node, node)
if to_node:
new_triples += self.create_edge(node, to_node)
else:
            self.tails.append(node)
new_triples += self.__fetch_triples__(node)
return new_triples
def __find_index_of_first_diff__(seqs):
i = 0
cardinality = len(seqs)
for i_items in itertools.zip_longest(*seqs):
if i_items.count(i_items[0]) == cardinality:
i += 1
else:
return i
    raise Exception("no differing element found; the sequences are identical")
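

# --- Illustrative usage: a minimal sketch, not part of the library itself ---
# The two made-up pronunciations of "tomato" below are placeholder phoneme
# sequences; any sequences of hashable values work the same way.
if __name__ == "__main__":
    graph = Graph()
    graph.attach(
        [
            ("t", "ah", "m", "ey", "t", "ow"),
            ("t", "ah", "m", "aa", "t", "ow"),
        ],
        word="tomato",
    )
    for path in graph.to_list():
        print(path)  # every pronunciation path through the merged graph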
| 8,705 | 607 | 145 |
a96304dec6862bdf083752a9af5442f5bd3ef565 | 1,957 | py | Python | tests/encryption_test.py | faradaywallet/faradayapp | db9392aea48946979b974d64db3e856b43bc287f | [
"MIT"
] | null | null | null | tests/encryption_test.py | faradaywallet/faradayapp | db9392aea48946979b974d64db3e856b43bc287f | [
"MIT"
] | null | null | null | tests/encryption_test.py | faradaywallet/faradayapp | db9392aea48946979b974d64db3e856b43bc287f | [
"MIT"
] | null | null | null | from encrypt import Encrypt
import json
Encrypt = Encrypt()
if __name__ == '__main__':
payload_encryption_test()
| 32.616667 | 112 | 0.722024 | from encrypt import Encrypt
import json
Encrypt = Encrypt()
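# NOTE: this rebinds the class name to a singleton instance, so the Encrypt
# class itself is no longer reachable below this line.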
def payload_encryption_test():
password = b'testpwd'
payload = {'ccnum': '1111222233334444', 'expdate': '09/13/2018', 'cvc': '123', 'notes': 'adding user notes'}
salt = Encrypt.generate_salt()
print('ENCRYPT\TEST: salt: ', salt)
sym_key_box = Encrypt.generate_key(password, salt)
print('ENCRYPT\TEST: sym_key_box: ', sym_key_box)
sym_key = Encrypt.decrypt_key(sym_key_box, password, salt)
print('ENCRYPT\TEST: sym_key: ', sym_key)
# Payload encryption (encrypt the payload)
json_payload_string = json.dumps(payload)
print('JSON PAYLOAD:', json_payload_string)
encrypted_payload = Encrypt.encrypt_payload(sym_key, json_payload_string.encode())
print('ENCRYPT\TEST: encrypted_payload: ', encrypted_payload)
decrypted_payload = Encrypt.decrypt_payload(sym_key, encrypted_payload)
print('ENCRYPT\TEST: decrypted_payload: ', decrypted_payload)
payload_dict = json.loads(decrypted_payload)
print(payload_dict)
print(payload_dict["ccnum"])
print(payload_dict["expdate"])
print(payload_dict["cvc"])
print(payload_dict["notes"])
def full_encryption_test():
user = 'testuser'
password = b'testpwd'
ccnum = '1111222233334444'
salt = Encrypt.generate_salt()
print('ENCRYPT\TEST: salt: ', salt)
sym_key_box = Encrypt.generate_key(password, salt)
print('ENCRYPT\TEST: sym_key_box: ', sym_key_box)
sym_key = Encrypt.decrypt_key(sym_key_box, password, salt)
print('ENCRYPT\TEST: sym_key: ', sym_key)
# Payload encryption (encrypt the payload)
encrypted_payload = Encrypt.encrypt_payload(sym_key, ccnum)
print('ENCRYPT\TEST: encrypted_payload: ', encrypted_payload)
decrypted_payload = Encrypt.decrypt_payload(sym_key, encrypted_payload)
print('ENCRYPT\TEST: decrypted_payload: ', decrypted_payload)
if __name__ == '__main__':
payload_encryption_test()
| 1,791 | 0 | 46 |
1a0af461995c8249b4ab9f68a503e712e5f4c6f2 | 1,598 | py | Python | SpiralSpline/SpiralSpline.py | sterlingcrispin/Fusion360API | 5ef8d2ac9fce5476f8c7501aa213c16f54fd481a | [
"Unlicense"
] | 12 | 2017-08-29T12:41:08.000Z | 2022-03-18T13:19:59.000Z | SpiralSpline/SpiralSpline.py | sterlingcrispin/Fusion360API | 5ef8d2ac9fce5476f8c7501aa213c16f54fd481a | [
"Unlicense"
] | null | null | null | SpiralSpline/SpiralSpline.py | sterlingcrispin/Fusion360API | 5ef8d2ac9fce5476f8c7501aa213c16f54fd481a | [
"Unlicense"
] | 5 | 2019-04-10T09:22:28.000Z | 2022-02-15T12:54:39.000Z | #Author-Sterling Crispin
#Description-directly adapted from http://help.autodesk.com/view/fusion360/ENU/?guid=GUID-c3d4a306-fade-11e4-8e56-3417ebd3d5be
import adsk.core, adsk.fusion, traceback
import math | 35.511111 | 126 | 0.59637 | #Author-Sterling Crispin
#Description-directly adapted from http://help.autodesk.com/view/fusion360/ENU/?guid=GUID-c3d4a306-fade-11e4-8e56-3417ebd3d5be
import adsk.core, adsk.fusion, traceback
import math
def run(context):
ui = None
try:
app = adsk.core.Application.get()
ui = app.userInterface
doc = app.documents.add(adsk.core.DocumentTypes.FusionDesignDocumentType)
design = app.activeProduct
# Get the root component of the active design.
rootComp = design.rootComponent
# Create a new sketch on the xy plane.
sketch = rootComp.sketches.add(rootComp.xYConstructionPlane)
# Create an object collection for the points.
points = adsk.core.ObjectCollection.create()
# Define the points the spline with fit through.
for j in range(10):
for i in range(10):
# from 0 to TWOPI radians as i increases
p = (i/9) * math.pi * 2
# scaled in intensity by each spline
p = p * (j/9)
# so the spline aren't ontop of one another
xstep = j * 2
points.add(adsk.core.Point3D.create( math.cos(p) + xstep , math.sin(p) , i ))
# Create a spline along those points
spline = sketch.sketchCurves.sketchFittedSplines.add(points)
            # start a fresh collection for the next spline's points
points = adsk.core.ObjectCollection.create()
except:
if ui:
ui.messageBox('Failed:\n{}'.format(traceback.format_exc())) | 1,369 | 0 | 23 |
8b7d537952b1acd31cd5604bc842bcbe32faaea9 | 1,240 | py | Python | Misc_TestCode/problem_set_1-3.py | osamadel/Python | 6c7e26a96d4b8f875755de98f16eba89e81d94d2 | [
"MIT"
] | null | null | null | Misc_TestCode/problem_set_1-3.py | osamadel/Python | 6c7e26a96d4b8f875755de98f16eba89e81d94d2 | [
"MIT"
] | null | null | null | Misc_TestCode/problem_set_1-3.py | osamadel/Python | 6c7e26a96d4b8f875755de98f16eba89e81d94d2 | [
"MIT"
] | null | null | null | """
Description :
A program to calculate the credit card balance after one year if a person only pays the minimum monthly payment
required by the credit card company each month.
balance - the outstanding balance on the credit card
annualInterestRate - annual interest rate as a decimal
monthlyPaymentRate - minimum monthly payment rate as a decimal
Monthly interest rate= (Annual interest rate) / 12.0
Monthly unpaid balance = (Previous balance) - (Minimum monthly payment)
Updated balance each month = (Monthly unpaid balance) + (Monthly interest rate x Monthly unpaid balance)
"""
balance = 320000
annualInterestRate = 0.2
monthly_interest_rate = annualInterestRate/12
lower_fixed = balance/12
upper_fixed = balance * (1 + monthly_interest_rate)**12 / 12.0
fixed = 0
unpaid_balance = 0
balance_copy = balance
while True:
balance_copy = balance
fixed = (lower_fixed+upper_fixed)/2
for i in range(12):
# min_monthly_payment = monthlyPaymentRate * balance
unpaid_balance = balance_copy - fixed
balance_copy = unpaid_balance + monthly_interest_rate * unpaid_balance
if balance_copy > 0.01: lower_fixed = fixed
elif balance_copy < 0: upper_fixed = fixed
else: break
print round(fixed,2) | 32.631579 | 111 | 0.756452 | """
Description :
A program to calculate the credit card balance after one year if a person only pays the minimum monthly payment
required by the credit card company each month.
balance - the outstanding balance on the credit card
annualInterestRate - annual interest rate as a decimal
monthlyPaymentRate - minimum monthly payment rate as a decimal
Monthly interest rate= (Annual interest rate) / 12.0
Monthly unpaid balance = (Previous balance) - (Minimum monthly payment)
Updated balance each month = (Monthly unpaid balance) + (Monthly interest rate x Monthly unpaid balance)
"""
balance = 320000
annualInterestRate = 0.2
monthly_interest_rate = annualInterestRate/12
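# Bisection bounds: the fixed monthly payment lies between balance/12 (no
# interest at all) and the fully compounded balance spread over 12 months.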
lower_fixed = balance/12
upper_fixed = balance * (1 + monthly_interest_rate)**12 / 12.0
fixed = 0
unpaid_balance = 0
balance_copy = balance
while True:
balance_copy = balance
fixed = (lower_fixed+upper_fixed)/2
for i in range(12):
# min_monthly_payment = monthlyPaymentRate * balance
unpaid_balance = balance_copy - fixed
balance_copy = unpaid_balance + monthly_interest_rate * unpaid_balance
if balance_copy > 0.01: lower_fixed = fixed
elif balance_copy < 0: upper_fixed = fixed
else: break
print round(fixed,2) | 0 | 0 | 0 |
6579848b1593a2da0e2c319da40528c3a7235254 | 2,720 | py | Python | backend/wod_board/tests/crud/test_movement.py | GuillaumeOj/P13-WOD-Board | 36df7979e63c354507edb56eabdfc548b1964d08 | [
"MIT"
] | null | null | null | backend/wod_board/tests/crud/test_movement.py | GuillaumeOj/P13-WOD-Board | 36df7979e63c354507edb56eabdfc548b1964d08 | [
"MIT"
] | 82 | 2021-01-17T18:12:23.000Z | 2021-06-12T21:46:49.000Z | backend/wod_board/tests/crud/test_movement.py | GuillaumeOj/WodBoard | 1ac12404f6094909c9bf116bcaf6ccd60e85bc00 | [
"MIT"
] | null | null | null | import pytest
from wod_board import exceptions
from wod_board.crud import movement_crud
from wod_board.models import movement
from wod_board.models import unit
from wod_board.schemas import movement_schemas
| 33.170732 | 75 | 0.745221 | import pytest
from wod_board import exceptions
from wod_board.crud import movement_crud
from wod_board.models import movement
from wod_board.models import unit
from wod_board.schemas import movement_schemas
def test_create_movement(db, db_unit):
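    # Movement names must be unique: a second create raises DuplicatedMovement.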
assert db.query(movement.Movement).count() == 0
devil_press = movement_schemas.MovementCreate(
name="Devil Press", unit_id=db_unit.id
)
assert movement_crud.create_movement(db, devil_press)
assert db.query(movement.Movement).count() == 1
with pytest.raises(exceptions.DuplicatedMovement):
movement_crud.create_movement(db, devil_press)
assert db.query(movement.Movement).count() == 1
burpees = movement_schemas.MovementCreate(name="Burpees", unit_id=2)
with pytest.raises(exceptions.UnknownUnit):
movement_crud.create_movement(db, burpees)
def test_get_movement_by_id(db, db_movement):
with pytest.raises(exceptions.UnknownMovement):
movement_crud.get_movement_by_id(db, 2)
wanted_movement = movement_crud.get_movement_by_id(db, db_movement.id)
assert wanted_movement.id == db_movement.id
def test_get_movement_by_name(db, db_movement):
devil_press = movement_crud.get_movement_by_name(db, db_movement.name)
    assert devil_press.name == db_movement.name
with pytest.raises(exceptions.UnknownMovement):
movement_crud.get_movement_by_name(db, "Burpee")
with pytest.raises(exceptions.UnknownMovement):
movement_crud.get_movement_by_name(db, db_movement.name.lower())
def test_get_or_create_movement(db):
unit_unit = unit.Unit(name="Unit", symbol="u")
db.add(unit_unit)
db.commit()
db.refresh(unit_unit)
assert db.query(movement.Movement).count() == 0
devil_press = movement_schemas.MovementCreate(
name="Devil Press", unit_id=unit_unit.id
)
assert movement_crud.get_or_create_movement(db, devil_press)
assert movement_crud.get_or_create_movement(db, devil_press)
assert db.query(movement.Movement).count() == 1
def test_get_movements_by_name(db, db_unit):
devil_press = movement.Movement(name="Devil Press", unit_id=db_unit.id)
push_press = movement.Movement(name="Push Press", unit_id=db_unit.id)
db.add_all([devil_press, push_press])
db.commit()
db.refresh(devil_press)
db.refresh(push_press)
movements = movement_crud.get_movements_by_name(db, "pres")
assert len(movements) == 2
movements = movement_crud.get_movements_by_name(db, "push pres")
assert len(movements) == 1
assert hasattr(movements[0], "name")
assert movements[0].name == push_press.name
movements = movement_crud.get_movements_by_name(db, "Burpee")
assert movements == []
| 2,392 | 0 | 115 |
0b23c79247e242a9c10909666bbd15979acc980b | 7,058 | py | Python | indicators.py | fwd1990man/PHPCodeScanner | d3601820da465513944ff20650558b862f9ccde1 | [
"MIT"
] | 2 | 2021-05-19T00:09:22.000Z | 2021-05-19T00:09:24.000Z | indicators.py | fwd1990man/PHPCodeScanner | d3601820da465513944ff20650558b862f9ccde1 | [
"MIT"
] | null | null | null | indicators.py | fwd1990man/PHPCodeScanner | d3601820da465513944ff20650558b862f9ccde1 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# /!\ Detection Format (.*)function($vuln)(.*) matched by payload[0]+regex_indicators
regex_indicators = '\\((.*?)(\\$_GET\\[.*?\\]|\\$_FILES\\[.*?\\]|\\$_POST\\[.*?\\]|\\$_REQUEST\\[.*?\\]|\\$_COOKIES\\[.*?\\]|\\$_SESSION\\[.*?\\]|\\$(?!this|e-)[a-zA-Z0-9_,]*)(.*?)\\)'
# Function_Name:String, Vulnerability_Name:String, Protection_Function:Array
payloads = [
# Remote Command Execution
["eval", "Remote Command Execution", ["escapeshellarg", "escapeshellcmd"]],
["popen", "Remote Command Execution", ["escapeshellarg", "escapeshellcmd"]],
["system", "Remote Command Execution", ["escapeshellarg", "escapeshellcmd"]],
["passthru", "Remote Command Execution", ["escapeshellarg", "escapeshellcmd"]],
["exec", "Remote Command Execution", ["escapeshellarg", "escapeshellcmd"]],
["shell_exec", "Remote Command Execution", ["escapeshellarg", "escapeshellcmd"]],
["pcntl_exec", "Remote Command Execution", ["escapeshellarg", "escapeshellcmd"]],
["assert", "Remote Command Execution", ["escapeshellarg", "escapeshellcmd"]],
["proc_open", "Remote Command Execution", ["escapeshellarg", "escapeshellcmd"]],
["expect_popen", "Remote Command Execution", ["escapeshellarg", "escapeshellcmd"]],
["create_function", "Remote Command Execution", ["escapeshellarg", "escapeshellcmd"]],
["call_user_func", "Remote Code Execution", []],
["call_user_func_array", "Remote Code Execution", []],
["preg_replace", "Remote Command Execution", ["preg_quote"]],
["ereg_replace", "Remote Command Execution", ["preg_quote"]],
["eregi_replace", "Remote Command Execution", ["preg_quote"]],
["mb_ereg_replace", "Remote Command Execution", ["preg_quote"]],
["mb_eregi_replace", "Remote Command Execution", ["preg_quote"]],
# File Inclusion / Path Traversal
["virtual", "File Inclusion", []],
["include", "File Inclusion", []],
["require", "File Inclusion", []],
["include_once", "File Inclusion", []],
["require_once", "File Inclusion", []],
["readfile", "File Inclusion / Path Traversal", []],
["file_get_contents", "File Inclusion / Path Traversal", []],
["stream_get_contents", "File Inclusion / Path Traversal", []],
["show_source", "File Inclusion / Path Traversal", []],
["fopen", "File Inclusion / Path Traversal", []],
["file", "File Inclusion / Path Traversal", []],
["fpassthru", "File Inclusion / Path Traversal", []],
["gzopen", "File Inclusion / Path Traversal", []],
["gzfile", "File Inclusion / Path Traversal", []],
["gzpassthru", "File Inclusion / Path Traversal", []],
["readgzfile", "File Inclusion / Path Traversal", []],
# MySQL(i) SQL Injection
["mysql_query", "SQL Injection", ["mysql_real_escape_string"]],
["mysqli_multi_query", "SQL Injection", ["mysql_real_escape_string"]],
["mysqli_send_query", "SQL Injection", ["mysql_real_escape_string"]],
["mysqli_master_query", "SQL Injection", ["mysql_real_escape_string"]],
["mysqli_master_query", "SQL Injection", ["mysql_real_escape_string"]],
["mysql_unbuffered_query", "SQL Injection", ["mysql_real_escape_string"]],
["mysql_db_query", "SQL Injection", ["mysql_real_escape_string"]],
["mysqli::real_query", "SQL Injection", ["mysql_real_escape_string"]],
["mysqli_real_query", "SQL Injection", ["mysql_real_escape_string"]],
["mysqli::query", "SQL Injection", ["mysql_real_escape_string"]],
["mysqli_query", "SQL Injection", ["mysql_real_escape_string"]],
# PostgreSQL Injection
["pg_query", "SQL Injection", ["pg_escape_string", "pg_pconnect", "pg_connect"]],
["pg_send_query", "SQL Injection", ["pg_escape_string", "pg_pconnect", "pg_connect"]],
# SQLite SQL Injection
["sqlite_array_query", "SQL Injection", ["sqlite_escape_string"]],
["sqlite_exec", "SQL Injection", ["sqlite_escape_string"]],
["sqlite_query", "SQL Injection", ["sqlite_escape_string"]],
["sqlite_single_query", "SQL Injection", ["sqlite_escape_string"]],
["sqlite_unbuffered_query", "SQL Injection", ["sqlite_escape_string"]],
# PDO SQL Injection
["->arrayQuery", "SQL Injection", ["->prepare"]],
["->query", "SQL Injection", ["->prepare"]],
["->queryExec", "SQL Injection", ["->prepare"]],
["->singleQuery", "SQL Injection", ["->prepare"]],
["->querySingle", "SQL Injection", ["->prepare"]],
["->exec", "SQL Injection", ["->prepare"]],
["->execute", "SQL Injection", ["->prepare"]],
["->unbufferedQuery", "SQL Injection", ["->prepare"]],
["->real_query", "SQL Injection", ["->prepare"]],
["->multi_query", "SQL Injection", ["->prepare"]],
["->send_query", "SQL Injection", ["->prepare"]],
# Cubrid SQL Injection
["cubrid_unbuffered_query", "SQL Injection", ["cubrid_real_escape_string"]],
["cubrid_query", "SQL Injection", ["cubrid_real_escape_string"]],
# MSSQL SQL Injection : Warning there is not any real_escape_string
["mssql_query", "SQL Injection", ["mssql_escape"]],
# File Upload
["move_uploaded_file", "File Upload", []],
# Cross Site Scripting
["echo", "Cross Site Scripting", ["htmlentities", "htmlspecialchars"]],
["print", "Cross Site Scripting", ["htmlentities", "htmlspecialchars"]],
["printf", "Cross Site Scripting", ["htmlentities", "htmlspecialchars"]],
["vprintf", "Cross Site Scripting", ["htmlentities", "htmlspecialchars"]],
["trigger_error", "Cross Site Scripting", ["htmlentities", "htmlspecialchars"]],
["user_error", "Cross Site Scripting", ["htmlentities", "htmlspecialchars"]],
["odbc_result_all", "Cross Site Scripting", ["htmlentities", "htmlspecialchars"]],
["ifx_htmltbl_result", "Cross Site Scripting", ["htmlentities", "htmlspecialchars"]],
["die", "Cross Site Scripting", ["htmlentities", "htmlspecialchars"]],
["exit", "Cross Site Scripting", ["htmlentities", "htmlspecialchars"]],
# XPATH and LDAP
["xpath", "XPATH Injection", []],
["ldap_search", "LDAP Injection", ["Zend_Ldap", "ldap_escape"]],
# Insecure E-Mail
["mail", "Insecure E-mail", []],
# PHP Objet Injection
["unserialize", "PHP Object Injection", []],
# Header Injection
["header", "Header Injection", []],
["HttpMessage::setHeaders", "Header Injection", []],
["HttpRequest::setHeaders", "Header Injection", []],
# URL Redirection
["http_redirect", "URL Redirection", []],
["HttpMessage::setResponseCode", "URL Redirection", []],
# Server Side Template Injection
["->render", "Server Side Template Injection", []],
["->assign", "Server Side Template Injection", []],
# Weak Cryptographic Hash
["md5", "Weak Cryptographic Hash", []],
# Insecure Weak Random
["mt_rand", "Insecure Weak Random", []],
["srand", "Insecure Weak Random", []],
["uniqid", "Insecure Weak Random", []],
# Information Leak
["phpinfo", "Information Leak", []],
["show_source", "Information Leak", []],
["highlight_file", "Information Leak", []],
]
| 48.342466 | 184 | 0.638424 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# /!\ Detection format: payloads[i][0] + regex_indicators, i.e. function($vuln ...)
regex_indicators = '\\((.*?)(\\$_GET\\[.*?\\]|\\$_FILES\\[.*?\\]|\\$_POST\\[.*?\\]|\\$_REQUEST\\[.*?\\]|\\$_COOKIES\\[.*?\\]|\\$_SESSION\\[.*?\\]|\\$(?!this|e-)[a-zA-Z0-9_,]*)(.*?)\\)'
# Function_Name:String, Vulnerability_Name:String, Protection_Function:Array
payloads = [
# Remote Command Execution
["eval", "Remote Command Execution", ["escapeshellarg", "escapeshellcmd"]],
["popen", "Remote Command Execution", ["escapeshellarg", "escapeshellcmd"]],
["system", "Remote Command Execution", ["escapeshellarg", "escapeshellcmd"]],
["passthru", "Remote Command Execution", ["escapeshellarg", "escapeshellcmd"]],
["exec", "Remote Command Execution", ["escapeshellarg", "escapeshellcmd"]],
["shell_exec", "Remote Command Execution", ["escapeshellarg", "escapeshellcmd"]],
["pcntl_exec", "Remote Command Execution", ["escapeshellarg", "escapeshellcmd"]],
["assert", "Remote Command Execution", ["escapeshellarg", "escapeshellcmd"]],
["proc_open", "Remote Command Execution", ["escapeshellarg", "escapeshellcmd"]],
["expect_popen", "Remote Command Execution", ["escapeshellarg", "escapeshellcmd"]],
["create_function", "Remote Command Execution", ["escapeshellarg", "escapeshellcmd"]],
["call_user_func", "Remote Code Execution", []],
["call_user_func_array", "Remote Code Execution", []],
["preg_replace", "Remote Command Execution", ["preg_quote"]],
["ereg_replace", "Remote Command Execution", ["preg_quote"]],
["eregi_replace", "Remote Command Execution", ["preg_quote"]],
["mb_ereg_replace", "Remote Command Execution", ["preg_quote"]],
["mb_eregi_replace", "Remote Command Execution", ["preg_quote"]],
# File Inclusion / Path Traversal
["virtual", "File Inclusion", []],
["include", "File Inclusion", []],
["require", "File Inclusion", []],
["include_once", "File Inclusion", []],
["require_once", "File Inclusion", []],
["readfile", "File Inclusion / Path Traversal", []],
["file_get_contents", "File Inclusion / Path Traversal", []],
["stream_get_contents", "File Inclusion / Path Traversal", []],
["show_source", "File Inclusion / Path Traversal", []],
["fopen", "File Inclusion / Path Traversal", []],
["file", "File Inclusion / Path Traversal", []],
["fpassthru", "File Inclusion / Path Traversal", []],
["gzopen", "File Inclusion / Path Traversal", []],
["gzfile", "File Inclusion / Path Traversal", []],
["gzpassthru", "File Inclusion / Path Traversal", []],
["readgzfile", "File Inclusion / Path Traversal", []],
# MySQL(i) SQL Injection
["mysql_query", "SQL Injection", ["mysql_real_escape_string"]],
["mysqli_multi_query", "SQL Injection", ["mysql_real_escape_string"]],
["mysqli_send_query", "SQL Injection", ["mysql_real_escape_string"]],
["mysqli_master_query", "SQL Injection", ["mysql_real_escape_string"]],
["mysqli_master_query", "SQL Injection", ["mysql_real_escape_string"]],
["mysql_unbuffered_query", "SQL Injection", ["mysql_real_escape_string"]],
["mysql_db_query", "SQL Injection", ["mysql_real_escape_string"]],
["mysqli::real_query", "SQL Injection", ["mysql_real_escape_string"]],
["mysqli_real_query", "SQL Injection", ["mysql_real_escape_string"]],
["mysqli::query", "SQL Injection", ["mysql_real_escape_string"]],
["mysqli_query", "SQL Injection", ["mysql_real_escape_string"]],
# PostgreSQL Injection
["pg_query", "SQL Injection", ["pg_escape_string", "pg_pconnect", "pg_connect"]],
["pg_send_query", "SQL Injection", ["pg_escape_string", "pg_pconnect", "pg_connect"]],
# SQLite SQL Injection
["sqlite_array_query", "SQL Injection", ["sqlite_escape_string"]],
["sqlite_exec", "SQL Injection", ["sqlite_escape_string"]],
["sqlite_query", "SQL Injection", ["sqlite_escape_string"]],
["sqlite_single_query", "SQL Injection", ["sqlite_escape_string"]],
["sqlite_unbuffered_query", "SQL Injection", ["sqlite_escape_string"]],
# PDO SQL Injection
["->arrayQuery", "SQL Injection", ["->prepare"]],
["->query", "SQL Injection", ["->prepare"]],
["->queryExec", "SQL Injection", ["->prepare"]],
["->singleQuery", "SQL Injection", ["->prepare"]],
["->querySingle", "SQL Injection", ["->prepare"]],
["->exec", "SQL Injection", ["->prepare"]],
["->execute", "SQL Injection", ["->prepare"]],
["->unbufferedQuery", "SQL Injection", ["->prepare"]],
["->real_query", "SQL Injection", ["->prepare"]],
["->multi_query", "SQL Injection", ["->prepare"]],
["->send_query", "SQL Injection", ["->prepare"]],
# Cubrid SQL Injection
["cubrid_unbuffered_query", "SQL Injection", ["cubrid_real_escape_string"]],
["cubrid_query", "SQL Injection", ["cubrid_real_escape_string"]],
# MSSQL SQL Injection : Warning there is not any real_escape_string
["mssql_query", "SQL Injection", ["mssql_escape"]],
# File Upload
["move_uploaded_file", "File Upload", []],
# Cross Site Scripting
["echo", "Cross Site Scripting", ["htmlentities", "htmlspecialchars"]],
["print", "Cross Site Scripting", ["htmlentities", "htmlspecialchars"]],
["printf", "Cross Site Scripting", ["htmlentities", "htmlspecialchars"]],
["vprintf", "Cross Site Scripting", ["htmlentities", "htmlspecialchars"]],
["trigger_error", "Cross Site Scripting", ["htmlentities", "htmlspecialchars"]],
["user_error", "Cross Site Scripting", ["htmlentities", "htmlspecialchars"]],
["odbc_result_all", "Cross Site Scripting", ["htmlentities", "htmlspecialchars"]],
["ifx_htmltbl_result", "Cross Site Scripting", ["htmlentities", "htmlspecialchars"]],
["die", "Cross Site Scripting", ["htmlentities", "htmlspecialchars"]],
["exit", "Cross Site Scripting", ["htmlentities", "htmlspecialchars"]],
# XPATH and LDAP
["xpath", "XPATH Injection", []],
["ldap_search", "LDAP Injection", ["Zend_Ldap", "ldap_escape"]],
# Insecure E-Mail
["mail", "Insecure E-mail", []],
# PHP Objet Injection
["unserialize", "PHP Object Injection", []],
# Header Injection
["header", "Header Injection", []],
["HttpMessage::setHeaders", "Header Injection", []],
["HttpRequest::setHeaders", "Header Injection", []],
# URL Redirection
["http_redirect", "URL Redirection", []],
["HttpMessage::setResponseCode", "URL Redirection", []],
# Server Side Template Injection
["->render", "Server Side Template Injection", []],
["->assign", "Server Side Template Injection", []],
# Weak Cryptographic Hash
["md5", "Weak Cryptographic Hash", []],
# Insecure Weak Random
["mt_rand", "Insecure Weak Random", []],
["srand", "Insecure Weak Random", []],
["uniqid", "Insecure Weak Random", []],
# Information Leak
["phpinfo", "Information Leak", []],
["show_source", "Information Leak", []],
["highlight_file", "Information Leak", []],
]
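

# --- Illustrative usage: a minimal sketch, not part of the scanner itself ---
# Each payload's function name (payloads[i][0]) is concatenated with
# regex_indicators; a hit is discarded when one of the payload's protection
# functions also appears on the line.
import re

def scan_line(line):
    findings = []
    for func, vuln, protections in payloads:
        match = re.search(re.escape(func) + regex_indicators, line)
        if match and not any(p in line for p in protections):
            findings.append((vuln, func, match.group(0)))
    return findings

# Example: scan_line("system($_GET['cmd']);") reports a Remote Command Execution.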
| 0 | 0 | 0 |
76131c72b8636284a0712af7817874865d24b1ea | 3,734 | py | Python | src/polyswarm/client/engine.py | polyswarm/polyswarm-cli | f783b77180a7436bc993171b46691a223f175260 | [
"MIT"
] | 2 | 2021-04-14T01:42:48.000Z | 2022-03-12T16:20:23.000Z | src/polyswarm/client/engine.py | polyswarm/polyswarm-cli | f783b77180a7436bc993171b46691a223f175260 | [
"MIT"
] | 11 | 2019-10-22T23:23:27.000Z | 2021-06-07T21:40:10.000Z | src/polyswarm/client/engine.py | polyswarm/polyswarm-cli | f783b77180a7436bc993171b46691a223f175260 | [
"MIT"
] | 1 | 2021-04-26T10:58:01.000Z | 2021-04-26T10:58:01.000Z | from __future__ import absolute_import
import logging
import click
logger = logging.getLogger(__name__)
@click.group(short_help="Interact with engines.")
@engine.group(short_help="Interact with engine's votes.")
@engine.group(short_help="Interact with engine's assertions.")
@assertions.command('create', short_help='Create a new bundle with the consolidated assertions data.')
@click.argument('engine-id', type=click.STRING)
@click.argument('date-start', type=click.STRING)
@click.argument('date-end', type=click.STRING)
@click.pass_context
def assertions_create(ctx, engine_id, date_start, date_end):
"""
Create a new bundle with the consolidated assertions data for the provided
period of time.
"""
api = ctx.obj['api']
output = ctx.obj['output']
result = api.assertions_create(engine_id, date_start, date_end)
output.assertions(result)
@assertions.command('get', short_help='Get an assertions bundle.')
@click.argument('assertions-job-id', type=click.INT)
@click.pass_context
def assertions_get(ctx, assertions_job_id):
"""
Get the assertions bundle for the given bundle id.
"""
api = ctx.obj['api']
output = ctx.obj['output']
result = api.assertions_get(assertions_job_id)
output.assertions(result)
@assertions.command('delete', short_help='Delete an assertions bundle.')
@click.argument('assertions-job-id', type=click.INT)
@click.pass_context
def assertions_delete(ctx, assertions_job_id):
"""
Delete the assertions bundle for the given bundle id.
"""
api = ctx.obj['api']
output = ctx.obj['output']
result = api.assertions_delete(assertions_job_id)
output.assertions(result)
@assertions.command('list', short_help='List all assertions bundles for the given engine.')
@click.argument('engine-id', type=click.STRING)
@click.pass_context
@votes.command('create', short_help='Create a new bundle with the consolidated votes data.')
@click.argument('engine-id', type=click.STRING)
@click.argument('date-start', type=click.STRING)
@click.argument('date-end', type=click.STRING)
@click.pass_context
def votes_create(ctx, engine_id, date_start, date_end):
"""
Create a new bundle with the consolidated votes data for the provided
period of time.
"""
api = ctx.obj['api']
output = ctx.obj['output']
result = api.votes_create(engine_id, date_start, date_end)
output.votes(result)
@votes.command('get', short_help='Get a votes bundle.')
@click.argument('votes-job-id', type=click.INT)
@click.pass_context
def votes_get(ctx, votes_job_id):
"""
Get the votes bundle for the given bundle id.
"""
api = ctx.obj['api']
output = ctx.obj['output']
result = api.votes_get(votes_job_id)
output.votes(result)
@votes.command('delete', short_help='Delete a votes bundle.')
@click.argument('votes-job-id', type=click.INT)
@click.pass_context
def votes_delete(ctx, votes_job_id):
"""
Delete the votes bundle for the given bundle id.
"""
api = ctx.obj['api']
output = ctx.obj['output']
result = api.votes_delete(votes_job_id)
output.votes(result)
@votes.command('list', short_help='List all votes bundles for the given engine.')
@click.argument('engine-id', type=click.STRING)
@click.pass_context
| 29.171875 | 102 | 0.71023 | from __future__ import absolute_import
import logging
import click
logger = logging.getLogger(__name__)
@click.group(short_help="Interact with engines.")
def engine():
pass
@engine.group(short_help="Interact with engine's votes.")
def votes():
pass
@engine.group(short_help="Interact with engine's assertions.")
def assertions():
pass
@assertions.command('create', short_help='Create a new bundle with the consolidated assertions data.')
@click.argument('engine-id', type=click.STRING)
@click.argument('date-start', type=click.STRING)
@click.argument('date-end', type=click.STRING)
@click.pass_context
def assertions_create(ctx, engine_id, date_start, date_end):
"""
Create a new bundle with the consolidated assertions data for the provided
period of time.
"""
api = ctx.obj['api']
output = ctx.obj['output']
result = api.assertions_create(engine_id, date_start, date_end)
output.assertions(result)
@assertions.command('get', short_help='Get an assertions bundle.')
@click.argument('assertions-job-id', type=click.INT)
@click.pass_context
def assertions_get(ctx, assertions_job_id):
"""
Get the assertions bundle for the given bundle id.
"""
api = ctx.obj['api']
output = ctx.obj['output']
result = api.assertions_get(assertions_job_id)
output.assertions(result)
@assertions.command('delete', short_help='Delete an assertions bundle.')
@click.argument('assertions-job-id', type=click.INT)
@click.pass_context
def assertions_delete(ctx, assertions_job_id):
"""
Delete the assertions bundle for the given bundle id.
"""
api = ctx.obj['api']
output = ctx.obj['output']
result = api.assertions_delete(assertions_job_id)
output.assertions(result)
@assertions.command('list', short_help='List all assertions bundles for the given engine.')
@click.argument('engine-id', type=click.STRING)
@click.pass_context
def assertions_list(ctx, engine_id):
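    """List all assertions bundles for the given engine."""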
api = ctx.obj['api']
output = ctx.obj['output']
results = api.assertions_list(engine_id)
for result in results:
output.assertions(result)
@votes.command('create', short_help='Create a new bundle with the consolidated votes data.')
@click.argument('engine-id', type=click.STRING)
@click.argument('date-start', type=click.STRING)
@click.argument('date-end', type=click.STRING)
@click.pass_context
def votes_create(ctx, engine_id, date_start, date_end):
"""
Create a new bundle with the consolidated votes data for the provided
period of time.
"""
api = ctx.obj['api']
output = ctx.obj['output']
result = api.votes_create(engine_id, date_start, date_end)
output.votes(result)
@votes.command('get', short_help='Get a votes bundle.')
@click.argument('votes-job-id', type=click.INT)
@click.pass_context
def votes_get(ctx, votes_job_id):
"""
Get the votes bundle for the given bundle id.
"""
api = ctx.obj['api']
output = ctx.obj['output']
result = api.votes_get(votes_job_id)
output.votes(result)
@votes.command('delete', short_help='Delete a votes bundle.')
@click.argument('votes-job-id', type=click.INT)
@click.pass_context
def votes_delete(ctx, votes_job_id):
"""
Delete the votes bundle for the given bundle id.
"""
api = ctx.obj['api']
output = ctx.obj['output']
result = api.votes_delete(votes_job_id)
output.votes(result)
@votes.command('list', short_help='List all votes bundles for the given engine.')
@click.argument('engine-id', type=click.STRING)
@click.pass_context
def votes_list(ctx, engine_id):
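    """List all votes bundles for the given engine."""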
api = ctx.obj['api']
output = ctx.obj['output']
results = api.votes_list(engine_id)
for result in results:
output.votes(result)
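

# Example invocations, assuming the package exposes these groups through a
# "polyswarm" console entry point (adjust to the actual CLI name):
#   polyswarm engine assertions create <engine-id> 2020-01-01 2020-01-31
#   polyswarm engine votes list <engine-id>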
| 345 | 0 | 110 |
540ebc7352e4bff5af37415803694be42d874d61 | 8,339 | py | Python | rapp_testing_tools/src/rapp_testing_tools/rapp_testing_core.py | DEVX1/NAOrapp-Pythonlib | d07d7fe304556cad24e7e138df4e41376eacb6a7 | [
"Apache-2.0"
] | null | null | null | rapp_testing_tools/src/rapp_testing_tools/rapp_testing_core.py | DEVX1/NAOrapp-Pythonlib | d07d7fe304556cad24e7e138df4e41376eacb6a7 | [
"Apache-2.0"
] | null | null | null | rapp_testing_tools/src/rapp_testing_tools/rapp_testing_core.py | DEVX1/NAOrapp-Pythonlib | d07d7fe304556cad24e7e138df4e41376eacb6a7 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#Copyright 2015 RAPP
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
# Authors: Konstantinos Panayiotou, Manos Tsardoulias
# contact: klpanagi@gmail.com, etsardou@iti.gr
import sys
import os
import time
import argparse
from os import listdir
from os.path import isfile, join
import importlib
from threading import Thread, Lock
# import roslib
import rospkg
import rospy
import yaml
import subprocess
__path__ = os.path.dirname(os.path.realpath(__file__))
# Mutex lock used when threaded.
mutex = Lock()
## --------- Test Classess ---------- ##
testClasses = [
'face-detection',
'qr-detection',
'speech-detection',
'speech-detection-sphinx4',
'speech-detection-google',
'ontology',
'cognitive',
'tts'
]
## --------------------------------- ##
testClassMatch = {
'face-detection' : 'face',
'qr-detection' : 'qr',
'speech-detection' : 'speech',
'speech-detection-sphinx4' : 'sphinx4',
'speech-detection-google' : 'google',
'ontology' : 'ontology',
'cognitive': 'cognitive',
'tts': 'text_to_speech'
}
results = {
'success' : [],
'failed' : [],
'num_tests': 0
}
## ------------- Console colors -------------- ##
## ------------------------------------------ ##
##
# @brief Parse input arguments.
##
##
# @brief Parse and get all given tests path directories, plus the default
# ones.
#
# @return Array of tests paths.
##
##
# @brief Append directory paths, given as input into the global system path.
# This is usefull in order to load test files under those directories.
##
##
# @brief Parse input paths and export found test files.
#
# @param args Arguments.
# @param paths Path directories to look for test files.
#
##
# @brief Load and execute input given tests.
#
# @param tests List of tests to execute.
# @param numCalls Number of executions.
# @param threaded If true the execution is handled by threads.
#
##
##
# @brief Main.
##
| 29.996403 | 82 | 0.58832 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#Copyright 2015 RAPP
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
# Authors: Konstantinos Panayiotou, Manos Tsardoulias
# contact: klpanagi@gmail.com, etsardou@iti.gr
import sys
import os
import time
import argparse
from os import listdir
from os.path import isfile, join
import importlib
from threading import Thread, Lock
# import roslib
import rospkg
import rospy
import yaml
import subprocess
__path__ = os.path.dirname(os.path.realpath(__file__))
# Mutex lock used when threaded.
mutex = Lock()
## --------- Test Classess ---------- ##
testClasses = [
'face-detection',
'qr-detection',
'speech-detection',
'speech-detection-sphinx4',
'speech-detection-google',
'ontology',
'cognitive',
'tts'
]
## --------------------------------- ##
testClassMatch = {
'face-detection' : 'face',
'qr-detection' : 'qr',
'speech-detection' : 'speech',
'speech-detection-sphinx4' : 'sphinx4',
'speech-detection-google' : 'google',
'ontology' : 'ontology',
'cognitive': 'cognitive',
'tts': 'text_to_speech'
}
results = {
'success' : [],
'failed' : [],
'num_tests': 0
}
## ------------- Console colors -------------- ##
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
YELLOW = '\033[93m'
## ------------------------------------------ ##
##
# @brief Parse input arguments.
##
def parse_args():
parser = argparse.ArgumentParser(description= \
'RAPP Platform front-end hop-service invocation tests')
parser.add_argument('-i','--name',\
help='Test file name to execute.',\
dest='fileName', action='store', nargs='+', type=str)
parser.add_argument('-n', '--num-calls', dest='numCalls', action='store', \
help='Number of times to run the test', type=int, default=1)
parser.add_argument('-t', '--threaded', dest='threaded',\
action='store_true', help='Enable threaded mode')
parser.add_argument('-c', '--class', dest='testClass', action='store', \
help='Tests class. "face-detection", "speech-detection"...', type=str)
args = parser.parse_args( ) # Parse console arguments
return args
##
# @brief Parse and get all given tests path directories, plus the default
# ones.
#
# @return Array of tests paths.
##
def load_tests_paths():
## This is the default directory where tests are stored
testPaths = [join(join(__pkgDir__, 'scripts'), 'default_tests')]
cfgFile = join(__pkgDir__, 'config/params.yaml')
try:
with open(cfgFile, 'r') as ymlFile:
cfg = yaml.safe_load(ymlFile)
except Exception as e:
print e
sys.exit(1)
if 'tests_path' in cfg:
extPaths = cfg['tests_path']
if extPaths is not None and len(extPaths) > 0:
for p in extPaths:
testPaths += [p] if os.path.isdir(p) else []
else:
pass
return testPaths
##
# @brief Append directory paths, given as input into the global system path.
# This is usefull in order to load test files under those directories.
##
def append_to_system_path(paths):
for p in paths:
sys.path.append(p)
##
# @brief Parse input paths and export found test files.
#
# @param args Arguments.
# @param paths Path directories to look for test files.
#
def get_test_files(args, paths):
tests = []
if args.testClass and args.testClass in testClasses:
for path in paths:
# Find all the tests corresponding to given test class
files = [ f for f in listdir(path) if isfile(join(path, f)) \
and testClassMatch[args.testClass] in f ]
elif args.fileName == None:
for path in paths:
# Find and run all the tests located under python_tests dirrectory
files = [ f for f in listdir(path) if isfile(join(path, f)) ]
else:
files = args.fileName # Input test files from arguments
for f in files:
print f
f = f.replace('default_tests/', '')
clean_file = f.split('.')
if len(clean_file) == 1:
pass
elif clean_file[1] == "pyc" or clean_file[0] == "template" or \
len(clean_file) > 2:
continue
tests.append(clean_file[0])
return tests
##
# @brief Load and execute input given tests.
#
# @param tests List of tests to execute.
# @param numCalls Number of executions.
# @param threaded If true the execution is handled by threads.
#
##
def execute_tests_all(tests, numCalls, threaded):
if threaded:
core = "Parallel"
threads = []
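        # NOTE: thread handles would be collected here, but the loop below
        # still executes every test serially through subprocess.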
else:
core = "Serial"
## ------------------------- Print Header -------------------------- ##
count = 1
print "\033[0;33m"
print "***************************"
print " RAPP Platfrom Tests "
print "***************************"
print bcolors.BOLD + bcolors.OKBLUE + bcolors.UNDERLINE
print "* Parameters:" + bcolors.ENDC
print "-- Number of Executions for each given test: [%s] " % numCalls
print "-- %s execution" % core
print bcolors.BOLD + bcolors.OKBLUE + bcolors.UNDERLINE
print "* Tests to Execute:" + bcolors.ENDC
for test in tests:
print "%s] %s x%s" % (count, test, numCalls)
count += 1
print '\n'
# print "\033[0;33m***************************\033[1;32m"
time.sleep(1)
## ---------------------------------------------------------------- ##
testPaths = [join(join(__pkgDir__, 'scripts'), 'default_tests')]
failed = []
# -- Loop throug test files to be executed -- #
for test in tests:
filename = testPaths[0] + '/' + test + '.py'
sys.stdout.write(bcolors.BOLD + bcolors.OKBLUE + \
"Running " + test + "... " + bcolors.ENDC)
sys.stdout.flush()
# res = os.system(filename)
p = subprocess.Popen([filename], \
stdout=subprocess.PIPE,\
stderr=subprocess.PIPE)
p.wait()
output = p.stderr.read()
outlines = output.split('\n');
report_test_line = ''
for l in outlines:
if 'Ran ' in l:
report_test_line = l
break
print '\n\t' + report_test_line
if 'FAILED' in output or 'Traceback' in output:
failed.append(test)
sys.stdout.write(\
'\t' + bcolors.BOLD + bcolors.FAIL + \
"Failed" + bcolors.ENDC + '\n')
print output
sys.stdout.flush()
else:
sys.stdout.write(\
'\t' + bcolors.BOLD + \
bcolors.OKGREEN + "Success" + bcolors.ENDC + '\n')
sys.stdout.flush()
return failed
##
# @brief Main.
##
def main():
global __pkgDir__
rospack = rospkg.RosPack()
# Load this package absolute path.
__pkgDir__ = rospack.get_path('rapp_testing_tools')
# Load default and user given external directory paths to test files.
testPaths = load_tests_paths()
# Append above loaded paths to system path variable.
append_to_system_path(testPaths)
# Parse input arguments
args = parse_args()
numCalls = args.numCalls
threaded = args.threaded
# Load test files to execute based on user input arguments
testFiles = get_test_files(args, testPaths)
# Execute loaded tests. Use input number-of-calls and threaded arguments.
failed = execute_tests_all(testFiles, numCalls, threaded)
if len(failed) != 0:
print "\nThe failed tests are:"
for t in failed:
print "\t" + bcolors.BOLD + bcolors.FAIL + t + bcolors.ENDC
sys.exit(1)
| 5,487 | 205 | 154 |
fd1c96698e6f7c7562f10de30edc13bc0b766cbf | 15,403 | py | Python | pymontecarlo_gui/options/beam/base.py | pymontecarlo/pymontecarlo-gui | 1b3c37d4b634a85c63f23d27ea8bd79bf5a43a2f | [
"Apache-2.0"
] | null | null | null | pymontecarlo_gui/options/beam/base.py | pymontecarlo/pymontecarlo-gui | 1b3c37d4b634a85c63f23d27ea8bd79bf5a43a2f | [
"Apache-2.0"
] | 2 | 2016-05-16T10:19:56.000Z | 2021-12-29T15:16:20.000Z | pymontecarlo_gui/options/beam/base.py | pymontecarlo/pymontecarlo-gui | 1b3c37d4b634a85c63f23d27ea8bd79bf5a43a2f | [
"Apache-2.0"
] | null | null | null | """"""
# Standard library modules.
import abc
from collections import namedtuple
import itertools
# Third party modules.
from qtpy import QtCore, QtGui, QtWidgets
import numpy as np
# Local modules.
from pymontecarlo.options.beam.base import BeamBase
from pymontecarlo.options.particle import Particle
from pymontecarlo.util.tolerance import tolerance_to_decimals
from pymontecarlo_gui.widgets.field import (
MultiValueFieldBase,
FieldBase,
WidgetFieldBase,
FieldChooser,
)
from pymontecarlo_gui.widgets.lineedit import (
ColoredMultiFloatLineEdit,
ColoredFloatLineEdit,
)
from pymontecarlo_gui.options.base import ToleranceMixin
# Globals and constants variables.
Position = namedtuple("Position", ("x_m", "y_m"))
| 28.898687 | 82 | 0.652925 | """"""
# Standard library modules.
import abc
from collections import namedtuple
import itertools
# Third party modules.
from qtpy import QtCore, QtGui, QtWidgets
import numpy as np
# Local modules.
from pymontecarlo.options.beam.base import BeamBase
from pymontecarlo.options.particle import Particle
from pymontecarlo.util.tolerance import tolerance_to_decimals
from pymontecarlo_gui.widgets.field import (
MultiValueFieldBase,
FieldBase,
WidgetFieldBase,
FieldChooser,
)
from pymontecarlo_gui.widgets.lineedit import (
ColoredMultiFloatLineEdit,
ColoredFloatLineEdit,
)
from pymontecarlo_gui.options.base import ToleranceMixin
# Globals and constants variables.
class EnergyField(MultiValueFieldBase):
def __init__(self):
super().__init__()
# Widgets
self._widget = ColoredMultiFloatLineEdit()
decimals = tolerance_to_decimals(BeamBase.ENERGY_TOLERANCE_eV) + 3
self._widget.setRange(0, 1000, decimals)
self._widget.setValues([20.0])
# Signals
self._widget.valuesChanged.connect(self.fieldChanged)
def title(self):
return "Energies [keV]"
def widget(self):
return self._widget
def energiesEV(self):
return np.array(self._widget.values()) * 1e3
def setEnergiesEV(self, energies_eV):
        # The line edit works in keV while the public API exchanges eV.
        energies_keV = np.array(energies_eV) / 1e3
        self._widget.setValues(energies_keV)
class ParticleField(FieldBase):
def __init__(self):
super().__init__()
# Widgets
self._widget = QtWidgets.QComboBox()
for particle in Particle:
self._widget.addItem(particle.name, particle)
index = self._widget.findData(Particle.ELECTRON)
self._widget.setCurrentIndex(index)
# Signals
self._widget.currentIndexChanged.connect(self.fieldChanged)
def title(self):
return "Particle"
def widget(self):
return self._widget
def particle(self):
return self._widget.currentData()
def setParticle(self, particle):
index = self._widget.findData(particle)
self._widget.setCurrentIndex(index)
Position = namedtuple("Position", ("x_m", "y_m"))
class CoordinateField(FieldBase, ToleranceMixin):
def __init__(self, title):
self._title = title + " [nm]"
super().__init__()
# Widgets
self._widget = ColoredFloatLineEdit()
self._widget.setValue(0.0)
# Signals
self._widget.valueChanged.connect(self.fieldChanged)
def title(self):
return self._title
def widget(self):
return self._widget
def setToleranceMeter(self, tolerance_m):
super().setToleranceMeter(tolerance_m)
decimals = tolerance_to_decimals(tolerance_m * 1e9)
self._widget.setRange(float("-inf"), float("inf"), decimals)
def coordinateMeter(self):
return self._widget.value() / 1e9
def setCoordinateMeter(self, value_m):
self._widget.setValue(value_m * 1e9)
class StepField(FieldBase):
def __init__(self, title="Number of steps"):
self._title = title
super().__init__()
# Widgets
self._widget = ColoredFloatLineEdit()
self._widget.setRange(2, 500, 0)
self._widget.setValue(5)
# Signals
self._widget.valueChanged.connect(self.fieldChanged)
def title(self):
return self._title
def widget(self):
return self._widget
def step(self):
return self._widget.value()
def setStep(self, step):
self._widget.setValue(step)
class PositionField(WidgetFieldBase, ToleranceMixin):
def __init__(self):
super().__init__()
def setToleranceMeter(self, tolerance_m):
for field in self.fields():
if hasattr(field, "setToleranceMeter"):
field.setToleranceMeter(tolerance_m)
@abc.abstractmethod
def positions(self):
return []
class SinglePositionField(PositionField):
def __init__(self):
super().__init__()
self.field_x = CoordinateField("x")
self.addLabelField(self.field_x)
self.field_y = CoordinateField("y")
self.addLabelField(self.field_y)
def title(self):
return "Single position"
def positions(self):
x_m = self.field_x.coordinateMeter()
y_m = self.field_y.coordinateMeter()
return [Position(x_m, y_m)]
class LineScanPositionField(PositionField):
def __init__(self):
super().__init__()
self.field_start = CoordinateField("Start")
self.field_start.setCoordinateMeter(-5e-6)
self.addLabelField(self.field_start)
self.field_stop = CoordinateField("Stop")
self.field_stop.setCoordinateMeter(5e-6)
self.addLabelField(self.field_stop)
self.field_step = StepField()
self.addLabelField(self.field_step)
class LineScanXPositionField(LineScanPositionField):
def title(self):
return "Line scan along X axis"
def positions(self):
start_m = self.field_start.coordinateMeter()
stop_m = self.field_stop.coordinateMeter()
num = self.field_step.step()
return [
Position(x_m, 0.0)
for x_m in np.linspace(start_m, stop_m, num, endpoint=True)
]
class LineScanYPositionField(LineScanPositionField):
def title(self):
return "Line scan along Y axis"
def positions(self):
start_m = self.field_start.coordinateMeter()
stop_m = self.field_stop.coordinateMeter()
num = self.field_step.step()
return [
Position(0.0, y_m)
for y_m in np.linspace(start_m, stop_m, num, endpoint=True)
]
class GridPositionField(PositionField):
def __init__(self):
super().__init__()
self.field_x_start = CoordinateField("Start X")
self.field_x_start.setCoordinateMeter(-1e-6)
self.addLabelField(self.field_x_start)
self.field_x_stop = CoordinateField("Stop X")
self.field_x_stop.setCoordinateMeter(1e-6)
self.addLabelField(self.field_x_stop)
self.field_x_step = StepField("Number of steps X")
self.addLabelField(self.field_x_step)
self.field_y_start = CoordinateField("Start Y")
self.field_y_start.setCoordinateMeter(-1e-6)
self.addLabelField(self.field_y_start)
self.field_y_stop = CoordinateField("Stop Y")
self.field_y_stop.setCoordinateMeter(1e-6)
self.addLabelField(self.field_y_stop)
self.field_y_step = StepField("Number of steps Y")
self.addLabelField(self.field_y_step)
def title(self):
return "Grid"
def positions(self):
x_start_m = self.field_x_start.coordinateMeter()
x_stop_m = self.field_x_stop.coordinateMeter()
x_num = self.field_x_step.step()
xs_m = np.linspace(x_start_m, x_stop_m, x_num, endpoint=True)
y_start_m = self.field_y_start.coordinateMeter()
y_stop_m = self.field_y_stop.coordinateMeter()
y_num = self.field_y_step.step()
ys_m = np.linspace(y_start_m, y_stop_m, y_num, endpoint=True)
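        # Every (x, y) pair on the grid via the cartesian product.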
return [Position(x_m, y_m) for x_m, y_m in itertools.product(xs_m, ys_m)]
class PositionsModel(QtCore.QAbstractTableModel, ToleranceMixin):
def __init__(self):
super().__init__()
self._positions = []
def rowCount(self, parent=None):
return len(self._positions)
def columnCount(self, parent=None):
return 2
def data(self, index, role=QtCore.Qt.DisplayRole):
if not index.isValid():
return None
row = index.row()
column = index.column()
position = self._positions[row]
if role == QtCore.Qt.DisplayRole:
if self.toleranceMeter() is not None:
precision = tolerance_to_decimals(self.toleranceMeter()) - 9
fmt = "{0:.{precision}f}"
            else:
                precision = 0  # unused by "{0:g}", but keeps the .format(..., precision=precision) calls below valid
                fmt = "{0:g}"
if column == 0:
return fmt.format(position.x_m * 1e9, precision=precision)
elif column == 1:
return fmt.format(position.y_m * 1e9, precision=precision)
elif role == QtCore.Qt.UserRole:
return position
elif role == QtCore.Qt.TextAlignmentRole:
return QtCore.Qt.AlignCenter
def headerData(self, section, orientation, role):
if role == QtCore.Qt.DisplayRole:
if orientation == QtCore.Qt.Horizontal:
if section == 0:
return "X [nm]"
elif section == 1:
return "Y [nm]"
elif orientation == QtCore.Qt.Vertical:
return str(section + 1)
def flags(self, index):
return super().flags(index)
def _add_position(self, position):
if position in self._positions:
return False
self._positions.append(position)
return True
def addPosition(self, position):
added = self._add_position(position)
if added:
self.modelReset.emit()
return added
def addPositions(self, positions):
if not positions:
return False
added = False
for position in positions:
added |= self._add_position(position)
if added:
self.modelReset.emit()
return added
def removePosition(self, position):
if position not in self._positions:
return False
self._positions.remove(position)
self.modelReset.emit()
return True
def clearPositions(self):
self._positions.clear()
self.modelReset.emit()
def hasPositions(self):
return bool(self._positions)
def position(self, row):
return self._positions[row]
def positions(self):
return tuple(self._positions)
def setPositions(self, positions):
self.clearPositions()
        for x, y in positions:
            self._add_position(Position(x, y))  # _add_position expects a single Position
self.modelReset.emit()
def setToleranceMeter(self, tolerance_m):
super().setToleranceMeter(tolerance_m)
self.modelReset.emit()
class PositionsWidget(QtWidgets.QWidget, ToleranceMixin):
positionsChanged = QtCore.Signal()
def __init__(self, parent=None):
super().__init__(parent)
# Variables
model = PositionsModel()
model.addPosition(Position(0.0, 0.0))
# Actions
self.action_remove = QtWidgets.QAction("Remove")
self.action_remove.setIcon(QtGui.QIcon.fromTheme("list-remove"))
self.action_remove.setToolTip("Remove position")
self.action_remove.setEnabled(False)
self.action_clear = QtWidgets.QAction("Clear")
self.action_clear.setIcon(QtGui.QIcon.fromTheme("edit-clear"))
self.action_clear.setToolTip("Remove all positions")
self.action_clear.setEnabled(False)
# Widgets
self.chooser = FieldChooser()
self.button_add = QtWidgets.QPushButton("Add position(s)")
self.button_add.setIcon(QtGui.QIcon.fromTheme("list-add"))
self.button_add.setMaximumWidth(self.button_add.sizeHint().width())
self.table_positions = QtWidgets.QTableView()
self.table_positions.setModel(model)
self.table_positions.setSelectionBehavior(QtWidgets.QTableView.SelectRows)
header = self.table_positions.horizontalHeader()
for column in range(model.columnCount()):
header.setSectionResizeMode(column, QtWidgets.QHeaderView.Stretch)
self.toolbar = QtWidgets.QToolBar()
self.toolbar.addAction(self.action_remove)
self.toolbar.addAction(self.action_clear)
# Layouts
layout = QtWidgets.QVBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
layout.addWidget(self.chooser)
layout.addWidget(self.button_add, alignment=QtCore.Qt.AlignRight)
layout.addWidget(self.table_positions)
layout.addWidget(self.toolbar, alignment=QtCore.Qt.AlignRight)
self.setLayout(layout)
# Signals
self.action_remove.triggered.connect(self._on_remove_triggered)
self.action_clear.triggered.connect(self._on_clear_triggered)
self.button_add.clicked.connect(self._on_add_clicked)
model.dataChanged.connect(self._on_positions_changed)
model.dataChanged.connect(self.positionsChanged)
model.modelReset.connect(self._on_positions_changed)
model.modelReset.connect(self.positionsChanged)
self.table_positions.selectionModel().selectionChanged.connect(
self._on_positions_changed
)
def _on_remove_triggered(self):
selection_model = self.table_positions.selectionModel()
if not selection_model.hasSelection():
return
indexes = selection_model.selectedIndexes()
model = self.table_positions.model()
for row in reversed(sorted(set(index.row() for index in indexes))):
model.removePosition(model.position(row))
def _on_clear_triggered(self):
model = self.table_positions.model()
model.clearPositions()
def _on_add_clicked(self):
field = self.chooser.currentField()
if field is None:
return
positions = field.positions()
self.table_positions.model().addPositions(positions)
def _on_positions_changed(self):
model = self.table_positions.model()
has_rows = model.hasPositions()
selection_model = self.table_positions.selectionModel()
has_selection = selection_model.hasSelection()
self.action_remove.setEnabled(has_rows and has_selection)
self.action_clear.setEnabled(has_rows)
def _on_field_changed(self):
field = self.chooser.currentField()
if field is None:
return
self.button_add.setEnabled(field.isValid())
def setToleranceMeter(self, tolerance_m):
super().setToleranceMeter(tolerance_m)
for field in self.chooser.fields():
field.setToleranceMeter(tolerance_m)
self.table_positions.model().setToleranceMeter(tolerance_m)
def registerPositionField(self, field):
self.chooser.addField(field)
field.fieldChanged.connect(self._on_field_changed)
def positions(self):
return self.table_positions.model().positions()
class PositionsField(FieldBase, ToleranceMixin):
def __init__(self):
super().__init__()
# Widgets
self._widget = PositionsWidget()
# Signals
self._widget.positionsChanged.connect(self.fieldChanged)
def title(self):
return "Positions"
def widget(self):
return self._widget
def registerPositionField(self, field):
field.setToleranceMeter(self.toleranceMeter())
self._widget.registerPositionField(field)
def positions(self):
return self._widget.positions()
def setToleranceMeter(self, tolerance_m):
super().setToleranceMeter(tolerance_m)
self._widget.setToleranceMeter(tolerance_m)
class BeamFieldBase(WidgetFieldBase):
def isValid(self):
return super().isValid() and bool(self.beams())
@abc.abstractmethod
def beams(self):
"""
Returns a :class:`list` of :class:`BeamBase`.
"""
return []
| 11,979 | 894 | 1,769 |
46bea9e2fbb3568b36cb4f14630ab5d9663be4d5 | 826 | py | Python | pyrads/Absorption_Crosssections_UV.py | ddbkoll/PyRADS-shortwave | 9d86f7dc07bef37f832949a584f0abe2fd3b72c4 | [
"MIT"
] | 3 | 2020-12-22T17:39:12.000Z | 2021-02-10T12:31:52.000Z | pyrads/Absorption_Crosssections_UV.py | ddbkoll/PyRADS-shortwave | 9d86f7dc07bef37f832949a584f0abe2fd3b72c4 | [
"MIT"
] | 1 | 2019-11-26T22:53:52.000Z | 2021-02-18T13:35:50.000Z | pyrads/Absorption_Crosssections_UV.py | ddbkoll/PyRADS-shortwave | 9d86f7dc07bef37f832949a584f0abe2fd3b72c4 | [
"MIT"
] | 1 | 2021-05-21T17:55:36.000Z | 2021-05-21T17:55:36.000Z | from __future__ import division, print_function, absolute_import
import numpy as np
from . import phys
import os
'''
Implement UV scattering cross-sections.
Either data or fits.
'''
### -----------------------------------
### Global definitions here
### -----------------------------------
### Absorption crosssection for CO2
### based on eqn 6 in Venot+ (2013).
| 27.533333 | 122 | 0.561743 | from __future__ import division, print_function, absolute_import
import numpy as np
from . import phys
import os
'''
Implement UV scattering cross-sections.
Either data or fits.
'''
### -----------------------------------
### Global definitions here
### -----------------------------------
### Absorption crosssection for CO2
### based on eqn 6 in Venot+ (2013).
def get_kappaAbs_CO2(wavenr,T):
lam = 1e7 / wavenr # cm-1 -> nm
# fitting formula:
a = lambda T: -42.26 + (9593.*1.44/T)
b = lambda T: 4.82e-3 - 61.5*1.44/T
Q = lambda T: (1.-np.exp(-667.4*1.44/T))**(-2) * (1.-np.exp(-1388.2*1.44/T))**(-1) * (1.-np.exp(-2449.1*1.44/T))**(-1)
sigma = Q(T) * np.exp(a(T)+b(T)*lam)
kappaAbs = sigma * phys.N_avogadro/phys.CO2.MolecularWeight * 1e-1 # cm^2/molec -> m^2/kg
return kappaAbs
| 436 | 0 | 23 |
1685731263b74c59bfdef531e9fab0f3fc9420f5 | 200 | py | Python | scripts/item/consume_2434574.py | G00dBye/YYMS | 1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb | [
"MIT"
] | 54 | 2019-04-16T23:24:48.000Z | 2021-12-18T11:41:50.000Z | scripts/item/consume_2434574.py | G00dBye/YYMS | 1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb | [
"MIT"
] | 3 | 2019-05-19T15:19:41.000Z | 2020-04-27T16:29:16.000Z | scripts/item/consume_2434574.py | G00dBye/YYMS | 1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb | [
"MIT"
] | 49 | 2020-11-25T23:29:16.000Z | 2022-03-26T16:20:24.000Z | # Full Moon Damage Skin
success = sm.addDamageSkin(2434574)
if success:
sm.chat("The Full Moon Damage Skin has been added to your account's damage skin collection.")
# sm.consumeItem(2434574)
| 33.333333 | 97 | 0.745 | # Full Moon Damage Skin
success = sm.addDamageSkin(2434574)
if success:
sm.chat("The Full Moon Damage Skin has been added to your account's damage skin collection.")
# sm.consumeItem(2434574)
| 0 | 0 | 0 |
212466f096bb63bb64ea1c7aed334c836bb300b4 | 4,215 | py | Python | engineering/dataset/crawl_codes/cjscrape.py | ningfeiwang/Code_De-anonymization | 7c144be4f84eab8d6373ab7608280d80128435d5 | [
"MIT"
] | 3 | 2019-09-26T18:52:33.000Z | 2020-06-13T10:17:22.000Z | engineering/dataset/crawl_codes/cjscrape.py | ningfeiwang/Code_De-anonymization | 7c144be4f84eab8d6373ab7608280d80128435d5 | [
"MIT"
] | null | null | null | engineering/dataset/crawl_codes/cjscrape.py | ningfeiwang/Code_De-anonymization | 7c144be4f84eab8d6373ab7608280d80128435d5 | [
"MIT"
] | 1 | 2021-08-09T09:21:18.000Z | 2021-08-09T09:21:18.000Z | #!/usr/local/bin/python
# coding:utf-8
from urllib import urlopen
from urllib import urlretrieve
import json
import sys
import os
import zipfile
import shutil
import multiprocessing
# returns the URL to download the user submission
# scrapes the C/C++/Python files of the given round
# main section of script
if __name__ == '__main__':
script_path = os.path.dirname(os.path.realpath(__file__))
metadatafile = open(script_path + "/CodeJamMetadata.json").read()
metadata = json.loads(metadatafile)
# loop through years
for year_json in metadata['competitions']:
year = year_json['year']
# loop through rounds
for round_json in year_json['round']:
round_id = round_json['contest']
problems = round_json['problems']
# run scraper on current round
scraper = multiprocessing.Process(target=scrape, args=(round_id, problems, script_path))
scraper.start()
| 37.972973 | 106 | 0.541637 | #!/usr/local/bin/python
# coding:utf-8
from urllib import urlopen
from urllib import urlretrieve
import json
import sys
import os
import zipfile
import shutil
import multiprocessing
# returns the URL to download the user submission
def get_download_url(round_id, problem_id, username):
return "http://code.google.com/codejam/contest/scoreboard/do?cmd=GetSourceCode&contest=" \
+ round_id \
+ "&problem=" \
+ problem_id \
+ "&io_set_id=0&username=" \
+ username
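# Illustrative sketch (hypothetical ids, not taken from any real round): the
# assembled URL for round "32003", problem "27" and user "jdoe" would read
#   http://code.google.com/codejam/contest/scoreboard/do?cmd=GetSourceCode&contest=32003&problem=27&io_set_id=0&username=jdoe
# which is handy for spot-checking the scraper against a browser.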
# scrapes the C/C++/Python files of the given round
def scrape(round_id, problems, script_path):
# load list of users
user_file = open(script_path + '/users/' + round_id + '.txt', 'r')
users = user_file.read().splitlines()
# loop through problems in the round
for problem_json in problems:
problem_id = problem_json['id']
# loop through users who participated in the round
for username in users:
download_url = get_download_url(round_id, problem_id, username)
# print and flush URL
print download_url
sys.stdout.flush()
# make temp directory for storing zips
tempdir = round_id + 'temp'
if not os.path.exists(tempdir):
os.makedirs(tempdir)
# download and read zip
target_zip = tempdir + '/' + problem_id + '.' + username + '0.zip'
urlretrieve(download_url,target_zip)
zip_header = open(target_zip, 'rb')
# try-except in case of a bad header
try:
my_zip = zipfile.ZipFile(zip_header)
# loop through each file in the zip file
for my_file in my_zip.namelist():
# check for C/C++/Python source
if my_file.endswith(('.c', '.cpp', '.py')):
target_source = username + '_0' # destination of source files
file_newname = 'p' + problem_id + '_' + username + '.' # appropriate name for file
if my_file.endswith('.c'):
file_newname += 'c'
target_source = 'c/' + target_source
elif my_file.endswith('.cpp'):
file_newname += 'cpp'
target_source = 'cpp/' + target_source
else:
file_newname += 'py'
target_source = 'py/' + target_source
target_source = 'codejamfolder/' + target_source
# make directory for language and author
if not os.path.exists(target_source):
os.makedirs(target_source)
# extract and rename source file
my_zip.extract(my_file, target_source)
os.rename((target_source + '/' + my_file), (target_source + '/' + file_newname))
# print location of extracted source file
print target_source + '/' + file_newname
sys.stdout.flush()
except:
print "error:", sys.exc_info()[0] # can happen if the user didn't do a problem
sys.stdout.flush()
# delete temp directory
if os.path.exists(tempdir):
shutil.rmtree(tempdir)
return
# main section of script
if __name__ == '__main__':
script_path = os.path.dirname(os.path.realpath(__file__))
metadatafile = open(script_path + "/CodeJamMetadata.json").read()
metadata = json.loads(metadatafile)
# loop through years
for year_json in metadata['competitions']:
year = year_json['year']
# loop through rounds
for round_json in year_json['round']:
round_id = round_json['contest']
problems = round_json['problems']
# run scraper on current round
scraper = multiprocessing.Process(target=scrape, args=(round_id, problems, script_path))
scraper.start()
| 3,212 | 0 | 44 |
2282f6e87cb3130b7496b5870fd8c7852d9ed08b | 1,972 | py | Python | tests/test_il_medicaid.py | kiwisquash/city-scrapers | 38ca372467856853b36ec180c440eef0a0c6ce5b | [
"MIT"
] | 1 | 2019-03-18T03:12:25.000Z | 2019-03-18T03:12:25.000Z | tests/test_il_medicaid.py | kiwisquash/city-scrapers | 38ca372467856853b36ec180c440eef0a0c6ce5b | [
"MIT"
] | null | null | null | tests/test_il_medicaid.py | kiwisquash/city-scrapers | 38ca372467856853b36ec180c440eef0a0c6ce5b | [
"MIT"
] | null | null | null | from datetime import datetime
from os.path import dirname, join
import re
import pytest
from freezegun import freeze_time
from city_scrapers_core.constants import NOT_CLASSIFIED
from city_scrapers_core.utils import file_response
from city_scrapers.spiders.il_medicaid import IlMedicaidSpider
test_response = file_response(
join(dirname(__file__), "files", "il_medicaid.html"),
url="https://www.illinois.gov/hfs/About/BoardsandCommisions/MAC/Pages/default.aspx",
)
spider = IlMedicaidSpider()
freezer = freeze_time("2019-05-20")
freezer.start()
parsed_items = [item for item in spider.parse(test_response)]
freezer.stop()
# def test_tests():
# print("Please write some tests for this spider or at least disable this one.")
# assert False
"""
Uncomment below
"""
# def test_description():
# assert parsed_items[0]["description"] == "EXPECTED DESCRIPTION"
# def test_start():
# assert parsed_items[0]["start"] == datetime(2019, 1, 1, 0, 0)
# def test_end():
# assert parsed_items[0]["end"] == datetime(2019, 1, 1, 0, 0)
# def test_time_notes():
# assert parsed_items[0]["time_notes"] == "EXPECTED TIME NOTES"
# def test_id():
# assert parsed_items[0]["id"] == "EXPECTED ID"
# def test_status():
# assert parsed_items[0]["status"] == "EXPECTED STATUS"
# def test_location():
# assert parsed_items[0]["location"] == {
# "name": "EXPECTED NAME",
# "address": "EXPECTED ADDRESS"
# }
# def test_source():
# assert parsed_items[0]["source"] == "EXPECTED URL"
# def test_links():
# assert parsed_items[0]["links"] == [{
# "href": "EXPECTED HREF",
# "title": "EXPECTED TITLE"
# }]
# def test_classification():
# assert parsed_items[0]["classification"] == NOT_CLASSIFIED
# @pytest.mark.parametrize("item", parsed_items)
# def test_all_day(item):
# assert item["all_day"] is False
| 22.409091 | 88 | 0.678499 | from datetime import datetime
from os.path import dirname, join
import re
import pytest
from freezegun import freeze_time
from city_scrapers_core.constants import NOT_CLASSIFIED
from city_scrapers_core.utils import file_response
from city_scrapers.spiders.il_medicaid import IlMedicaidSpider
test_response = file_response(
join(dirname(__file__), "files", "il_medicaid.html"),
url="https://www.illinois.gov/hfs/About/BoardsandCommisions/MAC/Pages/default.aspx",
)
spider = IlMedicaidSpider()
freezer = freeze_time("2019-05-20")
freezer.start()
parsed_items = [item for item in spider.parse(test_response)]
freezer.stop()
# def test_tests():
# print("Please write some tests for this spider or at least disable this one.")
# assert False
"""
Uncomment below
"""
def test_title():
assert parsed_items[0]["title"] == "EXPECTED TITLE"
# def test_description():
# assert parsed_items[0]["description"] == "EXPECTED DESCRIPTION"
# def test_start():
# assert parsed_items[0]["start"] == datetime(2019, 1, 1, 0, 0)
# def test_end():
# assert parsed_items[0]["end"] == datetime(2019, 1, 1, 0, 0)
# def test_time_notes():
# assert parsed_items[0]["time_notes"] == "EXPECTED TIME NOTES"
# def test_id():
# assert parsed_items[0]["id"] == "EXPECTED ID"
# def test_status():
# assert parsed_items[0]["status"] == "EXPECTED STATUS"
# def test_location():
# assert parsed_items[0]["location"] == {
# "name": "EXPECTED NAME",
# "address": "EXPECTED ADDRESS"
# }
# def test_source():
# assert parsed_items[0]["source"] == "EXPECTED URL"
# def test_links():
# assert parsed_items[0]["links"] == [{
# "href": "EXPECTED HREF",
# "title": "EXPECTED TITLE"
# }]
# def test_classification():
# assert parsed_items[0]["classification"] == NOT_CLASSIFIED
# @pytest.mark.parametrize("item", parsed_items)
# def test_all_day(item):
# assert item["all_day"] is False
| 52 | 0 | 23 |
79b80d0c3b746a092c83a791ebac8e67d24c451d | 16,170 | py | Python | v0/aia_eis_v0/circuits/vogit_1.py | DreamBoatOve/aia_eis | 458b4d29846669b10db4da1b3e86c0b394614ceb | [
"MIT"
] | 1 | 2022-03-02T12:57:19.000Z | 2022-03-02T12:57:19.000Z | v0/aia_eis_v0/circuits/vogit_1.py | DreamBoatOve/aia_eis | 458b4d29846669b10db4da1b3e86c0b394614ceb | [
"MIT"
] | null | null | null | v0/aia_eis_v0/circuits/vogit_1.py | DreamBoatOve/aia_eis | 458b4d29846669b10db4da1b3e86c0b394614ceb | [
"MIT"
] | null | null | null | import sys
sys.path.append('../')
import numpy as np
import math
import copy
import os
from circuits.elements import ele_C, ele_L
from IS.IS import IS_0
from IS.IS_criteria import cal_ChiSquare_0
from utils.file_utils.pickle_utils import pickle_file
from utils.visualize_utils.IS_plots.ny import nyquist_multiPlots_1, nyquist_plot_1
class Vogit_3:
"""
Refer
papers:
paper1: A Linear Kronig-Kramers Transform Test for Immittance Data Validation
paper0: A Method for Improving the Robustness of linear Kramers-Kronig Validity Tests
Note:
        Vogit's most basic circuit is
            Rs-M*(RC)-[Cs]-Ls
            Ls: inductive effects are considered by adding an additional inductivity [1]
            Cs:
                option to add a serial capacitance that helps validate data with no low-frequency intercept
                due to their capacitive nature an additional capacity is added to the ECM.
        1- Of the complex / imag / real fits, only the complex fit is considered
        2- Of the three weighting schemes, only modulus weighting is considered
        3- Of add Capacity / add Inductance, only add Capacity is considered
    Version:
        v3:
            Update 2: drop the manual choice of M; set a reasonable upper bound on M and stop once it is reached
            Update 1: construct Ax=Y directly, following Impedance.py, and solve it in one step
            the first two versions of class vogit are in \dpfc_src\circuits\vogit_0.py; neither works well
        v2: the previous Vogit had no inductance L; this version adds it
"""
def __init__(self, impSpe, fit_type='complex', u_optimum=0.85, add_C=False, M_max=None):
"""
        Since Vogit is a measurement model, an IS is always passed in before Vogit is used
:param
impSpe: IS cls
fit_type: str
'real',
'imag',
'complex',
M: int
number of (RC)
w: list(float)
RC_para_list:[
[R0, C0],
[R1, C1],
...
[Rm-1, Cm-1],
]
Rs: float
add_C: Bool
"""
self.impSpe = impSpe
self.w_arr = self.impSpe.w_arr
self.z_arr = self.impSpe.z_arr
self.fit_type = fit_type
self.u_optimum = u_optimum
self.add_C = add_C
self.M = 1
if (M_max is not None) and (type(M_max) == int):
self.M_max = M_max
else:
self.get_Mmax()
def get_Mmax(self):
"""
        Conditions for setting M_max
        condition 1- Paper1: As a rule of thumb we can conclude that, for the single fit and transformation, the v range should be
        equal to the inverse w range with a distribution of 6 or 7 Tcs per decade. Taken slightly larger here: 8 * decades
        condition 2- When Vogit fits only the real or imaginary part, the coefficient matrix A (row, col) requires row = tested points > col = number of parameters
"""
# condition 1
M1 = int(math.log10(self.w_arr.max() / self.w_arr.min())) * 7
# condition 2
num_points = self.w_arr.size
if self.add_C:
M2 = num_points - 3 - 1
else:
M2 = num_points - 2 - 1
self.M_max = min(M1, M2)
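    # Hedged worked example (numbers assumed, not from the code): for frequencies
    # spanning 0.1 Hz..100 kHz, log10(w_max/w_min) = 6 decades, so M1 = 6 * 7 = 42;
    # with 60 measured points and add_C=True, M2 = 60 - 3 - 1 = 56, giving M_max = 42.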
def calc_timeConstant(self):
"""
timeConstant = tao = R * C
Refer:
A Method for Improving the Robustness of linear Kramers-Kronig Validity Tests
2.2. Distribution of Time Constants Eq 10-12
:return:
"""
sorted_w_arr = np.sort(copy.deepcopy(self.w_arr)) # small --> big number
w_min, w_max = sorted_w_arr[0], sorted_w_arr[-1]
        # The time constant τ is written as tao
tao_min = 1 / w_max
tao_max = 1 / w_min
tao_list = []
if self.M == 1:
tao_list.append(tao_min)
elif self.M == 2:
tao_list.extend([tao_min, tao_max])
elif self.M > 2:
tao_list.append(tao_min)
K = self.M - 1
for i in range(1, K):
tao = 10 ** (math.log10(tao_min) + i * math.log10(tao_max / tao_min) / (self.M - 1))
tao_list.append(tao)
tao_list.append(tao_max)
self.tao_arr = np.array(tao_list)
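    # Illustration (assumed range, not original code): for w in [1, 1e6] rad/s and
    # M = 4, tao_min = 1e-6 s, tao_max = 1 s, and the interior constants land at
    # 10**(-6 + i*6/3) for i = 1, 2, i.e. 1e-4 s and 1e-2 s -- logarithmic spacing
    # between 1/w_max and 1/w_min, matching paper0 Eq 10-12.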
def update_u(self):
"""
refer paper0-eq21
:return:
"""
if self.fit_type == 'complex':
self.M_R_arr = self.para_arr[1:-2]
positive_R_list = []
            negative_R_list = []
            for R in self.M_R_arr:
                if R >= 0:
                    positive_R_list.append(R)
                elif R < 0:
                    negative_R_list.append(R)
            self.u = 1 - abs(sum(negative_R_list)) / sum(positive_R_list)
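        # Worked example of paper0-eq21 (illustrative numbers only): fitted
        # resistances [3.0, 2.0, -0.5] give u = 1 - |-0.5| / (3.0 + 2.0) = 0.9,
        # still above u_optimum = 0.85, so lin_KK would keep increasing M.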
def fit_kk(self):
"""
Are/im
N row
M+2 or M+3(with capacity) col
Are
col 0: Rs(w0) / |Z(w0)|, Rs(w1) / |Z(w1)|, Rs(w2) / |Z(w2)|, ..., Rs(w_N-1) / |Z(w_N-1)|
col 1: Z_RCk_0(w0)_re = Rk_0 / {[1+(w0*tao0)**2]*|Z(w0)|},
Z_RCk_0(w1)_re = Rk_0 / {[1+(w1*tao0)**2]*|Z(w1)|}
Z_RCk_0(w2)_re = Rk_0 / {[1+(w2*tao0)**2]*|Z(w2)|},
...,
Z_RCk_0(w_N-1)_re = Rk_0 / {[1+(w_N-1*tao_0)**2]*|Z(w_N-1)|}
...
col k(M): Z_RCk_k(w0)_re = Rk_k / {[1+(w0*taok)**2]*|Z(w0)|},
Z_RCk_k(w1)_re = Rk_k / {[1+(w1*taok)**2]*|Z(w1)|}
Z_RCk_k(w2)_re = Rk_k / {[1+(w2*taok)**2]*|Z(w2)|},
...,
Z_RCk_k(w_N-1)_re = Rk_k / {[1+(w_N-1*tao_k)**2]*|Z(w_N-1)|}
            col -2(C): if a capacity is added, its contribution to the real part of the impedance is 0
                0, 0, 0, ..., 0
            col -1(L): L contributes 0 to the real part of the impedance
                0, 0, 0, ..., 0
Aim
col 0: Rs(wi)_im = 0, 0,0,0,...,0,0
col 1: Z_RCk_0(w0)_im = (-1 * w0 * Rk_0 * tao0) / {[1+(w0*tao0)**2]*|Z(w0)|},
Z_RCk_0(w1)_im = (-1 * w1 * Rk_0 * tao0) / {[1+(w1*tao0)**2]*|Z(w1)|},
Z_RCk_0(w2)_im = (-1 * w2 * Rk_0 * tao0) / {[1+(w2*tao0)**2]*|Z(w2)|},
...,
Z_RCk_0(w_N-1)_im = (-1 * w_N-1 * Rk_0 * tao0) / {[1+(w_N-1*tao0)**2]*|Z(w0_N-1)|},
...
col k(M):
col -2(C):
col -1(L):
:return:
"""
Are = np.zeros(shape=(self.w_arr.size, self.M + 2))
Aim = np.zeros(shape=(self.w_arr.size, self.M + 2))
if self.add_C:
Are = np.zeros(shape=(self.w_arr.size, self.M + 3))
Aim = np.zeros(shape=(self.w_arr.size, self.M + 3))
# Rs col
Are[:,0] = 1 / np.abs(self.z_arr)
        # Aim[:,0] = np.zeros(shape=(self.w_arr.size)) -- already zero, nothing to set
# RC_1~M col
for i in range(self.M):
Are[:, i+1] = RC(para_arr=np.array([1, self.tao_arr[i]]), w_arr=self.w_arr).real / np.abs(self.z_arr)
Aim[:, i+1] = RC(para_arr=np.array([1, self.tao_arr[i]]), w_arr=self.w_arr).imag / np.abs(self.z_arr)
if self.add_C:
            # Are[:, -2] = np.zeros(shape=(self.w_arr.size)) -- already zero, nothing to set
Aim[:, -2] = -1 / (self.w_arr * np.abs(self.z_arr))
Aim[:, -1] = self.w_arr / np.abs(self.z_arr)
if self.fit_type == 'real':
self.para_arr = np.linalg.pinv(Are).dot(self.z_arr.real / np.abs(self.z_arr))
XLim = np.zeros(shape=(self.w_arr.size, 2))
            # Following paper0-Lin-KK-Eq10, construct a second set of equations to solve for C and L, with X = 1/C
# data for L-col
# Aim[:, -1] = self.w_arr / np.abs(self.z_arr)
XLim[:, -1] = self.w_arr / np.abs(self.z_arr)
# data for C-col
if self.add_C:
XLim[:, -2] = -1 / self.w_arr / np.abs(self.z_arr)
# Aim[:, -2] = -1 / self.w_arr / np.abs(self.z_arr)
"""
            Why self.para_arr[-2] used to be set to a tiny positive number such as 1e-18:
                when fit_type == 'real', self.para_arr = np.linalg.pinv(Are).dot(self.z_arr.real / np.abs(self.z_arr))
                gives para_arr[-2:] = [X, L] == [0, 0]; the code right below immediately computes the impedance of the
                fitted parameters, and the Cs impedance uses Cs = 1/X, so with X = 0, Cs -> Inf. X therefore needs a
                small positive value to prevent numerical overflow.
"""
# self.para_arr[-2] = 1e-20
# self.simulate_Z()
# tmp_para_arr = np.linalg.pinv(Aim).dot((self.z_arr.imag - self.z_sim_arr.imag) / np.abs(self.z_arr))
z_vogit_arr = self.simulate_vogit()
XL = np.linalg.pinv(Aim).dot((self.z_arr.imag - z_vogit_arr.imag) / np.abs(self.z_arr))
# self.para_arr[-1] = tmp_para_arr[-1]
self.para_arr[-1] = XL[-1]
if self.add_C:
# self.para_arr[-2] = tmp_para_arr[-2]
self.para_arr[-2] = XL[-2]
elif self.fit_type == 'imag':
self.para_arr = np.linalg.pinv(Aim).dot(self.z_arr.imag / np.abs(self.z_arr))
"""
            Compute Rs following paper1-lin-KK-Eq7.
            The summation inside the square brackets of Eq7 == the contribution of Vogit's M RC elements to the real part.
"""
self.simulate_Z()
weight_arr = 1 / (np.abs(self.z_arr) ** 2)
# paper1-Eq 7
# ValueError: setting an array element with a sequence.
Rs = np.sum(weight_arr * (self.z_arr.real - self.z_sim_arr.real)) / np.sum(weight_arr)
self.para_arr[0] = Rs
elif self.fit_type == 'complex':
A_inv = np.linalg.inv(Are.T.dot(Are) + Aim.T.dot(Aim))
Y = Are.T.dot(self.z_arr.real / np.abs(self.z_arr)) + Aim.T.dot(self.z_arr.imag / np.abs(self.z_arr))
self.para_arr = A_inv.dot(Y)
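    # Reading note for the complex branch above (restated, not new math): it solves
    # the modulus-weighted least squares in normal-equation form,
    #   para = (Are^T Are + Aim^T Aim)^(-1) (Are^T Yre + Aim^T Yim)
    # with Yre = Re(Z)/|Z| and Yim = Im(Z)/|Z|, so the weighting sits in both A and Y.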
def simulate_vogit(self):
"""
        The Vogit here is the bare Rs + M * RC
:return:
"""
self.Rs = self.para_arr[0]
self.M_R_arr = self.para_arr[1: self.M+1]
z_vogit_arr = np.empty(shape=(self.M, self.w_arr.size), dtype=complex)
# Z of M RC
for i, R in enumerate(self.M_R_arr):
z_RC_arr = RC(para_arr=np.array([R, self.tao_arr[i]]), w_arr=self.w_arr)
z_vogit_arr[i, :] = z_RC_arr
z_vogit_arr = z_vogit_arr.sum(axis=0)
z_vogit_arr += self.Rs
return z_vogit_arr
def cal_residual(self):
"""
        Following paper0-Eq 15 and Eq 16:
residual_arr = Z_arr - Z_sim_arr
:return:
"""
self.simulate_Z()
z_abs_arr = np.abs(self.z_arr)
self.residual_arr = (self.z_arr - self.z_sim_arr) / z_abs_arr
def residual_statistic(self, type):
"""
        Several quantitative measures of the residual, as defined here:
        1 absolute value of the residual
            absolute value of the real-part residual
            absolute value of the imaginary-part residual
        2 square of the residual
            square of the real-part residual
            square of the imaginary-part residual
:param
type: str
'abs'
'square'
"""
self.cal_residual()
if type == 'abs':
residual_real_abs_arr = np.abs(self.residual_arr.real)
residual_imag_abd_arr = np.abs(self.residual_arr.imag)
return residual_real_abs_arr, residual_imag_abd_arr
elif type == 'square':
residual_real_square_arr = self.residual_arr.real ** 2
residual_imag_square_arr = self.residual_arr.imag ** 2
return residual_real_square_arr, residual_imag_square_arr
def cal_chiSquare(self, weight_type='modulus'):
"""
        This cannot be computed the ZSimpWin way, because that involves the number of parameters in the ECM,
        and the ECM may differ before and after points are deleted, making it incomputable.
        Hence chiSquare = weight * [ΔRe**2 + ΔIm**2] is used instead.
:return:
"""
self.simulate_Z()
if weight_type == 'modulus':
self.chi_square = cal_ChiSquare_0(z_arr=self.z_arr, z_sim_arr=self.z_sim_arr, weight_type=weight_type)
return self.chi_square
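    # Reading note (assumption based on the weight name): modulus weighting makes
    # each point contribute on the order of (dRe**2 + dIm**2) / |Z|**2 to the
    # chi-square; the authoritative formula is whatever cal_ChiSquare_0 in
    # IS.IS_criteria implements.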
# ---------------------------------- Test Vogit_3 on Lin-KK-Ex1_LIB_time_invariant ----------------------------------
# 1- load data
# fit_type = 'real'
# fit_type = 'imag'
# fit_type = 'complex'
# lib_res_fp = '../plugins_test/jupyter_code/rbp_files/2/example_data_sets/LIB_res'
# if fit_type == 'complex':
# ex1_data_dict = np.load(os.path.join(lib_res_fp, 'Ex1_LIB_time_invariant_res.npz'))
# elif fit_type == 'real':
# ex1_data_dict = np.load(os.path.join(lib_res_fp, 'Ex1_LIB_time_invariant_real_addC_res.npz'))
# elif fit_type == 'imag':
# ex1_data_dict = np.load(os.path.join(lib_res_fp, 'Ex1_LIB_time_invariant_imag_addC_res.npz'))
# ex1_z_arr = ex1_data_dict['z_arr']
# ex1_f_arr = ex1_data_dict['fre']
# ex1_z_MS_sim_arr = ex1_data_dict['z_sim']
# ex1_real_residual_arr = ex1_data_dict['real_residual']
# ex1_imag_residual_arr = ex1_data_dict['imag_residual']
# ex1_IS = IS_0()
# ex1_IS.raw_z_arr = ex1_z_arr
# ex1_IS.exp_area = 1.0
# ex1_IS.z_arr = ex1_z_arr
# ex1_IS.fre_arr = ex1_f_arr
# ex1_IS.w_arr = ex1_IS.fre_arr * 2 * math.pi
# --------------- real Fit ---------------
# ex1_vogit = Vogit_3(impSpe=ex1_IS, fit_type=fit_type, add_C=True)
# ex1_vogit.lin_KK()
# # compare nyquist plots of MS-Lin-KK and Mine
# ex1_z_MS_sim_list = ex1_z_MS_sim_arr.tolist()
# ex1_vogit.simulate_Z()
# z_pack_list = [ex1_z_arr.tolist(), ex1_z_MS_sim_list, ex1_vogit.z_sim_arr.tolist()]
# nyquist_multiPlots_1(z_pack_list=z_pack_list, x_lim=[0.015, 0.045], y_lim=[0, 0.02], plot_label_list=['Ideal IS', 'MS-real-Fit','Mine-real-Fit'])
# --------------- real Fit ---------------
# --------------- imag Fit ---------------
# ex1_vogit = Vogit_3(impSpe=ex1_IS, fit_type=fit_type, add_C=True)
# ex1_vogit.lin_KK()
# # compare nyquist plots of MS-Lin-KK and Mine
# ex1_z_MS_sim_list = ex1_z_MS_sim_arr.tolist()
# ex1_vogit.simulate_Z()
# z_pack_list = [ex1_z_arr.tolist(), ex1_z_MS_sim_list, ex1_vogit.z_sim_arr.tolist()]
# nyquist_multiPlots_1(z_pack_list=z_pack_list, x_lim=[0.015, 0.045], y_lim=[0, 0.02], plot_label_list=['Ideal IS', 'MS-imag-Fit','Mine-imag-Fit'])
# --------------- imag Fit ---------------
# --------------- Complex Fit ---------------
# ex1_vogit = Vogit_3(impSpe=ex1_IS, add_C=True)
# ex1_vogit.lin_KK()
# # compare nyquist plots of MS-Lin-KK and Mine
# ex1_z_MS_sim_list = ex1_z_MS_sim_arr.tolist()
# ex1_vogit.simulate_Z()
# z_pack_list = [ex1_z_arr.tolist(), ex1_z_MS_sim_list, ex1_vogit.z_sim_arr.tolist()]
# nyquist_multiPlots_1(z_pack_list=z_pack_list, x_lim=[0.015, 0.045], y_lim=[0, 0.02], plot_label_list=['Ideal IS', 'MS-Fit','Mine-Fit'])
# --------------- Complex Fit ---------------
# ---------------------------------- Test Vogit_1 on Lin-KK-Ex1_LIB_time_invariant ---------------------------------- | 38.317536 | 147 | 0.532158 | import sys
sys.path.append('../')
import numpy as np
import math
import copy
import os
from circuits.elements import ele_C, ele_L
from IS.IS import IS_0
from IS.IS_criteria import cal_ChiSquare_0
from utils.file_utils.pickle_utils import pickle_file
from utils.visualize_utils.IS_plots.ny import nyquist_multiPlots_1, nyquist_plot_1
def RC(para_arr, w_arr):
R, tao = para_arr[0], para_arr[1]
z = R / (1+1j*w_arr*tao)
return z
class Vogit_3:
"""
Refer
papers:
paper1: A Linear Kronig-Kramers Transform Test for Immittance Data Validation
paper0: A Method for Improving the Robustness of linear Kramers-Kronig Validity Tests
Note:
        Vogit's most basic circuit is
            Rs-M*(RC)-[Cs]-Ls
            Ls: inductive effects are considered by adding an additional inductivity [1]
            Cs:
                option to add a serial capacitance that helps validate data with no low-frequency intercept
                due to their capacitive nature an additional capacity is added to the ECM.
        1- Of the complex / imag / real fits, only the complex fit is considered
        2- Of the three weighting schemes, only modulus weighting is considered
        3- Of add Capacity / add Inductance, only add Capacity is considered
    Version:
        v3:
            Update 2: drop the manual choice of M; set a reasonable upper bound on M and stop once it is reached
            Update 1: construct Ax=Y directly, following Impedance.py, and solve it in one step
            the first two versions of class vogit are in \dpfc_src\circuits\vogit_0.py; neither works well
        v2: the previous Vogit had no inductance L; this version adds it
"""
def __init__(self, impSpe, fit_type='complex', u_optimum=0.85, add_C=False, M_max=None):
"""
        Since Vogit is a measurement model, an IS is always passed in before Vogit is used
:param
impSpe: IS cls
fit_type: str
'real',
'imag',
'complex',
M: int
number of (RC)
w: list(float)
RC_para_list:[
[R0, C0],
[R1, C1],
...
[Rm-1, Cm-1],
]
Rs: float
add_C: Bool
"""
self.impSpe = impSpe
self.w_arr = self.impSpe.w_arr
self.z_arr = self.impSpe.z_arr
self.fit_type = fit_type
self.u_optimum = u_optimum
self.add_C = add_C
self.M = 1
if (M_max is not None) and (type(M_max) == int):
self.M_max = M_max
else:
self.get_Mmax()
def get_Mmax(self):
"""
        Conditions for setting M_max
        condition 1- Paper1: As a rule of thumb we can conclude that, for the single fit and transformation, the v range should be
        equal to the inverse w range with a distribution of 6 or 7 Tcs per decade. Taken slightly larger here: 8 * decades
        condition 2- When Vogit fits only the real or imaginary part, the coefficient matrix A (row, col) requires row = tested points > col = number of parameters
"""
# condition 1
M1 = int(math.log10(self.w_arr.max() / self.w_arr.min())) * 7
# condition 2
num_points = self.w_arr.size
if self.add_C:
M2 = num_points - 3 - 1
else:
M2 = num_points - 2 - 1
self.M_max = min(M1, M2)
def calc_timeConstant(self):
"""
timeConstant = tao = R * C
Refer:
A Method for Improving the Robustness of linear Kramers-Kronig Validity Tests
2.2. Distribution of Time Constants Eq 10-12
:return:
"""
sorted_w_arr = np.sort(copy.deepcopy(self.w_arr)) # small --> big number
w_min, w_max = sorted_w_arr[0], sorted_w_arr[-1]
        # The time constant τ is written as tao
tao_min = 1 / w_max
tao_max = 1 / w_min
tao_list = []
if self.M == 1:
tao_list.append(tao_min)
elif self.M == 2:
tao_list.extend([tao_min, tao_max])
elif self.M > 2:
tao_list.append(tao_min)
K = self.M - 1
for i in range(1, K):
tao = 10 ** (math.log10(tao_min) + i * math.log10(tao_max / tao_min) / (self.M - 1))
tao_list.append(tao)
tao_list.append(tao_max)
self.tao_arr = np.array(tao_list)
def update_u(self):
"""
refer paper0-eq21
:return:
"""
if self.fit_type == 'complex':
self.M_R_arr = self.para_arr[1:-2]
positive_R_list = []
            negative_R_list = []
            for R in self.M_R_arr:
                if R >= 0:
                    positive_R_list.append(R)
                elif R < 0:
                    negative_R_list.append(R)
            self.u = 1 - abs(sum(negative_R_list)) / sum(positive_R_list)
def lin_KK(self):
self.u = 1
self.calc_timeConstant()
while (self.u > self.u_optimum) and (self.M <= self.M_max):
self.M += 1
self.calc_timeConstant()
self.fit_kk()
# print('M = ', self.M, 'U = ', self.u)
self.update_u()
# print('M = ', self.M, 'U = ', self.u)
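    # Minimal usage sketch (my_is is a placeholder IS object, illustration only):
    #   vogit = Vogit_3(impSpe=my_is, fit_type='complex', add_C=True)
    #   vogit.lin_KK()        # grows M until u <= u_optimum or M reaches M_max
    #   vogit.simulate_Z()    # fills vogit.z_sim_arr from the fitted para_arr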
def fit_kk(self):
"""
Are/im
N row
M+2 or M+3(with capacity) col
Are
col 0: Rs(w0) / |Z(w0)|, Rs(w1) / |Z(w1)|, Rs(w2) / |Z(w2)|, ..., Rs(w_N-1) / |Z(w_N-1)|
col 1: Z_RCk_0(w0)_re = Rk_0 / {[1+(w0*tao0)**2]*|Z(w0)|},
Z_RCk_0(w1)_re = Rk_0 / {[1+(w1*tao0)**2]*|Z(w1)|}
Z_RCk_0(w2)_re = Rk_0 / {[1+(w2*tao0)**2]*|Z(w2)|},
...,
Z_RCk_0(w_N-1)_re = Rk_0 / {[1+(w_N-1*tao_0)**2]*|Z(w_N-1)|}
...
col k(M): Z_RCk_k(w0)_re = Rk_k / {[1+(w0*taok)**2]*|Z(w0)|},
Z_RCk_k(w1)_re = Rk_k / {[1+(w1*taok)**2]*|Z(w1)|}
Z_RCk_k(w2)_re = Rk_k / {[1+(w2*taok)**2]*|Z(w2)|},
...,
Z_RCk_k(w_N-1)_re = Rk_k / {[1+(w_N-1*tao_k)**2]*|Z(w_N-1)|}
            col -2(C): if a capacity is added, its contribution to the real part of the impedance is 0
                0, 0, 0, ..., 0
            col -1(L): L contributes 0 to the real part of the impedance
                0, 0, 0, ..., 0
Aim
col 0: Rs(wi)_im = 0, 0,0,0,...,0,0
col 1: Z_RCk_0(w0)_im = (-1 * w0 * Rk_0 * tao0) / {[1+(w0*tao0)**2]*|Z(w0)|},
Z_RCk_0(w1)_im = (-1 * w1 * Rk_0 * tao0) / {[1+(w1*tao0)**2]*|Z(w1)|},
Z_RCk_0(w2)_im = (-1 * w2 * Rk_0 * tao0) / {[1+(w2*tao0)**2]*|Z(w2)|},
...,
Z_RCk_0(w_N-1)_im = (-1 * w_N-1 * Rk_0 * tao0) / {[1+(w_N-1*tao0)**2]*|Z(w0_N-1)|},
...
col k(M):
col -2(C):
col -1(L):
:return:
"""
Are = np.zeros(shape=(self.w_arr.size, self.M + 2))
Aim = np.zeros(shape=(self.w_arr.size, self.M + 2))
if self.add_C:
Are = np.zeros(shape=(self.w_arr.size, self.M + 3))
Aim = np.zeros(shape=(self.w_arr.size, self.M + 3))
# Rs col
Are[:,0] = 1 / np.abs(self.z_arr)
        # Aim[:,0] = np.zeros(shape=(self.w_arr.size)) -- already zero, nothing to set
# RC_1~M col
for i in range(self.M):
Are[:, i+1] = RC(para_arr=np.array([1, self.tao_arr[i]]), w_arr=self.w_arr).real / np.abs(self.z_arr)
Aim[:, i+1] = RC(para_arr=np.array([1, self.tao_arr[i]]), w_arr=self.w_arr).imag / np.abs(self.z_arr)
if self.add_C:
            # Are[:, -2] = np.zeros(shape=(self.w_arr.size)) -- already zero, nothing to set
Aim[:, -2] = -1 / (self.w_arr * np.abs(self.z_arr))
Aim[:, -1] = self.w_arr / np.abs(self.z_arr)
if self.fit_type == 'real':
self.para_arr = np.linalg.pinv(Are).dot(self.z_arr.real / np.abs(self.z_arr))
XLim = np.zeros(shape=(self.w_arr.size, 2))
            # Following paper0-Lin-KK-Eq10, construct a second set of equations to solve for C and L, with X = 1/C
# data for L-col
# Aim[:, -1] = self.w_arr / np.abs(self.z_arr)
XLim[:, -1] = self.w_arr / np.abs(self.z_arr)
# data for C-col
if self.add_C:
XLim[:, -2] = -1 / self.w_arr / np.abs(self.z_arr)
# Aim[:, -2] = -1 / self.w_arr / np.abs(self.z_arr)
"""
            Why self.para_arr[-2] used to be set to a tiny positive number such as 1e-18:
                when fit_type == 'real', self.para_arr = np.linalg.pinv(Are).dot(self.z_arr.real / np.abs(self.z_arr))
                gives para_arr[-2:] = [X, L] == [0, 0]; the code right below immediately computes the impedance of the
                fitted parameters, and the Cs impedance uses Cs = 1/X, so with X = 0, Cs -> Inf. X therefore needs a
                small positive value to prevent numerical overflow.
"""
# self.para_arr[-2] = 1e-20
# self.simulate_Z()
# tmp_para_arr = np.linalg.pinv(Aim).dot((self.z_arr.imag - self.z_sim_arr.imag) / np.abs(self.z_arr))
z_vogit_arr = self.simulate_vogit()
XL = np.linalg.pinv(Aim).dot((self.z_arr.imag - z_vogit_arr.imag) / np.abs(self.z_arr))
# self.para_arr[-1] = tmp_para_arr[-1]
self.para_arr[-1] = XL[-1]
if self.add_C:
# self.para_arr[-2] = tmp_para_arr[-2]
self.para_arr[-2] = XL[-2]
elif self.fit_type == 'imag':
self.para_arr = np.linalg.pinv(Aim).dot(self.z_arr.imag / np.abs(self.z_arr))
"""
            Compute Rs following paper1-lin-KK-Eq7.
            The summation inside the square brackets of Eq7 == the contribution of Vogit's M RC elements to the real part.
"""
self.simulate_Z()
weight_arr = 1 / (np.abs(self.z_arr) ** 2)
# paper1-Eq 7
# ValueError: setting an array element with a sequence.
Rs = np.sum(weight_arr * (self.z_arr.real - self.z_sim_arr.real)) / np.sum(weight_arr)
self.para_arr[0] = Rs
elif self.fit_type == 'complex':
A_inv = np.linalg.inv(Are.T.dot(Are) + Aim.T.dot(Aim))
Y = Are.T.dot(self.z_arr.real / np.abs(self.z_arr)) + Aim.T.dot(self.z_arr.imag / np.abs(self.z_arr))
self.para_arr = A_inv.dot(Y)
def simulate_vogit(self):
"""
        The Vogit here is the bare Rs + M * RC
:return:
"""
self.Rs = self.para_arr[0]
self.M_R_arr = self.para_arr[1: self.M+1]
z_vogit_arr = np.empty(shape=(self.M, self.w_arr.size), dtype=complex)
# Z of M RC
for i, R in enumerate(self.M_R_arr):
z_RC_arr = RC(para_arr=np.array([R, self.tao_arr[i]]), w_arr=self.w_arr)
z_vogit_arr[i, :] = z_RC_arr
z_vogit_arr = z_vogit_arr.sum(axis=0)
z_vogit_arr += self.Rs
return z_vogit_arr
def simulate_Z(self):
self.Rs = self.para_arr[0]
self.Ls = self.para_arr[-1]
if self.add_C:
self.M_R_arr = self.para_arr[1: -2]
# X = 1/C
self.Cs = 1 / self.para_arr[-2]
# print('Cs:', self.Cs)
self.z_sim_arr = np.empty(shape=(self.M + 2, self.w_arr.size), dtype=complex)
else:
self.M_R_arr = self.para_arr[1: -1]
self.z_sim_arr = np.empty(shape=(self.M + 1, self.w_arr.size), dtype=complex)
        # ---------- Assemble in order: impedance of the M RCs -> [impedance of C] -> impedance of L -> impedance of Rs ----------
# Z of M RC
for i, R in enumerate(self.M_R_arr):
z_RC_arr = RC(para_arr=np.array([R, self.tao_arr[i]]), w_arr=self.w_arr)
self.z_sim_arr[i, :] = z_RC_arr
if self.add_C:
# Z of Cs
self.z_sim_arr[self.M, :] = np.array([ele_C(w, C=self.Cs) for w in self.w_arr])
# Z of Ls
self.z_sim_arr[self.M+1, :] = np.array([ele_L(w, L=self.Ls) for w in self.w_arr])
else:
# Z of Ls
self.z_sim_arr[self.M, :] = np.array([ele_L(w, L=self.Ls) for w in self.w_arr])
self.z_sim_arr = self.z_sim_arr.sum(axis=0)
# Z of Rs
self.z_sim_arr += self.Rs
        # ---------- Assemble in order: impedance of the M RCs -> impedance of C -> impedance of L -> impedance of Rs ----------
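        # Shape bookkeeping (illustrative): z_sim_arr starts as an (M+1) x N or
        # (M+2) x N complex matrix, one row per circuit element, and sum(axis=0)
        # collapses it to the N-point spectrum of the model before Rs is added.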
def cal_residual(self):
"""
        Following paper0-Eq 15 and Eq 16:
residual_arr = Z_arr - Z_sim_arr
:return:
"""
self.simulate_Z()
z_abs_arr = np.abs(self.z_arr)
self.residual_arr = (self.z_arr - self.z_sim_arr) / z_abs_arr
def residual_statistic(self, type):
"""
        Several quantitative measures of the residual, as defined here:
        1 absolute value of the residual
            absolute value of the real-part residual
            absolute value of the imaginary-part residual
        2 square of the residual
            square of the real-part residual
            square of the imaginary-part residual
:param
type: str
'abs'
'square'
"""
self.cal_residual()
if type == 'abs':
residual_real_abs_arr = np.abs(self.residual_arr.real)
residual_imag_abd_arr = np.abs(self.residual_arr.imag)
return residual_real_abs_arr, residual_imag_abd_arr
elif type == 'square':
residual_real_square_arr = self.residual_arr.real ** 2
residual_imag_square_arr = self.residual_arr.imag ** 2
return residual_real_square_arr, residual_imag_square_arr
def cal_chiSquare(self, weight_type='modulus'):
"""
        This cannot be computed the ZSimpWin way, because that involves the number of parameters in the ECM,
        and the ECM may differ before and after points are deleted, making it incomputable.
        Hence chiSquare = weight * [ΔRe**2 + ΔIm**2] is used instead.
:return:
"""
self.simulate_Z()
if weight_type == 'modulus':
self.chi_square = cal_ChiSquare_0(z_arr=self.z_arr, z_sim_arr=self.z_sim_arr, weight_type=weight_type)
return self.chi_square
def save2pkl(self, fp, fn):
pickle_file(obj=self, fn=fn, fp=fp)
# ---------------------------------- Test Vogit_3 on Lin-KK-Ex1_LIB_time_invariant ----------------------------------
# 1- load data
# fit_type = 'real'
# fit_type = 'imag'
# fit_type = 'complex'
# lib_res_fp = '../plugins_test/jupyter_code/rbp_files/2/example_data_sets/LIB_res'
# if fit_type == 'complex':
# ex1_data_dict = np.load(os.path.join(lib_res_fp, 'Ex1_LIB_time_invariant_res.npz'))
# elif fit_type == 'real':
# ex1_data_dict = np.load(os.path.join(lib_res_fp, 'Ex1_LIB_time_invariant_real_addC_res.npz'))
# elif fit_type == 'imag':
# ex1_data_dict = np.load(os.path.join(lib_res_fp, 'Ex1_LIB_time_invariant_imag_addC_res.npz'))
# ex1_z_arr = ex1_data_dict['z_arr']
# ex1_f_arr = ex1_data_dict['fre']
# ex1_z_MS_sim_arr = ex1_data_dict['z_sim']
# ex1_real_residual_arr = ex1_data_dict['real_residual']
# ex1_imag_residual_arr = ex1_data_dict['imag_residual']
# ex1_IS = IS_0()
# ex1_IS.raw_z_arr = ex1_z_arr
# ex1_IS.exp_area = 1.0
# ex1_IS.z_arr = ex1_z_arr
# ex1_IS.fre_arr = ex1_f_arr
# ex1_IS.w_arr = ex1_IS.fre_arr * 2 * math.pi
# --------------- real Fit ---------------
# ex1_vogit = Vogit_3(impSpe=ex1_IS, fit_type=fit_type, add_C=True)
# ex1_vogit.lin_KK()
# # compare nyquist plots of MS-Lin-KK and Mine
# ex1_z_MS_sim_list = ex1_z_MS_sim_arr.tolist()
# ex1_vogit.simulate_Z()
# z_pack_list = [ex1_z_arr.tolist(), ex1_z_MS_sim_list, ex1_vogit.z_sim_arr.tolist()]
# nyquist_multiPlots_1(z_pack_list=z_pack_list, x_lim=[0.015, 0.045], y_lim=[0, 0.02], plot_label_list=['Ideal IS', 'MS-real-Fit','Mine-real-Fit'])
# --------------- real Fit ---------------
# --------------- imag Fit ---------------
# ex1_vogit = Vogit_3(impSpe=ex1_IS, fit_type=fit_type, add_C=True)
# ex1_vogit.lin_KK()
# # compare nyquist plots of MS-Lin-KK and Mine
# ex1_z_MS_sim_list = ex1_z_MS_sim_arr.tolist()
# ex1_vogit.simulate_Z()
# z_pack_list = [ex1_z_arr.tolist(), ex1_z_MS_sim_list, ex1_vogit.z_sim_arr.tolist()]
# nyquist_multiPlots_1(z_pack_list=z_pack_list, x_lim=[0.015, 0.045], y_lim=[0, 0.02], plot_label_list=['Ideal IS', 'MS-imag-Fit','Mine-imag-Fit'])
# --------------- imag Fit ---------------
# --------------- Complex Fit ---------------
# ex1_vogit = Vogit_3(impSpe=ex1_IS, add_C=True)
# ex1_vogit.lin_KK()
# # compare nyquist plots of MS-Lin-KK and Mine
# ex1_z_MS_sim_list = ex1_z_MS_sim_arr.tolist()
# ex1_vogit.simulate_Z()
# z_pack_list = [ex1_z_arr.tolist(), ex1_z_MS_sim_list, ex1_vogit.z_sim_arr.tolist()]
# nyquist_multiPlots_1(z_pack_list=z_pack_list, x_lim=[0.015, 0.045], y_lim=[0, 0.02], plot_label_list=['Ideal IS', 'MS-Fit','Mine-Fit'])
# --------------- Complex Fit ---------------
# ---------------------------------- Test Vogit_1 on Lin-KK-Ex1_LIB_time_invariant ---------------------------------- | 1,881 | 0 | 112 |
8ce4728ba2ad7bcb6e60aff77117e3d48d808d4a | 406 | py | Python | Python3/FileIO/write.py | norbertosanchezdichi/TIL | 2e9719ddd288022f53b094a42679e849bdbcc625 | [
"MIT"
] | null | null | null | Python3/FileIO/write.py | norbertosanchezdichi/TIL | 2e9719ddd288022f53b094a42679e849bdbcc625 | [
"MIT"
] | null | null | null | Python3/FileIO/write.py | norbertosanchezdichi/TIL | 2e9719ddd288022f53b094a42679e849bdbcc625 | [
"MIT"
] | null | null | null | with open("haiku.txt", "w") as file:
file.write("Writing files is great\n")
file.write("Here's another line of text\n")
file.write("Closing now, goodbye!")
with open("haiku.txt", "w") as file:
file.write("Here's one more haiku\n")
file.write("What about the older one?\n")
file.write("Let's go check it out")
with open("lol.txt", "w") as file:
file.write("lol" * 1000) | 33.833333 | 47 | 0.62069 | with open("haiku.txt", "w") as file:
file.write("Writing files is great\n")
file.write("Here's another line of text\n")
file.write("Closing now, goodbye!")
with open("haiku.txt", "w") as file:
file.write("Here's one more haiku\n")
file.write("What about the older one?\n")
file.write("Let's go check it out")
with open("lol.txt", "w") as file:
file.write("lol" * 1000) | 0 | 0 | 0 |
4a3e2b0ffbf9e7280df0c8a13b538c297ebb0165 | 7,562 | py | Python | packnet_sfm/utils/image.py | bingai/packnet-sfm-nrs | 2e9fb8850b4e1ae2227e30bff580997fb5377802 | [
"MIT"
] | null | null | null | packnet_sfm/utils/image.py | bingai/packnet-sfm-nrs | 2e9fb8850b4e1ae2227e30bff580997fb5377802 | [
"MIT"
] | null | null | null | packnet_sfm/utils/image.py | bingai/packnet-sfm-nrs | 2e9fb8850b4e1ae2227e30bff580997fb5377802 | [
"MIT"
] | null | null | null | # Copyright 2020 Toyota Research Institute. All rights reserved.
import cv2
import torch
import torch.nn.functional as funct
from functools import lru_cache
from PIL import Image
from packnet_sfm.utils.misc import same_shape
def load_image(path):
"""
Read an image using PIL
Parameters
----------
path : str
Path to the image
Returns
-------
image : PIL.Image
Loaded image
"""
# print("----------", path)
return Image.open(path)
def write_image(filename, image):
"""
Write an image to file.
Parameters
----------
filename : str
File where image will be saved
image : np.array [H,W,3]
RGB image
"""
cv2.imwrite(filename, image[:, :, ::-1])
def flip_lr(image):
"""
Flip image horizontally
Parameters
----------
image : torch.Tensor [B,3,H,W]
Image to be flipped
Returns
-------
image_flipped : torch.Tensor [B,3,H,W]
Flipped image
"""
assert image.dim() == 4, 'You need to provide a [B,C,H,W] image to flip'
return torch.flip(image, [3])
def flip_model(model, image, flip):
"""
Flip input image and flip output inverse depth map
Parameters
----------
model : nn.Module
Module to be used
image : torch.Tensor [B,3,H,W]
Input image
flip : bool
True if the flip is happening
Returns
-------
inv_depths : list of torch.Tensor [B,1,H,W]
List of predicted inverse depth maps
"""
if flip:
return [flip_lr(inv_depth) for inv_depth in model(flip_lr(image))]
else:
return model(image)
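# Illustrative call (depth_net and batch_image are placeholders, not defined here):
#   inv_depths = flip_model(depth_net, batch_image, flip=True)
# Flipping the input and un-flipping every output makes the horizontal flip a
# test-time augmentation rather than a change to the network itself.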
########################################################################################################################
def gradient_x(image):
"""
Calculates the gradient of an image in the x dimension
Parameters
----------
image : torch.Tensor [B,3,H,W]
Input image
Returns
-------
gradient_x : torch.Tensor [B,3,H,W-1]
Gradient of image with respect to x
"""
return image[:, :, :, :-1] - image[:, :, :, 1:]
def gradient_y(image):
"""
Calculates the gradient of an image in the y dimension
Parameters
----------
image : torch.Tensor [B,3,H,W]
Input image
Returns
-------
gradient_y : torch.Tensor [B,3,H-1,W]
Gradient of image with respect to y
"""
return image[:, :, :-1, :] - image[:, :, 1:, :]
########################################################################################################################
def interpolate_image(image, shape, mode='bilinear', align_corners=True):
"""
Interpolate an image to a different resolution
Parameters
----------
image : torch.Tensor [B,?,h,w]
Image to be interpolated
shape : tuple (H, W)
Output shape
mode : str
Interpolation mode
align_corners : bool
True if corners will be aligned after interpolation
Returns
-------
image : torch.Tensor [B,?,H,W]
Interpolated image
"""
# Take last two dimensions as shape
if len(shape) > 2:
shape = shape[-2:]
# If the shapes are the same, do nothing
if same_shape(image.shape[-2:], shape):
return image
else:
# Interpolate image to match the shape
return funct.interpolate(image, size=shape, mode=mode,
align_corners=align_corners)
def interpolate_scales(images, shape=None, mode='bilinear', align_corners=False):
"""
Interpolate list of images to the same shape
Parameters
----------
images : list of torch.Tensor [B,?,?,?]
Images to be interpolated, with different resolutions
shape : tuple (H, W)
Output shape
mode : str
Interpolation mode
align_corners : bool
True if corners will be aligned after interpolation
Returns
-------
images : list of torch.Tensor [B,?,H,W]
Interpolated images, with the same resolution
"""
# If no shape is provided, interpolate to highest resolution
if shape is None:
shape = images[0].shape
# Take last two dimensions as shape
if len(shape) > 2:
shape = shape[-2:]
# Interpolate all images
return [funct.interpolate(image, shape, mode=mode,
align_corners=align_corners) for image in images]
def match_scales(image, targets, num_scales,
mode='bilinear', align_corners=True):
"""
Interpolate one image to produce a list of images with the same shape as targets
Parameters
----------
image : torch.Tensor [B,?,h,w]
Input image
targets : list of torch.Tensor [B,?,?,?]
Tensors with the target resolutions
num_scales : int
Number of considered scales
mode : str
Interpolation mode
align_corners : bool
True if corners will be aligned after interpolation
Returns
-------
images : list of torch.Tensor [B,?,?,?]
List of images with the same resolutions as targets
"""
# For all scales
images = []
image_shape = image.shape[-2:]
for i in range(num_scales):
target_shape = targets[i].shape
# If image shape is equal to target shape
if same_shape(image_shape, target_shape):
images.append(image)
else:
# Otherwise, interpolate
images.append(interpolate_image(
image, target_shape, mode=mode, align_corners=align_corners))
# Return scaled images
return images
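# Illustrative call (shapes assumed): matching a [B,3,H,W] image to a 4-scale
# pyramid of predictions returns four tensors whose spatial sizes follow targets:
#   scaled_images = match_scales(image, inv_depths, num_scales=4)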
########################################################################################################################
@lru_cache(maxsize=None)
def meshgrid(B, H, W, dtype, device, normalized=False):
"""
Create meshgrid with a specific resolution
Parameters
----------
B : int
Batch size
H : int
Height size
W : int
Width size
dtype : torch.dtype
Meshgrid type
device : torch.device
Meshgrid device
normalized : bool
True if grid is normalized between -1 and 1
Returns
-------
xs : torch.Tensor [B,1,W]
Meshgrid in dimension x
ys : torch.Tensor [B,H,1]
Meshgrid in dimension y
"""
if normalized:
xs = torch.linspace(-1, 1, W, device=device, dtype=dtype)
ys = torch.linspace(-1, 1, H, device=device, dtype=dtype)
else:
xs = torch.linspace(0, W-1, W, device=device, dtype=dtype)
ys = torch.linspace(0, H-1, H, device=device, dtype=dtype)
ys, xs = torch.meshgrid([ys, xs])
return xs.repeat([B, 1, 1]), ys.repeat([B, 1, 1])
@lru_cache(maxsize=None)
def image_grid(B, H, W, dtype, device, normalized=False):
"""
Create an image grid with a specific resolution
Parameters
----------
B : int
Batch size
H : int
Height size
W : int
Width size
dtype : torch.dtype
Meshgrid type
device : torch.device
Meshgrid device
normalized : bool
True if grid is normalized between -1 and 1
Returns
-------
grid : torch.Tensor [B,3,H,W]
Image grid containing a meshgrid in x, y and 1
"""
xs, ys = meshgrid(B, H, W, dtype, device, normalized=normalized)
ones = torch.ones_like(xs)
grid = torch.stack([xs, ys, ones], dim=1)
return grid
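# Quick shape check (illustration only):
#   grid = image_grid(2, 4, 6, torch.float32, 'cpu')
#   assert grid.shape == (2, 3, 4, 6)   # channels are x, y and a plane of ones
# Note the lru_cache above: repeated calls with identical (B, H, W, dtype, device)
# return the same cached tensor.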
########################################################################################################################
| 26.440559 | 120 | 0.549855 | # Copyright 2020 Toyota Research Institute. All rights reserved.
import cv2
import torch
import torch.nn.functional as funct
from functools import lru_cache
from PIL import Image
from packnet_sfm.utils.misc import same_shape
def load_image(path):
"""
Read an image using PIL
Parameters
----------
path : str
Path to the image
Returns
-------
image : PIL.Image
Loaded image
"""
# print("----------", path)
return Image.open(path)
def write_image(filename, image):
"""
Write an image to file.
Parameters
----------
filename : str
File where image will be saved
image : np.array [H,W,3]
RGB image
"""
cv2.imwrite(filename, image[:, :, ::-1])
def flip_lr(image):
"""
Flip image horizontally
Parameters
----------
image : torch.Tensor [B,3,H,W]
Image to be flipped
Returns
-------
image_flipped : torch.Tensor [B,3,H,W]
Flipped image
"""
assert image.dim() == 4, 'You need to provide a [B,C,H,W] image to flip'
return torch.flip(image, [3])
def flip_model(model, image, flip):
"""
Flip input image and flip output inverse depth map
Parameters
----------
model : nn.Module
Module to be used
image : torch.Tensor [B,3,H,W]
Input image
flip : bool
True if the flip is happening
Returns
-------
inv_depths : list of torch.Tensor [B,1,H,W]
List of predicted inverse depth maps
"""
if flip:
return [flip_lr(inv_depth) for inv_depth in model(flip_lr(image))]
else:
return model(image)
########################################################################################################################
def gradient_x(image):
"""
Calculates the gradient of an image in the x dimension
Parameters
----------
image : torch.Tensor [B,3,H,W]
Input image
Returns
-------
gradient_x : torch.Tensor [B,3,H,W-1]
Gradient of image with respect to x
"""
return image[:, :, :, :-1] - image[:, :, :, 1:]
def gradient_y(image):
"""
Calculates the gradient of an image in the y dimension
Parameters
----------
image : torch.Tensor [B,3,H,W]
Input image
Returns
-------
gradient_y : torch.Tensor [B,3,H-1,W]
Gradient of image with respect to y
"""
return image[:, :, :-1, :] - image[:, :, 1:, :]
########################################################################################################################
def interpolate_image(image, shape, mode='bilinear', align_corners=True):
"""
Interpolate an image to a different resolution
Parameters
----------
image : torch.Tensor [B,?,h,w]
Image to be interpolated
shape : tuple (H, W)
Output shape
mode : str
Interpolation mode
align_corners : bool
True if corners will be aligned after interpolation
Returns
-------
image : torch.Tensor [B,?,H,W]
Interpolated image
"""
# Take last two dimensions as shape
if len(shape) > 2:
shape = shape[-2:]
# If the shapes are the same, do nothing
if same_shape(image.shape[-2:], shape):
return image
else:
# Interpolate image to match the shape
return funct.interpolate(image, size=shape, mode=mode,
align_corners=align_corners)
def interpolate_scales(images, shape=None, mode='bilinear', align_corners=False):
"""
Interpolate list of images to the same shape
Parameters
----------
images : list of torch.Tensor [B,?,?,?]
Images to be interpolated, with different resolutions
shape : tuple (H, W)
Output shape
mode : str
Interpolation mode
align_corners : bool
True if corners will be aligned after interpolation
Returns
-------
images : list of torch.Tensor [B,?,H,W]
Interpolated images, with the same resolution
"""
# If no shape is provided, interpolate to highest resolution
if shape is None:
shape = images[0].shape
# Take last two dimensions as shape
if len(shape) > 2:
shape = shape[-2:]
# Interpolate all images
return [funct.interpolate(image, shape, mode=mode,
align_corners=align_corners) for image in images]
def match_scales(image, targets, num_scales,
mode='bilinear', align_corners=True):
"""
Interpolate one image to produce a list of images with the same shape as targets
Parameters
----------
image : torch.Tensor [B,?,h,w]
Input image
targets : list of torch.Tensor [B,?,?,?]
Tensors with the target resolutions
num_scales : int
Number of considered scales
mode : str
Interpolation mode
align_corners : bool
True if corners will be aligned after interpolation
Returns
-------
images : list of torch.Tensor [B,?,?,?]
List of images with the same resolutions as targets
"""
# For all scales
images = []
image_shape = image.shape[-2:]
for i in range(num_scales):
target_shape = targets[i].shape
# If image shape is equal to target shape
if same_shape(image_shape, target_shape):
images.append(image)
else:
# Otherwise, interpolate
images.append(interpolate_image(
image, target_shape, mode=mode, align_corners=align_corners))
# Return scaled images
return images
########################################################################################################################
@lru_cache(maxsize=None)
def meshgrid(B, H, W, dtype, device, normalized=False):
"""
Create meshgrid with a specific resolution
Parameters
----------
B : int
Batch size
H : int
Height size
W : int
Width size
dtype : torch.dtype
Meshgrid type
device : torch.device
Meshgrid device
normalized : bool
True if grid is normalized between -1 and 1
Returns
-------
xs : torch.Tensor [B,1,W]
Meshgrid in dimension x
ys : torch.Tensor [B,H,1]
Meshgrid in dimension y
"""
if normalized:
xs = torch.linspace(-1, 1, W, device=device, dtype=dtype)
ys = torch.linspace(-1, 1, H, device=device, dtype=dtype)
else:
xs = torch.linspace(0, W-1, W, device=device, dtype=dtype)
ys = torch.linspace(0, H-1, H, device=device, dtype=dtype)
ys, xs = torch.meshgrid([ys, xs])
return xs.repeat([B, 1, 1]), ys.repeat([B, 1, 1])
@lru_cache(maxsize=None)
def image_grid(B, H, W, dtype, device, normalized=False):
"""
Create an image grid with a specific resolution
Parameters
----------
B : int
Batch size
H : int
Height size
W : int
Width size
dtype : torch.dtype
Meshgrid type
device : torch.device
Meshgrid device
normalized : bool
True if grid is normalized between -1 and 1
Returns
-------
grid : torch.Tensor [B,3,H,W]
Image grid containing a meshgrid in x, y and 1
"""
xs, ys = meshgrid(B, H, W, dtype, device, normalized=normalized)
ones = torch.ones_like(xs)
grid = torch.stack([xs, ys, ones], dim=1)
return grid
########################################################################################################################
| 0 | 0 | 0 |
54cd5059fa45484cd8e7fbee1025e19845882aa9 | 901 | py | Python | pokemongo_bot/tree_config_builder.py | Jasperrr91/pokemongo | 67b64870939a9f19e88321dbe2b0ff6174e7397b | [
"MIT"
] | null | null | null | pokemongo_bot/tree_config_builder.py | Jasperrr91/pokemongo | 67b64870939a9f19e88321dbe2b0ff6174e7397b | [
"MIT"
] | null | null | null | pokemongo_bot/tree_config_builder.py | Jasperrr91/pokemongo | 67b64870939a9f19e88321dbe2b0ff6174e7397b | [
"MIT"
] | null | null | null | import cell_workers
| 25.742857 | 85 | 0.604883 | import cell_workers
class ConfigException(Exception):
pass
class TreeConfigBuilder(object):
def __init__(self, bot, tasks_raw):
self.bot = bot
self.tasks_raw = tasks_raw
def _get_worker_by_name(self, name):
try:
worker = getattr(cell_workers, name)
except AttributeError:
raise ConfigException('No worker named {} defined'.format(name))
return worker
def build(self):
workers = []
for task in self.tasks_raw:
task_type = task.get('type', None)
if task_type is None:
raise ConfigException('No type found for given task {}'.format(task))
task_config = task.get('config', {})
worker = self._get_worker_by_name(task_type)
instance = worker(self.bot, task_config)
workers.append(instance)
return workers
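# Illustrative usage (hypothetical worker names; real ones come from the bot's
# JSON config and must exist as attributes of cell_workers):
#   builder = TreeConfigBuilder(bot, [
#       {'type': 'HandleSoftBan'},
#       {'type': 'CatchVisiblePokemon', 'config': {'enabled': True}},
#   ])
#   workers = builder.build()   # raises ConfigException on an unknown 'type'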
| 722 | 32 | 126 |
d31474846e26a56b1a8fe87101c4edc3c8f19c43 | 4,475 | py | Python | aki.py | wolfniey/school-diary-telegram-bot | 7a50a4649c2cd9f8052f47dbf9ef697bcfa151bd | [
"MIT"
] | 2 | 2020-04-27T10:45:31.000Z | 2020-07-28T08:55:55.000Z | aki.py | wolfniey/school-diary-telegram-bot | 7a50a4649c2cd9f8052f47dbf9ef697bcfa151bd | [
"MIT"
] | null | null | null | aki.py | wolfniey/school-diary-telegram-bot | 7a50a4649c2cd9f8052f47dbf9ef697bcfa151bd | [
"MIT"
] | null | null | null | import logging
import accounts
import diary
from datetime import datetime, timedelta
from telegram import (ReplyKeyboardMarkup, ReplyKeyboardRemove)
from telegram.ext import (Updater, CommandHandler, MessageHandler, Filters,
ConversationHandler)
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
COMMON, SCHOOL, LOGIN = range(3)
KREPLY = ['Marks', 'Homework', 'Timetable', 'Choose School']
if __name__ == '__main__':
main() | 29.833333 | 144 | 0.693631 | import logging
import accounts
import diary
from datetime import datetime, timedelta
from telegram import (ReplyKeyboardMarkup, ReplyKeyboardRemove)
from telegram.ext import (Updater, CommandHandler, MessageHandler, Filters,
ConversationHandler)
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
COMMON, SCHOOL, LOGIN = range(3)
KREPLY = ['Marks', 'Homework', 'Timetable', 'Choose School']
def start(update, context):
reply_keyboard = [KREPLY]
uid = update.message.from_user
update.message.reply_text(
        'こんにちは! 私はAkiです。'  # "Hello! I'm Aki."
'''Do I know you? Let's see...''')
if uid in accounts.accounts:
update.message.reply_text('Come back ^_^', reply_markup=ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=False, resize_keyboard=True))
return COMMON
else:
update.message.reply_text('Wait, who are you? Please tell me. Type your login and psw like this: Kuristina 1234')
return LOGIN
def action_common(update, context):
if update.message.text == KREPLY[0]:
return marks(update, context)
if update.message.text == KREPLY[1]:
return homework(update, context)
if update.message.text == KREPLY[2]:
return timetable(update, context)
if update.message.text == KREPLY[3]:
        update.message.reply_text('Okay, so now just type the name of your school. Currently I know only 1 school: ' +
                                  'Lyceum of Kirovo-Chepetsk' + '\n\n' +
                                  'Type /cancel if you want to leave everything as it is now.')
return SCHOOL
def action_login(update, context):
uid = update.message.from_user
msg = update.message.text.split(' ')
if len(msg) == 2:
update.message.reply_text('Got it! Trying to authenticate you, please wait...')
status, user_full_name, data = diary.login_account(msg[0], msg[1], 'http://85.93.46.58:8082/')
if status:
accounts.accounts[uid] = data
update.message.reply_text('Awesome! Now you can ask me anything you want!')
return COMMON
else:
update.message.reply_text('Nah, it doesn\'t work. Please, try again.')
return LOGIN
else:
update.message.reply_text('Wrong format, I need only 2 words: your login and password')
return LOGIN
def marks(update, context):
uid = update.message.from_user
if uid in accounts.accounts:
status, data = diary.get_student_journal(accounts.accounts[uid], 'http://85.93.46.58:8082/')
if status:
update.message.reply_text('Here you go:\n' + data)
else:
update.message.reply_text('Whoops, something went wrong, try to log in again, I suppose.')
return COMMON
else:
update.message.reply_text('Something went wrong, I can\'t recognise you :( Please, log in again.')
return LOGIN
def homework(update, context):
uid = update.message.from_user
if uid in accounts.accounts:
now = datetime.today()
date = now
day_count = 7
status, data = diary.get_student_homework(accounts.accounts[uid], day_count, date, 'http://85.93.46.58:8082/')
if status:
update.message.reply_text('Here you go:\n' + data)
else:
update.message.reply_text('Whoops, something went wrong, try to log in again, I suppose')
return COMMON
else:
update.message.reply_text('Something went wrong, I can\'t recognise you :( Please, log in again.')
return LOGIN
def timetable(update, context):
    # TODO: timetable lookup is not implemented yet; placeholder output only.
    print('c')
def action_school(update, context):
    # TODO: school selection is not implemented yet; placeholder output only.
    print('d')
def cancel(update, context):
user = update.message.from_user
update.message.reply_text('Bye!',
reply_markup=ReplyKeyboardRemove())
return ConversationHandler.END
def back(update, context):
return COMMON
def error(update, context):
logger.warning('Update "%s" caused error "%s"', update, context.error)
def main():
    TOKEN = open("token.txt", "r").read().strip()  # strip a trailing newline, which would break authentication
updater = Updater(TOKEN, use_context=True)
dp = updater.dispatcher
conv_handler = ConversationHandler(
entry_points=[CommandHandler('start', start)],
states={
COMMON: [MessageHandler(Filters.text, action_common)],
SCHOOL: [MessageHandler(Filters.text, action_school),
CommandHandler('cancel', back)],
LOGIN: [MessageHandler(Filters.text, action_login)]
},
fallbacks=[CommandHandler('bye', cancel)]
)
dp.add_handler(conv_handler)
dp.add_error_handler(error)
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main() | 3,661 | 0 | 253 |
c01b111751bee63eeaa564788b3609ddc25c43bc | 819 | py | Python | 1_small-problems/1.1_the fibonacci sequence/fib4.py | bimri/Classic-Computer-Science-Problems-in-Python | d91a1120ef93a5f1b213fcbb7f8e1669c298a826 | [
"MIT"
] | null | null | null | 1_small-problems/1.1_the fibonacci sequence/fib4.py | bimri/Classic-Computer-Science-Problems-in-Python | d91a1120ef93a5f1b213fcbb7f8e1669c298a826 | [
"MIT"
] | null | null | null | 1_small-problems/1.1_the fibonacci sequence/fib4.py | bimri/Classic-Computer-Science-Problems-in-Python | d91a1120ef93a5f1b213fcbb7f8e1669c298a826 | [
"MIT"
] | null | null | null | "Automatic memoization"
'''
fib3() can be further simplified. Python has a built-in decorator for memoizing any
function automagically. In fib4(), the decorator @functools.lru_cache() is used
with the same exact code as we used in fib2(). Each time fib4() is executed with a
novel argument, the decorator causes the return value to be cached. Upon future calls
of fib4() with the same argument, the previous return value of fib4() for that argument
is retrieved from the cache and returned.
'''
from functools import lru_cache
@lru_cache(maxsize=None)
if __name__ == '__main__':
print(fib4(5))
print(fib4(50))
| 32.76 | 87 | 0.672772 | "Automatic memoization"
'''
fib3() can be further simplified. Python has a built-in decorator for memoizing any
function automagically. In fib4(), the decorator @functools.lru_cache() is used
with the same exact code as we used in fib2(). Each time fib4() is executed with a
novel argument, the decorator causes the return value to be cached. Upon future calls
of fib4() with the same argument, the previous return value of fib4() for that argument
is retrieved from the cache and returned.
'''
from functools import lru_cache
@lru_cache(maxsize=None)
def fib4(n) -> int: # same definition as fib2()
if n < 2: # base case
return n
return fib4(n-1) + fib4(n-2) # recursive case
if __name__ == '__main__':
print(fib4(5))
print(fib4(50))
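    # Optional: lru_cache exposes hit/miss statistics, which makes the
    # memoization visible. After the two calls above this should report
    # CacheInfo(hits=49, misses=51, maxsize=None, currsize=51).
    print(fib4.cache_info())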
| 172 | 0 | 22 |
869e1a8a1acc2e79662297051e47a12a2bfb89e6 | 1,133 | py | Python | patients/urls.py | Curewell-Homeo-Clinic/admin-system | c8ce56a2bdbccfe1e6bec09068932f1943498b9f | [
"MIT"
] | 1 | 2021-11-29T15:24:41.000Z | 2021-11-29T15:24:41.000Z | patients/urls.py | Curewell-Homeo-Clinic/admin-system | c8ce56a2bdbccfe1e6bec09068932f1943498b9f | [
"MIT"
] | 46 | 2021-11-29T16:05:55.000Z | 2022-03-01T13:04:45.000Z | patients/urls.py | Curewell-Homeo-Clinic/admin-system | c8ce56a2bdbccfe1e6bec09068932f1943498b9f | [
"MIT"
] | null | null | null | from django.urls import path, include
from patients.views.dashboard import dashboard
from patients.views.patient import patient_detail, patient_list
from patients.views.doctor import doctor_detail, doctor_list
from patients.views.appointment import appointment_detail, appointment_list
from patients.views.invoice import invoice_detail, invoice_list, invoice_print
from patients.views.stats import stats
urlpatterns = [
path('', dashboard, name='index'),
path('patients/', patient_list, name='patients'),
path('patient/<int:pk>/', patient_detail, name='patient'),
path('appointments/', appointment_list, name="appointments"),
path('appointment/<int:pk>/', appointment_detail, name="appointment"),
path('doctors/', doctor_list, name='doctors'),
path('doctor/<int:pk>/', doctor_detail, name='doctor'),
path('invoices/', invoice_list, name='invoices'),
path('invoice/<int:pk>/', invoice_detail, name='invoice'),
path('invoice/<int:pk>/print/', invoice_print, name='invoice_print'),
path('stats/', stats, name='stats'),
]
urlpatterns += [
path('api/v1/', include('patients.api.urls')),
] | 45.32 | 78 | 0.729038 | from django.urls import path, include
from patients.views.dashboard import dashboard
from patients.views.patient import patient_detail, patient_list
from patients.views.doctor import doctor_detail, doctor_list
from patients.views.appointment import appointment_detail, appointment_list
from patients.views.invoice import invoice_detail, invoice_list, invoice_print
from patients.views.stats import stats
urlpatterns = [
path('', dashboard, name='index'),
path('patients/', patient_list, name='patients'),
path('patient/<int:pk>/', patient_detail, name='patient'),
path('appointments/', appointment_list, name="appointments"),
path('appointment/<int:pk>/', appointment_detail, name="appointment"),
path('doctors/', doctor_list, name='doctors'),
path('doctor/<int:pk>/', doctor_detail, name='doctor'),
path('invoices/', invoice_list, name='invoices'),
path('invoice/<int:pk>/', invoice_detail, name='invoice'),
path('invoice/<int:pk>/print/', invoice_print, name='invoice_print'),
path('stats/', stats, name='stats'),
]
urlpatterns += [
path('api/v1/', include('patients.api.urls')),
] | 0 | 0 | 0 |
fef9c9b2ba53d6862218a750158a4731be0bac4e | 9,763 | py | Python | main.py | arame/SLAM | 7841dce5e6da641e676a1e4f0f2667300e2f8a15 | [
"OML"
] | null | null | null | main.py | arame/SLAM | 7841dce5e6da641e676a1e4f0f2667300e2f8a15 | [
"OML"
] | null | null | null | main.py | arame/SLAM | 7841dce5e6da641e676a1e4f0f2667300e2f8a15 | [
"OML"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
from numpy.linalg import inv
from pandas import DataFrame
import seaborn as sns
from robot_class import Robot
from helpers import display_world, make_data
## slam takes in 6 arguments and returns mu,
## mu is the entire path traversed by a robot (all x,y poses) *and* all landmarks locations
def initialize_constraints(N, num_landmarks, world_size):
''' This function takes in a number of time steps N, number of landmarks, and a world_size,
and returns initialized constraint matrices, omega and xi.'''
## Recommended: Define and store the size (rows/cols) of the constraint matrix in a variable
## TODO: Define the constraint matrix, Omega, with two initial "strength" values
## for the initial x, y location of our robot
side_len = N + num_landmarks
omega = [[[[1, 0], [0, 1]] if x==0 and y==0 else [[0, 0], [0, 0]] for x in range(side_len)] for y in range(side_len)]
xi = [[int(world_size / 2) if y==0 else 0 for x in range(2)] for y in range(side_len)]
return omega, xi
if __name__ == "__main__":
main()
| 37.263359 | 121 | 0.626754 | import numpy as np
import matplotlib.pyplot as plt
from numpy.linalg import inv
from pandas import DataFrame
import seaborn as sns
from robot_class import Robot
from helpers import display_world, make_data
def main():
# world parameters
num_landmarks = 5 # number of landmarks
N = 20 # time steps
world_size = 100.0 # size of world (square)
# robot parameters
measurement_range = 50.0 # range at which we can sense landmarks
motion_noise = 2.0 # noise in robot motion
measurement_noise = 2.0 # noise in the measurements
    distance = 20.0 # distance by which robot (intends to) move each iteration
# make_data instantiates a robot, AND generates random landmarks for a given world size and number of landmarks
data = make_data(N, num_landmarks, world_size, measurement_range, motion_noise, measurement_noise, distance)
# print out some stats about the data
time_step = 0
print('Example measurements: \n', data[time_step][0])
print('\n')
print('Example motion: \n', data[time_step][1])
# define a small N and world_size (small for ease of visualization)
N_test = 5
num_landmarks_test = 2
small_world = 10
# initialize the constraints
initial_omega, initial_xi = initialize_constraints(N_test, num_landmarks_test, small_world)
plt.rcParams["figure.figsize"] = (10,7)
# display omega (need to convert omega to a 2x2 matrix for the heatmap to show)
display_omega = reformat_omega(initial_omega)
sns.heatmap(display_omega, cmap='Blues', annot=True, linewidths=.5)
#plt.show()
# define figure size
plt.rcParams["figure.figsize"] = (1,7)
# display xi
sns.heatmap(DataFrame(initial_xi), cmap='Oranges', annot=True, linewidths=.5)
#plt.show()
## TODO: Complete the code to implement SLAM
# call your implementation of slam, passing in the necessary parameters
mu = slam(data, N, num_landmarks, world_size, motion_noise, measurement_noise)
# print out the resulting landmarks and poses
if(mu is not None):
# get the lists of poses and landmarks
# and print them out
poses, landmarks = get_poses_landmarks(mu, N, num_landmarks)
print_all(poses, landmarks)
# Display the final world!
# define figure size
plt.rcParams["figure.figsize"] = (20,20)
# check if poses has been created
if 'poses' in locals():
# print out the last pose
print('Last pose: ', poses[-1])
# display the last position of the robot *and* the landmark positions
display_world(int(world_size), poses[-1], landmarks)
print("*** THE END ***")
## slam takes in 6 arguments and returns mu,
## mu is the entire path traversed by a robot (all x,y poses) *and* all landmarks locations
def slam(data, N, num_landmarks, world_size, motion_noise, measurement_noise):
## TODO: Use your initilization to create constraint matrices, omega and xi
omega, xi = initialize_constraints(N, num_landmarks, world_size)
## TODO: Iterate through each time step in the data
## get all the motion and measurement data as you iterate
i_robot = -1
measurement_noise_val = 1.0 / measurement_noise
motion_noise_val = 1.0/motion_noise
for (measurement, motion) in data:
i_robot += 1
## TODO: update the constraint matrix/vector to account for all *measurements*
## this should be a series of additions that take into account the measurement noise
for i in range(len(measurement)):
i_landmark = N + measurement[i][0]
for j in range(2):
omega[i_landmark][i_landmark][j][j] += measurement_noise_val
omega[i_robot][i_robot][j][j] += measurement_noise_val
omega[i_robot][i_landmark][j][j] -= measurement_noise_val
omega[i_landmark][i_robot][j][j] -= measurement_noise_val
measurement_val = measurement[i][j + 1]/measurement_noise
xi[i_landmark][j] += measurement_val
xi[i_robot][j] -= measurement_val
## TODO: update the constraint matrix/vector to account for all *motion* and motion noise
for j in range(2):
omega[i_robot][i_robot][j][j] += motion_noise_val
omega[i_robot + 1][i_robot + 1][j][j] += motion_noise_val
omega[i_robot][i_robot+1][j][j] -= motion_noise_val
omega[i_robot+1][i_robot][j][j] -= motion_noise_val
xi[i_robot][j] -= motion[j]/motion_noise
xi[i_robot+1][j] += motion[j]/motion_noise
## TODO: After iterating through all the data
## Compute the best estimate of poses and landmark positions
## using the formula, omega_inverse * Xi
reformt_omega = reformat_omega(omega)
reformt_xi = reformat_xi(xi)
#display_omega_xi(reformt_omega, reformt_xi)
print("omega = ", reformt_omega)
print("=================================================================")
print("xi = ", reformt_xi)
print("=================================================================")
mu = inv(reformt_omega) @ reformt_xi
return mu # return `mu`
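# Worked micro-example of the solve step above, for a 1-D world with one pose
# constraint (x0 = 50) and one motion constraint (x1 - x0 = 10):
#   omega = [[ 2, -1],    xi = [40,
#            [-1,  1]]          10]
#   inv(omega) @ xi  ->  mu = [50, 60]
# i.e. the anchored first pose plus the propagated motion.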
def display_omega_xi(omega, xi):
# define figure size
plt.clf()
plt.rcParams["figure.figsize"] = (30,24)
# display omega
sns.heatmap(DataFrame(omega), cmap='Blues', annot=True, linewidths=.5)
plt.show()
plt.rcParams["figure.figsize"] = (1,17)
# display xi
sns.heatmap(DataFrame(xi), cmap='Oranges', annot=True, linewidths=.5)
plt.show()
def reformat_omega(omega):
reformat_omega = []
for i in range(len(omega)):
for k in range(2):
row = []
for j in range(len(omega)):
item = omega[i][j]
for l in range(2):
row.append(item[k][l])
reformat_omega.append(row)
return reformat_omega
def reformat_xi(xi):
reformat_xi = []
for i in range(len(xi)):
for j in range(2):
reformat_xi.append(xi[i][j])
matrix_xi = np.array(reformat_xi).T
return matrix_xi
def get_poses_landmarks(mu, N, num_landmarks):
# create a list of poses
poses = []
for i in range(N):
poses.append((mu[2*i].item(), mu[2*i+1].item()))
# create a list of landmarks
landmarks = []
for i in range(num_landmarks):
landmarks.append((mu[2*(N+i)].item(), mu[2*(N+i)+1].item()))
# return completed lists
return poses, landmarks
def print_all(poses, landmarks):
print('\n')
print('Estimated Poses:')
for i in range(len(poses)):
print('['+', '.join('%.3f'%p for p in poses[i])+']')
print('\n')
print('Estimated Landmarks:')
for i in range(len(landmarks)):
print('['+', '.join('%.3f'%l for l in landmarks[i])+']')
def initialize_constraints(N, num_landmarks, world_size):
''' This function takes in a number of time steps N, number of landmarks, and a world_size,
and returns initialized constraint matrices, omega and xi.'''
## Recommended: Define and store the size (rows/cols) of the constraint matrix in a variable
## TODO: Define the constraint matrix, Omega, with two initial "strength" values
## for the initial x, y location of our robot
side_len = N + num_landmarks
omega = [[[[1, 0], [0, 1]] if x==0 and y==0 else [[0, 0], [0, 0]] for x in range(side_len)] for y in range(side_len)]
xi = [[int(world_size / 2) if y==0 else 0 for x in range(2)] for y in range(side_len)]
return omega, xi
def main1():
print("started")
print("-------")
world_size = 10.0 # size of world (square)
measurement_range = 5.0 # range at which we can sense landmarks
motion_noise = 0.2 # noise in robot motion
measurement_noise = 0.2 # noise in the measurements
# instantiate a robot, r
r = Robot(world_size, measurement_range, motion_noise, measurement_noise)
# print out the location of r
print(r)
# define figure size
plt.rcParams["figure.figsize"] = (5,5)
# call display_world and display the robot in it's grid world
print(r)
display_world(int(world_size), [r.x, r.y])
# choose values of dx and dy (negative works, too)
dx = 1
dy = 2
r.move(dx, dy)
# print out the exact location
print(r)
# display the world after movement, not that this is the same call as before
# the robot tracks its own movement
display_world(int(world_size), [r.x, r.y])
# create any number of landmarks
num_landmarks = 3
r.make_landmarks(num_landmarks)
# print out our robot's exact location
print(r)
# display the world including these landmarks
display_world(int(world_size), [r.x, r.y], r.landmarks)
# print the locations of the landmarks
print('Landmark locations [x,y]: ', r.landmarks)
# try to sense any surrounding landmarks
measurements = r.sense()
# this will print out an empty list if `sense` has not been implemented
print(measurements)
data = []
# after a robot first senses, then moves (one time step)
# that data is appended like so:
data.append([measurements, [dx, dy]])
# for our example movement and measurement
print(data)
# in this example, we have only created one time step (0)
time_step = 0
# so you can access robot measurements:
print('Measurements: ', data[time_step][0])
# and its motion for a given time step:
print('Motion: ', data[time_step][1])
if __name__ == "__main__":
main()
| 8,461 | 0 | 183 |
eebde987156255ed4144196616f7930d77d68959 | 2,264 | py | Python | src/models/basic_linear_model.py | futu-munich-racing/neural-network-trainer | 9ab73a691fe09e853955abf72a6b7559e2711a10 | [
"MIT"
] | null | null | null | src/models/basic_linear_model.py | futu-munich-racing/neural-network-trainer | 9ab73a691fe09e853955abf72a6b7559e2711a10 | [
"MIT"
] | 4 | 2020-11-13T18:37:11.000Z | 2022-02-10T01:24:26.000Z | src/models/basic_linear_model.py | futu-munich-racing/neural-network-trainer | 9ab73a691fe09e853955abf72a6b7559e2711a10 | [
"MIT"
] | null | null | null | import tensorflow as tf
from tensorflow.python.keras.layers import Input
from tensorflow.python.keras.models import Model, load_model
from tensorflow.python.keras.layers import Convolution2D, Convolution3D
from tensorflow.python.keras.layers import MaxPooling2D, MaxPooling3D
from tensorflow.python.keras.activations import relu
from tensorflow.python.keras.layers import Dropout, Flatten, Dense
from tensorflow.python.keras.layers import Cropping2D, Cropping3D
| 36.516129 | 91 | 0.693905 | import tensorflow as tf
from tensorflow.python.keras.layers import Input
from tensorflow.python.keras.models import Model, load_model
from tensorflow.python.keras.layers import Convolution2D, Convolution3D
from tensorflow.python.keras.layers import MaxPooling2D, MaxPooling3D
from tensorflow.python.keras.activations import relu
from tensorflow.python.keras.layers import Dropout, Flatten, Dense
from tensorflow.python.keras.layers import Cropping2D, Cropping3D
def create_model(
image_width,
image_height,
image_channels,
crop_margin_from_top=80,
weight_loss_angle=0.8,
weight_loss_throttle=0.2,
):
tf.keras.backend.clear_session()
img_in = Input(shape=(image_height, image_width, image_channels), name="img_in")
x = img_in
x = Cropping2D(((crop_margin_from_top, 0), (0, 0)))(x)
# Define convolutional neural network to extract features from the images
x = Convolution2D(filters=24, kernel_size=(5, 5), strides=(2, 2), activation="relu")(x)
x = Convolution2D(filters=32, kernel_size=(5, 5), strides=(2, 2), activation="relu")(x)
x = Convolution2D(filters=64, kernel_size=(5, 5), strides=(2, 2), activation="relu")(x)
x = Convolution2D(filters=64, kernel_size=(3, 3), strides=(2, 2), activation="relu")(x)
x = Convolution2D(filters=64, kernel_size=(3, 3), strides=(1, 1), activation="relu")(x)
# Define decision layers to predict steering and throttle
x = Flatten(name="flattened")(x)
x = Dense(units=100, activation="linear")(x)
x = Dropout(rate=0.5)(x)
x = Dense(units=50, activation="linear")(x)
x = Dropout(rate=0.5)(x)
# categorical output of the angle
angle_out = Dense(units=1, activation="linear", name="angle_out")(x)
# continous output of throttle
throttle_out = Dense(units=1, activation="linear", name="throttle_out")(x)
model = Model(inputs=[img_in], outputs=[angle_out, throttle_out])
model.summary()
model.compile(
optimizer="adam",
loss={"angle_out": "mean_squared_error", "throttle_out": "mean_squared_error"},
loss_weights={
"angle_out": weight_loss_angle,
"throttle_out": weight_loss_throttle,
},
metrics=["mse", "mae", "mape"],
)
return model
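# Illustrative usage (the input resolution is an assumption; use the camera
# resolution the car actually records at):
#   model = create_model(image_width=320, image_height=240, image_channels=3)
#   model.fit({'img_in': images},
#             {'angle_out': angles, 'throttle_out': throttles},
#             epochs=10, batch_size=64)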
| 1,777 | 0 | 23 |
bb0297f41becea0709adc150426ee1cd3b03c974 | 858 | py | Python | projects/vault-of-scripts/YT-Playlist-Downloader/ytpldl.py | anouaraissani/H4ckT0b3rF3st-2k20 | 1f77652add0effdc7462c829dfb88d5f6818d07e | [
"MIT"
] | 1 | 2020-10-12T16:23:55.000Z | 2020-10-12T16:23:55.000Z | projects/vault-of-scripts/YT-Playlist-Downloader/ytpldl.py | anouaraissani/H4ckT0b3rF3st-2k20 | 1f77652add0effdc7462c829dfb88d5f6818d07e | [
"MIT"
] | 1 | 2020-10-11T17:06:48.000Z | 2020-10-11T17:06:48.000Z | projects/vault-of-scripts/YT-Playlist-Downloader/ytpldl.py | anouaraissani/H4ckT0b3rF3st-2k20 | 1f77652add0effdc7462c829dfb88d5f6818d07e | [
"MIT"
] | null | null | null | from pytube import YouTube # pip install pytube or pytube3
from pytube import Playlist
import os, re
if __name__ == '__main__':
playlist = Playlist("https://www.youtube.com/playlist?list=PL8A83A276F0D85E70")
main(1, playlist)
| 27.677419 | 83 | 0.622378 | from pytube import YouTube # pip install pytube or pytube3
from pytube import Playlist
import os, re
def Download(yt):
print("Downloading....")
    # Filter streams: keep only the .mp4 format
    vids = yt.streams.filter(file_extension='mp4')
    # Download the first matching stream into Tracks/
    vids[0].download(r"Tracks/")
def main(c, playlist):
# Filter Playlist Url
playlist._video_regex = re.compile(r"\"url\":\"(/watch\?v=[\w-]*)")
# Iterate Through Playlist
urls = playlist.video_urls
print("Number of tracks: ", len(urls))
for url in urls:
# Handle Url
yt = YouTube(url)
# Filename specification
_filename = yt.title
print(c, ". ", _filename)
# Downloading
Download(yt)
c = c + 1
if __name__ == '__main__':
playlist = Playlist("https://www.youtube.com/playlist?list=PL8A83A276F0D85E70")
main(1, playlist)
| 577 | 0 | 46 |
43a7e7000b9dabe6e0dbe1d58235a074bdf1c40f | 5,593 | py | Python | pybgp/test/test_pathattr.py | toddjcrane/pybgp | 5fa7699675c120b98c5b6fc637bfc29ba5a665f4 | [
"MIT"
] | 5 | 2015-06-14T02:51:23.000Z | 2019-01-05T15:54:22.000Z | pybgp/test/test_pathattr.py | toddjcrane/pybgp | 5fa7699675c120b98c5b6fc637bfc29ba5a665f4 | [
"MIT"
] | null | null | null | pybgp/test/test_pathattr.py | toddjcrane/pybgp | 5fa7699675c120b98c5b6fc637bfc29ba5a665f4 | [
"MIT"
] | 4 | 2016-11-26T01:43:10.000Z | 2021-08-13T16:08:27.000Z | #!/usr/bin/python
import socket
import unittest
from pybgp import pathattr, nlri
| 27.551724 | 190 | 0.550331 | #!/usr/bin/python
import socket
import unittest
from pybgp import pathattr, nlri
class TestOrigin(unittest.TestCase):
def test_encode(self):
orig = pathattr.Origin('igp')
b = orig.encode()
self.assertEqual(b, '\x40\x01\x01\x00')
def test_decode(self):
b = '\x40\x01\x01\x02'
used, orig = pathattr.decode(b)
self.assertEqual(used, len(b))
self.failUnless(isinstance(orig, pathattr.Origin))
self.assertEqual(orig.value, 'incomplete')
class TestAsPath(unittest.TestCase):
def sample(self):
shouldb = '\x40\x02' # as path
payload = '\x02\x02' # as path
payload += '\xff\xff' # 65535
payload += '\xff\xfe' # 65534
payload += '\x01\x02' # as set
payload += '\xde\xad' # 57005
payload += '\xbe\xef' # 48879
shouldb += chr(len(payload))
shouldb += payload
return shouldb
def test_encode(self):
aspath = pathattr.AsPath([
[65535,65534],
set([57005, 48879]),
])
b = aspath.encode()
self.assertEqual(b, self.sample())
def test_decode(self):
b = self.sample()
used, aspath = pathattr.decode(b)
self.assertEqual(used, len(b))
self.failUnless(isinstance(aspath, pathattr.AsPath))
self.assertEqual(aspath.value, [
[65535,65534],
set([57005,48879]),
])
class TestMed(unittest.TestCase):
def test_encode(self):
med = pathattr.Med(32)
b = med.encode()
self.assertEqual(b, '\x80\x04\x04\x00\x00\x00 ')
def test_decode(self):
b = '\x80\x04\x04\x00\x00\x00 '
used, med = pathattr.decode(b)
self.assertEqual(used, len(b))
self.failUnless(isinstance(med, pathattr.Med))
self.assertEqual(med.value, 32)
class TestExtCommunity(unittest.TestCase):
def test_encode(self):
ext = pathattr.ExtCommunity()
ext.value.append(
'RT:192.168.0.0:1'
)
b = ext.encode()
self.assertEqual(b, '\x00\x10\x08\x01\x02\xc0\xa8\x00\x00\x00\x01')
def test_decode(self):
b = '\x00\x10\x08\x01\x02\xc0\xa8\x00\x00\x00\x01'
used, ext = pathattr.decode(b)
self.assertEqual(used, len(b))
self.failUnless(isinstance(ext, pathattr.ExtCommunity))
self.assertEqual(ext.value, ['RT:192.168.0.0:1'])
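        # Byte layout (per RFC 4360): flags 0x00, attribute type 0x10
        # (EXTENDED COMMUNITIES), length 8, then one community of
        # type/subtype 0x01/0x02 (IPv4-address-specific Route Target) with
        # global admin 192.168.0.0 and local admin 1.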
class TestMpReachNlri(unittest.TestCase):
def test_encode(self):
r = pathattr.MpReachNlri(dict(
afi=1,
safi=128,
nh='192.168.1.1',
nlri=[nlri.vpnv4([111,222,333], '192.168.0.0:2', '192.168.2.0/24')],
))
b = r.encode()
self.assertEqual(b, '\x00\x0e&\x00\x01\x80\x0c\x00\x00\x00\x00\x00\x00\x00\x00\xc0\xa8\x01\x01\x00\xa0\x00\x06\xf0\x00\r\xe0\x00\x14\xd1\x00\x01\xc0\xa8\x00\x00\x00\x02\xc0\xa8\x02')
def test_decode(self):
nh = '\0'*8 + socket.inet_aton('192.168.1.1')
payload = '\x00\x01'# afi
payload += chr(128) # safi
payload += chr(len(nh))
payload += nh
payload += chr(0) # reserved
prefix = '\x00\x06\xf0' # mpls label 0x0006f
prefix += '\x00\x0d\xe0' # mpls label 0x000de
prefix += '\x00\x14\xd1' # mpls label 0x0014d & bottom of stack
prefix += '\x00\x01\xc0\xa8\x00\x00\x00\x02' # rd 192.168.0.0:2
prefix += '\xc0\xa8\x02\x80' # 192.168.2
masklen = 25
prefix_len = 3*24 + 8*8 + masklen
payload += chr(prefix_len)
payload += prefix
b = '\x00\x0e'
b += chr(len(payload))
b += payload
used, mpreach = pathattr.decode(b)
self.assertEqual(used, len(b))
self.failUnless(isinstance(mpreach, pathattr.MpReachNlri))
self.assertEqual(mpreach.value['afi'], 1)
self.assertEqual(mpreach.value['safi'], 128)
self.assertEqual(mpreach.value['nh'], '192.168.1.1')
self.assertEqual(mpreach.value['nlri'], [
nlri.vpnv4(
[0x6f, 0xde, 0x14d],
'192.168.0.0:2',
'192.168.2.128/25'
)
]
)
class TestMpUnreachNlri(unittest.TestCase):
def test_encode(self):
r = pathattr.MpUnreachNlri(dict(
afi=1,
safi=128,
withdraw=[nlri.vpnv4([111,222,333], '192.168.0.0:2', '192.168.2.0/24')],
))
b = r.encode()
self.assertEqual(b, '\x00\x0f\x18\x00\x01\x80\xa0\x00\x06\xf0\x00\r\xe0\x00\x14\xd1\x00\x01\xc0\xa8\x00\x00\x00\x02\xc0\xa8\x02')
def test_decode(self):
payload = '\x00\x01'# afi
payload += chr(128) # safi
prefix = '\x80\x00\x00' # mpls special no-label
prefix += '\x00\x01\xc0\xa8\x00\x00\x00\x02' # rd 192.168.0.0:2
prefix += '\xc0\xa8\x02\x80' # 192.168.2
masklen = 25
prefix_len = 24 + 8*8 + masklen
payload += chr(prefix_len)
payload += prefix
b = '\x00\x0f'
b += chr(len(payload))
b += payload
used, mpunreach = pathattr.decode(b)
self.assertEqual(used, len(b))
self.failUnless(isinstance(mpunreach, pathattr.MpUnreachNlri))
self.assertEqual(mpunreach.value['afi'], 1)
self.assertEqual(mpunreach.value['safi'], 128)
self.assertEqual(mpunreach.value['withdraw'], [
nlri.vpnv4(None, '192.168.0.0:2', '192.168.2.128/25')
]
)
| 4,921 | 105 | 483 |
1edaa9931abb7a139a3d7450677da8402ba4adb9 | 141 | py | Python | lightnlp/utils/data_utils/__init__.py | SHolic/LightNLP | babb4d650b1d120c10130286d472048d542b068c | [
"MIT"
] | 1 | 2020-11-03T08:21:59.000Z | 2020-11-03T08:21:59.000Z | lightnlp/utils/data_utils/__init__.py | SHolic/LightNLP | babb4d650b1d120c10130286d472048d542b068c | [
"MIT"
] | null | null | null | lightnlp/utils/data_utils/__init__.py | SHolic/LightNLP | babb4d650b1d120c10130286d472048d542b068c | [
"MIT"
] | null | null | null | from ._data_loader import RawDataLoader, EmbeddingLoader, NERDataLoader, ATCDataLoader, \
AlbertBaseATCDataLoader, BertBaseATCDataLoader
| 47 | 89 | 0.851064 | from ._data_loader import RawDataLoader, EmbeddingLoader, NERDataLoader, ATCDataLoader, \
AlbertBaseATCDataLoader, BertBaseATCDataLoader
| 0 | 0 | 0 |
5bbc80849986a5e15c02563156c13327c939f1e4 | 19,357 | py | Python | models/cdae.py | chenrz925/DiamondNet | d195dbd5fc6c8ffcf7485a5180f790532f068db9 | [
"Apache-2.0"
] | null | null | null | models/cdae.py | chenrz925/DiamondNet | d195dbd5fc6c8ffcf7485a5180f790532f068db9 | [
"Apache-2.0"
] | null | null | null | models/cdae.py | chenrz925/DiamondNet | d195dbd5fc6c8ffcf7485a5180f790532f068db9 | [
"Apache-2.0"
] | null | null | null | from typing import Dict, Text, Any, Tuple, Union
import torch
from torch import nn
| 58.129129 | 118 | 0.573384 | from typing import Dict, Text, Any, Tuple, Union
import torch
from torch import nn
class DenoiseL(nn.Module):
def __init__(self, in_features: int, ratio: float):
super(DenoiseL, self).__init__()
assert in_features > 0
assert 0.0 <= ratio < 1.0
self.permutation = nn.Parameter(torch.randperm(in_features), requires_grad=False)
self.ratio = ratio
self.in_features = in_features
def forward(self, *input: torch.Tensor, **kwargs: Any) -> torch.Tensor:
return input[0].index_fill(-1, self.permutation[:int(self.ratio * self.in_features)], 0.0)
def __repr__(self):
return f'DenoiseL({self.in_features}, ratio={self.ratio})'
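# Behaviour sketch (illustrative): the permutation is fixed at construction
# time, so the *same* trailing-axis positions are zeroed on every forward pass.
#   layer = DenoiseL(in_features=10, ratio=0.3)
#   y = layer(torch.ones(4, 2, 10))
#   assert int(y[0, 0].eq(0).sum()) == 3   # int(0.3 * 10) positions zeroed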
class ConvAutoEncoder1LayerDeCoBnCotSi(nn.Module):
def __init__(self, **kwargs):
super(ConvAutoEncoder1LayerDeCoBnCotSi, self).__init__()
self.add_module('encoder', nn.ModuleDict({
'denoise': DenoiseL(kwargs['in_features'],
kwargs['denoise']['ratio'] if 'denoise' in kwargs and 'ratio' in kwargs[
'denoise'] else 0.2),
'conv': nn.Conv1d(
in_channels=kwargs['conv1d']['in_channels'],
out_channels=kwargs['conv1d']['out_channels'],
kernel_size=kwargs['conv1d']['kernel_size'],
stride=kwargs['conv1d']['stride'] if 'conv1d' in kwargs and 'stride' in kwargs['conv1d'] else 1,
padding=kwargs['conv1d']['padding'] if 'conv1d' in kwargs and 'padding' in kwargs['conv1d'] else 0,
dilation=kwargs['conv1d']['dilation'] if 'conv1d' in kwargs and 'dilation' in kwargs['conv1d'] else 1,
groups=kwargs['conv1d']['groups'] if 'conv1d' in kwargs and 'groups' in kwargs['conv1d'] else 1,
bias=kwargs['conv1d']['bias'] if 'conv1d' in kwargs and 'bias' in kwargs['conv1d'] else True,
            padding_mode=kwargs['conv1d']['padding_mode'] if 'conv1d' in kwargs and 'padding_mode' in kwargs[
                'conv1d'] else 'zeros',
),
'batchnorm': nn.BatchNorm1d(
num_features=kwargs['conv1d']['out_channels']
),
}))
self.add_module('decoder', nn.ModuleDict({
'convtranspose': nn.ConvTranspose1d(
in_channels=kwargs['conv1d']['out_channels'],
out_channels=kwargs['conv1d']['in_channels'],
kernel_size=kwargs['conv1d']['kernel_size'],
            stride=kwargs['conv1d']['stride'] if 'stride' in kwargs['conv1d'] else 1,
            padding=kwargs['conv1d']['padding'] if 'padding' in kwargs['conv1d'] else 0,
            dilation=kwargs['conv1d']['dilation'] if 'dilation' in kwargs['conv1d'] else 1,
            groups=kwargs['conv1d']['groups'] if 'groups' in kwargs['conv1d'] else 1,
            bias=kwargs['conv1d']['bias'] if 'bias' in kwargs['conv1d'] else True,
padding_mode=kwargs['padding_mode'] if 'padding_mode' in kwargs else 'zeros',
),
'sigmoid': nn.Sigmoid()
}))
def forward(self, *input: torch.Tensor, **kwargs: Dict[Text, torch.Tensor]) -> Union[
Tuple[torch.Tensor, torch.Tensor], torch.Tensor
]:
return_features = kwargs['return_features'] if 'return_features' in kwargs else False
childrens = dict(self.named_children())
features = childrens['encoder']['denoise'](input[0])
features = childrens['encoder']['conv'](features)
features = childrens['encoder']['batchnorm'](features)
output_features = features
features = childrens['decoder']['convtranspose'](features)
features = childrens['decoder']['sigmoid'](features)
if return_features:
return features, output_features
else:
return features
class ConvAutoEncoder1LayerDeCoSeCotSi(nn.Module):
def __init__(self, **kwargs: Dict[Text, Any]):
super(ConvAutoEncoder1LayerDeCoSeCotSi, self).__init__()
self.add_module('encoder', nn.ModuleDict({
'denoise': DenoiseL(kwargs['in_features'],
kwargs['denoise']['ratio'] if 'denoise' in kwargs and 'ratio' in kwargs[
'denoise'] else 0.2),
'conv': nn.Conv1d(
in_channels=kwargs['conv1d']['in_channels'],
out_channels=kwargs['conv1d']['out_channels'],
kernel_size=kwargs['conv1d']['kernel_size'],
stride=kwargs['conv1d']['stride'] if 'conv1d' in kwargs and 'stride' in kwargs['conv1d'] else 1,
padding=kwargs['conv1d']['padding'] if 'conv1d' in kwargs and 'padding' in kwargs['conv1d'] else 0,
dilation=kwargs['conv1d']['dilation'] if 'conv1d' in kwargs and 'dilation' in kwargs['conv1d'] else 1,
groups=kwargs['conv1d']['groups'] if 'conv1d' in kwargs and 'groups' in kwargs['conv1d'] else 1,
bias=kwargs['conv1d']['bias'] if 'conv1d' in kwargs and 'bias' in kwargs['conv1d'] else True,
            padding_mode=kwargs['conv1d']['padding_mode'] if 'conv1d' in kwargs and 'padding_mode' in kwargs[
                'conv1d'] else 'zeros',
),
'selu': nn.SELU(),
}))
self.add_module('decoder', nn.ModuleDict({
'convtranspose': nn.ConvTranspose1d(
in_channels=kwargs['conv1d']['out_channels'],
out_channels=kwargs['conv1d']['in_channels'],
kernel_size=kwargs['conv1d']['kernel_size'],
            stride=kwargs['conv1d']['stride'] if 'stride' in kwargs['conv1d'] else 1,
            padding=kwargs['conv1d']['padding'] if 'padding' in kwargs['conv1d'] else 0,
            dilation=kwargs['conv1d']['dilation'] if 'dilation' in kwargs['conv1d'] else 1,
            groups=kwargs['conv1d']['groups'] if 'groups' in kwargs['conv1d'] else 1,
            bias=kwargs['conv1d']['bias'] if 'bias' in kwargs['conv1d'] else True,
padding_mode=kwargs['padding_mode'] if 'padding_mode' in kwargs else 'zeros',
),
'sigmoid': nn.Sigmoid()
}))
def forward(self, *input: torch.Tensor, **kwargs: Dict[Text, torch.Tensor]) -> Union[
Tuple[torch.Tensor, torch.Tensor], torch.Tensor
]:
return_features = kwargs['return_features'] if 'return_features' in kwargs else False
childrens = dict(self.named_children())
features = childrens['encoder']['denoise'](input[0])
features = childrens['encoder']['conv'](features)
features = childrens['encoder']['selu'](features)
output_features = features
features = childrens['decoder']['convtranspose'](features)
features = childrens['decoder']['sigmoid'](features)
if return_features:
return features, output_features
else:
return features
class ConvAutoEncoder2LayerDeCoSeCoSeCotSeCotSi(nn.Module):
def __init__(self, **kwargs):
super(ConvAutoEncoder2LayerDeCoSeCoSeCotSeCotSi, self).__init__()
self.add_module('encoder', nn.ModuleDict({
'denoise': DenoiseL(kwargs['in_features'],
kwargs['denoise']['ratio'] if 'denoise' in kwargs and 'ratio' in kwargs[
'denoise'] else 0.2),
'conv1': nn.Conv1d(
in_channels=kwargs['conv1d'][0]['in_channels'],
out_channels=kwargs['conv1d'][0]['out_channels'],
kernel_size=kwargs['conv1d'][0]['kernel_size'],
stride=kwargs['conv1d'][0]['stride'] if 'conv1d' in kwargs and 'stride' in kwargs['conv1d'][0] else 1,
padding=kwargs['conv1d'][0]['padding'] if 'conv1d' in kwargs and 'padding' in kwargs['conv1d'][
0] else 0,
            dilation=kwargs['conv1d'][0]['dilation'] if 'conv1d' in kwargs and 'dilation' in kwargs['conv1d'][
                0] else 1,
groups=kwargs['conv1d'][0]['groups'] if 'conv1d' in kwargs and 'groups' in kwargs['conv1d'][0] else 1,
bias=kwargs['conv1d'][0]['bias'] if 'conv1d' in kwargs and 'bias' in kwargs['conv1d'][0] else True,
            padding_mode=kwargs['conv1d'][0]['padding_mode'] if 'conv1d' in kwargs and 'padding_mode' in kwargs[
                'conv1d'][0] else 'zeros',
),
'bn1': nn.BatchNorm1d(kwargs['conv1d'][0]['in_channels']),
'selu1': nn.SELU(),
'conv2': nn.Conv1d(
in_channels=kwargs['conv1d'][1]['in_channels'],
out_channels=kwargs['conv1d'][1]['out_channels'],
kernel_size=kwargs['conv1d'][1]['kernel_size'],
stride=kwargs['conv1d'][1]['stride'] if 'conv1d' in kwargs and 'stride' in kwargs['conv1d'][1] else 1,
padding=kwargs['conv1d'][1]['padding'] if 'conv1d' in kwargs and 'padding' in kwargs['conv1d'][
1] else 0,
dilation=kwargs['conv1d'][1]['dilation'] if 'conv1d' in kwargs and 'dilation' in kwargs['conv1d'][
1] else 1,
groups=kwargs['conv1d'][1]['groups'] if 'conv1d' in kwargs and 'groups' in kwargs['conv1d'][1] else 1,
bias=kwargs['conv1d'][1]['bias'] if 'conv1d' in kwargs and 'bias' in kwargs['conv1d'][1] else True,
            padding_mode=kwargs['conv1d'][1]['padding_mode'] if 'conv1d' in kwargs and 'padding_mode' in kwargs[
                'conv1d'][1] else 'zeros',
),
'bn2': nn.BatchNorm1d(kwargs['conv1d'][1]['in_channels']),
'selu2': nn.SELU(),
}))
self.add_module('decoder', nn.ModuleDict({
'convtranspose1': nn.ConvTranspose1d(
in_channels=kwargs['conv1d'][1]['out_channels'],
out_channels=kwargs['conv1d'][1]['in_channels'],
kernel_size=kwargs['conv1d'][1]['kernel_size'],
stride=kwargs['conv1d'][1]['stride'] if 'stride' in kwargs['conv1d'][1] else 1,
padding=kwargs['conv1d'][1]['padding'] if 'padding' in kwargs['conv1d'][1] else 0,
dilation=kwargs['conv1d'][1]['dilation'] if 'dilation' in kwargs['conv1d'][1] else 1,
groups=kwargs['conv1d'][1]['groups'] if 'groups' in kwargs['conv1d'][1] else 1,
bias=kwargs['conv1d'][1]['bias'] if 'bias' in kwargs['conv1d'][1] else True,
            padding_mode=kwargs['conv1d'][1]['padding_mode'] if 'padding_mode' in kwargs['conv1d'][1] else 'zeros',
),
'selu': nn.SELU(),
'bn1': nn.BatchNorm1d(kwargs['conv1d'][1]['out_channels']),
'convtranspose2': nn.ConvTranspose1d(
in_channels=kwargs['conv1d'][0]['out_channels'],
out_channels=kwargs['conv1d'][0]['in_channels'],
kernel_size=kwargs['conv1d'][0]['kernel_size'],
stride=kwargs['conv1d'][0]['stride'] if 'stride' in kwargs['conv1d'][0] else 1,
padding=kwargs['conv1d'][0]['padding'] if 'padding' in kwargs['conv1d'][0] else 0,
dilation=kwargs['conv1d'][0]['dilation'] if 'dilation' in kwargs['conv1d'][0] else 1,
groups=kwargs['conv1d'][0]['groups'] if 'groups' in kwargs['conv1d'][0] else 1,
bias=kwargs['conv1d'][0]['bias'] if 'bias' in kwargs['conv1d'][0] else True,
            padding_mode=kwargs['conv1d'][0]['padding_mode'] if 'padding_mode' in kwargs['conv1d'][0] else 'zeros',
),
'bn2': nn.BatchNorm1d(kwargs['conv1d'][0]['out_channels']),
'sigmoid': nn.Sigmoid()
}))
def forward(self, *input: torch.Tensor, **kwargs: Any) -> Union[
Tuple[torch.Tensor, torch.Tensor], torch.Tensor
]:
return_features = kwargs['return_features'] if 'return_features' in kwargs else False
childrens = dict(self.named_children())
features = childrens['encoder']['denoise'](input[0])
# features = childrens['encoder']['bn1'](features)
features = childrens['encoder']['conv1'](features)
# print(features.shape)
features = childrens['encoder']['selu1'](features)
# features = childrens['encoder']['bn2'](features)
features = childrens['encoder']['conv2'](features)
features = childrens['encoder']['selu2'](features)
output_features = features
# features = childrens['decoder']['bn1'](features)
features = childrens['decoder']['convtranspose1'](features)
features = childrens['decoder']['selu'](features)
# features = childrens['decoder']['bn2'](features)
features = childrens['decoder']['convtranspose2'](features)
features = childrens['decoder']['sigmoid'](features)
if return_features:
return features, output_features
else:
return features
class ConvAutoEncoder2LayerLiDeCoSeCoSeCotSeCotSi(nn.Module):
def __init__(self, **kwargs):
super(ConvAutoEncoder2LayerLiDeCoSeCoSeCotSeCotSi, self).__init__()
self.add_module('encoder', nn.ModuleDict({
'linear': nn.Linear(
in_features=kwargs['in_features'],
out_features=kwargs['linear_out_features']
),
'selu0': nn.SELU(),
'denoise': DenoiseL(kwargs['linear_out_features'],
kwargs['denoise']['ratio'] if 'denoise' in kwargs and 'ratio' in kwargs[
'denoise'] else 0.2),
'conv1': nn.Conv1d(
in_channels=kwargs['conv1d'][0]['in_channels'],
out_channels=kwargs['conv1d'][0]['out_channels'],
kernel_size=kwargs['conv1d'][0]['kernel_size'],
stride=kwargs['conv1d'][0]['stride'] if 'conv1d' in kwargs and 'stride' in kwargs['conv1d'][0] else 1,
padding=kwargs['conv1d'][0]['padding'] if 'conv1d' in kwargs and 'padding' in kwargs['conv1d'][
0] else 0,
            dilation=kwargs['conv1d'][0]['dilation'] if 'conv1d' in kwargs and 'dilation' in kwargs['conv1d'][
                0] else 1,
groups=kwargs['conv1d'][0]['groups'] if 'conv1d' in kwargs and 'groups' in kwargs['conv1d'][0] else 1,
bias=kwargs['conv1d'][0]['bias'] if 'conv1d' in kwargs and 'bias' in kwargs['conv1d'][0] else True,
            padding_mode=kwargs['conv1d'][0]['padding_mode'] if 'conv1d' in kwargs and 'padding_mode' in kwargs[
                'conv1d'][0] else 'zeros',
),
# 'bn1': nn.BatchNorm1d(config['conv1d'][0]['in_channels']),
'selu1': nn.SELU(),
'conv2': nn.Conv1d(
in_channels=kwargs['conv1d'][1]['in_channels'],
out_channels=kwargs['conv1d'][1]['out_channels'],
kernel_size=kwargs['conv1d'][1]['kernel_size'],
stride=kwargs['conv1d'][1]['stride'] if 'conv1d' in kwargs and 'stride' in kwargs['conv1d'][1] else 1,
padding=kwargs['conv1d'][1]['padding'] if 'conv1d' in kwargs and 'padding' in kwargs['conv1d'][
1] else 0,
dilation=kwargs['conv1d'][1]['dilation'] if 'conv1d' in kwargs and 'dilation' in kwargs['conv1d'][
1] else 1,
groups=kwargs['conv1d'][1]['groups'] if 'conv1d' in kwargs and 'groups' in kwargs['conv1d'][1] else 1,
bias=kwargs['conv1d'][1]['bias'] if 'conv1d' in kwargs and 'bias' in kwargs['conv1d'][1] else True,
            padding_mode=kwargs['conv1d'][1]['padding_mode'] if 'conv1d' in kwargs and 'padding_mode' in kwargs[
                'conv1d'][1] else 'zeros',
),
# 'bn2': nn.BatchNorm1d(config['conv1d'][1]['in_channels']),
'selu2': nn.SELU(),
}))
self.add_module('decoder', nn.ModuleDict({
'convtranspose1': nn.ConvTranspose1d(
in_channels=kwargs['conv1d'][1]['out_channels'],
out_channels=kwargs['conv1d'][1]['in_channels'],
kernel_size=kwargs['conv1d'][1]['kernel_size'],
stride=kwargs['conv1d'][1]['stride'] if 'stride' in kwargs['conv1d'][1] else 1,
padding=kwargs['conv1d'][1]['padding'] if 'padding' in kwargs['conv1d'][1] else 0,
dilation=kwargs['conv1d'][1]['dilation'] if 'dilation' in kwargs['conv1d'][1] else 1,
groups=kwargs['conv1d'][1]['groups'] if 'groups' in kwargs['conv1d'][1] else 1,
bias=kwargs['conv1d'][1]['bias'] if 'bias' in kwargs['conv1d'][1] else True,
            padding_mode=kwargs['conv1d'][1]['padding_mode'] if 'padding_mode' in kwargs['conv1d'][1] else 'zeros',
),
'selu1': nn.SELU(),
# 'bn1': nn.BatchNorm1d(config['conv1d'][1]['out_channels']),
'convtranspose2': nn.ConvTranspose1d(
in_channels=kwargs['conv1d'][0]['out_channels'],
out_channels=kwargs['conv1d'][0]['in_channels'],
kernel_size=kwargs['conv1d'][0]['kernel_size'],
stride=kwargs['conv1d'][0]['stride'] if 'stride' in kwargs['conv1d'][0] else 1,
padding=kwargs['conv1d'][0]['padding'] if 'padding' in kwargs['conv1d'][0] else 0,
dilation=kwargs['conv1d'][0]['dilation'] if 'dilation' in kwargs['conv1d'][0] else 1,
groups=kwargs['conv1d'][0]['groups'] if 'groups' in kwargs['conv1d'][0] else 1,
bias=kwargs['conv1d'][0]['bias'] if 'bias' in kwargs['conv1d'][0] else True,
            padding_mode=kwargs['conv1d'][0]['padding_mode'] if 'padding_mode' in kwargs['conv1d'][0] else 'zeros',
),
# 'bn2': nn.BatchNorm1d(config['conv1d'][0]['out_channels']),
'selu2': nn.SELU(),
'linear': nn.Linear(
in_features=kwargs['linear_out_features'],
out_features=kwargs['in_features'],
),
'sigmoid': nn.Sigmoid()
}))
def forward(self, *input: torch.Tensor, **kwargs: Any) -> Union[
Tuple[torch.Tensor, torch.Tensor], torch.Tensor
]:
return_features = kwargs['return_features'] if 'return_features' in kwargs else False
childrens = dict(self.named_children())
features = childrens['encoder']['linear'](input[0])
features = childrens['encoder']['selu0'](features)
features = childrens['encoder']['denoise'](features)
# features = childrens['encoder']['bn1'](features)
features = childrens['encoder']['conv1'](features)
# print(features.shape)
features = childrens['encoder']['selu1'](features)
# features = childrens['encoder']['bn2'](features)
features = childrens['encoder']['conv2'](features)
features = childrens['encoder']['selu2'](features)
output_features = features
# features = childrens['decoder']['bn1'](features)
features = childrens['decoder']['convtranspose1'](features)
features = childrens['decoder']['selu1'](features)
# features = childrens['decoder']['bn2'](features)
features = childrens['decoder']['convtranspose2'](features)
features = childrens['decoder']['selu2'](features)
features = childrens['decoder']['linear'](features)
features = childrens['decoder']['sigmoid'](features)
if return_features:
return features, output_features
else:
return features
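# Illustrative configuration (hypothetical values; the real ones live in the
# experiment configs). The conv1d dict mirrors nn.Conv1d's keyword arguments:
#   model = ConvAutoEncoder1LayerDeCoSeCotSi(
#       in_features=128,
#       denoise={'ratio': 0.2},
#       conv1d={'in_channels': 9, 'out_channels': 32, 'kernel_size': 5},
#   )
#   recon, feats = model(torch.rand(16, 9, 128), return_features=True)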
| 18,720 | 141 | 407 |
3a6ac3e083b43f0be52796162d41f654222059ea | 2,487 | py | Python | checkmate/management/commands/init.py | marcinguy/checkmate-ce | fc33c7c27bc640ab4db5dbda274a0edd3b3db218 | [
"MIT"
] | 80 | 2015-01-06T17:42:39.000Z | 2022-02-08T19:08:21.000Z | checkmate/management/commands/init.py | ravikumarpurbey/checkmate | 1a4d010c8ef25c678d8d14dc8e37a9bed1883ca2 | [
"MIT"
] | 6 | 2015-08-04T12:16:48.000Z | 2021-02-27T12:09:16.000Z | checkmate/management/commands/init.py | ravikumarpurbey/checkmate | 1a4d010c8ef25c678d8d14dc8e37a9bed1883ca2 | [
"MIT"
] | 33 | 2015-01-02T14:18:11.000Z | 2021-03-18T05:06:54.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .base import BaseCommand
from checkmate.management.helpers import save_project_config
import sys
import os
import os.path
import json
import time
import uuid
import logging
logger = logging.getLogger(__name__)
"""
Creates a new project. The command proceeds as follows:
-We create a .checkmate directory in the current directory.
-If a project already exists in the same directory, we do nothing.
"""
| 27.633333 | 101 | 0.519099 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .base import BaseCommand
from checkmate.management.helpers import save_project_config
import sys
import os
import os.path
import json
import time
import uuid
import logging
logger = logging.getLogger(__name__)
"""
Creates a new project. The command proceeds as follows:
-We create a .checkmate directory in the current directory.
-If a project already exists in the same directory, we do nothing.
"""
class Command(BaseCommand):
requires_valid_project = False
options = BaseCommand.options + [
{
'name' : '--backend',
'action' : 'store',
'dest' : 'backend',
'type' : str,
'default' : 'sql',
'help' : 'The backend to use.'
},
{
'name' : '--backend-opts',
'action' : 'store',
'dest' : 'backend_opts',
'type' : str,
'default' : '',
'help' : 'Backend options (e.g. connection string).'
},
{
'name' : '--path',
'action' : 'store',
'dest' : 'path',
'default' : None,
'type' : str,
'help' : 'The path where to create a new project (default: current working directory)'
},
{
'name' : '--pk',
'action' : 'store',
'dest' : 'pk',
'type' : str,
'default' : None,
'help' : 'The primary key to use for the project',
}]
description = """
Initializes a new checkmate project.
"""
def run(self):
logger.info("Initializing new project in the current directory.")
project_path = self.opts['path'] or os.getcwd()
config_path = project_path+"/.checkmate"
if os.path.exists(config_path):
logger.error("Found another project with the same path, aborting.")
return -1
        if self.opts['backend'] not in ('sql',):
logger.error("Unsupported backend: %s" % self.opts['backend'])
return -1
config = {
'project_id' : uuid.uuid4().hex if not self.opts['pk'] else self.opts['pk'],
'project_class' : 'Project',
'backend' : {
'driver' : self.opts['backend'],
}
}
os.makedirs(config_path)
save_project_config(project_path,config)
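# Example invocation (assuming checkmate's CLI exposes this command under its
# module name, as with the other management commands):
#   checkmate init --backend sql --backend-opts "sqlite:///checkmate.db"
# which creates ./.checkmate and writes a project config with a fresh UUID.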
| 794 | 1,195 | 23 |
f77ef19bc22d083ea7feed89b8aa51d4550f8eda | 4,646 | py | Python | models/model_unet.py | iamsofancyyoualreadyknow/IHC-based-labels-generation-and-semantic-segmentation-for-lung-cancer | 57904544c6d6b43dcd5937afeb474c0a47456d98 | [
"MIT"
] | null | null | null | models/model_unet.py | iamsofancyyoualreadyknow/IHC-based-labels-generation-and-semantic-segmentation-for-lung-cancer | 57904544c6d6b43dcd5937afeb474c0a47456d98 | [
"MIT"
] | null | null | null | models/model_unet.py | iamsofancyyoualreadyknow/IHC-based-labels-generation-and-semantic-segmentation-for-lung-cancer | 57904544c6d6b43dcd5937afeb474c0a47456d98 | [
"MIT"
] | null | null | null | import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
from six.moves import cPickle
import unet
import simplified_unet
arg_scope = tf.contrib.framework.arg_scope
| 41.482143 | 137 | 0.655833 | import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
from six.moves import cPickle
import unet
import simplified_unet
arg_scope = tf.contrib.framework.arg_scope
class UnetModel(object):
def __init__(self, number_class=3, is_training=True, is_simplified = False, dropout = True):
"""Create the model"""
self.n_classes = number_class
self.is_training = is_training
self.is_simplified = is_simplified
self.dropout = dropout
def _create_network(self, input_batch, dropout = False, is_training = True):
"""
Args:
input_batch: batch of pre-processed images.
          dropout: True to enable the dropout layers inside the network.
          is_training: True when the network is built for training.
Returns:
A downsampled segmentation mask.
"""
if not self.is_simplified:
net, _ = unet.unet(input_batch, self.n_classes, is_training = is_training, dropout = dropout, weight_decay=0.0005)
else:
net, _ = simplified_unet.unet(input_batch, self.n_classes, is_training = is_training, dropout = dropout, weight_decay=0.0005)
return net
def prepare_label(self, input_batch, new_size):
"""Resize masks and perform one-hot encoding.
Args:
input_batch: input tensor of shape [batch_size H W 1].
new_size: a tensor with new height and width.
Returns:
        Outputs a tensor of shape [batch_size h w n_classes]
with last dimension comprised of 0's and 1's only.
"""
with tf.name_scope('label_encode'):
input_batch = tf.image.resize_nearest_neighbor(input_batch,
new_size) # As labels are integer numbers, need to use NN interp.
input_batch = tf.squeeze(input_batch, axis=[3]) # Reducing the channel dimension.
input_batch = tf.one_hot(input_batch, depth=self.n_classes)
return input_batch
def preds(self, input_batch):
"""Create the network and run inference on the input batch.
Args:
input_batch: batch of pre-processed images.
Returns:
Argmax over the predictions of the network of the same shape as the input.
"""
raw_output = self._create_network(tf.cast(input_batch, tf.float32), dropout = self.dropout, is_training = self.is_training)
raw_output = tf.image.resize_bilinear(raw_output, tf.shape(input_batch)[1:3, ])
raw_output = tf.argmax(raw_output, axis=3)
raw_output = tf.expand_dims(raw_output, axis=3) # Create 4D-tensor.
return tf.cast(raw_output, tf.uint8)
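    # Illustrative TF1-style graph construction (shapes are assumptions):
    #   model = UnetModel(number_class=3, is_training=False)
    #   images = tf.placeholder(tf.float32, [8, 256, 256, 3])
    #   pred = model.preds(images)   # [8, 256, 256, 1] uint8 class indices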
def loss(self, img_batch, label_batch, mask_batch):
"""Create the network, run inference on the input batch and compute loss.
Args:
input_batch: batch of pre-processed images.
Returns:
Pixel-wise softmax loss.
"""
raw_output = self._create_network(tf.cast(img_batch, tf.float32), dropout = self.dropout, is_training = self.is_training)
# Get prediction output
raw_output_up = tf.image.resize_bilinear(raw_output, tf.shape(img_batch)[1:3, ])
raw_output_up = tf.argmax(raw_output_up, axis=3)
raw_output_up = tf.expand_dims(raw_output_up, axis=3) # Create 4D-tensor.
pred = tf.cast(raw_output_up, tf.uint8)
prediction = tf.reshape(raw_output, [-1, self.n_classes])
# Prepare ground truth output
label_batch = tf.image.resize_nearest_neighbor(label_batch, tf.stack(raw_output.get_shape()[1:3]))
gt = tf.expand_dims(tf.cast(tf.reshape(label_batch, [-1]), tf.int32), axis=1)
# Prepare mask
        if mask_batch is not None:
resized_mask_batch = tf.image.resize_nearest_neighbor(mask_batch, tf.stack(raw_output.get_shape()[1:3]))
resized_mask_batch = tf.cast(tf.reshape(resized_mask_batch, [-1]), tf.float32)
mask = tf.reshape(resized_mask_batch, gt.get_shape())
# Calculate the masked loss
epsilon = 0.00001 * tf.ones(prediction.get_shape(), tf.float32)
        if mask_batch is not None:
loss = tf.losses.sparse_softmax_cross_entropy(logits=prediction+epsilon, labels=gt, weights=mask)
else:
loss = tf.losses.sparse_softmax_cross_entropy(logits=prediction+epsilon, labels=gt)
reduced_loss = tf.reduce_mean(loss)
print(loss)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
if update_ops:
updates = tf.group(*update_ops)
reduced_loss = control_flow_ops.with_dependencies([updates], reduced_loss)
return pred, reduced_loss | 0 | 4,439 | 23 |
8bb61dd567b5ca0da6e74a0f8a595d90330016ec | 3,981 | py | Python | pysnmp-with-texts/GNOME-SMI.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 8 | 2019-05-09T17:04:00.000Z | 2021-06-09T06:50:51.000Z | pysnmp-with-texts/GNOME-SMI.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 4 | 2019-05-31T16:42:59.000Z | 2020-01-31T21:57:17.000Z | pysnmp-with-texts/GNOME-SMI.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module GNOME-SMI (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/GNOME-SMI
# Produced by pysmi-0.3.4 at Wed May 1 13:19:45 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, SingleValueConstraint, ConstraintsUnion, ValueSizeConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "SingleValueConstraint", "ConstraintsUnion", "ValueSizeConstraint", "ValueRangeConstraint")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
MibIdentifier, ModuleIdentity, TimeTicks, iso, Unsigned32, Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn, IpAddress, Integer32, enterprises, Counter32, Bits, ObjectIdentity, Gauge32, NotificationType = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "ModuleIdentity", "TimeTicks", "iso", "Unsigned32", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "IpAddress", "Integer32", "enterprises", "Counter32", "Bits", "ObjectIdentity", "Gauge32", "NotificationType")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
gnome = ModuleIdentity((1, 3, 6, 1, 4, 1, 3319))
gnome.setRevisions(('2007-09-07 00:00', '2005-05-07 00:00', '2003-12-07 00:00', '1998-09-01 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: gnome.setRevisionsDescriptions(('Fixed wrong enterprise number (how comes this typo was unnoticed for so long?).', 'Added gnomeLDAP subtree for LDAP definitions.', 'Added gnomeSysadmin subtree for GNOME project system administration. Updated contact info.', 'Initial version.',))
if mibBuilder.loadTexts: gnome.setLastUpdated('200709070000Z')
if mibBuilder.loadTexts: gnome.setOrganization('GNOME project')
if mibBuilder.loadTexts: gnome.setContactInfo('GNU Network Object Model Environment project see http://www.gnome.org for contact persons of a particular area or subproject of GNOME. Administrative contact for MIB module: Jochen Friedrich Ramsaystr. 9 63450 Hanau Germany email: jochen@scram.de')
if mibBuilder.loadTexts: gnome.setDescription('The Structure of GNOME.')
gnomeProducts = ObjectIdentity((1, 3, 6, 1, 4, 1, 3319, 1))
if mibBuilder.loadTexts: gnomeProducts.setStatus('current')
if mibBuilder.loadTexts: gnomeProducts.setDescription('gnomeProducts is the root OBJECT IDENTIFIER from which sysObjectID values are assigned.')
gnomeMgmt = ObjectIdentity((1, 3, 6, 1, 4, 1, 3319, 2))
if mibBuilder.loadTexts: gnomeMgmt.setStatus('current')
if mibBuilder.loadTexts: gnomeMgmt.setDescription('gnomeMgmt defines the subtree for production GNOME related MIB registrations.')
gnomeTest = ObjectIdentity((1, 3, 6, 1, 4, 1, 3319, 3))
if mibBuilder.loadTexts: gnomeTest.setStatus('current')
if mibBuilder.loadTexts: gnomeTest.setDescription('gnomeTest defines the subtree for testing GNOME related MIB registrations.')
gnomeSysadmin = ObjectIdentity((1, 3, 6, 1, 4, 1, 3319, 4))
if mibBuilder.loadTexts: gnomeSysadmin.setStatus('current')
if mibBuilder.loadTexts: gnomeSysadmin.setDescription('gnomeSysadmin defines the subtree for GNOME related Sysadmin MIB registrations.')
gnomeLDAP = ObjectIdentity((1, 3, 6, 1, 4, 1, 3319, 5))
if mibBuilder.loadTexts: gnomeLDAP.setStatus('current')
if mibBuilder.loadTexts: gnomeLDAP.setDescription('gnomeLDAP defines the subtree for GNOME related LDAP registrations.')
mibBuilder.exportSymbols("GNOME-SMI", gnomeMgmt=gnomeMgmt, gnomeSysadmin=gnomeSysadmin, gnomeTest=gnomeTest, gnomeLDAP=gnomeLDAP, PYSNMP_MODULE_ID=gnome, gnome=gnome, gnomeProducts=gnomeProducts)
| 102.076923 | 505 | 0.786486 | #
# PySNMP MIB module GNOME-SMI (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/GNOME-SMI
# Produced by pysmi-0.3.4 at Wed May 1 13:19:45 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, SingleValueConstraint, ConstraintsUnion, ValueSizeConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "SingleValueConstraint", "ConstraintsUnion", "ValueSizeConstraint", "ValueRangeConstraint")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
MibIdentifier, ModuleIdentity, TimeTicks, iso, Unsigned32, Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn, IpAddress, Integer32, enterprises, Counter32, Bits, ObjectIdentity, Gauge32, NotificationType = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "ModuleIdentity", "TimeTicks", "iso", "Unsigned32", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "IpAddress", "Integer32", "enterprises", "Counter32", "Bits", "ObjectIdentity", "Gauge32", "NotificationType")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
gnome = ModuleIdentity((1, 3, 6, 1, 4, 1, 3319))
gnome.setRevisions(('2007-09-07 00:00', '2005-05-07 00:00', '2003-12-07 00:00', '1998-09-01 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: gnome.setRevisionsDescriptions(('Fixed wrong enterprise number (how comes this typo was unnoticed for so long?).', 'Added gnomeLDAP subtree for LDAP definitions.', 'Added gnomeSysadmin subtree for GNOME project system administration. Updated contact info.', 'Initial version.',))
if mibBuilder.loadTexts: gnome.setLastUpdated('200709070000Z')
if mibBuilder.loadTexts: gnome.setOrganization('GNOME project')
if mibBuilder.loadTexts: gnome.setContactInfo('GNU Network Object Model Environment project see http://www.gnome.org for contact persons of a particular area or subproject of GNOME. Administrative contact for MIB module: Jochen Friedrich Ramsaystr. 9 63450 Hanau Germany email: jochen@scram.de')
if mibBuilder.loadTexts: gnome.setDescription('The Structure of GNOME.')
gnomeProducts = ObjectIdentity((1, 3, 6, 1, 4, 1, 3319, 1))
if mibBuilder.loadTexts: gnomeProducts.setStatus('current')
if mibBuilder.loadTexts: gnomeProducts.setDescription('gnomeProducts is the root OBJECT IDENTIFIER from which sysObjectID values are assigned.')
gnomeMgmt = ObjectIdentity((1, 3, 6, 1, 4, 1, 3319, 2))
if mibBuilder.loadTexts: gnomeMgmt.setStatus('current')
if mibBuilder.loadTexts: gnomeMgmt.setDescription('gnomeMgmt defines the subtree for production GNOME related MIB registrations.')
gnomeTest = ObjectIdentity((1, 3, 6, 1, 4, 1, 3319, 3))
if mibBuilder.loadTexts: gnomeTest.setStatus('current')
if mibBuilder.loadTexts: gnomeTest.setDescription('gnomeTest defines the subtree for testing GNOME related MIB registrations.')
gnomeSysadmin = ObjectIdentity((1, 3, 6, 1, 4, 1, 3319, 4))
if mibBuilder.loadTexts: gnomeSysadmin.setStatus('current')
if mibBuilder.loadTexts: gnomeSysadmin.setDescription('gnomeSysadmin defines the subtree for GNOME related Sysadmin MIB registrations.')
gnomeLDAP = ObjectIdentity((1, 3, 6, 1, 4, 1, 3319, 5))
if mibBuilder.loadTexts: gnomeLDAP.setStatus('current')
if mibBuilder.loadTexts: gnomeLDAP.setDescription('gnomeLDAP defines the subtree for GNOME related LDAP registrations.')
mibBuilder.exportSymbols("GNOME-SMI", gnomeMgmt=gnomeMgmt, gnomeSysadmin=gnomeSysadmin, gnomeTest=gnomeTest, gnomeLDAP=gnomeLDAP, PYSNMP_MODULE_ID=gnome, gnome=gnome, gnomeProducts=gnomeProducts)
| 0 | 0 | 0 |
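# Illustrative sketch: how a pysmi-generated module such as GNOME-SMI above
# is typically consumed. pysnmp's MibBuilder executes the file with
# `mibBuilder` injected into its namespace, which is why the module uses
# mibBuilder without importing it. The search-path setup is an assumption.
from pysnmp.smi import builder

mib_builder = builder.MibBuilder()
mib_builder.loadModules('GNOME-SMI')  # assumes the module is on the MIB path
gnome, = mib_builder.importSymbols('GNOME-SMI', 'gnome')
print(gnome.getName())  # (1, 3, 6, 1, 4, 1, 3319)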
eb7fcc229e2738f4ef94d625899ae037199f88ae | 375 | py | Python | ftp.py | ZDYC/hacker | 3dd9556bda629a1f2b96905ed3e62ed3f02ae3f6 | [
"Apache-2.0"
] | null | null | null | ftp.py | ZDYC/hacker | 3dd9556bda629a1f2b96905ed3e62ed3f02ae3f6 | [
"Apache-2.0"
] | null | null | null | ftp.py | ZDYC/hacker | 3dd9556bda629a1f2b96905ed3e62ed3f02ae3f6 | [
"Apache-2.0"
] | null | null | null | import ftplib
if __name__ == '__main__':
anonlogin('154.221.18.35') | 22.058824 | 57 | 0.573333 | import ftplib
def anonlogin(hostname):
try:
ftp = ftplib.FTP(hostname)
        # note: logs in with fixed credentials rather than a true anonymous login
        ftp.login('root', 'hx1NM396')
        print('\n[*] ' + str(hostname) + ' ftp succeeded!')
ftp.quit()
return True
except Exception as e:
        print('failed to ftp ' + str(hostname))
return False
if __name__ == '__main__':
anonlogin('154.221.18.35') | 278 | 0 | 23 |
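# Illustrative sketch: a genuinely anonymous FTP probe. The anonlogin()
# above actually uses fixed credentials; ftplib's login() with no arguments
# defaults to the anonymous user. Host and timeout values are assumptions.
import ftplib

def try_anon_login(hostname, timeout=10):
    try:
        with ftplib.FTP(hostname, timeout=timeout) as ftp:
            ftp.login()  # anonymous login by default
            return True
    except ftplib.all_errors:
        return False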
458bb15a61c36499cdc3ad3c42cfe8316646e17b | 2,497 | py | Python | scripts/pcap2csv.py | rqtx/Nymphenburg | 08ed27b25b336d6201afaa27698ac405a53e537c | [
"MIT"
] | null | null | null | scripts/pcap2csv.py | rqtx/Nymphenburg | 08ed27b25b336d6201afaa27698ac405a53e537c | [
"MIT"
] | null | null | null | scripts/pcap2csv.py | rqtx/Nymphenburg | 08ed27b25b336d6201afaa27698ac405a53e537c | [
"MIT"
] | null | null | null | #!/bin/python
import getopt
import sys
convert('amplifier')
convert('attacker')
convert('victim')
convert('amplifier_input')
convert('amplifier_output')
| 28.375 | 123 | 0.470164 | #!/bin/python
import getopt
import sys
class Pcap2Csv():
__SHORTARGS = 'p:ho:t:'
__LONGARGS = ['pcap=', 'help', 'output=', 'time=']
__USAGE = ['Pcap file', 'Help', 'Output file', 'Start attack time']
start_time = 0
end_time = 0
def __init__(self):
self.__cliParser()
self.__convert()
def __cliParser(self):
options, remainder = getopt.getopt(sys.argv[1:], self.__SHORTARGS, self.__LONGARGS)
for opt, arg in options:
if opt in ('-p', '--file'):
self.input_file = arg
elif opt in ('-o', '--output'):
self.output_file = arg
elif opt in ('-t', '--time'):
start, end = arg.split(':')
self.start_time = int(start)
self.end_time = int(end)
elif opt in ('-h', '--help'):
for idx, item in enumerate(self.__LONGARGS):
print('-' + self.__LONGARGS[idx][0] + ', --' + self.__LONGARGS[idx] + ' ' + self.__USAGE[idx])
quit()
def __convert(self):
pcap = open(self.input_file, 'r')
csv = open(self.output_file, 'w')
lineCtn = 1
ptrLine = 1
#csv.write("Interval;Frames;Bytes\n")
#csv.write(str(0) + ';' + str(0) + ';' + str(0) + '\n')
for line in pcap:
if lineCtn > (12 + self.start_time):
if line[0] != '=':
if ptrLine > self.end_time:
break
splited = line[1:].split('|')
csv.write( str(ptrLine) + ';' + splited[1].replace(" ", "") + ';' + splited[2].replace(" ", "") + '\n')
ptrLine += 1
      lineCtn += 1
    pcap.close()
    csv.close()
def convert(prefix):
txt = prefix + '.txt'
csv = prefix + '.csv'
fileTxt = open(txt, 'r')
fileCsv = open(csv, 'w')
lineCtn = 1
ptrLine = 1
#csv.write("Interval;Frames;Bytes\n")
#csv.write(str(0) + ';' + str(0) + ';' + str(0) + '\n')
for line in fileTxt:
if lineCtn > 12:
if line[0] != '=':
if ptrLine > 50:
break
splited = line[1:].split('|')
fileCsv.write( str(ptrLine) + ';' + splited[1].replace(" ", "") + ';' + splited[2].replace(" ", "") + '\n')
ptrLine += 1
    lineCtn += 1
  fileTxt.close()
  fileCsv.close()
convert('amplifier')
convert('attacker')
convert('victim')
convert('amplifier_input')
convert('amplifier_output')
| 2,025 | 270 | 46 |
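# Illustrative sketch: the same "|frames|bytes|" text-to-CSV conversion done
# with the csv module and context managers, so files are closed even on
# errors. Field positions follow convert() above; all names are assumptions.
import csv

def convert_stats(prefix, header_lines=12, max_rows=50):
    with open(prefix + '.txt') as src, open(prefix + '.csv', 'w', newline='') as dst:
        writer = csv.writer(dst, delimiter=';')
        rows = 0
        for lineno, line in enumerate(src, start=1):
            if lineno <= header_lines or line.startswith('='):
                continue
            rows += 1
            if rows > max_rows:
                break
            fields = [f.replace(' ', '') for f in line[1:].split('|')]
            writer.writerow([rows] + fields[1:3])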
c8a775210c813d4a841be4a5bc26ac6f6f6141bb | 6,013 | py | Python | src/relstorage/adapters/postgresql/connmanager.py | enfold/relstorage | 9fcd526b537cb6537cc2ae33154b63096550f210 | [
"ZPL-2.1"
] | 40 | 2015-10-08T05:35:13.000Z | 2022-03-28T23:50:06.000Z | src/relstorage/adapters/postgresql/connmanager.py | enfold/relstorage | 9fcd526b537cb6537cc2ae33154b63096550f210 | [
"ZPL-2.1"
] | 364 | 2015-03-23T15:25:42.000Z | 2022-03-17T08:41:34.000Z | src/relstorage/adapters/postgresql/connmanager.py | enfold/relstorage | 9fcd526b537cb6537cc2ae33154b63096550f210 | [
"ZPL-2.1"
] | 33 | 2015-06-08T23:03:22.000Z | 2022-03-21T08:25:53.000Z | ##############################################################################
#
# Copyright (c) 2008 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""PostgreSQL adapter for RelStorage."""
from __future__ import absolute_import
from __future__ import print_function
import logging
from ..._util import metricmethod
from ..connmanager import AbstractConnectionManager
from .util import backend_pid_for_connection
logger = logging.getLogger(__name__)
| 42.64539 | 84 | 0.622984 | ##############################################################################
#
# Copyright (c) 2008 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""PostgreSQL adapter for RelStorage."""
from __future__ import absolute_import
from __future__ import print_function
import logging
from ..._util import metricmethod
from ..connmanager import AbstractConnectionManager
from .util import backend_pid_for_connection
logger = logging.getLogger(__name__)
class Psycopg2ConnectionManager(AbstractConnectionManager):
def __init__(self, driver, dsn, options):
self._dsn = dsn
self.isolation_read_committed = driver.ISOLATION_LEVEL_READ_COMMITTED
self.isolation_serializable = driver.ISOLATION_LEVEL_SERIALIZABLE
self.isolation_repeatable_read = driver.ISOLATION_LEVEL_REPEATABLE_READ
self.keep_history = options.keep_history
self._db_connect_with_isolation = driver.connect_with_isolation
super(Psycopg2ConnectionManager, self).__init__(options, driver)
def _alter_dsn(self, replica):
"""Alter the DSN to use the specified replica.
The replica parameter is a string specifying either host or host:port.
"""
if ':' in replica:
host, port = replica.split(':')
dsn = '%s host=%s port=%s' % (self._dsn, host, port)
else:
dsn = '%s host=%s' % (self._dsn, replica)
return dsn
@metricmethod
def open(self, isolation=None, read_only=False, deferrable=False,
replica_selector=None, application_name=None, **kwargs):
"""Open a database connection and return (conn, cursor)."""
# pylint:disable=arguments-differ
if isolation is None:
isolation = self.isolation_store
if replica_selector is None:
replica_selector = self.replica_selector
if replica_selector is not None:
replica = replica_selector.current()
dsn = self._alter_dsn(replica)
else:
replica = None
dsn = self._dsn
while True:
try:
# psycopg2 seems to have a cache of Connection objects
# so closing one and then opening again often gets the same
# object back.
conn = self._db_connect_with_isolation(
dsn,
isolation=isolation,
deferrable=deferrable,
read_only=read_only,
application_name=application_name
)
cursor = self.cursor_for_connection(conn)
conn.replica = replica
return conn, cursor
except self.driver.use_replica_exceptions as e:
if replica is not None:
next_replica = replica_selector.next()
if next_replica is not None:
logger.warning("Unable to connect to replica %s: %s, "
"now trying %s", replica, e, next_replica)
replica = next_replica
dsn = self._alter_dsn(replica)
continue
logger.warning("Unable to connect: %s", e)
raise
def _do_open_for_load(self):
# In RelStorage 1, 2 and <= 3.0b2, we used SERIALIZABLE isolation,
# while MySQL used REPEATABLE READ and Oracle used SERIALIZABLE (but
# only because of an apparent issue with RAC).
#
# Although SERIALIZABLE is much cheaper on PostgreSQL than
# most other databases, it has its issues. Most notably,
# SERIALIZABLE isn't allowed on streaming replicas
# (https://www.enterprisedb.com/blog/serializable-postgresql-11-and-beyond),
# and prior to PostgreSQL 12 it disables parallel queries (not
# that we expect many queries to be something that can benefit
# from parallel workers.)
#
# The differences that SERIALIZABLE brings shouldn't be
# relevant as we don't run the write transactions at that
# level, and we never try to commit this transaction. So it's
# mostly just overhead for tracking read anomalies that can
# never happen. And the standby issue became a problem
# (https://github.com/zodb/relstorage/issues/376) and we
# dropped down to REPEATABLE READ.
# Of course, there's a chance that if we could get the store
# connections to work in SERIALIZABLE mode, we'd be able to
# stop the explicit locking altogether. With judicious use of
# savepoints, and proper re-raising of ConflictError, that
# might be possible.
# Using READ ONLY mode lets transactions (especially
# SERIALIZABLE) elide some locks. If we were SERIALIZABLE,
# we'd probably also want to enable deferrable transactions as
# there's special support to make them cheaper (but they might
# have to wait on other serializable transactions, but since
# our only other serializable transactions would be READ ONLY
# that shouldn't matter.)
return self.open(
self.isolation_repeatable_read,
read_only=True,
deferrable=False,
replica_selector=self.ro_replica_selector,
application_name='RS: Load'
)
def describe_connection(self, conn, cursor):
return {'backend_pid': backend_pid_for_connection(conn, cursor)}
| 2,673 | 2,376 | 23 |
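# Illustrative sketch: what the load connection above amounts to in raw
# psycopg2 - a read-only REPEATABLE READ session (SERIALIZABLE was dropped
# because streaming standbys reject it). The DSN below is an assumption.
import psycopg2
import psycopg2.extensions

conn = psycopg2.connect('dbname=zodb host=replica1')
conn.set_session(
    isolation_level=psycopg2.extensions.ISOLATION_LEVEL_REPEATABLE_READ,
    readonly=True,
    deferrable=False,
)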
f17815b66a9d51bc88fc2a99940804bbcb0693ef | 1,727 | py | Python | app/api/deals.py | dev-johnlopez/assignably-old | 99f550e3e970a979234a724097ed8c940f1562c1 | [
"MIT"
] | null | null | null | app/api/deals.py | dev-johnlopez/assignably-old | 99f550e3e970a979234a724097ed8c940f1562c1 | [
"MIT"
] | null | null | null | app/api/deals.py | dev-johnlopez/assignably-old | 99f550e3e970a979234a724097ed8c940f1562c1 | [
"MIT"
] | null | null | null | from flask import jsonify, request, url_for, g, current_app, render_template
from app import db
from app.deals.models import Deal
from app.api import bp
from app.api.auth import token_auth
from app.api.errors import bad_request
from app.email import send_email
@bp.route('/deals/<int:id>', methods=['GET'])
@token_auth.login_required
@bp.route('/deals', methods=['GET'])
@token_auth.login_required
@bp.route('/deals', methods=['POST'])
@token_auth.login_required
@bp.route('/deals/<int:id>', methods=['PUT'])
@token_auth.login_required
| 33.862745 | 98 | 0.673422 | from flask import jsonify, request, url_for, g, current_app, render_template
from app import db
from app.deals.models import Deal
from app.api import bp
from app.api.auth import token_auth
from app.api.errors import bad_request
from app.email import send_email
@bp.route('/deals/<int:id>', methods=['GET'])
@token_auth.login_required
def get_deal(id):
pass
@bp.route('/deals', methods=['GET'])
@token_auth.login_required
def get_deals():
return ''
@bp.route('/deals', methods=['POST'])
@token_auth.login_required
def create_deal():
data = request.get_json() or {}
if 'address' not in data \
or 'sq_feet' not in data \
or 'bedrooms' not in data \
or 'bathrooms' not in data \
or 'after_repair_value' not in data \
or 'rehab_estimate' not in data \
or 'purchase_price' not in data:
        return bad_request('must include address, sq_feet, bedrooms, bathrooms, after_repair_value, rehab_estimate and purchase_price fields')
deal = Deal()
deal.from_dict(data)
db.session.add(deal)
db.session.commit()
send_email('New Deal Notification!',
sender=current_app.config['ADMINS'][0], recipients=[g.current_user.email],
text_body=render_template('emails/new_deal.txt', user=g.current_user, deal=deal),
html_body=render_template('emails/new_deal.html', user=g.current_user, deal=deal),
attachments=[],
sync=True)
#send_deal_notification_email(g.current_user, deal)
response = jsonify(deal.to_dict())
response.status_code = 201
response.headers['Location'] = url_for('api.get_deal', id=deal.id)
return response
@bp.route('/deals/<int:id>', methods=['PUT'])
@token_auth.login_required
def update_deal(id):
pass
| 1,099 | 0 | 88 |
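# Illustrative sketch: exercising the POST /deals endpoint above with
# Flask's test client. The app factory, URL prefix and token value are
# assumptions; only the payload fields come from create_deal().
def post_deal_example(app, token):
    client = app.test_client()
    payload = {'address': '1 Main St', 'sq_feet': 900, 'bedrooms': 2,
               'bathrooms': 1, 'after_repair_value': 150000,
               'rehab_estimate': 20000, 'purchase_price': 100000}
    resp = client.post('/api/deals', json=payload,
                       headers={'Authorization': 'Bearer ' + token})
    assert resp.status_code == 201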
442d3a64baf645e86d88223fdea6691517abbbdd | 706 | py | Python | python/03_tipos_de_dados/tipos.py | ac-gomes/python-iniciante | 002fc91facb5d89c23540d8b05073e8a3c8a4c59 | [
"MIT"
] | null | null | null | python/03_tipos_de_dados/tipos.py | ac-gomes/python-iniciante | 002fc91facb5d89c23540d8b05073e8a3c8a4c59 | [
"MIT"
] | null | null | null | python/03_tipos_de_dados/tipos.py | ac-gomes/python-iniciante | 002fc91facb5d89c23540d8b05073e8a3c8a4c59 | [
"MIT"
] | null | null | null | # The 'print()' function used below is used to display or print messages in the console.
# Integers | Int
print(10)
# The number 10 will be displayed in the console
# Floating point | Float
print(9.5)
# Character strings | Strings
cadeia_de_caracter = "Olá Mundo!"
print(cadeia_de_caracter)
# Boolean | Boolean
valor_verdadeiro = True
valor_falso = False
print("valor_verdadeiro: ", valor_verdadeiro)
print("valor_falso: ", valor_falso)
# The None data type; in Python there is no Null data type.
valor_none = None
print(valor_none)
# To check the type of data stored in a variable, use the type function
print("\n")
print(type(valor_none))
print(type(valor_verdadeiro))
print(type(cadeia_de_caracter))
 | 23.533333 | 85 | 0.763456 | # The 'print()' function used below is used to display or print messages in the console.
# Integers | Int
print(10)
# The number 10 will be displayed in the console
# Floating point | Float
print(9.5)
# Character strings | Strings
cadeia_de_caracter = "Olá Mundo!"
print(cadeia_de_caracter)
# Boolean | Boolean
valor_verdadeiro = True
valor_falso = False
print("valor_verdadeiro: ", valor_verdadeiro)
print("valor_falso: ", valor_falso)
# The None data type; in Python there is no Null data type.
valor_none = None
print(valor_none)
# To check the type of data stored in a variable, use the type function
print("\n")
print(type(valor_none))
print(type(valor_verdadeiro))
print(type(cadeia_de_caracter))
| 0 | 0 | 0 |
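# Illustrative sketch: complementing the type() checks above, isinstance()
# is the idiomatic way to test a value's type in real code.
print(isinstance(9.5, float))         # True
print(isinstance("Olá Mundo!", str))  # True
print(isinstance(None, object))       # True; None is an object, not a Null type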
c98c3446c8a67fb418f6a8db9d31a0315ee0fc3c | 6,470 | py | Python | tuprolog/jvmutils.py | DavideEva/2ppy | 55609415102f8116165a42c8e33e029c4906e160 | [
"Apache-2.0"
] | 1 | 2021-08-07T06:29:28.000Z | 2021-08-07T06:29:28.000Z | tuprolog/jvmutils.py | DavideEva/2ppy | 55609415102f8116165a42c8e33e029c4906e160 | [
"Apache-2.0"
] | 14 | 2021-09-16T13:25:12.000Z | 2022-01-03T10:12:22.000Z | tuprolog/jvmutils.py | DavideEva/2ppy | 55609415102f8116165a42c8e33e029c4906e160 | [
"Apache-2.0"
] | 1 | 2021-12-22T00:25:32.000Z | 2021-12-22T00:25:32.000Z | from tuprolog import logger
# noinspection PyUnresolvedReferences
import jpype
# noinspection PyUnresolvedReferences
import jpype.imports
# noinspection PyProtectedMember
from _jpype import _JObject as JObjectClass
# noinspection PyUnresolvedReferences
import java.util as _jutils
# noinspection PyUnresolvedReferences
import java.lang as _jlang
# noinspection PyUnresolvedReferences
import kotlin as _kotlin
# noinspection PyUnresolvedReferences
import kotlin.sequences as _ksequences
# noinspection PyUnresolvedReferences
import it.unibo.tuprolog.utils as _tuprolog_utils
from typing import Iterable as PyIterable
from typing import Iterator as PyIterator
from typing import Mapping, MutableMapping, Callable, Any
from .jvmioutils import *
Arrays = _jutils.Arrays
ArrayList = _jutils.ArrayList
Iterator = _jutils.Iterator
Map = _jutils.Map
NoSuchElementException = _jutils.NoSuchElementException
Iterable = _jlang.Iterable
JavaSystem = _jlang.System
Object = _jlang.Object
Pair = _kotlin.Pair
Triple = _kotlin.Triple
Sequence = _ksequences.Sequence
SequencesKt = _ksequences.SequencesKt
PyUtils = _tuprolog_utils.PyUtils
@jpype.JImplements("java.util.Iterator", deferred=True)
@jpype.JImplements("java.lang.Iterable", deferred=True)
@jpype.JConversion("kotlin.Pair", instanceof=PyIterable, excludes=str)
@jpype.JConversion("kotlin.Triple", instanceof=PyIterable, excludes=str)
@jpype.JConversion("java.lang.Iterable", instanceof=PyIterable, excludes=str)
# replaces the default __repr__ implementation for java objects, making them use _java_obj_repr
JObjectClass.__repr__ = _java_obj_repr
@jpype.JImplementationFor("kotlin.sequences.Sequence")
@jpype.JConversion("kotlin.sequences.Sequence", instanceof=PyIterable, excludes=str)
@jpype.JImplementationFor("java.util.stream.Stream")
@jpype.JImplementationFor("java.lang.Comparable")
@jpype.JImplementationFor("java.lang.Throwable")
_kt_function_classes: MutableMapping[int, Any] = dict()
logger.debug("Configure JVM-specific extensions")
| 24.323308 | 95 | 0.696909 | from tuprolog import logger
# noinspection PyUnresolvedReferences
import jpype
# noinspection PyUnresolvedReferences
import jpype.imports
# noinspection PyProtectedMember
from _jpype import _JObject as JObjectClass
# noinspection PyUnresolvedReferences
import java.util as _jutils
# noinspection PyUnresolvedReferences
import java.lang as _jlang
# noinspection PyUnresolvedReferences
import kotlin as _kotlin
# noinspection PyUnresolvedReferences
import kotlin.sequences as _ksequences
# noinspection PyUnresolvedReferences
import it.unibo.tuprolog.utils as _tuprolog_utils
from typing import Iterable as PyIterable
from typing import Iterator as PyIterator
from typing import Mapping, MutableMapping, Callable, Any
from .jvmioutils import *
Arrays = _jutils.Arrays
ArrayList = _jutils.ArrayList
Iterator = _jutils.Iterator
Map = _jutils.Map
NoSuchElementException = _jutils.NoSuchElementException
Iterable = _jlang.Iterable
JavaSystem = _jlang.System
Object = _jlang.Object
Pair = _kotlin.Pair
Triple = _kotlin.Triple
Sequence = _ksequences.Sequence
SequencesKt = _ksequences.SequencesKt
PyUtils = _tuprolog_utils.PyUtils
def protect_iterable(iterable: Iterable) -> Iterable:
return PyUtils.iterable(iterable)
@jpype.JImplements("java.util.Iterator", deferred=True)
class _IteratorAdapter(object):
def __init__(self, iterator):
assert isinstance(iterator, PyIterator)
self._iterator = iterator
self._queue = None
@jpype.JOverride
def hasNext(self):
if self._queue is None:
try:
self._queue = [next(self._iterator)]
return True
except StopIteration:
return False
elif len(self._queue) > 0:
return True
else:
try:
self._queue.append(next(self._iterator))
return True
except StopIteration:
return False
@jpype.JOverride
def next(self):
if self.hasNext():
return self._queue.pop(0)
else:
raise NoSuchElementException()
@jpype.JImplements("java.lang.Iterable", deferred=True)
class _IterableAdapter(object):
def __init__(self, iterable):
assert isinstance(iterable, PyIterable)
self._iterable = iterable
@jpype.JOverride
def iterator(self):
return _IteratorAdapter(iter(self._iterable))
def kpair(items: PyIterable) -> Pair:
if isinstance(items, Pair):
return items
i = iter(items)
first = next(i)
second = next(i)
return Pair(first, second)
@jpype.JConversion("kotlin.Pair", instanceof=PyIterable, excludes=str)
def _kt_pair_covert(jcls, obj):
return kpair(obj)
def ktriple(items: PyIterable) -> Triple:
if isinstance(items, Triple):
return items
i = iter(items)
first = next(i)
second = next(i)
third = next(i)
return Triple(first, second, third)
@jpype.JConversion("kotlin.Triple", instanceof=PyIterable, excludes=str)
def _kt_triple_covert(jcls, obj):
return ktriple(obj)
def jlist(iterable: PyIterable) -> Iterable:
assert isinstance(iterable, PyIterable)
if isinstance(iterable, list):
return Arrays.asList(iterable)
lst = ArrayList()
for item in iterable:
lst.add(item)
return lst
def jiterable(iterable: PyIterable) -> Iterable:
assert isinstance(iterable, PyIterable)
return _IterableAdapter(iterable)
@jpype.JConversion("java.lang.Iterable", instanceof=PyIterable, excludes=str)
def _java_iterable_convert(jcls, obj):
return jiterable(obj)
def jarray(type, rank: int = 1):
return jpype.JArray(type, rank)
def jiterator(iterator: PyIterator) -> Iterator:
assert isinstance(iterator, PyIterator)
return _IteratorAdapter(iterator)
def jmap(dictionary: Mapping) -> Map:
assert isinstance(dictionary, Mapping)
return Map@dictionary
def _java_obj_repr(java_object: Object) -> str:
return str(java_object.toString())
# replaces the default __repr__ implementation for java objects, making them use _java_obj_repr
JObjectClass.__repr__ = _java_obj_repr
@jpype.JImplementationFor("kotlin.sequences.Sequence")
class _KtSequence:
def __jclass_init__(self):
PyIterable.register(self)
def __iter__(self):
return protect_iterable(self).iterator()
def ksequence(iterable: PyIterable) -> Sequence:
return SequencesKt.asSequence(jiterable(iterable))
@jpype.JConversion("kotlin.sequences.Sequence", instanceof=PyIterable, excludes=str)
def _kt_sequence_convert(jcls, obj):
return ksequence(obj)
@jpype.JImplementationFor("java.util.stream.Stream")
class _JvmStream:
def __jclass_init__(self):
PyIterable.register(self)
def __iter__(self):
return self.iterator()
@jpype.JImplementationFor("java.lang.Comparable")
class _JvmComparable:
def __jclass_init__(self):
pass
def __lt__(self, other):
return self.compareTo(other) < 0
def __gt__(self, other):
return self.compareTo(other) > 0
def __le__(self, other):
return self.compareTo(other) <= 0
def __ge__(self, other):
return self.compareTo(other) >= 0
@jpype.JImplementationFor("java.lang.Throwable")
class _JvmThrowable:
def __jclass_init__(self):
pass
@property
def message(self):
return self.getMessage()
@property
def localized_message(self):
return self.getLocalizedMessage()
@property
def cause(self):
return self.getCause()
class _KtFunction(Callable):
def __init__(self, arity: int, function: Callable):
self._function = function
self._arity = arity
def invoke(self, *args):
assert len(args) == self._arity
return self._function(*args)
def __call__(self, *args):
return self.invoke(*args)
_kt_function_classes: MutableMapping[int, Any] = dict()
def kfunction(arity: int):
if arity not in _kt_function_classes:
@jpype.JImplements("kotlin.jvm.functions.Function" + str(arity), deferred=True)
class _KtFunctionN(_KtFunction):
def __init__(self, f):
super().__init__(arity, f)
@jpype.JOverride
def invoke(self, *args):
return super().invoke(*args)
_kt_function_classes[arity] = _KtFunctionN
return _kt_function_classes[arity]
logger.debug("Configure JVM-specific extensions")
| 3,246 | 364 | 816 |
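# Illustrative sketch: intended use of the adapters above, assuming a JVM
# was already started with jpype.startJVM() and the tuProlog jars on the
# classpath (both assumptions; the commented lines run only in that setup).
def naturals(limit):
    n = 0
    while n < limit:
        yield n
        n += 1

# java_iterable = jiterable(naturals(5))   # Python generator -> java.lang.Iterable
# add_one = kfunction(1)(lambda x: x + 1)  # Python lambda -> kotlin Function1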
b093d851bb61a63849ee99c0bd9b7eb617be0eb7 | 111 | py | Python | investpy/resources/__init__.py | mdarblade/investpy | 7ace4ac7693f505c199074de3333f56e6b89cfef | [
"MIT"
] | null | null | null | investpy/resources/__init__.py | mdarblade/investpy | 7ace4ac7693f505c199074de3333f56e6b89cfef | [
"MIT"
] | null | null | null | investpy/resources/__init__.py | mdarblade/investpy | 7ace4ac7693f505c199074de3333f56e6b89cfef | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Copyright 2018-2019 Alvaro Bartolome @ alvarob96 in GitHub
# See LICENSE for details.
| 22.2 | 60 | 0.756757 | #!/usr/bin/env python
# Copyright 2018-2019 Alvaro Bartolome @ alvarob96 in GitHub
# See LICENSE for details.
| 0 | 0 | 0 |
9cea7631436a08def5fea287f8493cd8f8305f3c | 1,817 | py | Python | spotdl/types/saved.py | TLINDEN/spotdl-v4 | 30112816ff49e19f76fa54299ff2e94d2d2e65cd | [
"MIT"
] | 3 | 2021-11-24T17:11:16.000Z | 2021-12-19T05:49:38.000Z | spotdl/types/saved.py | TLINDEN/spotdl-v4 | 30112816ff49e19f76fa54299ff2e94d2d2e65cd | [
"MIT"
] | 2 | 2021-11-19T20:49:17.000Z | 2021-11-19T20:49:26.000Z | spotdl/types/saved.py | TLINDEN/spotdl-v4 | 30112816ff49e19f76fa54299ff2e94d2d2e65cd | [
"MIT"
] | 1 | 2021-12-21T01:35:29.000Z | 2021-12-21T01:35:29.000Z | from dataclasses import dataclass
from typing import List
from spotdl.types.song import Song
from spotdl.utils.spotify import SpotifyClient
class SavedError(Exception):
"""
Base class for all exceptions related to saved tracks.
"""
@dataclass(frozen=True)
| 27.953846 | 75 | 0.619703 | from dataclasses import dataclass
from typing import List
from spotdl.types.song import Song
from spotdl.utils.spotify import SpotifyClient
class SavedError(Exception):
"""
Base class for all exceptions related to saved tracks.
"""
@dataclass(frozen=True)
class Saved:
tracks: List[Song]
@classmethod
def load(cls):
"""
Loads saved tracks from Spotify.
Will throw an exception if users is not logged in.
"""
urls = cls.get_urls()
# Remove songs without id
# and create Song objects
tracks = [Song.from_url(url) for url in urls]
return cls(tracks)
@staticmethod
def get_urls() -> List[str]:
"""
Returns a list of urls of all saved tracks.
"""
spotify_client = SpotifyClient()
if spotify_client.user_auth is False: # type: ignore
raise SavedError("You must be logged in to use this function.")
saved_tracks_response = spotify_client.current_user_saved_tracks()
if saved_tracks_response is None:
raise Exception("Couldn't get saved tracks")
saved_tracks = saved_tracks_response["items"]
# Fetch all saved tracks
while saved_tracks_response and saved_tracks_response["next"]:
response = spotify_client.next(saved_tracks_response)
# response is wrong, break
if response is None:
break
saved_tracks_response = response
saved_tracks.extend(saved_tracks_response["items"])
# Remove songs without id
# and return urls
return [
"https://open.spotify.com/track/" + track["track"]["id"]
for track in saved_tracks
if track and track.get("track", {}).get("id")
]
| 0 | 1,523 | 22 |
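# Illustrative sketch: typical use of the Saved dataclass above. It assumes
# SpotifyClient.init(...) was already called with user authorization, which
# the load() docstring requires.
from spotdl.types.saved import Saved

def show_saved_count():
    saved = Saved.load()  # raises SavedError if the user is not logged in
    print(len(saved.tracks), 'saved tracks')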
b832291fdac4819b20b9c725cc9297678bc16751 | 725 | py | Python | scripts/countries.py | rizel/timewarrior-southamerica-holidays | ba412e96b6ab72efef51bf786148476a31003e8c | [
"MIT"
] | null | null | null | scripts/countries.py | rizel/timewarrior-southamerica-holidays | ba412e96b6ab72efef51bf786148476a31003e8c | [
"MIT"
] | 1 | 2021-02-28T19:30:44.000Z | 2021-03-09T04:09:54.000Z | scripts/countries.py | rizel/timewarrior-southamerica-holidays | ba412e96b6ab72efef51bf786148476a31003e8c | [
"MIT"
] | null | null | null | #!/usr/bin/python3.6.0
# -*- coding: utf-8 -*-
COUNTRIES = {
"argentina" : ".com.ar",
"bolivia" : ".com.bo",
"brasil" : "http://www.public-holidays.us/BR_ES_{0}_Feriados%20nacionais",
"chile" : ".cl",
"colombia" : ".co",
"ecuador" : ".la/ecuador",
"guyana" : ".gy",
"paraguay" : ".com.py",
"peru" : ".pe",
"suriname" : ".la/suriname",
"trinidad-and-tobago" : ".la/trinidad-and-tobago",
"uruguay" : ".la/uruguay",
"venezuela" : ".com.ve",
"french-guiana" : ".la/french-guiana"
}
ENGLISH_CONTENTS = ["trinidad-and-tobago", "suriname", "french-guiana", "guyana"] | 34.52381 | 88 | 0.475862 | #!/usr/bin/python3.6.0
# -*- coding: utf-8 -*-
COUNTRIES = {
"argentina" : ".com.ar",
"bolivia" : ".com.bo",
"brasil" : "http://www.public-holidays.us/BR_ES_{0}_Feriados%20nacionais",
"chile" : ".cl",
"colombia" : ".co",
"ecuador" : ".la/ecuador",
"guyana" : ".gy",
"paraguay" : ".com.py",
"peru" : ".pe",
"suriname" : ".la/suriname",
"trinidad-and-tobago" : ".la/trinidad-and-tobago",
"uruguay" : ".la/uruguay",
"venezuela" : ".com.ve",
"french-guiana" : ".la/french-guiana"
}
ENGLISH_CONTENTS = ["trinidad-and-tobago", "suriname", "french-guiana", "guyana"] | 0 | 0 | 0 |
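# Illustrative sketch: how the COUNTRIES mapping above is presumably
# consumed - suffixes are appended to a base holiday-site URL, while the
# 'brasil' entry already carries a full URL template. BASE is an assumption.
BASE = 'http://www.feriados'

def holidays_url(country, year):
    entry = COUNTRIES[country]
    if entry.startswith('http'):
        return entry.format(year)
    return BASE + entry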
a5ed3b336bf9f20cb77799f38ee5d60cd1216026 | 7,239 | py | Python | src/config/cmssh_extension.py | dmwm/cmssh | 0cd6e104185938d21b10b053479e890c9f4f3b57 | [
"Apache-2.0"
] | 2 | 2016-07-26T18:36:03.000Z | 2017-05-09T08:34:41.000Z | src/config/cmssh_extension.py | dmwm/cmssh | 0cd6e104185938d21b10b053479e890c9f4f3b57 | [
"Apache-2.0"
] | 1 | 2015-01-30T16:00:13.000Z | 2015-01-31T21:59:29.000Z | src/config/cmssh_extension.py | dmwm/cmssh | 0cd6e104185938d21b10b053479e890c9f4f3b57 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#-*- coding: ISO-8859-1 -*-
#pylint: disable-msg=E1101,C0103,R0902
# system modules
import os
import sys
import stat
import time
import thread
import traceback
from types import GeneratorType
# ipython modules
import IPython
from IPython import release
# cmssh modules
import cmssh
from cmssh.iprint import PrintManager, print_error, print_warning, print_info
from cmssh.debug import DebugManager
from cmssh.cms_cmds import dbs_instance, Magic, cms_find, cms_du
from cmssh.cms_cmds import cms_ls, cms_cp, verbose, cmscrab
from cmssh.cms_cmds import cms_rm, cms_rmdir, cms_mkdir, cms_root, cms_xrdcp
from cmssh.cms_cmds import cms_install, cms_releases, cms_info, debug_http
from cmssh.cms_cmds import cmsrel, cmsrun, cms_help, cms_arch, cms_vomsinit
from cmssh.cms_cmds import cms_help_msg, results, cms_apt, cms_das, cms_das_json
from cmssh.cms_cmds import github_issues, demo, cms_json, cms_jobs, cmsenv
from cmssh.cms_cmds import cms_lumi, integration_tests, cms_read
from cmssh.cms_cmds import cms_config, cms_commands, cms_pager
def unregister():
"""Unregister shell"""
ID.prompt = "cms-sh"
ID.name = "cms-sh"
ID.dict[ID.name] = []
ID.funcList = []
def register(prompt, name, funcList=[]):
"""Register shell"""
set_prompt(prompt)
ID.prompt = prompt
ID.name = name
funcList.sort()
ID.dict[name] = funcList
if funcList:
print_info("Available commands within %s sub-shell:" % prompt)
if funcList:
if not funcList.count('_exit'):
funcList.append('_exit')
for func in funcList:
print_info("%s %s" % (" "*10, func))
if not ID.funcList.count(func):
ID.funcList.append(func)
else:
ID.funcList = funcList
def set_prompt(in1):
"""Define shell prompt"""
ip = get_ipython()
prompt = '%s|\#> ' % in1
ip.prompt_manager.width = len(prompt)-1
ip.prompt_manager.in_template = prompt
#
# load managers
#
try:
DEBUG = DebugManager()
ID = ShellName()
except:
traceback.print_exc()
# list of cms-sh magic functions
cmsMagicList = [ \
# generic commands, we use Magic class and its execute function
('cvs', Magic('cvs').execute),
('svn', Magic('svn').execute),
('ssh', Magic('ssh').subprocess),
('kinit', Magic('kinit').subprocess),
('klist', Magic('klist').execute),
('kdestroy', Magic('kdestroy').execute),
('git', Magic('git').execute),
('echo', Magic('echo').execute),
('grep', Magic('grep').execute),
('tail', Magic('tail').execute),
('tar', Magic('tar').execute),
('zip', Magic('zip').execute),
('chmod', Magic('chmod').execute),
('vim', Magic('vim').subprocess),
('python', Magic('python').execute),
('env', Magic('env').execute),
('pip', Magic('pip').subprocess),
# CMS commands
('cmsenv', cmsenv),
('scram', Magic('scramv1').execute),
('vomsinit', cms_vomsinit),
('vomsinfo', Magic('voms-proxy-info').execute),
# specific commands whose execution depends on conditions
('crab', cmscrab),
('read', cms_read),
('jobs', cms_jobs),
('config', cms_config),
('commands', cms_commands),
('das', cms_das),
('das_json', cms_das_json),
('apt', cms_apt),
('xrdcp', cms_xrdcp),
('root', cms_root),
('find', cms_find),
('du', cms_du),
('ls', cms_ls),
('info', cms_info),
('lumi', cms_lumi),
('cms_json', cms_json),
('rm', cms_rm),
('mkdir', cms_mkdir),
('rmdir', cms_rmdir),
('cp', cms_cp),
('verbose', verbose),
('debug_http', debug_http),
('install', cms_install),
('releases', cms_releases),
('dbs_instance', dbs_instance),
('cmsrel', cmsrel),
('cmsRun', cmsrun),
('cmsrun', cmsrun),
('cmshelp', cms_help),
('arch', cms_arch),
('tickets', github_issues),
('ticket', github_issues),
('demo', demo),
('test', integration_tests),
('pager', cms_pager),
]
if os.environ.get('CMSSH_EOS', 0):
eos = '/afs/cern.ch/project/eos/installation/cms/bin/eos.select'
cmsMagicList.append(('eos', Magic(eos).execute))
def check_0400(kfile):
"Check 0400 permission of given file"
mode = os.stat(kfile).st_mode
cond = bool(mode & stat.S_IRUSR) and not bool(mode & stat.S_IWUSR) \
and not bool(mode & stat.S_IXUSR) \
and not bool(mode & stat.S_IRWXO) \
and not bool(mode & stat.S_IRWXG)
return cond
def check_0600(kfile):
"Check 0600 permission of given file"
mode = os.stat(kfile).st_mode
cond = bool(mode & stat.S_IRUSR) and not bool(mode & stat.S_IXUSR) \
and not bool(mode & stat.S_IRWXO) \
and not bool(mode & stat.S_IRWXG)
return cond
def test_key_cert():
"""Test user key/cert file and their permissions"""
kfile = os.path.join(os.environ['HOME'], '.globus/userkey.pem')
cfile = os.path.join(os.environ['HOME'], '.globus/usercert.pem')
if os.path.isfile(kfile):
if not (check_0600(kfile) or check_0400(kfile)):
msg = "File %s has weak permission settings, try" % kfile
print_warning(msg)
print "chmod 0400 %s" % kfile
else:
print_error("File %s does not exists, grid/cp commands will not work" % kfile)
if os.path.isfile(cfile):
if not (check_0600(cfile) or check_0400(cfile)):
msg = "File %s has weak permission settings, try" % cfile
print_warning(msg)
print "chmod 0600 %s" % cfile
else:
msg = "File %s does not exists, grid/cp commands will not work" % cfile
print_error(msg)
#
# Main function
#
def main(ipython):
"""Define custom extentions"""
# global IP API
ip = ipython
# load cms modules and expose them to the shell
for m in cmsMagicList:
magic_name = 'magic_%s' % m[0]
if hasattr(ip, 'register_magic_function'): # ipython 0.13 and above
magic_kind = 'line'
func = m[1]
name = m[0]
ip.register_magic_function(func, magic_kind, name)
else: # ipython 0.12 and below
setattr(ip, magic_name, m[1])
# import required modules for the shell
ip.ex("import os")
ip.ex("from cmssh.cms_cmds import results, cms_vomsinit")
ip.ex("from cmssh.auth_utils import PEMMGR, read_pem")
ip.ex("read_pem()")
ip.ex("cms_vomsinit()")
ip.ex("os.environ['CMSSH_PAGER']='0'")
# Set cmssh prompt
prompt = 'cms-sh'
ip.prompt_manager.in_template = '%s|\#> ' % prompt
print cms_help_msg()
    # check existence and permission of key/cert
test_key_cert()
def load_ipython_extension(ipython):
"""Load custom extensions"""
# The ``ipython`` argument is the currently active
# :class:`InteractiveShell` instance that can be used in any way.
    # This allows you to do things like register new magics, plugins or
# aliases.
main(ipython)
| 31.473913 | 86 | 0.6226 | #!/usr/bin/env python
#-*- coding: ISO-8859-1 -*-
#pylint: disable-msg=E1101,C0103,R0902
# system modules
import os
import sys
import stat
import time
import thread
import traceback
from types import GeneratorType
# ipython modules
import IPython
from IPython import release
# cmssh modules
import cmssh
from cmssh.iprint import PrintManager, print_error, print_warning, print_info
from cmssh.debug import DebugManager
from cmssh.cms_cmds import dbs_instance, Magic, cms_find, cms_du
from cmssh.cms_cmds import cms_ls, cms_cp, verbose, cmscrab
from cmssh.cms_cmds import cms_rm, cms_rmdir, cms_mkdir, cms_root, cms_xrdcp
from cmssh.cms_cmds import cms_install, cms_releases, cms_info, debug_http
from cmssh.cms_cmds import cmsrel, cmsrun, cms_help, cms_arch, cms_vomsinit
from cmssh.cms_cmds import cms_help_msg, results, cms_apt, cms_das, cms_das_json
from cmssh.cms_cmds import github_issues, demo, cms_json, cms_jobs, cmsenv
from cmssh.cms_cmds import cms_lumi, integration_tests, cms_read
from cmssh.cms_cmds import cms_config, cms_commands, cms_pager
class ShellName(object):
def __init__(self):
"""Hold information about the shell"""
self.prompt = "cms-sh"
self.name = 'cmsHelp'
self.dict = {}
self.funcList = []
def unregister():
"""Unregister shell"""
ID.prompt = "cms-sh"
ID.name = "cms-sh"
ID.dict[ID.name] = []
ID.funcList = []
def register(prompt, name, funcList=[]):
"""Register shell"""
set_prompt(prompt)
ID.prompt = prompt
ID.name = name
funcList.sort()
ID.dict[name] = funcList
if funcList:
print_info("Available commands within %s sub-shell:" % prompt)
if funcList:
if not funcList.count('_exit'):
funcList.append('_exit')
for func in funcList:
print_info("%s %s" % (" "*10, func))
if not ID.funcList.count(func):
ID.funcList.append(func)
else:
ID.funcList = funcList
def set_prompt(in1):
"""Define shell prompt"""
ip = get_ipython()
prompt = '%s|\#> ' % in1
ip.prompt_manager.width = len(prompt)-1
ip.prompt_manager.in_template = prompt
#
# load managers
#
try:
DEBUG = DebugManager()
ID = ShellName()
except:
traceback.print_exc()
# list of cms-sh magic functions
cmsMagicList = [ \
# generic commands, we use Magic class and its execute function
('cvs', Magic('cvs').execute),
('svn', Magic('svn').execute),
('ssh', Magic('ssh').subprocess),
('kinit', Magic('kinit').subprocess),
('klist', Magic('klist').execute),
('kdestroy', Magic('kdestroy').execute),
('git', Magic('git').execute),
('echo', Magic('echo').execute),
('grep', Magic('grep').execute),
('tail', Magic('tail').execute),
('tar', Magic('tar').execute),
('zip', Magic('zip').execute),
('chmod', Magic('chmod').execute),
('vim', Magic('vim').subprocess),
('python', Magic('python').execute),
('env', Magic('env').execute),
('pip', Magic('pip').subprocess),
# CMS commands
('cmsenv', cmsenv),
('scram', Magic('scramv1').execute),
('vomsinit', cms_vomsinit),
('vomsinfo', Magic('voms-proxy-info').execute),
# specific commands whose execution depends on conditions
('crab', cmscrab),
('read', cms_read),
('jobs', cms_jobs),
('config', cms_config),
('commands', cms_commands),
('das', cms_das),
('das_json', cms_das_json),
('apt', cms_apt),
('xrdcp', cms_xrdcp),
('root', cms_root),
('find', cms_find),
('du', cms_du),
('ls', cms_ls),
('info', cms_info),
('lumi', cms_lumi),
('cms_json', cms_json),
('rm', cms_rm),
('mkdir', cms_mkdir),
('rmdir', cms_rmdir),
('cp', cms_cp),
('verbose', verbose),
('debug_http', debug_http),
('install', cms_install),
('releases', cms_releases),
('dbs_instance', dbs_instance),
('cmsrel', cmsrel),
('cmsRun', cmsrun),
('cmsrun', cmsrun),
('cmshelp', cms_help),
('arch', cms_arch),
('tickets', github_issues),
('ticket', github_issues),
('demo', demo),
('test', integration_tests),
('pager', cms_pager),
]
if os.environ.get('CMSSH_EOS', 0):
eos = '/afs/cern.ch/project/eos/installation/cms/bin/eos.select'
cmsMagicList.append(('eos', Magic(eos).execute))
def check_0400(kfile):
"Check 0400 permission of given file"
mode = os.stat(kfile).st_mode
cond = bool(mode & stat.S_IRUSR) and not bool(mode & stat.S_IWUSR) \
and not bool(mode & stat.S_IXUSR) \
and not bool(mode & stat.S_IRWXO) \
and not bool(mode & stat.S_IRWXG)
return cond
def check_0600(kfile):
"Check 0600 permission of given file"
mode = os.stat(kfile).st_mode
cond = bool(mode & stat.S_IRUSR) and not bool(mode & stat.S_IXUSR) \
and not bool(mode & stat.S_IRWXO) \
and not bool(mode & stat.S_IRWXG)
return cond
def test_key_cert():
"""Test user key/cert file and their permissions"""
kfile = os.path.join(os.environ['HOME'], '.globus/userkey.pem')
cfile = os.path.join(os.environ['HOME'], '.globus/usercert.pem')
if os.path.isfile(kfile):
if not (check_0600(kfile) or check_0400(kfile)):
msg = "File %s has weak permission settings, try" % kfile
print_warning(msg)
print "chmod 0400 %s" % kfile
else:
print_error("File %s does not exists, grid/cp commands will not work" % kfile)
if os.path.isfile(cfile):
if not (check_0600(cfile) or check_0400(cfile)):
msg = "File %s has weak permission settings, try" % cfile
print_warning(msg)
print "chmod 0600 %s" % cfile
else:
msg = "File %s does not exists, grid/cp commands will not work" % cfile
print_error(msg)
#
# Main function
#
def main(ipython):
"""Define custom extentions"""
# global IP API
ip = ipython
# load cms modules and expose them to the shell
for m in cmsMagicList:
magic_name = 'magic_%s' % m[0]
if hasattr(ip, 'register_magic_function'): # ipython 0.13 and above
magic_kind = 'line'
func = m[1]
name = m[0]
ip.register_magic_function(func, magic_kind, name)
else: # ipython 0.12 and below
setattr(ip, magic_name, m[1])
# import required modules for the shell
ip.ex("import os")
ip.ex("from cmssh.cms_cmds import results, cms_vomsinit")
ip.ex("from cmssh.auth_utils import PEMMGR, read_pem")
ip.ex("read_pem()")
ip.ex("cms_vomsinit()")
ip.ex("os.environ['CMSSH_PAGER']='0'")
# Set cmssh prompt
prompt = 'cms-sh'
ip.prompt_manager.in_template = '%s|\#> ' % prompt
print cms_help_msg()
    # check existence and permission of key/cert
test_key_cert()
def load_ipython_extension(ipython):
"""Load custom extensions"""
# The ``ipython`` argument is the currently active
# :class:`InteractiveShell` instance that can be used in any way.
    # This allows you to do things like register new magics, plugins or
# aliases.
main(ipython)
| 0 | 195 | 23 |
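# Illustrative sketch: the register_magic_function branch above on a modern
# IPython, reduced to its core. The magic name and body are assumptions.
def load_example_extension(ipython):
    def hello(line):
        print('hello', line)
    ipython.register_magic_function(hello, magic_kind='line', magic_name='hello')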
ed259b12f038032d8c8c3e7e6c607d1791e80efe | 8,191 | py | Python | python/paddle/fluid/tests/unittests/test_program_prune_backward.py | frankwhzhang/Paddle | 131b1dc3240e53ea295cc49323bb2a7e7dcc717f | [
"Apache-2.0"
] | 3 | 2019-07-17T09:30:31.000Z | 2021-12-27T03:16:55.000Z | python/paddle/fluid/tests/unittests/test_program_prune_backward.py | frankwhzhang/Paddle | 131b1dc3240e53ea295cc49323bb2a7e7dcc717f | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/tests/unittests/test_program_prune_backward.py | frankwhzhang/Paddle | 131b1dc3240e53ea295cc49323bb2a7e7dcc717f | [
"Apache-2.0"
] | 4 | 2019-09-30T02:15:34.000Z | 2019-09-30T02:41:30.000Z | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import contextlib
import numpy as np
import paddle.fluid as fluid
import paddle.fluid.core as core
from simple_nets import init_data, simple_fc_net, fc_with_batchnorm
import seresnext_net
from test_parallel_executor_transformer import transformer, get_feed_data_reader
from fake_reader import fake_imdb_reader
if __name__ == '__main__':
unittest.main()
| 38.455399 | 85 | 0.626297 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import contextlib
import numpy as np
import paddle.fluid as fluid
import paddle.fluid.core as core
from simple_nets import init_data, simple_fc_net, fc_with_batchnorm
import seresnext_net
from test_parallel_executor_transformer import transformer, get_feed_data_reader
from fake_reader import fake_imdb_reader
def lstm_net(use_feed):
dict_dim = 5147
emb_dim = 128
hid_dim = 128
hid_dim2 = 96
class_dim = 2
emb_lr = 30.0
data = fluid.layers.data(
name="words", shape=[1], dtype="int64", lod_level=1)
label = fluid.layers.data(name="label", shape=[1], dtype="int64")
emb = fluid.layers.embedding(
input=data,
size=[dict_dim, emb_dim],
param_attr=fluid.ParamAttr(learning_rate=emb_lr))
fc0 = fluid.layers.fc(input=emb, size=hid_dim * 4)
lstm_h, c = fluid.layers.dynamic_lstm(
input=fc0, size=hid_dim * 4, is_reverse=False)
lstm_max = fluid.layers.sequence_pool(input=lstm_h, pool_type='max')
lstm_max_tanh = fluid.layers.tanh(lstm_max)
fc1 = fluid.layers.fc(input=lstm_max_tanh, size=hid_dim2, act='tanh')
prediction = fluid.layers.fc(input=fc1, size=class_dim, act='softmax')
cost = fluid.layers.cross_entropy(input=prediction, label=label)
avg_cost = fluid.layers.mean(x=cost)
return avg_cost
def simple_fc_net_with_accuracy(use_feed):
img = fluid.layers.data(name='image', shape=[784], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
hidden = img
for _ in range(4):
hidden = fluid.layers.fc(
hidden,
size=200,
act='relu',
bias_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(value=1.0)))
prediction = fluid.layers.fc(hidden, size=10, act='softmax')
loss = fluid.layers.cross_entropy(input=prediction, label=label)
loss = fluid.layers.mean(loss)
accuracy_out = fluid.layers.accuracy(input=prediction, label=label, k=5)
return loss
class TestProgramPruneBackward(unittest.TestCase):
def program_compare(self, program_a, program_b):
assert isinstance(
program_a, fluid.framework.
Program), "The first argument should be fluid.framework.Program."
assert isinstance(
program_b, fluid.framework.
Program), "The second argument should be fluid.framework Program."
self.assertEqual(len(program_a.blocks), len(program_b.blocks))
for idx in range(len(program_a.blocks)):
block_a = program_a.blocks[idx]
block_b = program_b.blocks[idx]
self.assertEqual(len(block_a.ops), len(block_b.ops))
self.assertEqual(len(block_a.vars), len(block_b.vars))
for op_idx in range(len(block_a.ops)):
self.assertEqual(block_a.ops[op_idx].type,
block_b.ops[op_idx].type)
for var_key in list(block_a.vars.keys()):
self.assertTrue(block_b.has_var(var_key))
def check_prune_correctness(self, method, feed_dict, optimizer):
loss = method(use_feed=False)
main_program = fluid.default_main_program()
test_prog_orig = main_program.clone(for_test=True)
optimizer().minimize(loss)
test_prog_prune = main_program.clone(for_test=True)
self.program_compare(test_prog_orig, test_prog_prune)
place = core.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
loss_data_prune, = exe.run(test_prog_prune,
feed=feed_dict,
fetch_list=[loss.name])
loss_data_orig, = exe.run(test_prog_orig,
feed=feed_dict,
fetch_list=[loss.name])
self.assertEqual(loss_data_orig, loss_data_prune)
def test_simple_fc_net(self):
def optimizer():
optimizer = fluid.optimizer.SGD(
learning_rate=0.001,
regularization=fluid.regularizer.L2Decay(1e-4))
return optimizer
with self.program_scope_guard():
img, label = init_data()
self.check_prune_correctness(
method=simple_fc_net,
feed_dict={"image": img,
"label": label},
optimizer=optimizer)
def test_simple_fc_net_with_accuracy(self):
def optimizer():
optimizer = fluid.optimizer.SGD(
learning_rate=0.001,
regularization=fluid.regularizer.L2Decay(1e-4))
return optimizer
with self.program_scope_guard():
img, label = init_data()
self.check_prune_correctness(
method=simple_fc_net_with_accuracy,
feed_dict={"image": img,
"label": label},
optimizer=optimizer)
def test_batchnorm_fc(self):
def optimizer():
optimizer = fluid.optimizer.SGD(
learning_rate=0.001,
regularization=fluid.regularizer.L2Decay(1e-4))
return optimizer
with self.program_scope_guard():
img, label = init_data()
self.check_prune_correctness(
method=fc_with_batchnorm,
feed_dict={"image": img,
"label": label},
optimizer=optimizer)
def test_seresnet(self):
with self.program_scope_guard():
self.check_prune_correctness(
method=seresnext_net.model,
feed_dict=seresnext_net.feed_dict(use_cuda=False),
optimizer=seresnext_net.optimizer)
def test_transformer(self):
def optimizer():
optimizer = fluid.optimizer.Adam(
learning_rate=0.001,
regularization=fluid.regularizer.L2Decay(1e-4))
return optimizer
with self.program_scope_guard():
# the program argument is used to distinguish Program and CompiledProgram
feed_dict = get_feed_data_reader().get_next(
fluid.Executor(core.CPUPlace()), fluid.default_main_program())
self.check_prune_correctness(
method=transformer, feed_dict=feed_dict, optimizer=optimizer)
def test_lstm(self):
def optimizer():
optimizer = fluid.optimizer.Adagrad(
learning_rate=0.001,
regularization=fluid.regularizer.L2Decay(1e-4))
return optimizer
with self.program_scope_guard():
word_dict_size = 5147
reader = fake_imdb_reader(word_dict_size, 1)
data = fluid.layers.data(
name="words", shape=[1], dtype="int64", lod_level=1)
label = fluid.layers.data(name="label", shape=[1], dtype="int64")
feeder = fluid.DataFeeder(
feed_list=[data, label], place=core.CPUPlace())
feed_data = feeder.feed(reader())
self.check_prune_correctness(
method=lstm_net, feed_dict=feed_data, optimizer=optimizer)
@contextlib.contextmanager
def program_scope_guard(self):
prog = fluid.Program()
startup_prog = fluid.Program()
scope = fluid.core.Scope()
with fluid.scope_guard(scope):
with fluid.program_guard(prog, startup_prog):
yield
if __name__ == '__main__':
unittest.main()
| 6,789 | 302 | 69 |
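# Illustrative sketch: the property the test above asserts, in miniature -
# clone(for_test=True) prunes the backward/optimizer ops that minimize()
# appends, so the two clones should match (Fluid 1.x API, assumed installed).
# import paddle.fluid as fluid
# loss = build_net()                                 # hypothetical net builder
# before = fluid.default_main_program().clone(for_test=True)
# fluid.optimizer.SGD(learning_rate=0.01).minimize(loss)
# after = fluid.default_main_program().clone(for_test=True)
# # `before` and `after` should contain the same ops and variables.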
3fd42fb31955c495edab37fede8ea45acb1c582c | 910 | py | Python | docs/conf.py | comforx/spray | b880f41e6afeb69f9ad3b2257965f39411a53f03 | [
"Apache-2.0"
] | 1 | 2019-01-19T15:53:06.000Z | 2019-01-19T15:53:06.000Z | docs/conf.py | comforx/spray | b880f41e6afeb69f9ad3b2257965f39411a53f03 | [
"Apache-2.0"
] | null | null | null | docs/conf.py | comforx/spray | b880f41e6afeb69f9ad3b2257965f39411a53f03 | [
"Apache-2.0"
] | null | null | null | import sys, os
# -- General configuration -----------------------------------------------------
extensions = ['sphinx.ext.todo']
source_suffix = '.rst'
source_encoding = 'utf-8'
master_doc = 'index'
project = u'spray'
copyright = u'2011-2012 spray.cc.'
version = '$VERSION$'
release = '$VERSION$'
exclude_patterns = []
# -- Options for HTML output ---------------------------------------------------
html_theme = 'sprayed'
html_theme_path = ["./_themes"]
html_title = u'spray'
html_logo = u'logo.png'
html_static_path = []
html_use_smartypants = True
html_add_permalinks = None
htmlhelp_basename = 'spraydoc'
todo_include_todos = True
html_copy_source = False
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
'papersize': 'a4paper',
'pointsize': '11pt',
}
latex_documents = [
('index', 'spray.tex', u'spray Documentation', u'spray.cc', 'manual'),
]
| 26 | 80 | 0.586813 | import sys, os
# -- General configuration -----------------------------------------------------
extensions = ['sphinx.ext.todo']
source_suffix = '.rst'
source_encoding = 'utf-8'
master_doc = 'index'
project = u'spray'
copyright = u'2011-2012 spray.cc.'
version = '$VERSION$'
release = '$VERSION$'
exclude_patterns = []
# -- Options for HTML output ---------------------------------------------------
html_theme = 'sprayed'
html_theme_path = ["./_themes"]
html_title = u'spray'
html_logo = u'logo.png'
html_static_path = []
html_use_smartypants = True
html_add_permalinks = None
htmlhelp_basename = 'spraydoc'
todo_include_todos = True
html_copy_source = False
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
'papersize': 'a4paper',
'pointsize': '11pt',
}
latex_documents = [
('index', 'spray.tex', u'spray Documentation', u'spray.cc', 'manual'),
]
| 0 | 0 | 0 |
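A hedged usage note for the Sphinx configuration above (the docs paths are illustrative assumptions, not taken from the repository):
# Build the HTML output with this conf.py, e.g. from the repository root:
#   sphinx-build -b html docs docs/_build/html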
f08fff9e914531c2eaf0d3773e4791d72fb39839 | 913 | py | Python | primes.py | amacuga/pands-problem-set | ac7461f17f7e5e5d5b6e43db675d9c16a2808d3e | [
"Apache-2.0"
] | null | null | null | primes.py | amacuga/pands-problem-set | ac7461f17f7e5e5d5b6e43db675d9c16a2808d3e | [
"Apache-2.0"
] | null | null | null | primes.py | amacuga/pands-problem-set | ac7461f17f7e5e5d5b6e43db675d9c16a2808d3e | [
"Apache-2.0"
] | null | null | null | # Alexandra Macuga, 2019-03-26
# Write a program that asks the user to input a positive integer and tells the user whether or not the number is a prime.
# Adapted from: https://web.microsoftstream.com/video/3ef695e3-9155-4487-b48e-0867834c76ad
# Ask the user for a value of i (positive integer)
i = int(input('Please enter a positive integer: '))
# For a number in a range from 2 to i (positive integer specified by user)
for n in range(2, i):
# Check if integer is divisible by a number from a range
if i % n == 0:
# If an integer is divisible by the number, print the specified message
print('That is not a prime')
# When the condition is true and the integer is divisible by at least one number, break the loop
break
# If the integer is not divisible by any number from a range
else:
# Loop fell through without finding a factor, print the specified message
print('That is a prime.') | 48.052632 | 121 | 0.732749 | # Alexandra Macuga, 2019-03-26
# Write a program that asks the user to input a positive integer and tells the user whether or not the number is a prime.
# Adapted from: https://web.microsoftstream.com/video/3ef695e3-9155-4487-b48e-0867834c76ad
# Ask the user for a value of i (positive integer)
i = int(input('Please enter a positive integer: '))
# For a number in a range from 2 to i (positive integer specified by user)
for n in range(2, i):
# Check if integer is divisible by a number from a range
if i % n == 0:
# If an integer is divisible by the number, print the specified message
print('That is not a prime')
# When the condition is true and the integer is divisible by at least one number, break the loop
break
# If the integer is not divisible by any number from a range
else:
# Loop fell through without finding a factor, print the specified message
print('That is a prime.') | 0 | 0 | 0 |
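A hedged variant of the primality check above; it adds two common refinements (rejecting n < 2, which the original loop would report as prime for n = 1, and stopping at the integer square root). The function name is illustrative:
def is_prime(n):
    # Numbers below 2 are not prime (the original loop falls through to 'prime' for 1).
    if n < 2:
        return False
    # Any factor must appear at or below sqrt(n), so stop there.
    divisor = 2
    while divisor * divisor <= n:
        if n % divisor == 0:
            return False
        divisor += 1
    return True
print(is_prime(29))  # True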
f69b0bb6253043a918c780725e986c1292cd16ae | 1,446 | py | Python | model/utils/similarity_scorer.py | KasparPeterson/A-Hierarchical-Latent-Structure-for-Variational-Conversation-Modeling | 86a9cebe4f24d2397d81c4c263d57d48d17ea76d | [
"MIT"
] | null | null | null | model/utils/similarity_scorer.py | KasparPeterson/A-Hierarchical-Latent-Structure-for-Variational-Conversation-Modeling | 86a9cebe4f24d2397d81c4c263d57d48d17ea76d | [
"MIT"
] | null | null | null | model/utils/similarity_scorer.py | KasparPeterson/A-Hierarchical-Latent-Structure-for-Variational-Conversation-Modeling | 86a9cebe4f24d2397d81c4c263d57d48d17ea76d | [
"MIT"
] | 1 | 2018-07-03T07:47:46.000Z | 2018-07-03T07:47:46.000Z | import nltk
import difflib
from nltk.translate.bleu_score import SmoothingFunction
smoothie = SmoothingFunction().method4
# BLEU and SequenceMatcher return similarities (higher is better); Levenshtein returns an edit distance (lower is better)
if __name__ == '__main__':
hypothesis = 'It is a cat at the room'
reference = 'It is a cat inside the room'
print("Bleu:", get_bleu_score(hypothesis, reference))
print("Secquence:", get_sequence_matcher_score(hypothesis, reference))
print("Levenshtein:", get_levenshtein_score(hypothesis, reference))
| 30.765957 | 74 | 0.623098 | import nltk
import difflib
from nltk.translate.bleu_score import SmoothingFunction
smoothie = SmoothingFunction().method4
# BLEU and SequenceMatcher return similarities (higher is better); Levenshtein returns an edit distance (lower is better)
def get_bleu_score(hypothesis, reference):
try:
return nltk.translate.bleu_score.sentence_bleu(
[reference.split()],
hypothesis.split(),
smoothing_function=smoothie)
    except Exception:  # sentence_bleu can raise on degenerate input; treat as zero similarity
return 0
def get_sequence_matcher_score(hypothesis, reference):
return difflib.SequenceMatcher(None, hypothesis, reference).ratio()
def get_levenshtein_score(s1, s2):
if len(s1) > len(s2):
s1, s2 = s2, s1
distances = range(len(s1) + 1)
for index2, char2 in enumerate(s2):
newDistances = [index2 + 1]
for index1, char1 in enumerate(s1):
if char1 == char2:
newDistances.append(distances[index1])
else:
newDistances.append(1 + min((distances[index1],
distances[index1 + 1],
newDistances[-1])))
distances = newDistances
return distances[-1]
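# A hedged companion helper (illustrative, not part of the original module):
# converts the Levenshtein edit distance above into a 0..1 similarity so that,
# like the BLEU and SequenceMatcher scores, higher means more similar.
def get_levenshtein_similarity(s1, s2):
    longest = max(len(s1), len(s2))
    if longest == 0:
        return 1.0  # two empty strings are identical
    return 1.0 - get_levenshtein_score(s1, s2) / longest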
if __name__ == '__main__':
hypothesis = 'It is a cat at the room'
reference = 'It is a cat inside the room'
print("Bleu:", get_bleu_score(hypothesis, reference))
print("Secquence:", get_sequence_matcher_score(hypothesis, reference))
print("Levenshtein:", get_levenshtein_score(hypothesis, reference))
| 903 | 0 | 68 |
93e2d415765ad9ccffdebc7e31e8dc3e85bd50ab | 2,052 | py | Python | test/test_page.py | aspose-diagram-cloud/aspose-diagram-cloud-python | 58254fccb833fb7e3a0453407e21b55edb96b81c | [
"MIT"
] | 3 | 2019-12-10T08:42:21.000Z | 2022-02-04T19:14:02.000Z | test/test_page.py | aspose-diagram-cloud/aspose-diagram-cloud-python | 58254fccb833fb7e3a0453407e21b55edb96b81c | [
"MIT"
] | null | null | null | test/test_page.py | aspose-diagram-cloud/aspose-diagram-cloud-python | 58254fccb833fb7e3a0453407e21b55edb96b81c | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Aspose.Diagram Cloud API Reference
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 3.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
#from asposediagramcloud.apis.diagram_api import DiagramApi
#from asposediagramcloud.rest import ApiException
#import asposediagramcloud
import os
import sys
import unittest
import test_base
from asposediagramcloud.models import *
ABSPATH = os.path.abspath(os.path.realpath(os.path.dirname(__file__)) + "/..")
sys.path.append(ABSPATH)
localtestFile = "testData/FileUpload.vdx"
storageTestFOLDER = "SDKTests\\Python"
fileName="pageTest.vsdx"
class TestPage(unittest.TestCase):
""" DiagramApi unit test stubs """
def test_create_new(self):
"""
Test case for create_new
        Create an empty file in the specified format.
"""
folder = storageTestFOLDER
is_overwrite = "true"
result = self.api.create_new(fileName, folder=folder, is_overwrite=is_overwrite)
self.assertIsNotNone(result.created, 'Error has occurred while create file')
pass
if __name__ == '__main__':
unittest.main()
| 26.307692 | 105 | 0.698343 | # coding: utf-8
"""
Aspose.Diagram Cloud API Reference
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 3.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
#from asposediagramcloud.apis.diagram_api import DiagramApi
#from asposediagramcloud.rest import ApiException
#import asposediagramcloud
import os
import sys
import unittest
import test_base
from asposediagramcloud.models import *
ABSPATH = os.path.abspath(os.path.realpath(os.path.dirname(__file__)) + "/..")
sys.path.append(ABSPATH)
localtestFile = "testData/FileUpload.vdx"
storageTestFOLDER = "SDKTests\\Python"
fileName="pageTest.vsdx"
class TestPage(unittest.TestCase):
""" DiagramApi unit test stubs """
def setUp(self):
self.api = test_base.GetDiagramApi()
self.storageApi = test_base.GetStorageApi()
def tearDown(self):
pass
def test_create_new(self):
"""
Test case for create_new
        Create an empty file in the specified format.
"""
folder = storageTestFOLDER
is_overwrite = "true"
result = self.api.create_new(fileName, folder=folder, is_overwrite=is_overwrite)
self.assertIsNotNone(result.created, 'Error has occurred while create file')
pass
def test_put_new(self):
result=self.api.put_new_page(fileName, "newPage", folder = storageTestFOLDER)
self.assertTrue(result.is_success)
pass
def test_post_page_setup(self):
setting=PageSetting()
setting.page_width=2
setting.page_height=2
result=self.api.post_page_setup(fileName, "Page-0", setting,folder = storageTestFOLDER)
self.assertTrue(result.is_success)
pass
def test_get_pages(self):
result=self.api.get_pages(fileName, folder = storageTestFOLDER)
self.assertTrue(len(result.model)>0)
pass
if __name__ == '__main__':
unittest.main()
| 632 | 0 | 135 |
db0f1d7eb5314ad8755c331f7baacae6965c2ce5 | 775 | py | Python | basics/word_vector_data_frame.py | eshanMewantha/natural-language-processing | 0071d96106d43e2c263d179cc78ba82e3450fda4 | [
"MIT"
] | 1 | 2019-07-06T05:17:08.000Z | 2019-07-06T05:17:08.000Z | basics/word_vector_data_frame.py | eshanMewantha/natural-language-processing | 0071d96106d43e2c263d179cc78ba82e3450fda4 | [
"MIT"
] | null | null | null | basics/word_vector_data_frame.py | eshanMewantha/natural-language-processing | 0071d96106d43e2c263d179cc78ba82e3450fda4 | [
"MIT"
] | null | null | null | # Simple python implementation of creating a pandas data frame with word vectors
import pandas as pd
from collections import Counter
sayings = [
"Rose is a rose is a rose is a rose.",
"We are going to need a bigger boat.",
"Huston, we have a problem"
]
unique_words = set()
for saying in sayings:
unique_words |= set(saying.split())
all_rows = {}
row_number = 0
for saying in sayings:
word_vector = {}
frequencies = Counter(saying.split())
for word in unique_words:
if word in frequencies.keys():
word_vector[word] = frequencies[word]
else:
word_vector[word] = 0
all_rows[row_number] = word_vector
row_number += 1
data_frame = pd.DataFrame.from_dict(all_rows, orient='index')
print(data_frame)
| 25 | 80 | 0.676129 | # Simple python implementation of creating a pandas data frame with word vectors
import pandas as pd
from collections import Counter
sayings = [
"Rose is a rose is a rose is a rose.",
"We are going to need a bigger boat.",
"Huston, we have a problem"
]
unique_words = set()
for saying in sayings:
unique_words |= set(saying.split())
all_rows = {}
row_number = 0
for saying in sayings:
word_vector = {}
frequencies = Counter(saying.split())
for word in unique_words:
if word in frequencies.keys():
word_vector[word] = frequencies[word]
else:
word_vector[word] = 0
all_rows[row_number] = word_vector
row_number += 1
data_frame = pd.DataFrame.from_dict(all_rows, orient='index')
print(data_frame)
| 0 | 0 | 0 |
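A hedged, more compact sketch of the same bag-of-words construction: one Counter per saying, with pandas aligning the vocabulary and filling the gaps (the variable name is illustrative):
import pandas as pd
from collections import Counter
sayings = [
    "Rose is a rose is a rose is a rose.",
    "We are going to need a bigger boat.",
    "Houston, we have a problem"
]
# A list of dicts (Counter subclasses dict) becomes one row per saying.
compact_frame = pd.DataFrame([Counter(s.split()) for s in sayings]).fillna(0).astype(int)
print(compact_frame)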
a27b43c5afda4a8f00affd838fa4143b5b41b88e | 328 | py | Python | mozillians/announcements/templatetags/helpers.py | divyamoncy/mozillians | d53d1d05d1f05b74f8533541e37083dcb89b29a8 | [
"BSD-3-Clause"
] | 202 | 2015-01-14T10:19:55.000Z | 2021-12-11T06:04:16.000Z | mozillians/announcements/templatetags/helpers.py | divyamoncy/mozillians | d53d1d05d1f05b74f8533541e37083dcb89b29a8 | [
"BSD-3-Clause"
] | 2,924 | 2015-01-07T11:27:32.000Z | 2021-01-19T14:05:17.000Z | mozillians/announcements/templatetags/helpers.py | divyamoncy/mozillians | d53d1d05d1f05b74f8533541e37083dcb89b29a8 | [
"BSD-3-Clause"
] | 270 | 2015-01-02T18:31:01.000Z | 2021-02-17T20:57:44.000Z | from django_jinja import library
from mozillians.announcements.models import Announcement
@library.global_function
def latest_announcement():
"""Return the latest published announcement or None."""
if Announcement.objects.published().count():
return Announcement.objects.published().latest()
return None
| 25.230769 | 59 | 0.765244 | from django_jinja import library
from mozillians.announcements.models import Announcement
@library.global_function
def latest_announcement():
"""Return the latest published announcement or None."""
if Announcement.objects.published().count():
return Announcement.objects.published().latest()
return None
| 0 | 0 | 0 |
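A hedged usage sketch for the Jinja global registered above (the template snippet and the `title` attribute are assumptions for illustration, not taken from the project):
# In a django-jinja template the registered global is callable directly:
#   {% if latest_announcement() %}
#     {{ latest_announcement().title }}
#   {% endif %}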
4afd7e95b95b47b26f79e2fae7cc7645e5b5784f | 1,099 | py | Python | test/unit/optimizer/test_timer.py | rozlana-g/FEDOT | a909d6c0ef481cc1cf7a5f10f7b1292d8d2def5c | [
"BSD-3-Clause"
] | 358 | 2020-06-11T09:34:53.000Z | 2022-03-31T12:56:22.000Z | test/unit/optimizer/test_timer.py | rozlana-g/FEDOT | a909d6c0ef481cc1cf7a5f10f7b1292d8d2def5c | [
"BSD-3-Clause"
] | 467 | 2020-06-11T13:49:45.000Z | 2022-03-31T14:19:48.000Z | test/unit/optimizer/test_timer.py | rozlana-g/FEDOT | a909d6c0ef481cc1cf7a5f10f7b1292d8d2def5c | [
"BSD-3-Clause"
] | 48 | 2020-07-13T14:50:45.000Z | 2022-03-26T09:37:13.000Z | import datetime
import time
from fedot.core.optimisers.timer import OptimisationTimer
from fedot.core.pipelines.tuning.timer import TunerTimer
| 29.702703 | 78 | 0.66606 | import datetime
import time
from fedot.core.optimisers.timer import OptimisationTimer
from fedot.core.pipelines.tuning.timer import TunerTimer
def test_composition_timer():
generation_num = 100
reached = False
start = datetime.datetime.now()
with OptimisationTimer(timeout=datetime.timedelta(minutes=0.01)) as timer:
for generation in range(generation_num):
time.sleep(1)
if timer.is_time_limit_reached(generation_num=generation):
reached = True
break
spent_time = (datetime.datetime.now() - start).seconds
assert reached and spent_time == 1
def test_tuner_timer():
iter_number = 100
time_limit = datetime.timedelta(minutes=0.01)
start = datetime.datetime.now()
reached = False
with TunerTimer(timeout=time_limit) as timer:
for _ in range(iter_number):
time.sleep(1)
if timer.is_time_limit_reached():
reached = True
break
spent_time = (datetime.datetime.now() - start).seconds
assert reached and spent_time == 1
| 907 | 0 | 46 |
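The tests above exercise a timer-as-context-manager pattern; a minimal generic sketch of that pattern, independent of FEDOT, might look like this (the class name is illustrative):
import datetime
class SimpleTimer:
    def __init__(self, timeout: datetime.timedelta):
        self.timeout = timeout
    def __enter__(self):
        self.start = datetime.datetime.now()
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        return False  # never swallow exceptions
    def is_time_limit_reached(self) -> bool:
        return datetime.datetime.now() - self.start >= self.timeout
with SimpleTimer(timeout=datetime.timedelta(seconds=0.1)) as timer:
    while not timer.is_time_limit_reached():
        pass  # do one unit of work per iteration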
6e82d4864563c11b0cd7a5dd970294a242f9c8ab | 2,635 | py | Python | smok/extras/event_database/base.py | smok-serwis/smok-client | a97b3dac454569f55a8a28a1cac44ae04e3e9cde | [
"MIT"
] | null | null | null | smok/extras/event_database/base.py | smok-serwis/smok-client | a97b3dac454569f55a8a28a1cac44ae04e3e9cde | [
"MIT"
] | 1 | 2021-02-03T14:58:35.000Z | 2021-02-13T17:25:30.000Z | smok/extras/event_database/base.py | smok-serwis/smok-client | a97b3dac454569f55a8a28a1cac44ae04e3e9cde | [
"MIT"
] | null | null | null | import typing as tp
from abc import ABCMeta, abstractmethod
from smok.predicate.event import Event
| 25.833333 | 86 | 0.618216 | import typing as tp
from abc import ABCMeta, abstractmethod
from smok.predicate.event import Event
class BaseEventSynchronization(metaclass=ABCMeta):
__slots__ = ()
@abstractmethod
def get_events(self) -> tp.List[Event]:
"""
:return: a list of events to synchronize
"""
@abstractmethod
def acknowledge(self, *uuids: str) -> None:
"""
Called by the communicator, when sync succeeds
:param uuids: UUIDs assigned to events"""
def negative_acknowledge(self) -> None:
"""Called by the communicator, when sync fails"""
class BaseEventDatabase(metaclass=ABCMeta):
def checkpoint(self) -> None:
"""
Called by the communicator thread, once every about 60 seconds.
May be called much more often, it's the function responsibility to throttle.
"""
@abstractmethod
def get_open_events(self) -> tp.Iterator[Event]:
"""
:return: an iterator with all open events
"""
@abstractmethod
def get_all_events(self) -> tp.Iterator[Event]:
"""
:return: all events kept in the database
"""
@abstractmethod
def close_event(self, event: Event) -> None:
"""
Close provided event
:param event: event to close
"""
@abstractmethod
def add_event(self, event: Event) -> None:
"""
Register a new event in the database.
Can be called by any thread.
:param event: event to register
"""
@abstractmethod
def get_events_to_sync(self) -> tp.Optional[BaseEventSynchronization]:
"""
At most a single instance of BaseEventSynchronization will be alive at a time.
:return: object to sync, or None if there's nothing to sync.
"""
@abstractmethod
def set_cache(self, predicate_id: str, cache) -> None:
"""
Store predicate's internal data. Do it in a way that will survive restarts.
"""
@abstractmethod
def get_cache(self, predicate_id: str) -> tp.Any:
"""
Return predicate's internal data
:raises KeyError: predicate internal data not found
"""
@abstractmethod
def on_predicate_deleted(self, predicate_id: str) -> None:
"""
Called when a predicate is deleted.
Called by communicator thread.
:param predicate_id: ID of the predicate that was deleted
"""
@abstractmethod
def clear_closed_and_synced_events(self) -> None:
"""
Clear all events that were both closed and are already on the server
"""
| 0 | 2,487 | 46 |
e834ca743ecf2afb241c4a48ffc0d0700d49053c | 811 | py | Python | script/CompanyX-Problem.py | Ingenjoy/Linear-Programming-With-Python | 320a8956baa369dd83f5963230aafadcddded3b4 | [
"MIT"
] | 1 | 2022-03-19T16:19:53.000Z | 2022-03-19T16:19:53.000Z | script/CompanyX-Problem.py | Ingenjoy/Linear-Programming-With-Python | 320a8956baa369dd83f5963230aafadcddded3b4 | [
"MIT"
] | null | null | null | script/CompanyX-Problem.py | Ingenjoy/Linear-Programming-With-Python | 320a8956baa369dd83f5963230aafadcddded3b4 | [
"MIT"
] | null | null | null | from scipy.optimize import linprog
import numpy as np
# Objective function
z = np.array([300,500,200])
expense = 75000
# Constraints
C = np.array([
[ 10, 7.5, 4], #C1
[ 0, 10, 0], #C2
[0.5, 0.4, 0.5], #C3
[ 0, 0.4, 0], #C4
[0.5, 0.1, 0.5], #C5
[0.4, 0.2, 0.4], #C6
[ 1, 1.5, 0.5], #C7
[ 1, 0, 0], #C8
[ 0, 1, 0], #C9
[ 0, 0, 1] #C10
])
b = np.array([4350, 2500, 280, 140, 280, 140, 700, 300, 180, 400])
# Bounds
x1 = (0, None)
x2 = (0, None)
x3 = (0, None)
# Solution: linprog minimizes, so the objective is passed as -z to maximize profit.
# Note: method='simplex' is legacy; newer SciPy releases prefer method='highs'.
sol = linprog(-z, A_ub = C, b_ub = b, bounds = (x1, x2, x3), method='simplex')
# Monthly profit: undo the sign flip on the objective, then subtract fixed expenses.
profit = (sol.fun*-1) - expense
print(f"x1 = {sol.x[0]}, x2 = {sol.x[1]}, x3 = {sol.x[2]}, z = {profit}") | 23.171429 | 78 | 0.448829 | from scipy.optimize import linprog
import numpy as np
# Objective function
z = np.array([300,500,200])
expense = 75000
# Constraints
C = np.array([
[ 10, 7.5, 4], #C1
[ 0, 10, 0], #C2
[0.5, 0.4, 0.5], #C3
[ 0, 0.4, 0], #C4
[0.5, 0.1, 0.5], #C5
[0.4, 0.2, 0.4], #C6
[ 1, 1.5, 0.5], #C7
[ 1, 0, 0], #C8
[ 0, 1, 0], #C9
[ 0, 0, 1] #C10
])
b = np.array([4350, 2500, 280, 140, 280, 140, 700, 300, 180, 400])
# Bounds
x1 = (0, None)
x2 = (0, None)
x3 = (0, None)
# Solution: linprog minimizes, so the objective is passed as -z to maximize profit.
# Note: method='simplex' is legacy; newer SciPy releases prefer method='highs'.
sol = linprog(-z, A_ub = C, b_ub = b, bounds = (x1, x2, x3), method='simplex')
# Monthly profit: undo the sign flip on the objective, then subtract fixed expenses.
profit = (sol.fun*-1) - expense
print(f"x1 = {sol.x[0]}, x2 = {sol.x[1]}, x3 = {sol.x[2]}, z = {profit}") | 0 | 0 | 0 |
4326f7a82279646c6831b022a3b6cce31baade64 | 4,638 | py | Python | src/real_estate_scrapers/concrete_items/__init__.py | tuw-eeg/real-estate-scrapers | d86e304119f7abc5a9702044fcc08a2387c7e5ac | [
"MIT"
] | null | null | null | src/real_estate_scrapers/concrete_items/__init__.py | tuw-eeg/real-estate-scrapers | d86e304119f7abc5a9702044fcc08a2387c7e5ac | [
"MIT"
] | null | null | null | src/real_estate_scrapers/concrete_items/__init__.py | tuw-eeg/real-estate-scrapers | d86e304119f7abc5a9702044fcc08a2387c7e5ac | [
"MIT"
] | null | null | null | """
Exposing concrete items dynamically.
Makes it possible to add support for a new website just by
creating a new Python module under this package, and declaring
a concrete implementation for ``RealEstateHomePage``, ``RealEstateListPage`` and ``RealEstatePage``.
"""
import importlib.util
import inspect
import pkgutil
from pathlib import Path
from typing import Dict, List, Tuple, Type, TypeVar
from loguru import logger
from web_poet import WebPage # type: ignore
from real_estate_scrapers.items import RealEstateHomePage, RealEstateListPage, RealEstatePage
T = TypeVar("T", bound=WebPage)
def _get_concrete_class(class_tuples: List[Tuple[str, Type[T]]], abstract_class: Type[T]) -> Type[T]:
"""
Returns the concrete implementation of the specified ``abstract_class``, choosing from ``class_tuples``.
``class_tuples`` can be easily obtained by invoking:
>>> inspect.getmembers(module, inspect.isclass)
Args:
class_tuples: List of tuples of the form (module_name, class_name)
abstract_class: The abstract class whose concrete implementation is to be found.
Returns: The concrete implementation of the specified ``abstract_class``. Always the first match gets returned.
Raises: ``ValueError`` if no concrete implementation is found.
"""
for _, cls in class_tuples:
if issubclass(cls, abstract_class) and cls is not abstract_class:
return cls
raise ValueError(f"No concrete implementation found for {abstract_class.__name__}")
# Used to have a grouping of URLs per page, so that request types can be specified dynamically (e.g. Selenium or plain)
_start_url_dict: Dict[Type[RealEstateHomePage], List[str]] = {}
# Will be assigned to the ``SCRAPY_POET_OVERRIDES`` class variable in the ``RealEstateSpider``
_scrapy_poet_overrides: Dict[str, Dict[Type[WebPage], Type[WebPage]]] = {}
# Loading concrete implementations from the file system automagically
_dirpath = Path(__file__).parent
# Iterates over each module in this package
# and registers the concrete crawling logic implementations
for module_info in pkgutil.iter_modules([str(_dirpath)]):
# Load module which declares concrete implementation
# for ``RealEstateListPage`` and ``RealEstatePage``
full_module_name = f"{__package__}.{module_info.name}"
full_module_path = _dirpath / f"{module_info.name}.py"
spec = importlib.util.spec_from_file_location(full_module_name, str(full_module_path))
module = importlib.util.module_from_spec(spec) # type: ignore
spec.loader.exec_module(module) # type: ignore
# Extract classes
classes = inspect.getmembers(module, inspect.isclass)
home_page_class: Type[RealEstateHomePage] = _get_concrete_class(classes, RealEstateHomePage)
if not home_page_class.should_scrape():
logger.debug(f"Skipping registration of {home_page_class.domain()}, as ``should_scrape`` returned False.")
continue
list_page_class: Type[RealEstateListPage] = _get_concrete_class(classes, RealEstateListPage)
page_class: Type[RealEstatePage] = _get_concrete_class(classes, RealEstatePage)
domain_specific_overrides = {
RealEstateHomePage: home_page_class,
RealEstateListPage: list_page_class,
RealEstatePage: page_class,
}
# Sets the override dict in ``SCRAPY_OVERRIDES`` so that ``scrapy_poet.InjectionMiddleware`` can inject the proper
# concrete implementation for each page type on a per-domain basis
domain = home_page_class.domain()
_scrapy_poet_overrides[domain] = domain_specific_overrides
logger.debug(f"Registered overrides for {domain}: {domain_specific_overrides}")
# Register the static (hard-coded) start urls for this domain,
# to be used as entrypoint(s) to scrape urls to ``RealEstateListPage``s
_start_url_dict[home_page_class] = home_page_class.start_urls()
logger.info(f"Loaded {full_module_name} for {domain}")
def get_scrapy_poet_overrides() -> Dict[str, Dict[Type[WebPage], Type[WebPage]]]:
"""
Returns: Configuration to override the exact ``RealEstateListPage``
and ``RealEstatePage`` implementation dynamically
based on the scraped domain.
"""
return _scrapy_poet_overrides
def get_start_urls() -> List[str]:
"""
Returns: The start urls for the scrapy crawler.
"""
return [url for url_list in _start_url_dict.values() for url in url_list]
def get_start_url_dict() -> Dict[Type[RealEstateHomePage], List[str]]:
"""
Returns: The start urls for the scrapy crawler, grouped by subclasses of ``RealEstateListPage``.
"""
return _start_url_dict
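# A hedged, stripped-down restatement of the discovery pattern used above, for
# readers adapting it elsewhere (the function and its parameters are
# illustrative placeholders, not part of this project's public API):
def discover_subclasses(package_dir: Path, package_name: str, base: type) -> List[type]:
    found: List[type] = []
    for info in pkgutil.iter_modules([str(package_dir)]):
        module_spec = importlib.util.spec_from_file_location(
            f"{package_name}.{info.name}", str(package_dir / f"{info.name}.py")
        )
        candidate = importlib.util.module_from_spec(module_spec)  # type: ignore
        module_spec.loader.exec_module(candidate)  # type: ignore
        for _, cls in inspect.getmembers(candidate, inspect.isclass):
            if issubclass(cls, base) and cls is not base:
                found.append(cls)
    return found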
| 42.163636 | 119 | 0.744071 | """
Exposing concrete items dynamically.
Makes it possible to add support for a new website just by
creating a new Python module under this package, and declaring
a concrete implementation for ``RealEstateHomePage``, ``RealEstateListPage`` and ``RealEstatePage``.
"""
import importlib.util
import inspect
import pkgutil
from pathlib import Path
from typing import Dict, List, Tuple, Type, TypeVar
from loguru import logger
from web_poet import WebPage # type: ignore
from real_estate_scrapers.items import RealEstateHomePage, RealEstateListPage, RealEstatePage
T = TypeVar("T", bound=WebPage)
def _get_concrete_class(class_tuples: List[Tuple[str, Type[T]]], abstract_class: Type[T]) -> Type[T]:
"""
Returns the concrete implementation of the specified ``abstract_class``, choosing from ``class_tuples``.
``class_tuples`` can be easily obtained by invoking:
>>> inspect.getmembers(module, inspect.isclass)
Args:
class_tuples: List of tuples of the form (module_name, class_name)
abstract_class: The abstract class whose concrete implementation is to be found.
Returns: The concrete implementation of the specified ``abstract_class``. Always the first match gets returned.
Raises: ``ValueError`` if no concrete implementation is found.
"""
for _, cls in class_tuples:
if issubclass(cls, abstract_class) and cls is not abstract_class:
return cls
raise ValueError(f"No concrete implementation found for {abstract_class.__name__}")
# Used to have a grouping of URLs per page, so that request types can be specified dynamically (e.g. Selenium or plain)
_start_url_dict: Dict[Type[RealEstateHomePage], List[str]] = {}
# Will be assigned to the ``SCRAPY_POET_OVERRIDES`` class variable in the ``RealEstateSpider``
_scrapy_poet_overrides: Dict[str, Dict[Type[WebPage], Type[WebPage]]] = {}
# Loading concrete implementations from the file system automagically
_dirpath = Path(__file__).parent
# Iterates over each module in this package
# and registers the concrete crawling logic implementations
for module_info in pkgutil.iter_modules([str(_dirpath)]):
# Load module which declares concrete implementation
# for ``RealEstateListPage`` and ``RealEstatePage``
full_module_name = f"{__package__}.{module_info.name}"
full_module_path = _dirpath / f"{module_info.name}.py"
spec = importlib.util.spec_from_file_location(full_module_name, str(full_module_path))
module = importlib.util.module_from_spec(spec) # type: ignore
spec.loader.exec_module(module) # type: ignore
# Extract classes
classes = inspect.getmembers(module, inspect.isclass)
home_page_class: Type[RealEstateHomePage] = _get_concrete_class(classes, RealEstateHomePage)
if not home_page_class.should_scrape():
logger.debug(f"Skipping registration of {home_page_class.domain()}, as ``should_scrape`` returned False.")
continue
list_page_class: Type[RealEstateListPage] = _get_concrete_class(classes, RealEstateListPage)
page_class: Type[RealEstatePage] = _get_concrete_class(classes, RealEstatePage)
domain_specific_overrides = {
RealEstateHomePage: home_page_class,
RealEstateListPage: list_page_class,
RealEstatePage: page_class,
}
# Sets the override dict in ``SCRAPY_OVERRIDES`` so that ``scrapy_poet.InjectionMiddleware`` can inject the proper
# concrete implementation for each page type on a per-domain basis
domain = home_page_class.domain()
_scrapy_poet_overrides[domain] = domain_specific_overrides
logger.debug(f"Registered overrides for {domain}: {domain_specific_overrides}")
# Register the static (hard-coded) start urls for this domain,
# to be used as entrypoint(s) to scrape urls to ``RealEstateListPage``s
_start_url_dict[home_page_class] = home_page_class.start_urls()
logger.info(f"Loaded {full_module_name} for {domain}")
def get_scrapy_poet_overrides() -> Dict[str, Dict[Type[WebPage], Type[WebPage]]]:
"""
Returns: Configuration to override the exact ``RealEstateListPage``
and ``RealEstatePage`` implementation dynamically
based on the scraped domain.
"""
return _scrapy_poet_overrides
def get_start_urls() -> List[str]:
"""
Returns: The start urls for the scrapy crawler.
"""
return [url for url_list in _start_url_dict.values() for url in url_list]
def get_start_url_dict() -> Dict[Type[RealEstateHomePage], List[str]]:
"""
Returns: The start urls for the scrapy crawler, grouped by subclasses of ``RealEstateListPage``.
"""
return _start_url_dict
| 0 | 0 | 0 |
1fb6382f646275852ac011441c74f5fb2ad358f9 | 643 | py | Python | alembic/versions/11b80498abeb_add_foreign_key.py | JuanDM93/fcc-fastapi-demo | 7d20f91fa96989d22426632c1ab2550f62898789 | [
"MIT"
] | null | null | null | alembic/versions/11b80498abeb_add_foreign_key.py | JuanDM93/fcc-fastapi-demo | 7d20f91fa96989d22426632c1ab2550f62898789 | [
"MIT"
] | null | null | null | alembic/versions/11b80498abeb_add_foreign_key.py | JuanDM93/fcc-fastapi-demo | 7d20f91fa96989d22426632c1ab2550f62898789 | [
"MIT"
] | null | null | null | """add foreign key
Revision ID: 11b80498abeb
Revises: bce514e0541f
Create Date: 2021-11-08 18:26:51.860396
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '11b80498abeb'
down_revision = 'bce514e0541f'
branch_labels = None
depends_on = None
| 20.09375 | 77 | 0.679627 | """add foreign key
Revision ID: 11b80498abeb
Revises: bce514e0541f
Create Date: 2021-11-08 18:26:51.860396
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '11b80498abeb'
down_revision = 'bce514e0541f'
branch_labels = None
depends_on = None
def upgrade():
op.add_column('posts', sa.Column('owner_id', sa.Integer, nullable=False))
op.create_foreign_key(
'post_users_fk',
'posts', 'users',
['owner_id'], ['id'],
ondelete='CASCADE'
)
def downgrade():
op.drop_constraint('post_users_fk', 'posts')
op.drop_column('posts', 'owner_id')
| 296 | 0 | 46 |
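A hedged caveat for migrations like the one above: adding a NOT NULL column to a table that already contains rows typically fails on most databases. A common workaround (sketched below; the backfill statement is an illustrative placeholder) adds the column as nullable, backfills, then tightens the constraint:
def upgrade():
    op.add_column('posts', sa.Column('owner_id', sa.Integer, nullable=True))
    # Backfill existing rows before enforcing NOT NULL (placeholder value):
    # op.execute("UPDATE posts SET owner_id = 1 WHERE owner_id IS NULL")
    op.alter_column('posts', 'owner_id', nullable=False)
    op.create_foreign_key(
        'post_users_fk',
        'posts', 'users',
        ['owner_id'], ['id'],
        ondelete='CASCADE'
    )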
6be9c0d4bcb7421cc79e552d36238a9c6a75fcb0 | 13,128 | py | Python | bak_to_fossil_3.py | wmelvin/bak-to-git | 1ebea7d4b3c14cabc5981dc8d87fe920f30e0b56 | [
"MIT"
] | null | null | null | bak_to_fossil_3.py | wmelvin/bak-to-git | 1ebea7d4b3c14cabc5981dc8d87fe920f30e0b56 | [
"MIT"
] | null | null | null | bak_to_fossil_3.py | wmelvin/bak-to-git | 1ebea7d4b3c14cabc5981dc8d87fe920f30e0b56 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# ---------------------------------------------------------------------
# bak_to_fossil_3.py
#
# Step 3 (alternate): Read data from a CSV file that was edited in
# step 2, where commit messages were added and files to be skipped
# were flagged. Run fossil (instead of git) to commit each change
# with the specified date and time.
#
# This script is only for the initial creation and population of a new
# (empty) Fossil repository.
#
# The Fossil repository file is created (fossil init) by this script.
# It must not already exist.
#
# The directory for the repository will be created by this script if
# it does not exist.
#
# ---------------------------------------------------------------------
import argparse
import csv
import os
import subprocess
import sys
from collections import namedtuple
from datetime import datetime
from pathlib import Path
from textwrap import dedent
from typing import List
from bak_to_common import (
ask_to_continue,
datetime_fromisoformat,
log_fmt,
plain_quotes,
split_quoted,
strip_outer_quotes,
)
AppOptions = namedtuple(
"AppOptions",
"input_csv, repo_dir, repo_name, init_date, log_dir, fossil_exe, "
+ "filter_file",
)
CommitProps = namedtuple(
"CommitProps",
"sort_key, full_name, datetime_tag, base_name, "
+ "commit_message, add_command",
)
run_dt = datetime.now()
log_path = Path.cwd() / f"log-bak_to_fossil_3-{run_dt:%Y%m%d_%H%M%S}.txt"
filter_list = []
if __name__ == "__main__":
sys.exit(main(sys.argv))
| 29.108647 | 79 | 0.555683 | #!/usr/bin/env python3
# ---------------------------------------------------------------------
# bak_to_fossil_3.py
#
# Step 3 (alternate): Read data from a CSV file that was edited in
# step 2, where commit messages were added and files to be skipped
# were flagged. Run fossil (instead of git) to commit each change
# with the specified date and time.
#
# This script is only for the initial creation and population of a new
# (empty) Fossil repository.
#
# The Fossil repository file is created (fossil init) by this script.
# It must not already exist.
#
# The directory for the repository will be created by this script if
# it does not exist.
#
# ---------------------------------------------------------------------
import argparse
import csv
import os
import subprocess
import sys
from collections import namedtuple
from datetime import datetime
from pathlib import Path
from textwrap import dedent
from typing import List
from bak_to_common import (
ask_to_continue,
datetime_fromisoformat,
log_fmt,
plain_quotes,
split_quoted,
strip_outer_quotes,
)
AppOptions = namedtuple(
"AppOptions",
"input_csv, repo_dir, repo_name, init_date, log_dir, fossil_exe, "
+ "filter_file",
)
CommitProps = namedtuple(
"CommitProps",
"sort_key, full_name, datetime_tag, base_name, "
+ "commit_message, add_command",
)
run_dt = datetime.now()
log_path = Path.cwd() / f"log-bak_to_fossil_3-{run_dt:%Y%m%d_%H%M%S}.txt"
filter_list = []
def write_log(msg):
print(msg)
with open(log_path, "a") as log_file:
log_file.write(f"{msg}\n")
def get_date_string(dt_tag):
#
# Tag format: yyyymmdd_hhmmss
# index: 012345678901234
#
iso_fmt = "{0}-{1}-{2}T{3}:{4}:{5}".format(
dt_tag[:4],
dt_tag[4:6],
dt_tag[6:8],
dt_tag[9:11],
dt_tag[11:13],
dt_tag[13:],
)
# Convert to datetime and back to string as a validity check.
commit_dt = datetime_fromisoformat(iso_fmt)
return commit_dt.strftime("%Y-%m-%dT%H:%M:%S")
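# Worked example of the tag parsing above (illustrative): the tag
# "20210714_162001" yields "2021-07-14T16:20:01"; characters 0-3 are the year,
# 4-5 the month, 6-7 the day, 9-10 the hour, 11-12 the minute, 13-14 the
# second, and index 8 is the underscore separating date and time.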
def copy_filtered_content(src_name, dst_name):
with open(src_name, "r") as src_file:
with open(dst_name, "w") as dst_file:
for num, line in enumerate(src_file.readlines(), start=1):
for filter_item in filter_list:
if filter_item[0] in line:
write_log(f"FILTER {src_name} ({num}): {filter_item}")
line = line.replace(filter_item[0], filter_item[1])
dst_file.write(line)
def run_fossil(cmds, run_dir):
result = subprocess.run(
cmds,
cwd=run_dir,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
)
write_log(f"STDOUT: {result.stdout.strip()}")
assert result.returncode == 0
def fossil_create_repo(opts: AppOptions, do_run: bool):
d = Path(opts.repo_dir)
p = d.joinpath(opts.repo_name)
# Only proceed if the Fossil repository does not already exist.
if p.exists():
sys.stderr.write("Fossil repository already exists: {0}\n".format(p))
sys.exit(1)
if not d.exists():
write_log(f"mkdir {d}")
d.mkdir()
cmds = [
opts.fossil_exe,
"init",
opts.repo_name,
"--date-override",
opts.init_date,
]
write_log(f"RUN: {log_fmt(cmds)}")
if do_run:
run_fossil(cmds, opts.repo_dir)
def fossil_open_repo(opts: AppOptions, do_run: bool):
cmds = [opts.fossil_exe, "open", opts.repo_name]
write_log(f"RUN: {log_fmt(cmds)}")
if do_run:
run_fossil(cmds, opts.repo_dir)
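# Taken together, the two helpers above amount to the following shell
# commands, run from inside opts.repo_dir (illustrative equivalent):
#   fossil init <repo_name> --date-override <iso-datetime>
#   fossil open <repo_name>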
def load_filter_list(filter_file):
if filter_file is None:
return
with open(filter_file) as f:
lines = f.readlines()
for line in lines:
s = line.strip()
if 0 < len(s) and not s.startswith("#"):
a = s.split(",")
assert 2 == len(a)
filter_item = (strip_outer_quotes(a[0]), strip_outer_quotes(a[1]))
filter_list.append(filter_item)
def get_opts(argv) -> AppOptions:
ap = argparse.ArgumentParser(
description="BakToGit Step 3 (alternate): Use fossil instead of "
+ "git..."
)
# TODO: Fill in description.
ap.add_argument(
"input_csv",
action="store",
help="Path to CSV file, manually edited in step 2 to add commit "
+ "messages.",
)
ap.add_argument(
"repo_dir",
action="store",
help="Path to repository directory. This should be a new (empty) "
+ "repository, or one where the first commit from the wipbak files "
+ "is an appropriate next commit.",
)
ap.add_argument(
"--repo-name",
dest="repo_name",
action="store",
help="Name of the fossil repository (usually has a .fossil "
+ "extension).",
)
ap.add_argument(
"--init-date",
dest="init_date",
action="store",
help="Date and time to use for fossil repository initialization. "
+ "This should be at, or before, the time of the first source (.bak) "
+ "file to commit. Use the ISO 8601 format for date and time "
+ "(yyyy-mm-ddThh:mm:ss). Example: 2021-07-14T16:20:01",
)
ap.add_argument(
"--log-dir",
dest="log_dir",
action="store",
help="Output directory for log files.",
)
ap.add_argument(
"--fossil-exe",
dest="fossil_exe",
action="store",
help="Path to the Fossil executable file.",
)
ap.add_argument(
"--filter-file",
dest="filter_file",
action="store",
help="Path to text file with list of string replacements in "
+ 'comma-separated format ("old string", "new string").',
)
args = ap.parse_args(argv[1:])
repo_path = Path(args.repo_dir).expanduser().resolve()
repo_name = args.repo_name
if repo_name is None:
# Default to repo_dir name with a .fossil suffix.
repo_name = f"{repo_path.stem}.fossil"
fossil_exe = args.fossil_exe
if fossil_exe is None:
# Default to assuming the 'fossil' command is available in the PATH.
fossil_exe = "fossil"
opts = AppOptions(
args.input_csv,
str(repo_path),
repo_name,
args.init_date,
args.log_dir,
        fossil_exe,  # use the computed default, not the raw argument
args.filter_file,
)
p = Path(opts.input_csv)
if not (p.exists() and p.is_file()):
sys.stderr.write(f"ERROR: File not found: '{p}'")
sys.exit(1)
if opts.log_dir is not None:
if not Path(opts.log_dir).exists():
sys.stderr.write(
f"ERROR: Log directory not found '{opts.log_dir}'"
)
sys.exit(1)
    if args.fossil_exe is not None:  # only validate a path the user explicitly supplied
if not Path(opts.fossil_exe).exists():
sys.stderr.write(f"ERROR: File not found '{opts.fossil_exe}'")
sys.exit(1)
if opts.filter_file is not None:
if not Path(opts.filter_file).exists():
sys.stderr.write(f"ERROR: File not found '{opts.filter_file}'")
sys.exit(1)
return opts
def fossil_mv_cmd(add_cmd, base_name):
old_name = add_cmd.split(":")[1].strip().strip('"').strip("'")
assert 0 < len(old_name)
s = f'mv "{old_name}" "{base_name}"'
return s
def main(argv):
opts = get_opts(argv)
global log_path
if opts.log_dir is not None:
log_path = (
Path(opts.log_dir).expanduser().resolve().joinpath(log_path.name)
)
write_log(f"BEGIN at {run_dt:%Y-%m-%d %H:%M:%S}")
if ask_to_continue(
"Commit to repository (otherwise run in 'what-if' mode) [N,y]? ",
["n", "y", ""]
) == "y":
do_commit = True
write_log("MODE: COMMIT")
else:
do_commit = False
write_log("MODE: What-if (actions logged, repository not affected)")
fossil_create_repo(opts, do_commit)
fossil_open_repo(opts, do_commit)
target_path = Path(opts.repo_dir)
load_filter_list(opts.filter_file)
commit_list: List[CommitProps] = []
write_log(f"Read {opts.input_csv}")
with open(opts.input_csv) as csv_file:
reader = csv.DictReader(csv_file)
for row in reader:
if len(row["full_name"]) > 0:
do_skip = str(row["SKIP_Y"]).upper() == "Y"
if not do_skip:
commit_list.append(
CommitProps(
row["sort_key"],
row["full_name"],
row["datetime_tag"],
row["base_name"],
row["COMMIT_MESSAGE"],
row["ADD_COMMAND"],
)
)
commit_list.sort()
datetime_tags = []
for item in commit_list:
if item.datetime_tag not in datetime_tags:
datetime_tags.append(item.datetime_tag)
datetime_tags.sort()
for dt_tag in datetime_tags:
print(dt_tag)
commit_dt = get_date_string(dt_tag)
commit_msg = ""
pre_commit = []
# post_commit = []
commit_this: List[CommitProps] = []
for item in commit_list:
if item.datetime_tag == dt_tag:
com_msg = plain_quotes(item.commit_message.strip())
# Stop on non-ascii characters in the commit message.
# TODO: This check should be temporary, just to see what
# chars, besides left and right quotes, are showing up.
as_ascii = ascii(com_msg)
if "\\u" in as_ascii:
print(com_msg)
print(as_ascii)
assert 0
                # If the commit_message has only a single character,
# treat it as ditto (no matter what character) indicating
# the message is attached to another file in the same
# commit, and the current file was reviewed in Step 2 of
# the overall process.
if len(com_msg) == 1:
com_msg = ""
if 0 < len(com_msg):
if com_msg.endswith("."):
com_msg += " "
else:
com_msg += ". "
commit_msg += com_msg
add_cmd = item.add_command.strip()
if 0 < len(add_cmd):
if add_cmd.lower().startswith("rename:"):
pre_commit.append(
fossil_mv_cmd(add_cmd, item.base_name)
)
commit_this.append(item)
# Run any pre-commit fossil commands (such as 'mv').
if 0 < len(pre_commit):
for cmd_args in pre_commit:
cmds = [opts.fossil_exe] + split_quoted(cmd_args)
write_log("({0}) RUN (PRE): {1}".format(dt_tag, log_fmt(cmds)))
if do_commit:
run_fossil(cmds, target_path)
# Copy files to commit for current date_time tag.
for props in commit_this:
target_name = target_path / Path(props.base_name).name
existing_file = Path(target_name).exists()
write_log(f"COPY {props.full_name}")
write_log(f" TO {target_name}")
if do_commit:
# Copy file to target repo location.
copy_filtered_content(props.full_name, target_name)
ts = datetime_fromisoformat(commit_dt).timestamp()
os.utime(target_name, (ts, ts))
if not existing_file:
cmds = [opts.fossil_exe, "add", props.base_name]
write_log("({0}) RUN: {1}".format(props.datetime_tag, cmds))
if do_commit:
run_fossil(cmds, target_path)
# Run 'fossil commit' for current date_time tag.
if len(commit_msg) == 0:
commit_msg = f"({dt_tag})"
else:
commit_msg = commit_msg.strip()
cmds = [
opts.fossil_exe,
"commit",
"-m",
commit_msg,
"--date-override",
commit_dt,
]
write_log("({0}) RUN: {1}".format(dt_tag, log_fmt(cmds)))
if do_commit:
run_fossil(cmds, target_path)
write_log(f"END at {datetime.now():%Y-%m-%d %H:%M:%S}")
if do_commit:
print(
dedent(
"""
WARNING: Log file may contain initial password for the Fossil
repository default admin-user. You should change the password,
especially if it will be exposed outside the local system.
You can also edit the log file to remove the password.
"""
)
)
print(f"Log file is '{log_path}'\n")
print("Done (bak_to_fossil_3.py).")
if __name__ == "__main__":
sys.exit(main(sys.argv))
| 11,336 | 0 | 230 |
8812f60cfa1d071ac53c689d76eee185cf18bbbd | 8,319 | py | Python | opexebo/analysis/population_vector_correlation.py | simon-ball/opexebo | 8e44a4890efa60a6ed8c2e9e0df7cc9ab2d80d31 | [
"MIT"
] | 4 | 2019-06-12T07:50:42.000Z | 2021-11-19T12:55:47.000Z | opexebo/analysis/population_vector_correlation.py | simon-ball/opexebo | 8e44a4890efa60a6ed8c2e9e0df7cc9ab2d80d31 | [
"MIT"
] | 12 | 2019-06-12T07:26:40.000Z | 2021-08-11T15:10:47.000Z | opexebo/analysis/population_vector_correlation.py | simon-ball/opexebo | 8e44a4890efa60a6ed8c2e9e0df7cc9ab2d80d31 | [
"MIT"
] | 4 | 2019-11-21T10:44:37.000Z | 2022-01-07T14:21:07.000Z | """
Provide function for population vector correlation calculation
"""
import numpy as np
from .. import errors as err
def population_vector_correlation(stack_0, stack_1, **kwargs):
"""Calculates the bin-wise correlation between two stacks of rate maps
Each stack corresponds to a separate Task, or trial. Each layer is the
ratemap for a single cell from that Task. The same units should be given in
the same order in each stack.
Take a single column through the stack (i.e. 1 single bin/location in
arena, with a firing rate for each cell), from each stack
In the original MatLab implementation, three output modes were supported
* 1D: (`numYbins`) - iterate over `i`
1) Take a 2D slice from each stack - all cells at all `X` positions at a
single `Y` position `i`
2) Reshape from 2D to 1D
3) Calculate the Pearson correlation coefficient between the two 1D
arrays
4) The value of `pv_corr_1d[i]` is the Pearson correlation coefficient
arising from `Y` position `i`
* 2D (`numXbins` x `numYbins`) - iterate over `i`
1) Take a 2D slice from each stack - all cells at all `X` positions at a
single `Y` position `i`
2) Calculate the 2D array (`numXbins` x `numYbins`) where the `[j,k]`th
value is the Pearson correlation coefficient between all
observations at the `j`'th `X` location in `stack_left` and the `k`'th
location in `stack_right`
3) The `i`'th row of `pv_corr_2d` is the DIAGONAL of the correlation matrix
            i.e. where `j==k` i.e. the correlation of the SAME location in
each stack for all observations (`numCells`)
* 3D (`numXbins` x `numYbins` x iteration(=`numYbins`))
Same as 2D BUT take the whole correlation matrix, not the diagonal
        i.e. the full [j,k] correlation between all X locations
A note on correlation in Numpy vs Matlab
        Matlab's `corr(a, b)` function returns the correlation of a and b.
        Numpy's `corrcoef` function returns the normalised covariance matrix,
        which is:
            aa ab
            ba bb
        The normalised covariance matrix *should* be Hermitian, but due to
        floating point accuracy, this is not actually guaranteed.
        The MatLab function can be reproduced by taking either [0, 1] or [1, 0]
        of the normalised covariance matrix.
    If `a`, `b` are 2D matrices, then they should have shape `(num_variables, num_observations)`
In the case of this function, where the iterator is over the `Y` values
of the rate map, that means: `(x_bins, num_cells)`
Parameters
----------
stack_0: 3D array -or- list of 2D arrays
stack_1: 3D array -or- list of 2D arrays
`stack_x[i]` should return the `i`'th ratemap. This corresponds to a
constructor like:
`np.zeros(num_layers, y_bins, x_bins)`
Alternatively, a list or tuple of 2D arrays may be supplied:
`stack_x` = (`ratemap_0`, `ratemap_1`, `ratemap_2`, ...)
row_major: bool
Direction of iteration. If `True`, then each row is iterated over in turn
and correlation is calculated per row.
If `False`, then each column is iterated over in turn, and correlation is
calculated per column.
Default True (same behavior as in BNT)
Returns
-------
(p1, p2, p3)
p1: np.ndarray (1D, iterator x 1)
Array of Pearson correlation coefficients. i'th value is given by the
correlation of the i'th flattened slice of stack_0 to the i'th
flattened slice of stack_1
p2: np.ndarray (2D, iterator x non-iterator)
i'th row is the diagonal of the correlation matrix, i.e. the correlation
of the same location (location i) in each stack, i.e. where j==k
p3: np.ndarray(3D, iterator x non-iterator x non-iterator)
i'th array is the entire correlation matrix, rather than just the diagonal
Notes
--------
BNT.+analyses.populationVectorCorrelation
Copyright (C) 2019 by Simon Ball
"""
debug = kwargs.get("debug", False)
row_major = kwargs.get("row_major", True)
# Perform input validation and ensure we have a pair of 3D arrays
stack_0, stack_1 = _handle_both_inputs(stack_0, stack_1)
# _handle_ has ensured that both arrays meet the shape/type requirements
# Hardcode iterating over Y for now.
num_cells, y_bins, x_bins = stack_0.shape
if row_major:
iterator = y_bins
non_iterator = x_bins
else:
iterator = x_bins
non_iterator = y_bins
if debug:
print(f"Number of ratemaps: {num_cells}")
print(f"Ratemap dimensions: {y_bins} x {x_bins}")
print(f"Iterating over axis length {iterator} (row_major is {row_major})")
p1 = np.zeros(iterator)
p2 = np.zeros((iterator, non_iterator))
p3 = np.zeros((iterator, non_iterator, non_iterator))
for i in range(iterator):
if row_major:
left = stack_0[:, i, :].transpose()
right = stack_1[:, i, :].transpose()
else:
left = stack_0[:, :, i].transpose()
right = stack_1[:, :, i].transpose()
# 1D
# Reshape 2D array to a 1D array
correlation_value = np.corrcoef(left.flatten(), right.flatten())[0,1]
p1[i] = correlation_value
# 2D, 3D
correlation_matrix = np.corrcoef(left, right)[0:non_iterator, non_iterator:]
p2[i, :] = np.diagonal(correlation_matrix)
p3[i, :, :] = correlation_matrix
return (p1, p2, p3)
###############################################################################
#############
############# Error checking
#############
def _handle_both_inputs(stack_0, stack_1):
'''Handle error checking across both main inputs'''
stack_0 = _handle_single_input(stack_0, 0)
stack_1 = _handle_single_input(stack_1, 1)
if stack_0.shape[0] != stack_1.shape[0]:
raise err.ArgumentError("You have a different number of rate maps in each stack.")
if stack_0.shape[1:] != stack_1.shape[1:]:
raise err.ArgumentError("Your rate maps do not have matching dimensions")
return stack_0, stack_1
def _handle_single_input(stack, i):
'''Handle the input stack(s) and provide a correctly formatted 3D array
Handle error checking for a variety of conditions for a single stack
If not already a MaskedArray, then convert to that
Parameters
----------
stack : array-like
One of main inputs to population_vector_correlation.
Should be either a 3D array, where each layer (stack[j]) is a RateMap,
OR a list of 2D arrays, where each array is a 2D RateMap.
If a list of arrays, all arrays must be the same dimension
i : int
Index of stack input, solely used for providing more meaningful error
message
Returns
-------
stack : np.ma.MaskedArray
3D array of RateMaps, masked at invalid values
'''
dims = None
t = type(stack)
if t not in (list, tuple, np.ndarray, np.ma.MaskedArray):
raise ValueError(f"Stack_{i} must be array-like. You provided {t}")
elif t in (tuple, list):
for element in stack:
e = type(element)
if e not in (np.ndarray, np.ma.MaskedArray):
raise err.ArgumentError(f"The elements of the list stack_{i} must be"\
f" NumPy arrays. You provided {e}")
if dims is None:
dims = element.shape
else:
if element.shape != dims:
raise err.ArgumentError(f"Your ratemaps are not a consistent"\
f" shape in stack_{i}")
# Passes error handling, now convert from list to masked array
stack = np.ma.masked_invalid(stack)
elif isinstance(stack, np.ndarray):
# Ok, but convert to masked array
stack = np.ma.masked_invalid(stack)
dims = stack.shape[1:]
else:
# Instance is already a Masked Array
dims = stack.shape[1:]
return stack
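# A worked shape example for population_vector_correlation above (sizes are
# illustrative): with two stacks of 3 ratemaps sized 4 y-bins x 5 x-bins, e.g.
#   stack_a = np.random.default_rng(0).random((3, 4, 5))
#   p1, p2, p3 = population_vector_correlation(stack_a, stack_a)
# the outputs have shapes (4,), (4, 5) and (4, 5, 5), and the MatLab-style
# corr(a, b) scalar is recovered as np.corrcoef(a, b)[0, 1] for 1D a and b.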
| 38.513889 | 97 | 0.617502 | """
Provide function for population vector correlation calculation
"""
import numpy as np
from .. import errors as err
def population_vector_correlation(stack_0, stack_1, **kwargs):
"""Calculates the bin-wise correlation between two stacks of rate maps
Each stack corresponds to a separate Task, or trial. Each layer is the
ratemap for a single cell from that Task. The same units should be given in
the same order in each stack.
Take a single column through the stack (i.e. 1 single bin/location in
arena, with a firing rate for each cell), from each stack
In the original MatLab implementation, three output modes were supported
* 1D: (`numYbins`) - iterate over `i`
1) Take a 2D slice from each stack - all cells at all `X` positions at a
single `Y` position `i`
2) Reshape from 2D to 1D
3) Calculate the Pearson correlation coefficient between the two 1D
arrays
4) The value of `pv_corr_1d[i]` is the Pearson correlation coefficient
arising from `Y` position `i`
* 2D (`numXbins` x `numYbins`) - iterate over `i`
1) Take a 2D slice from each stack - all cells at all `X` positions at a
single `Y` position `i`
2) Calculate the 2D array (`numXbins` x `numYbins`) where the `[j,k]`th
value is the Pearson correlation coefficient between all
observations at the `j`'th `X` location in `stack_left` and the `k`'th
location in `stack_right`
3) The `i`'th row of `pv_corr_2d` is the DIAGONAL of the correlation matrix
            i.e. where `j==k` i.e. the correlation of the SAME location in
each stack for all observations (`numCells`)
* 3D (`numXbins` x `numYbins` x iteration(=`numYbins`))
Same as 2D BUT take the whole correlation matrix, not the diagonal
        i.e. the full [j,k] correlation between all X locations
A note on correlation in Numpy vs Matlab
        Matlab's `corr(a, b)` function returns the correlation of a and b.
        Numpy's `corrcoef` function returns the normalised covariance matrix,
        which is:
            aa ab
            ba bb
        The normalised covariance matrix *should* be Hermitian, but due to
        floating point accuracy, this is not actually guaranteed.
        The MatLab function can be reproduced by taking either [0, 1] or [1, 0]
        of the normalised covariance matrix.
    If `a`, `b` are 2D matrices, then they should have shape `(num_variables, num_observations)`
In the case of this function, where the iterator is over the `Y` values
of the rate map, that means: `(x_bins, num_cells)`
Parameters
----------
stack_0: 3D array -or- list of 2D arrays
stack_1: 3D array -or- list of 2D arrays
`stack_x[i]` should return the `i`'th ratemap. This corresponds to a
constructor like:
`np.zeros(num_layers, y_bins, x_bins)`
Alternatively, a list or tuple of 2D arrays may be supplied:
`stack_x` = (`ratemap_0`, `ratemap_1`, `ratemap_2`, ...)
row_major: bool
Direction of iteration. If `True`, then each row is iterated over in turn
and correlation is calculated per row.
If `False`, then each column is iterated over in turn, and correlation is
calculated per column.
Default True (same behavior as in BNT)
Returns
-------
(p1, p2, p3)
p1: np.ndarray (1D, iterator x 1)
Array of Pearson correlation coefficients. i'th value is given by the
correlation of the i'th flattened slice of stack_0 to the i'th
flattened slice of stack_1
p2: np.ndarray (2D, iterator x non-iterator)
i'th row is the diagonal of the correlation matrix, i.e. the correlation
of the same location (location i) in each stack, i.e. where j==k
p3: np.ndarray(3D, iterator x non-iterator x non-iterator)
i'th array is the entire correlation matrix, rather than just the diagonal
Notes
--------
BNT.+analyses.populationVectorCorrelation
Copyright (C) 2019 by Simon Ball
"""
debug = kwargs.get("debug", False)
row_major = kwargs.get("row_major", True)
# Perform input validation and ensure we have a pair of 3D arrays
stack_0, stack_1 = _handle_both_inputs(stack_0, stack_1)
# _handle_ has ensured that both arrays meet the shape/type requirements
# Hardcode iterating over Y for now.
num_cells, y_bins, x_bins = stack_0.shape
if row_major:
iterator = y_bins
non_iterator = x_bins
else:
iterator = x_bins
non_iterator = y_bins
if debug:
print(f"Number of ratemaps: {num_cells}")
print(f"Ratemap dimensions: {y_bins} x {x_bins}")
print(f"Iterating over axis length {iterator} (row_major is {row_major})")
p1 = np.zeros(iterator)
p2 = np.zeros((iterator, non_iterator))
p3 = np.zeros((iterator, non_iterator, non_iterator))
for i in range(iterator):
if row_major:
left = stack_0[:, i, :].transpose()
right = stack_1[:, i, :].transpose()
else:
left = stack_0[:, :, i].transpose()
right = stack_1[:, :, i].transpose()
# 1D
# Reshape 2D array to a 1D array
correlation_value = np.corrcoef(left.flatten(), right.flatten())[0,1]
p1[i] = correlation_value
# 2D, 3D
correlation_matrix = np.corrcoef(left, right)[0:non_iterator, non_iterator:]
p2[i, :] = np.diagonal(correlation_matrix)
p3[i, :, :] = correlation_matrix
return (p1, p2, p3)
###############################################################################
#############
############# Error checking
#############
def _handle_both_inputs(stack_0, stack_1):
'''Handle error checking across both main inputs'''
stack_0 = _handle_single_input(stack_0, 0)
stack_1 = _handle_single_input(stack_1, 1)
if stack_0.shape[0] != stack_1.shape[0]:
raise err.ArgumentError("You have a different number of rate maps in each stack.")
if stack_0.shape[1:] != stack_1.shape[1:]:
raise err.ArgumentError("Your rate maps do not have matching dimensions")
return stack_0, stack_1
def _handle_single_input(stack, i):
'''Handle the input stack(s) and provide a correctly formatted 3D array
Handle error checking for a variety of conditions for a single stack
If not already a MaskedArray, then convert to that
Parameters
----------
stack : array-like
One of main inputs to population_vector_correlation.
Should be either a 3D array, where each layer (stack[j]) is a RateMap,
OR a list of 2D arrays, where each array is a 2D RateMap.
If a list of arrays, all arrays must be the same dimension
i : int
Index of stack input, solely used for providing more meaningful error
message
Returns
-------
stack : np.ma.MaskedArray
3D array of RateMaps, masked at invalid values
'''
dims = None
t = type(stack)
if t not in (list, tuple, np.ndarray, np.ma.MaskedArray):
raise ValueError(f"Stack_{i} must be array-like. You provided {t}")
elif t in (tuple, list):
for element in stack:
e = type(element)
if e not in (np.ndarray, np.ma.MaskedArray):
raise err.ArgumentError(f"The elements of the list stack_{i} must be"\
f" NumPy arrays. You provided {e}")
if dims is None:
dims = element.shape
else:
if element.shape != dims:
raise err.ArgumentError(f"Your ratemaps are not a consistent"\
f" shape in stack_{i}")
# Passes error handling, now convert from list to masked array
stack = np.ma.masked_invalid(stack)
elif isinstance(stack, np.ndarray):
# Ok, but convert to masked array
stack = np.ma.masked_invalid(stack)
dims = stack.shape[1:]
else:
# Instance is already a Masked Array
dims = stack.shape[1:]
return stack
| 0 | 0 | 0 |
43dfff0b11f9abecb2f386dab390d2464fef68ca | 4,700 | py | Python | openslides_backend/http/application.py | FinnStutzenstein/openslides-backend | fffc152f79d3446591e07a6913d9fdf30b46f577 | [
"MIT"
] | 5 | 2020-01-20T13:57:15.000Z | 2021-03-27T14:14:44.000Z | openslides_backend/http/application.py | FinnStutzenstein/openslides-backend | fffc152f79d3446591e07a6913d9fdf30b46f577 | [
"MIT"
] | 859 | 2020-01-11T22:58:37.000Z | 2022-03-30T14:54:06.000Z | openslides_backend/http/application.py | FinnStutzenstein/openslides-backend | fffc152f79d3446591e07a6913d9fdf30b46f577 | [
"MIT"
] | 16 | 2020-01-04T20:28:57.000Z | 2022-02-10T12:06:54.000Z | import os
import os
import re
from typing import Any, Iterable, Union
import simplejson as json
from werkzeug.exceptions import BadRequest as WerkzeugBadRequest
from werkzeug.wrappers import Response
from ..services.auth.adapter import HEADER_NAME
from ..shared.env import is_truthy
from ..shared.exceptions import ViewException
from ..shared.interfaces.wsgi import StartResponse, WSGIEnvironment
from .http_exceptions import BadRequest, Forbidden, HTTPException, MethodNotAllowed
from .request import Request
health_route = re.compile("^/health$")
class OpenSlidesBackendWSGIApplication:
"""
Central application class for this service.
During initialization we bind injected dependencies to the instance.
"""
def __init__(self, logging: Any, view: Any, services: Any) -> None:
self.logging = logging
self.logger = logging.getLogger(__name__)
self.logger.debug("Initialize OpenSlides Backend WSGI application.")
self.view = view
self.services = services
def dispatch_request(self, request: Request) -> Union[Response, HTTPException]:
"""
Dispatches request to route according to URL rules. Returns a Response
object or a HTTPException (or a subclass of it). Both are WSGI
applications themselves.
"""
if health_route.match(request.environ["RAW_URI"]):
return self.health_info(request)
return self.default_route(request)
def default_route(self, request: Request) -> Union[Response, HTTPException]:
"""
Default route that calls the injected view.
"""
# Check request method
if request.method != self.view.method:
return MethodNotAllowed(valid_methods=[self.view.method])
self.logger.debug(f"Request method is {request.method}.")
# Check mimetype and parse JSON body. The result is cached in request.json.
if not request.is_json:
return BadRequest(
ViewException(
"Wrong media type. Use 'Content-Type: application/json' instead."
)
)
try:
request_body = request.get_json()
except WerkzeugBadRequest as exception:
return BadRequest(ViewException(exception.description))
self.logger.debug(f"Request contains JSON: {request_body}.")
# Dispatch view and return response.
view_instance = self.view(self.logging, self.services)
try:
response_body, access_token = view_instance.dispatch(request)
except ViewException as exception:
env_var = os.environ.get("OPENSLIDES_BACKEND_RAISE_4XX", "off")
if is_truthy(env_var):
raise exception
if exception.status_code == 400:
return BadRequest(exception)
elif exception.status_code == 403:
return Forbidden(exception)
else:
text = (
f"Unknown ViewException with status_code {exception.status_code} "
f"raised: {exception.message}"
)
self.logger.error(text)
raise
self.logger.debug(
f"All done. Application sends HTTP 200 with body {response_body}."
)
response = Response(json.dumps(response_body), content_type="application/json")
if access_token is not None:
response.headers[HEADER_NAME] = access_token
return response
def health_info(self, request: Request) -> Union[Response, HTTPException]:
"""
Route to provide health data of this service. Retrieves status information
from respective view.
"""
health_info = self.view(self.logging, self.services).get_health_info()
return Response(
json.dumps({"healthinfo": health_info}),
content_type="application/json",
)
def wsgi_application(
self, environ: WSGIEnvironment, start_response: StartResponse
) -> Iterable[bytes]:
"""
Creates Werkzeug's Request object, calls the dispatch_request method and
evaluates Response object (or HTTPException) as WSGI application.
"""
request = Request(environ)
response = self.dispatch_request(request)
return response(environ, start_response)
def __call__(
self, environ: WSGIEnvironment, start_response: StartResponse
) -> Iterable[bytes]:
"""
Dispatches request to `wsgi_application` method so that one may apply
custom middlewares to the application.
"""
return self.wsgi_application(environ, start_response)
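# Wiring sketch (hypothetical; `view` and `services` stand in for the
# dependencies this service normally injects): the application object is a
# plain WSGI callable, so any WSGI server can host it.
#
#   import logging
#   application = OpenSlidesBackendWSGIApplication(logging, view, services)
#   # e.g. with gunicorn: gunicorn "some_module:application"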
78ed8ee145761eaf3c9f43648f0b8fc8e9567525 | 7,899 | py | Python | deep_qa-master/deep_qa/models/sentence_selection/siamese_sentence_selector.py | RTHMaK/RPGOne | 3f3ada7db1762781668bfb2377154fdc00e17212 | ["Apache-2.0"] | 1 | 2017-04-11T13:03:55.000Z | 2017-04-11T13:03:55.000Z | deep_qa-master/deep_qa/models/sentence_selection/siamese_sentence_selector.py | RTHMaK/RPGOne | 3f3ada7db1762781668bfb2377154fdc00e17212 | ["Apache-2.0"] | null | null | null | deep_qa-master/deep_qa/models/sentence_selection/siamese_sentence_selector.py | RTHMaK/RPGOne | 3f3ada7db1762781668bfb2377154fdc00e17212 | ["Apache-2.0"] | null | null | null |
from typing import Any, Dict
from overrides import overrides
from keras.layers import Input
from keras.layers.wrappers import TimeDistributed
from ...data.instances.sentence_selection_instance import SentenceSelectionInstance
from ...layers.attention.attention import Attention
from ...layers.wrappers.encoder_wrapper import EncoderWrapper
from ...training.text_trainer import TextTrainer
from ...training.models import DeepQaModel
class SiameseSentenceSelector(TextTrainer):
"""
This class implements a (generally) Siamese network for the answer
    sentence selection task. Given a question and a collection of sentences,
we aim to identify which sentence has the answer to the question. This
model encodes the question and each sentence with (possibly different)
encoders, and then does a cosine similarity and normalizes to get a
distribution over the set of sentences.
Note that in some cases, this may not be exactly "Siamese" because the
question and sentences encoders can differ.
Parameters
----------
num_hidden_seq2seq_layers : int, optional (default: ``2``)
We use a few stacked biLSTMs (or similar), to give the model some
depth. This parameter controls how many deep layers we should use.
share_hidden_seq2seq_layers : bool, optional (default: ``False``)
Whether or not to encode the sentences and the question with the same
hidden seq2seq layers, or have different ones for each.
"""
def __init__(self, params: Dict[str, Any]):
self.num_hidden_seq2seq_layers = params.pop('num_hidden_seq2seq_layers', 2)
self.share_hidden_seq2seq_layers = params.pop('share_hidden_seq2seq_layers', False)
self.num_question_words = params.pop('num_question_words', None)
self.num_sentences = params.pop('num_sentences', None)
super(SiameseSentenceSelector, self).__init__(params)
@overrides
def _build_model(self):
"""
The basic outline here is that we'll pass the questions and each
sentence in the passage through some sort of encoder (e.g. BOW, GRU,
or biGRU).
Then, we take the encoded representation of the question and calculate
a cosine similarity with the encoded representation of each sentence in
the passage, to get a tensor of cosine similarities with shape
(batch_size, num_sentences_per_passage). We then normalize for each
batch to get a probability distribution over sentences in the passage.
"""
# First we create input layers and pass the inputs through embedding layers.
# shape: (batch size, num_question_words)
question_input = Input(shape=self._get_sentence_shape(self.num_question_words),
dtype='int32', name="question_input")
# shape: (batch size, num_sentences, num_sentence_words)
sentences_input_shape = ((self.num_sentences,) +
self._get_sentence_shape())
sentences_input = Input(shape=sentences_input_shape,
dtype='int32', name="sentences_input")
# shape: (batch size, num_question_words, embedding size)
question_embedding = self._embed_input(question_input)
# shape: (batch size, num_sentences, num_sentence_words, embedding size)
sentences_embedding = self._embed_input(sentences_input)
# We encode the question embedding with some more seq2seq layers
modeled_question = question_embedding
for i in range(self.num_hidden_seq2seq_layers):
if self.share_hidden_seq2seq_layers:
seq2seq_encoder_name = "seq2seq_{}".format(i)
else:
seq2seq_encoder_name = "question_seq2seq_{}".format(i)
hidden_layer = self._get_seq2seq_encoder(name=seq2seq_encoder_name,
fallback_behavior="use default params")
# shape: (batch_size, num_question_words, seq2seq output dimension)
modeled_question = hidden_layer(modeled_question)
# We encode the sentence embedding with some more seq2seq layers
modeled_sentence = sentences_embedding
for i in range(self.num_hidden_seq2seq_layers):
if self.share_hidden_seq2seq_layers:
seq2seq_encoder_name = "seq2seq_{}".format(i)
else:
seq2seq_encoder_name = "sentence_seq2seq_{}".format(i)
hidden_layer = TimeDistributed(
self._get_seq2seq_encoder(name=seq2seq_encoder_name,
fallback_behavior="use default params"),
name="TimeDistributed_seq2seq_sentences_encoder_{}".format(i))
# shape: (batch_size, num_question_words, seq2seq output dimension)
modeled_sentence = hidden_layer(modeled_sentence)
# We encode the modeled question with some encoder.
question_encoder = self._get_encoder(name="question_encoder",
fallback_behavior="use default encoder")
# shape: (batch size, encoder_output_dimension)
encoded_question = question_encoder(modeled_question)
# We encode the modeled document with some encoder.
sentences_encoder = EncoderWrapper(self._get_encoder(name="sentence_encoder",
fallback_behavior="use default encoder"),
name="TimeDistributed_sentences_encoder")
# shape: (batch size, num_sentences, encoder_output_dimension)
encoded_sentences = sentences_encoder(modeled_sentence)
# Here we use the Attention layer with the cosine similarity function
        # to get the cosine similarities of each sentence with the question.
# shape: (batch size, num_sentences)
attention_name = 'question_sentences_similarity'
similarity_params = {"type": "cosine_similarity"}
sentence_probabilities = Attention(name=attention_name,
similarity_function=similarity_params)([encoded_question,
encoded_sentences])
return DeepQaModel(input=[question_input, sentences_input],
output=sentence_probabilities)
@overrides
def _instance_type(self):
"""
Return the instance type that the model trains on.
"""
return SentenceSelectionInstance
@overrides
def _get_max_lengths(self) -> Dict[str, int]:
"""
Return a dictionary with the appropriate padding lengths.
"""
max_lengths = super(SiameseSentenceSelector, self)._get_max_lengths()
max_lengths['num_question_words'] = self.num_question_words
max_lengths['num_sentences'] = self.num_sentences
return max_lengths
@overrides
def _set_max_lengths(self, max_lengths: Dict[str, int]):
"""
Set the padding lengths of the model.
"""
super(SiameseSentenceSelector, self)._set_max_lengths(max_lengths)
self.num_question_words = max_lengths['num_question_words']
self.num_sentences = max_lengths['num_sentences']
@overrides
def _set_max_lengths_from_model(self):
self.set_text_lengths_from_model_input(self.model.get_input_shape_at(0)[1][2:])
self.num_question_words = self.model.get_input_shape_at(0)[0][1]
self.num_sentences = self.model.get_input_shape_at(0)[1][1]
@classmethod
def _get_custom_objects(cls):
custom_objects = super(SiameseSentenceSelector, cls)._get_custom_objects()
custom_objects["Attention"] = Attention
custom_objects["EncoderWrapper"] = EncoderWrapper
return custom_objects
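# Construction sketch (hypothetical values): the constructor pops its own
# options from a params dict and hands the rest to the TextTrainer base
# class, so a minimal configuration could look like this.
#
#   params = {'num_hidden_seq2seq_layers': 2,
#             'share_hidden_seq2seq_layers': True}   # plus TextTrainer params
#   model = SiameseSentenceSelector(params)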
a8867f5403bb99f5bcbae491f8268e1b65d87f58 | 1,572 | py | Python | aulaspythonintermediario/aula08/aula08.py | lel352/Curso-Python | d65484c807db52d57042eee20ccbd3131825fa98 | ["MIT"] | 1 | 2021-09-04T14:34:34.000Z | 2021-09-04T14:34:34.000Z | aulaspythonintermediario/aula08/aula08.py | lel352/Curso-Python | d65484c807db52d57042eee20ccbd3131825fa98 | ["MIT"] | null | null | null | aulaspythonintermediario/aula08/aula08.py | lel352/Curso-Python | d65484c807db52d57042eee20ccbd3131825fa98 | ["MIT"] | null | null | null |
perguntas = {
    'Question 1': {
        'question': 'How much is 2+2?',
        'answers': {
            'a': '1',
            'b': '4',
            'c': '8'
        },
        'correct_answer': 'b',
    },
    'Question 2': {
        'question': 'How much is 3*2?',
        'answers': {
            'a': '4',
            'b': '10',
            'c': '6'
        },
        'correct_answer': 'c',
    },
    'Question 3': {
        'question': 'How much is 1+2?',
        'answers': {
            'a': '3',
            'b': '10',
            'c': '6'
        },
        'correct_answer': 'a',
    },
    'Question 4': {
        'question': 'How much is 1-1?',
        'answers': {
            'a': '2',
            'b': '1',
            'c': '0'
        },
        'correct_answer': 'c',
    },
    'Question 5': {
        'question': 'How much is 8/4?',
        'answers': {
            'a': '0',
            'b': '4',
            'c': '2'
        },
        'correct_answer': 'c',
    },
}
resposta_certa = 0
for pk, pv in perguntas.items():
    print(f'{pk}: {pv["question"]}')
    print('Answers: ')
    for rk, rv in pv['answers'].items():
        print(f'[{rk}]: {rv}')
    resposta = input('Your answer: ')
    if resposta == pv['correct_answer']:
        print('You got it right!')
        resposta_certa += 1
    else:
        print('You got it wrong!')
    print()
qtd_perguntas = len(perguntas)
por_acerto = resposta_certa / qtd_perguntas * 100
print(f'You answered {resposta_certa} question(s) correctly.')
print(f'Your percentage of correct answers was {por_acerto:.2f}%.')
3d0e051a57e016d558884a4d3a81a6b8c4bc541a | 2,521 | py | Python | api.py | dwyer/kvstore | d28e5d30e87663fc659dabd1186f65f7bcc72e7a | ["BSD-2-Clause"] | null | null | null | api.py | dwyer/kvstore | d28e5d30e87663fc659dabd1186f65f7bcc72e7a | ["BSD-2-Clause"] | null | null | null | api.py | dwyer/kvstore | d28e5d30e87663fc659dabd1186f65f7bcc72e7a | ["BSD-2-Clause"] | null | null | null |
# -*-coding:utf-8-*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import contextlib
import json
import urllib
import urllib2
class NotAuthorized(Exception):
pass
class NotFound(Exception):
pass
class ApiClient(object):
BASE_URL = 'http://localhost:8000/api'
def __init__(self, store=None, token=None):
self.store = store
self.token = token
def __repr__(self):
return '<%s store=%r token=%r>' % (self.__class__.__name__,
self.store, self.token)
def new_store(self):
return self._fetch('/stores', data={})
def get_store(self):
return self._fetch('/stores/%s' % self.store)
def delete_store(self):
return self._fetch('/stores/%s' % self.store, method='DELETE')
def new_token(self):
return self._fetch('/stores/%s/tokens' % (self.store), {})
def get_tokens(self):
return self._fetch('/stores/%s/tokens' % (self.store)).split()
def delete_token(self, token):
return self._fetch('/stores/%s/tokens/%s' % (self.store, token),
method='DELETE')
def get(self, key):
return self._fetch('/stores/%s/values/%s' % (self.store, key))
def set(self, key, value):
return self._fetch('/stores/%s/values/%s' % (self.store, key),
data=value)
def delete(self, key):
return self._fetch('/stores/%s/values/%s' % (self.store, key),
method='DELETE')
def _fetch(self, path, data=None, method=None):
url = self.BASE_URL + path
headers = {}
if self.token is not None:
headers['X-Token'] = self.token
if isinstance(data, dict):
data = urllib.urlencode(data)
request = urllib2.Request(url, data=data, headers=headers)
if method is not None:
request.get_method = lambda: method
try:
with contextlib.closing(urllib2.urlopen(request)) as response:
if response.headers['Content-Type'] == 'application/json':
return json.load(response)
else:
return response.read()
except urllib2.HTTPError as e:
if e.code == 403:
raise NotAuthorized()
elif e.code == 404:
raise NotFound()
else:
raise
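# Usage sketch (hypothetical key/value; the exact response shapes depend on
# the server implementation):
#
#   client = ApiClient(store='my-store', token='my-token')
#   client.set('greeting', 'hello')
#   print(client.get('greeting'))
#   client.delete('greeting')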
3630eaadfd408312b2a07a52e5f3950986ace3b4 | 9,328 | py | Python | appyter/ext/urllib.py | MaayanLab/jupyter-template | dd05bfcb95c9eafb1a9df845b5d8fecae1d6b9d5 | ["Apache-2.0"] | null | null | null | appyter/ext/urllib.py | MaayanLab/jupyter-template | dd05bfcb95c9eafb1a9df845b5d8fecae1d6b9d5 | ["Apache-2.0"] | 24 | 2020-04-07T17:04:47.000Z | 2020-05-27T00:51:25.000Z | appyter/ext/urllib.py | MaayanLab/jupyter-template | dd05bfcb95c9eafb1a9df845b5d8fecae1d6b9d5 | ["Apache-2.0"] | null | null | null |
import re
import itertools
import dataclasses
import typing as t
import urllib.parse
from appyter.ext.pathlib.chroot import ChrootPurePosixPath
from appyter.ext.dict import dict_merge, expand_dotmap
def parent_url(url):
parent, *filename = url.rsplit('/', maxsplit=1)
return parent if filename else ''
def url_filename(url):
parent, *filename = url.rsplit('/', maxsplit=1)
return filename[0] if filename else parent
def join_slash(*parts):
if not parts: return ''
part0, *parts = parts
return '/'.join(itertools.chain((part0.rstrip('/'),), (part.lstrip('/') for part in parts)))
def join_url(root, *parts):
return join_slash(root, str(ChrootPurePosixPath('/').join(*parts)))
url_expr = re.compile(
r'^((?P<scheme>.+?)://(?P<authority>((?P<username>[^/:@\?#]+?)(:(?P<password>[^/@\?#]+?))?@)?(?P<netloc>(?P<hostname>[^:/\?#]+)(:(?P<port>\d+))?))?)?(?P<path>.*?)(\?(?P<query_string>.*?))?(#(?P<fragment>.*?))?$'
)
fragment_expr = re.compile(
r'^(?P<path>.*?)(\?(?P<query_string>.*?))?$'
)
def parse_qs_values(query_map):
from appyter.ext.itertools import collapse
from appyter.ext.json import try_json_loads
return {
k: collapse([try_json_loads(v) if v != '' else True for v in V])
for k, V in query_map.items()
}
@dataclasses.dataclass(init=False, repr=False, frozen=True)
class URI:
''' Not unlike yarl's URL class but
- support for `::` notation as used in fsspec URIs
- posix_path path operation
- fragment parsing
- dotmap support (query_ex)
'''
scheme: t.Optional[str]
username: t.Optional[str]
password: t.Optional[str]
hostname: t.Optional[str]
port: t.Optional[int]
path: str
query_string: t.Optional[str]
fragment: t.Optional[str]
def __init__(self, url=None, scheme=None, username=None, password=None, hostname=None, port=None, path=None, query_string=None, fragment=None):
if url is not None:
m = url_expr.match(url)
object.__setattr__(self, 'scheme', m.group('scheme'))
object.__setattr__(self, 'username', m.group('username'))
object.__setattr__(self, 'password', m.group('password'))
object.__setattr__(self, 'hostname', m.group('hostname'))
object.__setattr__(self, 'port', int(m.group('port')) if m.group('port') is not None else None)
object.__setattr__(self, 'path', m.group('path'))
object.__setattr__(self, 'query_string', m.group('query_string'))
object.__setattr__(self, 'fragment', m.group('fragment'))
else:
object.__setattr__(self, 'scheme', scheme)
object.__setattr__(self, 'username', username)
object.__setattr__(self, 'password', password)
object.__setattr__(self, 'hostname', hostname)
object.__setattr__(self, 'port', int(port) if port is not None else None)
object.__setattr__(self, 'path', path or '')
object.__setattr__(self, 'query_string', query_string)
object.__setattr__(self, 'fragment', fragment)
@property
def netloc(self):
if self.hostname is not None and self.port is not None:
return f"{self.hostname}:{self.port}"
elif self.hostname is not None:
return self.hostname
else:
return None
@property
def auth(self):
if self.username is not None:
if self.password is not None:
return f"{self.username}:{self.password}"
else:
return self.username
return None
@property
def authority(self):
if self.netloc is not None:
if self.auth is not None:
return f"{self.auth}@{self.netloc}"
else:
return self.netloc
return None
@property
def posix_path(self):
return ChrootPurePosixPath(self.path)
@property
def name(self):
return self.posix_path.root.name
@property
def parent(self):
return self.with_path(str(self.posix_path.root.parent))
@property
def query(self):
return urllib.parse.parse_qs(self.query_string) if self.query_string is not None else None
@property
def query_ex(self):
return expand_dotmap(parse_qs_values(self.query)) if self.query is not None else {}
@property
def fragment_path(self):
return fragment_expr.match(self.fragment).group('path') if self.fragment is not None else None
@property
def fragment_posix_path(self):
return ChrootPurePosixPath(self.fragment_path) if self.fragment_path is not None else None
@property
def fragment_name(self):
return self.fragment_posix_path.root.name if self.fragment_posix_path is not None else None
@property
def fragment_parent(self):
return self.with_path(str(self.fragment_posix_path.root.parent)) if self.fragment_posix_path is not None else None
@property
def fragment_query_string(self):
return fragment_expr.match(self.fragment).group('query_string') if self.fragment is not None else None
@property
def fragment_query(self):
return urllib.parse.parse_qs(self.fragment_query_string) if self.fragment_query_string is not None else None
@property
def fragment_query_ex(self):
return expand_dotmap(parse_qs_values(self.fragment_query)) if self.fragment_query is not None else {}
def __str__(self):
return ''.join(filter(None, (
f"{self.scheme}://" if self.scheme is not None else None,
join_url(self.authority, self.path) if self.authority is not None else self.path,
f"?{self.query_string}" if self.query_string is not None else None,
f"#{self.fragment}" if self.fragment is not None else None,
)))
def __repr__(self):
return "{}('{}')".format(self.__class__.__name__, str(self))
def with_scheme(self, scheme):
return URI(
scheme=scheme,
username=self.username,
password=self.password,
hostname=self.hostname,
port=self.port,
path=self.path,
query_string=self.query_string,
fragment=self.fragment,
)
def with_username(self, username):
return URI(
scheme=self.scheme,
username=username,
password=self.password,
hostname=self.hostname,
port=self.port,
path=self.path,
query_string=self.query_string,
fragment=self.fragment,
)
def with_password(self, password):
return URI(
scheme=self.scheme,
username=self.username,
password=password,
hostname=self.hostname,
port=self.port,
path=self.path,
query_string=self.query_string,
fragment=self.fragment,
)
def with_hostname(self, hostname):
return URI(
scheme=self.scheme,
username=self.username,
password=self.password,
hostname=hostname,
port=self.port,
path=self.path,
query_string=self.query_string,
fragment=self.fragment,
)
def with_port(self, port):
return URI(
scheme=self.scheme,
username=self.username,
password=self.password,
hostname=self.hostname,
port=port,
path=self.path,
query_string=self.query_string,
fragment=self.fragment,
)
def with_path(self, path):
return URI(
scheme=self.scheme,
username=self.username,
password=self.password,
hostname=self.hostname,
port=self.port,
path=path,
query_string=self.query_string,
fragment=self.fragment,
)
def with_query_string(self, query_string):
return URI(
scheme=self.scheme,
username=self.username,
password=self.password,
hostname=self.hostname,
port=self.port,
path=self.path,
query_string=query_string,
fragment=self.fragment,
)
def with_query(self, query):
return self.with_query_string(urllib.parse.urlencode(query, doseq=True) if query is not None else None)
def update_query(self, query):
return self.with_query(dict_merge(self.query or {}, **query))
def with_fragment(self, fragment):
return URI(
scheme=self.scheme,
username=self.username,
password=self.password,
hostname=self.hostname,
port=self.port,
path=self.path,
query_string=self.query_string,
fragment=fragment,
)
def with_fragment_path(self, fragment_path):
if fragment_path is None and self.fragment_query_string is None:
return self.with_fragment(None)
elif self.fragment_query_string is None:
return self.with_fragment(fragment_path)
else:
return self.with_fragment(f"{fragment_path or ''}?{self.fragment_query_string}")
def with_fragment_query_string(self, fragment_query_string):
if self.fragment_path is None and fragment_query_string is None:
return self.with_fragment(None)
elif fragment_query_string is None:
return self.with_fragment(self.fragment_path)
else:
return self.with_fragment(f"{self.fragment_path or ''}?{fragment_query_string}")
def with_fragment_query(self, fragment_query):
return self.with_fragment_query_string(urllib.parse.urlencode(fragment_query, doseq=True))
def update_fragment_query(self, fragment_query):
return self.with_fragment_query(dict_merge(self.fragment_query or {}, **fragment_query))
def join(self, *parts):
return self.with_path(str(self.posix_path.join(*parts).realpath())) if self.posix_path is not None else None
def fragment_join(self, *parts):
return self.with_fragment_path(str(self.fragment_posix_path.join(*parts).realpath())) if self.fragment_posix_path is not None else None
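    # Usage sketch (hypothetical URI, not part of the original module):
    #
    #   u = URI("https://user:pw@example.com:8080/a/b?x=1#frag/path?y=2")
    #   assert u.scheme == "https" and u.port == 8080
    #   assert u.query == {"x": ["1"]} and u.fragment_path == "frag/path"
    #   assert str(u.with_port(9090)).startswith(
    #       "https://user:pw@example.com:9090")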
6aa46ba15af9c90cd4de963e915ab8711d8d1291 | 4,891 | py | Python | tensorflow_federated/python/research/optimization/stackoverflow_lr/run_federated.py | matech96/federated | b30a26d66162bd02a89a12f119e17925d161a26b | ["Apache-2.0"] | 1 | 2020-05-02T05:08:14.000Z | 2020-05-02T05:08:14.000Z | tensorflow_federated/python/research/optimization/stackoverflow_lr/run_federated.py | RITESG/STATIC | cfe9d3e35ba033b1c4e47d347427a83f682f41de | ["Apache-2.0"] | null | null | null | tensorflow_federated/python/research/optimization/stackoverflow_lr/run_federated.py | RITESG/STATIC | cfe9d3e35ba033b1c4e47d347427a83f682f41de | ["Apache-2.0"] | null | null | null |
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trains and evaluates Stackoverflow LR model using TFF."""
import functools
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf
from tensorflow_federated.python.research.optimization.shared import fed_avg_schedule
from tensorflow_federated.python.research.optimization.shared import iterative_process_builder
from tensorflow_federated.python.research.optimization.stackoverflow_lr import dataset
from tensorflow_federated.python.research.optimization.stackoverflow_lr import models
from tensorflow_federated.python.research.utils import training_loop
from tensorflow_federated.python.research.utils import training_utils
from tensorflow_federated.python.research.utils import utils_impl
with utils_impl.record_hparam_flags():
# Experiment hyperparameters
flags.DEFINE_integer('vocab_tokens_size', 10000, 'Vocab tokens size used.')
flags.DEFINE_integer('vocab_tags_size', 500, 'Vocab tags size used.')
flags.DEFINE_integer('client_batch_size', 100,
'Batch size used on the client.')
flags.DEFINE_integer('clients_per_round', 10,
'How many clients to sample per round.')
flags.DEFINE_integer(
'client_epochs_per_round', 1,
'Number of client (inner optimizer) epochs per federated round.')
flags.DEFINE_integer(
'num_validation_examples', 10000, 'Number of examples '
'to use from test set for per-round validation.')
flags.DEFINE_integer('max_elements_per_user', 1000, 'Max number of training '
'sentences to use per user.')
flags.DEFINE_integer(
'client_datasets_random_seed', 1, 'The random seed '
'governing the client dataset selection.')
FLAGS = flags.FLAGS
def metrics_builder():
"""Returns a `list` of `tf.keras.metric.Metric` objects."""
return [
tf.keras.metrics.Precision(name='precision'),
tf.keras.metrics.Recall(top_k=5, name='recall_at_5'),
]
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
stackoverflow_train, stackoverflow_validation, stackoverflow_test = dataset.get_stackoverflow_datasets(
vocab_tokens_size=FLAGS.vocab_tokens_size,
vocab_tags_size=FLAGS.vocab_tags_size,
client_batch_size=FLAGS.client_batch_size,
client_epochs_per_round=FLAGS.client_epochs_per_round,
max_training_elements_per_user=FLAGS.max_elements_per_user,
num_validation_examples=FLAGS.num_validation_examples)
input_spec = stackoverflow_train.create_tf_dataset_for_client(
stackoverflow_train.client_ids[0]).element_spec
model_builder = functools.partial(
models.create_logistic_model,
vocab_tokens_size=FLAGS.vocab_tokens_size,
vocab_tags_size=FLAGS.vocab_tags_size)
loss_builder = functools.partial(
tf.keras.losses.BinaryCrossentropy,
from_logits=False,
reduction=tf.keras.losses.Reduction.SUM)
training_process = iterative_process_builder.from_flags(
input_spec=input_spec,
model_builder=model_builder,
loss_builder=loss_builder,
metrics_builder=metrics_builder)
client_datasets_fn = training_utils.build_client_datasets_fn(
train_dataset=stackoverflow_train,
train_clients_per_round=FLAGS.clients_per_round,
random_seed=FLAGS.client_datasets_random_seed)
assign_weights_fn = fed_avg_schedule.ServerState.assign_weights_to_keras_model
evaluate_fn = training_utils.build_evaluate_fn(
model_builder=model_builder,
eval_dataset=stackoverflow_validation,
loss_builder=loss_builder,
metrics_builder=metrics_builder,
assign_weights_to_keras_model=assign_weights_fn)
test_fn = training_utils.build_evaluate_fn(
model_builder=model_builder,
# Use both val and test for symmetry with other experiments, which
# evaluate on the entire test set.
eval_dataset=stackoverflow_validation.concatenate(stackoverflow_test),
loss_builder=loss_builder,
metrics_builder=metrics_builder,
assign_weights_to_keras_model=assign_weights_fn)
logging.info('Training model:')
logging.info(model_builder().summary())
training_loop.run(
training_process, client_datasets_fn, evaluate_fn, test_fn=test_fn)
if __name__ == '__main__':
app.run(main)
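# Invocation sketch (hypothetical flag values; the flags themselves are
# defined above or by the shared iterative_process_builder):
#
#   python run_federated.py --clients_per_round=10 --client_batch_size=100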
02df5335e8a3f3d4d358a0212ca07a5e325681d9 | 3,839 | py | Python | rlo/src/rlo/plot_empirical_predicted_values.py | tomjaguarpaw/knossos-ksc | 8fa75e67c0db8f632b135379740051cd10ff31f2 | ["MIT"] | 31 | 2021-09-09T16:09:55.000Z | 2022-02-20T02:15:19.000Z | rlo/src/rlo/plot_empirical_predicted_values.py | tomjaguarpaw/knossos-ksc | 8fa75e67c0db8f632b135379740051cd10ff31f2 | ["MIT"] | 40 | 2021-08-06T14:30:08.000Z | 2022-01-19T08:49:52.000Z | rlo/src/rlo/plot_empirical_predicted_values.py | tomjaguarpaw/knossos-ksc | 8fa75e67c0db8f632b135379740051cd10ff31f2 | ["MIT"] | 5 | 2021-08-06T11:20:31.000Z | 2022-01-07T19:39:40.000Z |
import argparse
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import numpy as np
from rlo import experiment_result
from rlo import plotting
from rlo import utils
def plot_empirical_predicted_values(
outfile, title_suffix, events, probabilities=[10, 50, 90]
):
# import json # determine the size of logs - uncomment if needed
# print("Size", len(json.dumps([e for e in events if e['event']=='plot_value_comparison'])))
train_logs = [r for r in events if r["event"] == "plot_value_comparison"]
by_expr = utils.group_by(train_logs, lambda r: r["expr"])
fig, axs = plt.subplots(
len(by_expr), 1, figsize=[15, 4 * len(by_expr)], squeeze=False,
)
x_axis_func = lambda r: r["generation"]
N_GENERATIONS = (
max([x_axis_func(rec) for rec in train_logs]) + 1
) # The first generation is numbered 0
x_vals = range(N_GENERATIONS)
N_REPETITIONS = max([int(r["repetition"]) for r in train_logs]) + 1
for ax, (expr, logs) in zip(axs.ravel(), by_expr.items()):
expr_cost = logs[0][
"expr_cost"
] # we just need an initial cost for the starting expression expr
# compute percentiles separately for each repetition for each generation
by_generation = utils.group_by(logs, x_axis_func)
all_percentiles = np.full(
(N_GENERATIONS, N_REPETITIONS, len(probabilities)), float("nan")
)
for generation, generation_logs in by_generation.items():
for repetition, rep_logs in utils.group_by(
generation_logs, lambda r: r["repetition"]
).items():
# find percentiles of (predicted - empirical) for repetition
all_percentiles[int(generation), int(repetition), :] = np.percentile(
[
p - e
for r in rep_logs
for p, e in zip(r["predicted_value"], r["empirical_value"])
],
probabilities,
axis=0,
)
# then average across repetitions (ignoring absent values=NaN)
av_percentiles = np.nanmean(all_percentiles, axis=1)
# and plot a line against generation for each percentile
for i in range(len(probabilities)):
ax.plot(
x_vals,
av_percentiles[:, i],
label=str(probabilities[i]) + "th percentile",
)
ax.set_title(
"Value evaluation for {} with cost {}, {}".format(
expr, expr_cost, title_suffix
),
fontsize=9,
)
ax.axhline(0, color="black", linewidth=1)
ax.set_ylabel("(predicted - empirical)", fontsize=9)
ax.set_xlabel("Generations", fontsize=9)
plt.figlegend(*ax.get_legend_handles_labels(), loc="upper left")
fig.tight_layout()
plt.savefig(outfile)
def plot_empirical_predicted_values_from_config(config, events):
plot_empirical_predicted_values(
plotting.format_figure_filename(config, "empirical_predicted_values.png"),
plotting.config_suffix(config),
events,
)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"run_id",
type=str,
help="a run ID (e.g., 2019_01_06_13_15_48_13172) or path to a config.json file",
)
args = parser.parse_args()
config = experiment_result.load_config(args.run_id)
if "result_save_path" in config:
logs = experiment_result.load_events_from_config(config, verbosity=1)
plot_empirical_predicted_values_from_config(config, logs)
else:
plot_empirical_predicted_values(
"empirical_predicted_values.png", "", events=config
)
if __name__ == "__main__":
main()
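# Invocation sketch (run id taken from the argparse help text above):
#
#   python plot_empirical_predicted_values.py 2019_01_06_13_15_48_13172
#
# When the loaded config contains a result_save_path, the output location is
# chosen by plotting.format_figure_filename; otherwise
# empirical_predicted_values.png is written to the working directory.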
66a4b82e23fde1980e30c512048651186c71ab88 | 2,298 | py | Python | recipes/protobuf/all/test_package/conanfile.py | hoxnox/conan-center-index | 5ecea3b63ebfe08dc672c5cbbb5a277d5e47f0f9 | ["MIT"] | 1 | 2020-10-23T13:14:41.000Z | 2020-10-23T13:14:41.000Z | recipes/protobuf/all/test_package/conanfile.py | hoxnox/conan-center-index | 5ecea3b63ebfe08dc672c5cbbb5a277d5e47f0f9 | ["MIT"] | 4 | 2019-12-12T14:54:30.000Z | 2020-02-12T19:55:02.000Z | recipes/protobuf/all/test_package/conanfile.py | hoxnox/conan-center-index | 5ecea3b63ebfe08dc672c5cbbb5a277d5e47f0f9 | ["MIT"] | 3 | 2019-10-01T21:18:08.000Z | 2021-08-04T12:36:22.000Z |
import os
from conans import ConanFile, CMake, RunEnvironment, tools
import shutil
class TestPackageConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
generators = "cmake", "cmake_find_package"
@property
def _protoc_available(self):
return not self.options["protobuf"].lite and not tools.cross_building(self.settings)
def build(self):
# Build without protoc
os.mkdir("without_protoc")
shutil.copy(os.path.join(self.source_folder, "addressbook.{}.pb.h".format(self.deps_cpp_info["protobuf"].version)),
os.path.join("without_protoc", "addressbook.pb.h"))
shutil.copy(os.path.join(self.source_folder, "addressbook.{}.pb.cc".format(self.deps_cpp_info["protobuf"].version)),
os.path.join("without_protoc", "addressbook.pb.cc"))
cmake = CMake(self)
cmake.definitions["protobuf_VERBOSE"] = True
cmake.definitions["protobuf_MODULE_COMPATIBLE"] = True
cmake.definitions["PROTOC_AVAILABLE"] = False
cmake.configure(build_folder="without_protoc")
cmake.build()
with tools.environment_append(RunEnvironment(self).vars):
if self._protoc_available:
# Build with protoc
cmake = CMake(self)
cmake.definitions["protobuf_VERBOSE"] = True
cmake.definitions["protobuf_MODULE_COMPATIBLE"] = True
cmake.definitions["PROTOC_AVAILABLE"] = True
cmake.configure(build_folder="with_protoc")
cmake.build()
def test(self):
if not tools.cross_building(self.settings):
self.run("protoc --version", run_environment=True)
# Test the build built without protoc
bin_path = os.path.join("without_protoc", "bin", "test_package")
self.run(bin_path, run_environment=True)
if self._protoc_available:
# Test the build built with protoc
assert os.path.isfile(os.path.join("with_protoc", "addressbook.pb.cc"))
assert os.path.isfile(os.path.join("with_protoc", "addressbook.pb.h"))
bin_path = os.path.join("with_protoc", "bin", "test_package")
self.run(bin_path, run_environment=True)
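# Usage note (sketch; the package reference and version are hypothetical):
# Conan invokes this test package automatically after building the recipe,
# e.g.
#   conan create recipes/protobuf/all protobuf/3.17.1@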
439e5ce03fdeb7ff15b9bc418b9bb213e3b0abfd | 1,348 | py | Python | interviewee/05_leetcode/arr_intersection_of_arrays.py | Anshul-GH/interview_prep | 0a30e980e910afbae4ad086dc7ff3b339eba4ec0 | ["MIT"] | 1 | 2020-10-10T10:14:27.000Z | 2020-10-10T10:14:27.000Z | interviewee/05_leetcode/arr_intersection_of_arrays.py | Anshul-GH/interview_prep | 0a30e980e910afbae4ad086dc7ff3b339eba4ec0 | ["MIT"] | null | null | null | interviewee/05_leetcode/arr_intersection_of_arrays.py | Anshul-GH/interview_prep | 0a30e980e910afbae4ad086dc7ff3b339eba4ec0 | ["MIT"] | null | null | null |
'''
Given two integer arrays nums1 and nums2, return an array of their intersection. Each element in the result must appear as many times as it shows in both arrays and you may return the result in any order.
Example 1:
Input: nums1 = [1,2,2,1], nums2 = [2,2]
Output: [2,2]
Example 2:
Input: nums1 = [4,9,5], nums2 = [9,4,9,8,4]
Output: [4,9]
Explanation: [9,4] is also accepted.
Constraints:
1 <= nums1.length, nums2.length <= 1000
0 <= nums1[i], nums2[i] <= 1000
Follow up:
What if the given array is already sorted? How would you optimize your algorithm?
What if nums1's size is small compared to nums2's size? Which algorithm is better?
What if elements of nums2 are stored on disk, and the memory is limited such that you cannot load all elements into the memory at once?
'''
from typing import List
class Solution:
def intersect(self, nums1: List[int], nums2: List[int]) -> List[int]:
len1 = len(nums1)
len2 = len(nums2)
intersec = []
if len1 < len2:
for val in nums1:
if val in nums2:
intersec.append(val)
nums2.remove(val)
else:
for val in nums2:
if val in nums1:
intersec.append(val)
nums1.remove(val)
return intersec
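# Follow-up sketch (not part of the original solution): if both arrays are
# already sorted, a two-pointer pass computes the intersection in O(n + m)
# time without the repeated `in`/`remove` scans used above.
def intersect_sorted(nums1, nums2):
    i = j = 0
    result = []
    while i < len(nums1) and j < len(nums2):
        if nums1[i] < nums2[j]:
            i += 1
        elif nums1[i] > nums2[j]:
            j += 1
        else:
            # Equal values belong to the intersection; advance both pointers.
            result.append(nums1[i])
            i += 1
            j += 1
    return result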
f4cb10f0f56ff24847349376a900ee090e9a7376 | 403 | py | Python | pysbd/lang/english.py | nipunsadvilkar/pysbd | 5905f13be4fc95f407b98392e0ec303617a33d86 | ["MIT"] | 429 | 2019-03-27T14:42:33.000Z | 2022-03-30T15:52:33.000Z | pysbd/lang/english.py | nipunsadvilkar/pysbd | 5905f13be4fc95f407b98392e0ec303617a33d86 | ["MIT"] | 86 | 2017-06-14T17:47:00.000Z | 2022-02-25T07:44:42.000Z | pysbd/lang/english.py | nipunsadvilkar/pysbd | 5905f13be4fc95f407b98392e0ec303617a33d86 | ["MIT"] | 55 | 2019-04-16T17:17:39.000Z | 2022-03-09T20:12:48.000Z |
# -*- coding: utf-8 -*-
from pysbd.abbreviation_replacer import AbbreviationReplacer
from pysbd.lang.common import Common, Standard
class English(Common, Standard):
iso_code = 'en'
class AbbreviationReplacer(AbbreviationReplacer):
SENTENCE_STARTERS = "A Being Did For He How However I In It Millions "\
"More She That The There They We What When Where Who Why".split(" ")
f207a86266fcce606728900c23230def441f9355 | 3,578 | py | Python | scarlett_os/utility/file.py | bossjones/scarlett-os | dc3b96604220a5848c51a14a343e97d464ad811b | ["Apache-2.0"] | 5 | 2016-11-08T21:01:00.000Z | 2018-05-07T11:02:43.000Z | scarlett_os/utility/file.py | bossjones/scarlett-os | dc3b96604220a5848c51a14a343e97d464ad811b | ["Apache-2.0"] | 854 | 2016-09-21T13:06:32.000Z | 2022-02-10T13:21:47.000Z | scarlett_os/utility/file.py | bossjones/scarlett-os | dc3b96604220a5848c51a14a343e97d464ad811b | ["Apache-2.0"] | 2 | 2016-12-02T15:12:41.000Z | 2017-02-25T08:21:56.000Z |
# -*- coding: utf-8 -*-
from __future__ import with_statement, division
from scarlett_os.compat import os
from scarlett_os.compat import errno
from scarlett_os.compat import environ
from scarlett_os.compat import text_type
from scarlett_os.compat import _FSCODING
def format_size(size):
"""Turn an integer size value into something human-readable."""
# TODO: Better i18n of this (eg use O/KO/MO/GO in French)
if size >= 1024 ** 3:
return "%.1f GB" % (float(size) / (1024 ** 3))
elif size >= 1024 ** 2 * 100:
return "%.0f MB" % (float(size) / (1024 ** 2))
elif size >= 1024 ** 2 * 10:
return "%.1f MB" % (float(size) / (1024 ** 2))
elif size >= 1024 ** 2:
return "%.2f MB" % (float(size) / (1024 ** 2))
elif size >= 1024 * 10:
return "%d KB" % int(size / 1024)
elif size >= 1024:
return "%.2f KB" % (float(size) / 1024)
else:
return "%d B" % size
def mkdir(dir_, *args): # noqa
"""Make a directory, including all its parent directories. This does not
raise an exception if the directory already exists (and is a
directory)."""
try:
os.makedirs(dir_, *args)
except OSError as e:
if e.errno != errno.EEXIST or not os.path.isdir(dir_):
raise
def iscommand(s): # noqa
"""True if an executable file `s` exists in the user's path, or is a
fully qualified and existing executable file."""
if s == "" or os.path.sep in s:
return os.path.isfile(s) and os.access(s, os.X_OK)
else:
s = s.split()[0]
path = environ.get("PATH", "") or os.defpath
for p in path.split(os.path.pathsep):
p2 = os.path.join(p, s)
if os.path.isfile(p2) and os.access(p2, os.X_OK):
return True
else:
return False
def is_fsnative(path):
"""Check if file system native"""
return isinstance(path, bytes)
def fsnative(path=u""):
"""File system native"""
assert isinstance(path, text_type)
return path.encode(_FSCODING, "replace")
def listdir(path, hidden=False):
"""List files in a directory, sorted, fully-qualified.
If hidden is false, Unix-style hidden files are not returned.
"""
assert is_fsnative(path)
if hidden:
filt = None
else:
filt = lambda base: not base.startswith(".") # noqa
if path.endswith(os.sep):
join = "".join
else:
join = os.sep.join
return [
join([path, basename])
for basename in sorted(os.listdir(path))
if filt(basename)
]
def mtime(filename):
"""Return the mtime of a file, or 0 if an error occurs."""
try:
return os.path.getmtime(filename)
except OSError:
return 0
def filesize(filename):
"""Return the size of a file, or 0 if an error occurs."""
try:
return os.path.getsize(filename)
except OSError:
return 0
def expanduser(filename): # noqa
"""convience function to have expanduser return wide character paths
"""
return os.path.expanduser(filename)
def unexpand(filename, HOME=expanduser("~")):
"""Replace the user's home directory with ~/, if it appears at the
start of the path name."""
sub = (os.name == "nt" and "%USERPROFILE%") or "~"
if filename == HOME:
return sub
elif filename.startswith(HOME + os.path.sep):
filename = filename.replace(HOME, sub, 1)
return filename
def get_home_dir():
"""Returns the root directory of the user, /home/user"""
return expanduser("~")
| 27.523077 | 76 | 0.605087 | # -*- coding: utf-8 -*-
from __future__ import with_statement, division
from scarlett_os.compat import os
from scarlett_os.compat import errno
from scarlett_os.compat import environ
from scarlett_os.compat import text_type
from scarlett_os.compat import _FSCODING
def format_size(size):
"""Turn an integer size value into something human-readable."""
# TODO: Better i18n of this (eg use O/KO/MO/GO in French)
if size >= 1024 ** 3:
return "%.1f GB" % (float(size) / (1024 ** 3))
elif size >= 1024 ** 2 * 100:
return "%.0f MB" % (float(size) / (1024 ** 2))
elif size >= 1024 ** 2 * 10:
return "%.1f MB" % (float(size) / (1024 ** 2))
elif size >= 1024 ** 2:
return "%.2f MB" % (float(size) / (1024 ** 2))
elif size >= 1024 * 10:
return "%d KB" % int(size / 1024)
elif size >= 1024:
return "%.2f KB" % (float(size) / 1024)
else:
return "%d B" % size
def mkdir(dir_, *args): # noqa
"""Make a directory, including all its parent directories. This does not
raise an exception if the directory already exists (and is a
directory)."""
try:
os.makedirs(dir_, *args)
except OSError as e:
if e.errno != errno.EEXIST or not os.path.isdir(dir_):
raise
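# Editor's note: unlike a bare os.makedirs(), mkdir() above is idempotent.
# A hypothetical demo (the path is an assumption):
#   mkdir("/tmp/a/b/c")
#   mkdir("/tmp/a/b/c")   # second call is a no-op instead of raising EEXIST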
def iscommand(s): # noqa
"""True if an executable file `s` exists in the user's path, or is a
fully qualified and existing executable file."""
if s == "" or os.path.sep in s:
return os.path.isfile(s) and os.access(s, os.X_OK)
else:
s = s.split()[0]
path = environ.get("PATH", "") or os.defpath
for p in path.split(os.path.pathsep):
p2 = os.path.join(p, s)
if os.path.isfile(p2) and os.access(p2, os.X_OK):
return True
        else:  # for/else: the loop finished without finding an executable match
            return False
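# Hypothetical usage; the results depend on the host's PATH, so treat them as
# assumptions rather than guarantees:
#   iscommand("sh")            -> True on typical Unix systems
#   iscommand("/bin/sh")       -> True when that file exists and is executable
#   iscommand("no-such-tool")  -> False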
def is_fsnative(path):
"""Check if file system native"""
return isinstance(path, bytes)
def fsnative(path=u""):
"""File system native"""
assert isinstance(path, text_type)
return path.encode(_FSCODING, "replace")
def listdir(path, hidden=False):
"""List files in a directory, sorted, fully-qualified.
If hidden is false, Unix-style hidden files are not returned.
"""
assert is_fsnative(path)
    if hidden:
        filt = lambda base: True  # accept every entry, including Unix-style hidden files  # noqa
    else:
        filt = lambda base: not base.startswith(".")  # noqa
if path.endswith(os.sep):
join = "".join
else:
join = os.sep.join
return [
join([path, basename])
for basename in sorted(os.listdir(path))
if filt(basename)
]
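# A minimal sketch, assuming a bytes (file-system-native) path as the assert
# above requires; the directory name is hypothetical:
#   listdir(fsnative(u"/tmp"))               # dot-files filtered out
#   listdir(fsnative(u"/tmp"), hidden=True)  # dot-files included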
def mtime(filename):
"""Return the mtime of a file, or 0 if an error occurs."""
try:
return os.path.getmtime(filename)
except OSError:
return 0
def filesize(filename):
"""Return the size of a file, or 0 if an error occurs."""
try:
return os.path.getsize(filename)
except OSError:
return 0
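# Editor's note on the two helpers above: both deliberately swallow OSError
# (missing file, permission problem) and report 0, so callers can sort or
# compare without try/except. Hypothetical example:
#   mtime("/no/such/file")     -> 0
#   filesize("/no/such/file")  -> 0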
def expanduser(filename): # noqa
"""convience function to have expanduser return wide character paths
"""
return os.path.expanduser(filename)
def unexpand(filename, HOME=expanduser("~")):
"""Replace the user's home directory with ~/, if it appears at the
start of the path name."""
sub = (os.name == "nt" and "%USERPROFILE%") or "~"
if filename == HOME:
return sub
elif filename.startswith(HOME + os.path.sep):
filename = filename.replace(HOME, sub, 1)
return filename
def get_home_dir():
"""Returns the root directory of the user, /home/user"""
return expanduser("~")
| 0 | 0 | 0 |
61981eb93391ba5fface0de71de724470844469f | 4,159 | py | Python | Raycast.py | Dylooz/raycasting | f0983ecc569f67cbd4dfeed8b28c0c1568db059f | [
"MIT"
] | null | null | null | Raycast.py | Dylooz/raycasting | f0983ecc569f67cbd4dfeed8b28c0c1568db059f | [
"MIT"
] | null | null | null | Raycast.py | Dylooz/raycasting | f0983ecc569f67cbd4dfeed8b28c0c1568db059f | [
"MIT"
] | null | null | null | import math
import numpy
import pygame
CONFIG = {
"START_POS": (400, 400),
"PLAYER_COLOUR": (0, 0, 255),
"PLAYER_RADIUS": 10,
"FOV": (math.pi / 2),
"RESOLUTION": 0.25,
"ROTATE_SPEED": (math.pi / 360),
"MOVE_SPEED": 0.5,
"VIEW_DIST": 300
}
WIDTH = 800
KEYS = {
1073741904: False, # left
1073741903: False, # right
119: False, # w
97: False, # a
115: False, # s
100: False # d
}
KEY_OPP = {
1073741904: [1073741903],
1073741903: [1073741904],
119: [115],
97: [100],
115: [119],
100: [97]
}
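# Editor's note: 1073741904/1073741903 are pygame's K_LEFT/K_RIGHT key codes
# under SDL2, and 119/97/115/100 are the ASCII codes for w/a/s/d; KEY_OPP
# clears the opposite movement key so that, e.g., "w" and "s" are never
# latched at the same time. The same tables could be written with the named
# constants:
#   KEYS = {pygame.K_LEFT: False, pygame.K_RIGHT: False, pygame.K_w: False,
#           pygame.K_a: False, pygame.K_s: False, pygame.K_d: False}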
if __name__ == "__main__":
main()
| 28.101351 | 132 | 0.534744 | import math
import numpy
import pygame
CONFIG = {
"START_POS": (400, 400),
"PLAYER_COLOUR": (0, 0, 255),
"PLAYER_RADIUS": 10,
"FOV": (math.pi / 2),
"RESOLUTION": 0.25,
"ROTATE_SPEED": (math.pi / 360),
"MOVE_SPEED": 0.5,
"VIEW_DIST": 300
}
WIDTH = 800
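# Editor's note: with these defaults the caster builds
# floor(RESOLUTION * WIDTH) = floor(0.25 * 800) = 200 rays fanned across the
# 90-degree FOV (pi/2), i.e. one ray every 0.45 degrees, each clipped at
# VIEW_DIST = 300 px.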
KEYS = {
1073741904: False, # left
1073741903: False, # right
119: False, # w
97: False, # a
115: False, # s
100: False # d
}
KEY_OPP = {
1073741904: [1073741903],
1073741903: [1073741904],
119: [115],
97: [100],
115: [119],
100: [97]
}
def unitVector(a):
return numpy.array([math.cos(a), math.sin(a)])
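# Editor's sketch: unitVector maps an angle (radians) to a 2-D point on the
# unit circle; in pygame's screen coordinates y grows downward, so increasing
# angles turn clockwise on screen:
#   unitVector(0)            -> array([1., 0.])
#   unitVector(math.pi / 2)  -> array([~0., 1.])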
class Player():
def __init__(self, x=CONFIG["START_POS"][0], y=CONFIG["START_POS"][1]):
self.pos = numpy.array([float(x), float(y)])
self.dir = 0
self.numRays = math.floor(CONFIG["RESOLUTION"] * WIDTH)
self.rays = []
for i in range(0, self.numRays):
self.rays.append(unitVector((i * CONFIG["FOV"] / self.numRays) - (CONFIG["FOV"] / 2)) * CONFIG["VIEW_DIST"])
		print(self.rays)  # debug: dump the initial ray fan once at start-up
def render(self, colour=CONFIG["PLAYER_COLOUR"], radius=CONFIG["PLAYER_RADIUS"], showFacing=True):
for ray in self.rays:
pygame.draw.line(screen, (0, 255, 0), tuple(self.pos), tuple(self.pos + ray))
pygame.draw.circle(screen, colour, tuple(self.pos), radius)
if showFacing:
pygame.draw.line(screen, (0, 0, 0), tuple(self.pos), tuple(self.pos + unitVector(self.dir) * radius))
def rotateLeft(self, angle=CONFIG["ROTATE_SPEED"]):
self.dir = (self.dir - angle) % (2 * math.pi)
self.rays = []
for i in range(0, self.numRays):
self.rays.append(unitVector(self.dir + (i * CONFIG["FOV"] / self.numRays) - (CONFIG["FOV"] / 2)) * CONFIG["VIEW_DIST"])
def rotateRight(self, angle=CONFIG["ROTATE_SPEED"]):
self.dir = (self.dir + angle) % (2 * math.pi)
self.rays = []
for i in range(0, self.numRays):
self.rays.append(unitVector(self.dir + (i * CONFIG["FOV"] / self.numRays) - (CONFIG["FOV"] / 2)) * CONFIG["VIEW_DIST"])
def move(self, angle, dist=CONFIG["MOVE_SPEED"]):
self.pos += (unitVector(angle % (2 * math.pi)) * dist)
def moveForward(self, dist=CONFIG["MOVE_SPEED"]):
self.move(self.dir, dist)
def moveLeft(self, dist=CONFIG["MOVE_SPEED"]):
self.move(self.dir - (math.pi / 2), dist)
def moveBackward(self, dist=CONFIG["MOVE_SPEED"]):
self.move(self.dir + (math.pi), dist)
def moveRight(self, dist=CONFIG["MOVE_SPEED"]):
self.move(self.dir + (math.pi / 2), dist)
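# A minimal usage sketch for the class above (editor's assumption: pygame is
# initialised and a global `screen` surface exists before render() is called):
#   p = Player(100, 100)
#   p.rotateRight(math.pi / 4)  # face 45 degrees clockwise
#   p.moveForward(5)            # advance 5 px along the facing direction
#   p.render()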
def preload():
pass
def processEvents():
	global running  # assigned here, read by gameLoop's while condition
	for event in pygame.event.get():
		if event.type == pygame.QUIT:
			running = False  # a QUIT event should stop the loop, not re-arm it
elif event.type == pygame.KEYDOWN:
k = event.key
if k in KEYS.keys():
KEYS[k] = True
for ko in KEY_OPP[k]:
KEYS[ko] = False
elif event.type == pygame.KEYUP:
k = event.key
if k in KEYS.keys():
KEYS[k] = False
def update():
	# Apply the latched key states once per frame: arrows rotate, WASD strafes.
if KEYS[1073741904]:
player.rotateLeft()
if KEYS[1073741903]:
player.rotateRight()
if KEYS[119]:
player.moveForward()
if KEYS[97]:
player.moveLeft()
if KEYS[115]:
player.moveBackward()
if KEYS[100]:
player.moveRight()
def draw():
screen.fill((255, 255, 255), (0, 0, WIDTH, 800))
player.render()
screen.fill((255, 255, 255), (WIDTH, 0, 2 * WIDTH, 800))
pygame.display.flip()
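# Editor's note: the 1600x800 window is treated as two 800-px halves; draw()
# paints the top-down debug view (rays, player disc, facing line) on the left
# and blanks the right half, presumably reserved for the raycasted
# first-person projection.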
def gameLoop():
global screen
screen = pygame.display.set_mode([1600, 800])
global player
player = Player()
	global running  # shared with processEvents so a QUIT event can end the loop
	running = True
while running:
processEvents()
update()
draw()
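# Editor's suggestion, not part of the original: the loop above runs uncapped,
# so rotation/movement speed scales with CPU speed. A common fix is a frame
# clock (a sketch, assuming the standard pygame API):
#   clock = pygame.time.Clock()
#   while running:
#       processEvents()
#       update()
#       draw()
#       clock.tick(60)  # cap the loop at 60 frames per second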
def main():
pygame.init()
gameLoop()
pygame.quit()
if __name__ == "__main__":
main()
| 2,989 | -6 | 463 |