hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c2913d42fe3bb6efd581f01b4fcc69c308fa15e | 10,342 | py | Python | userbot/plugins/upload.py | Aliensuniquebot/CatUserbot | 93561a620fc1198c6fe6c259412088f4bc81d97b | [
"MIT"
] | 1 | 2020-07-18T07:42:58.000Z | 2020-07-18T07:42:58.000Z | userbot/plugins/upload.py | praveen368/CatUserbot | 4b0cd970551ffaf86b9fdd5da584c1b3882821ff | [
"MIT"
] | null | null | null | userbot/plugins/upload.py | praveen368/CatUserbot | 4b0cd970551ffaf86b9fdd5da584c1b3882821ff | [
"MIT"
] | null | null | null | import aiohttp
import asyncio
import os
import time
from datetime import datetime
from telethon import events
from telethon.tl.types import DocumentAttributeVideo
import json
import subprocess
import math
from pySmartDL import SmartDL
from hachoir.metadata import extractMetadata
from hachoir.parser import createParser
from userbot import LOGS, CMD_HELP, TEMP_DOWNLOAD_DIRECTORY
from userbot.utils import admin_cmd, humanbytes, progress, time_formatter
from userbot.uniborgConfig import Config
thumb_image_path = Config.TMP_DOWNLOAD_DIRECTORY + "/thumb_image.jpg"
import io
@borg.on(admin_cmd(pattern="uploadir (.*)", outgoing=True))
async def uploadir(udir_event):
"""
#For .uploadir command, allows you to upload everything from a folder in the server
"""
input_str = udir_event.pattern_match.group(1)
if os.path.exists(input_str):
await udir_event.edit("Processing ...")
lst_of_files = []
for r, d, f in os.walk(input_str):
for file in f:
lst_of_files.append(os.path.join(r, file))
for file in d:
lst_of_files.append(os.path.join(r, file))
LOGS.info(lst_of_files)
uploaded = 0
await udir_event.edit(
"Found {} files. Uploading will start soon. Please wait!".format(
len(lst_of_files)))
for single_file in lst_of_files:
if os.path.exists(single_file):
# https://stackoverflow.com/a/678242/4723940
caption_rts = os.path.basename(single_file)
c_time = time.time()
if not caption_rts.lower().endswith(".mp4"):
await udir_event.client.send_file(
udir_event.chat_id,
single_file,
caption=caption_rts,
force_document=False,
allow_cache=False,
reply_to=udir_event.message.id,
progress_callback=lambda d, t: asyncio.get_event_loop(
).create_task(
progress(d, t, udir_event, c_time, "Uploading...",
single_file)))
else:
thumb_image = os.path.join(input_str, "thumb.jpg")
c_time = time.time()
metadata = extractMetadata(createParser(single_file))
duration = 0
width = 0
height = 0
if metadata.has("duration"):
duration = metadata.get("duration").seconds
if metadata.has("width"):
width = metadata.get("width")
if metadata.has("height"):
height = metadata.get("height")
await udir_event.client.send_file(
udir_event.chat_id,
single_file,
caption=caption_rts,
thumb=thumb_image,
force_document=False,
allow_cache=False,
reply_to=udir_event.message.id,
attributes=[
DocumentAttributeVideo(
duration=duration,
w=width,
h=height,
round_message=False,
supports_streaming=True,
)
],
progress_callback=lambda d, t: asyncio.get_event_loop(
).create_task(
progress(d, t, udir_event, c_time, "Uploading...",
single_file)))
os.remove(single_file)
uploaded = uploaded + 1
await udir_event.edit(
"Uploaded {} files successfully !!".format(uploaded))
else:
await udir_event.edit("404: Directory Not Found")
@borg.on(admin_cmd(pattern="upload (.*)", outgoing=True))
def get_video_thumb(file, output=None, width=90):
""" Get video thumbnail """
metadata = extractMetadata(createParser(file))
popen = subprocess.Popen(
[
"ffmpeg",
"-i",
file,
"-ss",
str(
int((0, metadata.get("duration").seconds
)[metadata.has("duration")] / 2)),
"-filter:v",
"scale={}:-1".format(width),
"-vframes",
"1",
output,
],
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
)
if not popen.returncode and os.path.lexists(file):
return output
return None
def extract_w_h(file):
""" Get width and height of media """
command_to_run = [
"ffprobe",
"-v",
"quiet",
"-print_format",
"json",
"-show_format",
"-show_streams",
file,
]
# https://stackoverflow.com/a/11236144/4723940
try:
t_response = subprocess.check_output(command_to_run,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as exc:
LOGS.warning(exc)
else:
x_reponse = t_response.decode("UTF-8")
response_json = json.loads(x_reponse)
width = int(response_json["streams"][0]["width"])
height = int(response_json["streams"][0]["height"])
return width, height
@borg.on(admin_cmd(pattern="uploadas(stream|vn|all) (.*)", outgoing=True))
async def uploadas(uas_event):
"""
#For .uploadas command, allows you to specify some arguments for upload.
"""
await uas_event.edit("Processing ...")
type_of_upload = uas_event.pattern_match.group(1)
supports_streaming = False
round_message = False
spam_big_messages = False
if type_of_upload == "stream":
supports_streaming = True
if type_of_upload == "vn":
round_message = True
if type_of_upload == "all":
spam_big_messages = True
input_str = uas_event.pattern_match.group(2)
thumb = None
file_name = None
if "|" in input_str:
file_name, thumb = input_str.split("|")
file_name = file_name.strip()
thumb = thumb.strip()
else:
file_name = input_str
thumb_path = "a_random_f_file_name" + ".jpg"
thumb = get_video_thumb(file_name, output=thumb_path)
if os.path.exists(file_name):
metadata = extractMetadata(createParser(file_name))
duration = 0
width = 0
height = 0
if metadata.has("duration"):
duration = metadata.get("duration").seconds
if metadata.has("width"):
width = metadata.get("width")
if metadata.has("height"):
height = metadata.get("height")
try:
if supports_streaming:
c_time = time.time()
await uas_event.client.send_file(
uas_event.chat_id,
file_name,
thumb=thumb,
caption=input_str,
force_document=False,
allow_cache=False,
reply_to=uas_event.message.id,
attributes=[
DocumentAttributeVideo(
duration=duration,
w=width,
h=height,
round_message=False,
supports_streaming=True,
)
],
progress_callback=lambda d, t: asyncio.get_event_loop(
).create_task(
progress(d, t, uas_event, c_time, "Uploading...",
file_name)))
elif round_message:
c_time = time.time()
await uas_event.client.send_file(
uas_event.chat_id,
file_name,
thumb=thumb,
allow_cache=False,
reply_to=uas_event.message.id,
video_note=True,
attributes=[
DocumentAttributeVideo(
duration=0,
w=1,
h=1,
round_message=True,
supports_streaming=True,
)
],
progress_callback=lambda d, t: asyncio.get_event_loop(
).create_task(
progress(d, t, uas_event, c_time, "Uploading...",
file_name)))
elif spam_big_messages:
await uas_event.edit("TBD: Not (yet) Implemented")
return
os.remove(thumb)
await uas_event.edit("Uploaded successfully !!")
except FileNotFoundError as err:
await uas_event.edit(str(err))
else:
await uas_event.edit("404: File Not Found")
CMD_HELP.update({
"upload":
".upload <path in server>\
\nUsage: Uploads a locally stored file to the chat."
})
| 36.935714 | 83 | 0.512473 | import aiohttp
import asyncio
import os
import time
from datetime import datetime
from telethon import events
from telethon.tl.types import DocumentAttributeVideo
import json
import subprocess
import math
from pySmartDL import SmartDL
from hachoir.metadata import extractMetadata
from hachoir.parser import createParser
from userbot import LOGS, CMD_HELP, TEMP_DOWNLOAD_DIRECTORY
from userbot.utils import admin_cmd, humanbytes, progress, time_formatter
from userbot.uniborgConfig import Config
thumb_image_path = Config.TMP_DOWNLOAD_DIRECTORY + "/thumb_image.jpg"
import io
@borg.on(admin_cmd(pattern="uploadir (.*)", outgoing=True))
async def uploadir(udir_event):
"""
#For .uploadir command, allows you to upload everything from a folder in the server
"""
input_str = udir_event.pattern_match.group(1)
if os.path.exists(input_str):
await udir_event.edit("Processing ...")
lst_of_files = []
for r, d, f in os.walk(input_str):
for file in f:
lst_of_files.append(os.path.join(r, file))
for file in d:
lst_of_files.append(os.path.join(r, file))
LOGS.info(lst_of_files)
uploaded = 0
await udir_event.edit(
"Found {} files. Uploading will start soon. Please wait!".format(
len(lst_of_files)))
for single_file in lst_of_files:
if os.path.exists(single_file):
# https://stackoverflow.com/a/678242/4723940
caption_rts = os.path.basename(single_file)
c_time = time.time()
if not caption_rts.lower().endswith(".mp4"):
await udir_event.client.send_file(
udir_event.chat_id,
single_file,
caption=caption_rts,
force_document=False,
allow_cache=False,
reply_to=udir_event.message.id,
progress_callback=lambda d, t: asyncio.get_event_loop(
).create_task(
progress(d, t, udir_event, c_time, "Uploading...",
single_file)))
else:
thumb_image = os.path.join(input_str, "thumb.jpg")
c_time = time.time()
metadata = extractMetadata(createParser(single_file))
duration = 0
width = 0
height = 0
if metadata.has("duration"):
duration = metadata.get("duration").seconds
if metadata.has("width"):
width = metadata.get("width")
if metadata.has("height"):
height = metadata.get("height")
await udir_event.client.send_file(
udir_event.chat_id,
single_file,
caption=caption_rts,
thumb=thumb_image,
force_document=False,
allow_cache=False,
reply_to=udir_event.message.id,
attributes=[
DocumentAttributeVideo(
duration=duration,
w=width,
h=height,
round_message=False,
supports_streaming=True,
)
],
progress_callback=lambda d, t: asyncio.get_event_loop(
).create_task(
progress(d, t, udir_event, c_time, "Uploading...",
single_file)))
os.remove(single_file)
uploaded = uploaded + 1
await udir_event.edit(
"Uploaded {} files successfully !!".format(uploaded))
else:
await udir_event.edit("404: Directory Not Found")
@borg.on(admin_cmd(pattern="upload (.*)", outgoing=True))
async def _(event):
if event.fwd_from:
return
mone = await event.reply("Processing ...")
input_str = event.pattern_match.group(1)
thumb = None
if os.path.exists(thumb_image_path):
thumb = thumb_image_path
if os.path.exists(input_str):
start = datetime.now()
c_time = time.time()
await bot.send_file(
event.chat_id,
input_str,
force_document=True,
supports_streaming=False,
allow_cache=False,
reply_to=event.message.id,
thumb=thumb,
progress_callback=lambda d, t: asyncio.get_event_loop().create_task(
progress(d, t, mone, c_time, "trying to upload")
)
)
end = datetime.now()
# os.remove(input_str)
ms = (end - start).seconds
await mone.edit("Uploaded in {} seconds.".format(ms))
else:
await mone.edit("404: File Not Found")
def get_video_thumb(file, output=None, width=90):
""" Get video thumbnail """
metadata = extractMetadata(createParser(file))
popen = subprocess.Popen(
[
"ffmpeg",
"-i",
file,
"-ss",
str(
int((0, metadata.get("duration").seconds
)[metadata.has("duration")] / 2)),
"-filter:v",
"scale={}:-1".format(width),
"-vframes",
"1",
output,
],
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
)
if not popen.returncode and os.path.lexists(file):
return output
return None
def extract_w_h(file):
""" Get width and height of media """
command_to_run = [
"ffprobe",
"-v",
"quiet",
"-print_format",
"json",
"-show_format",
"-show_streams",
file,
]
# https://stackoverflow.com/a/11236144/4723940
try:
t_response = subprocess.check_output(command_to_run,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as exc:
LOGS.warning(exc)
else:
x_reponse = t_response.decode("UTF-8")
response_json = json.loads(x_reponse)
width = int(response_json["streams"][0]["width"])
height = int(response_json["streams"][0]["height"])
return width, height
@borg.on(admin_cmd(pattern="uploadas(stream|vn|all) (.*)", outgoing=True))
async def uploadas(uas_event):
"""
#For .uploadas command, allows you to specify some arguments for upload.
"""
await uas_event.edit("Processing ...")
type_of_upload = uas_event.pattern_match.group(1)
supports_streaming = False
round_message = False
spam_big_messages = False
if type_of_upload == "stream":
supports_streaming = True
if type_of_upload == "vn":
round_message = True
if type_of_upload == "all":
spam_big_messages = True
input_str = uas_event.pattern_match.group(2)
thumb = None
file_name = None
if "|" in input_str:
file_name, thumb = input_str.split("|")
file_name = file_name.strip()
thumb = thumb.strip()
else:
file_name = input_str
thumb_path = "a_random_f_file_name" + ".jpg"
thumb = get_video_thumb(file_name, output=thumb_path)
if os.path.exists(file_name):
metadata = extractMetadata(createParser(file_name))
duration = 0
width = 0
height = 0
if metadata.has("duration"):
duration = metadata.get("duration").seconds
if metadata.has("width"):
width = metadata.get("width")
if metadata.has("height"):
height = metadata.get("height")
try:
if supports_streaming:
c_time = time.time()
await uas_event.client.send_file(
uas_event.chat_id,
file_name,
thumb=thumb,
caption=input_str,
force_document=False,
allow_cache=False,
reply_to=uas_event.message.id,
attributes=[
DocumentAttributeVideo(
duration=duration,
w=width,
h=height,
round_message=False,
supports_streaming=True,
)
],
progress_callback=lambda d, t: asyncio.get_event_loop(
).create_task(
progress(d, t, uas_event, c_time, "Uploading...",
file_name)))
elif round_message:
c_time = time.time()
await uas_event.client.send_file(
uas_event.chat_id,
file_name,
thumb=thumb,
allow_cache=False,
reply_to=uas_event.message.id,
video_note=True,
attributes=[
DocumentAttributeVideo(
duration=0,
w=1,
h=1,
round_message=True,
supports_streaming=True,
)
],
progress_callback=lambda d, t: asyncio.get_event_loop(
).create_task(
progress(d, t, uas_event, c_time, "Uploading...",
file_name)))
elif spam_big_messages:
await uas_event.edit("TBD: Not (yet) Implemented")
return
os.remove(thumb)
await uas_event.edit("Uploaded successfully !!")
except FileNotFoundError as err:
await uas_event.edit(str(err))
else:
await uas_event.edit("404: File Not Found")
CMD_HELP.update({
"upload":
".upload <path in server>\
\nUsage: Uploads a locally stored file to the chat."
})
| 942 | 0 | 22 |
760213ce9bf57514cc7f63e97e35d4ca368bfe7c | 2,198 | py | Python | hw2/main.py | zyz29/yzhou9-webapps | e56ae829338c396850c5a91668b250ef6fb76292 | [
"MIT"
] | null | null | null | hw2/main.py | zyz29/yzhou9-webapps | e56ae829338c396850c5a91668b250ef6fb76292 | [
"MIT"
] | null | null | null | hw2/main.py | zyz29/yzhou9-webapps | e56ae829338c396850c5a91668b250ef6fb76292 | [
"MIT"
] | null | null | null | import cherrypy
import sys
import mysql.connector
from collections import OrderedDict
#Define database variables
DATABASE_USER = 'root'
DATABASE_HOST = '127.0.0.1'
DATABASE_NAME = 'feedND'
#Create connection to MySQL
cnx = mysql.connector.connect(user=DATABASE_USER, host=DATABASE_HOST, database=DATABASE_NAME)
cursor = cnx.cursor()
application = cherrypy.Application(ExampleApp(), None)
| 24.422222 | 93 | 0.589172 | import cherrypy
import sys
import mysql.connector
from collections import OrderedDict
#Define database variables
DATABASE_USER = 'root'
DATABASE_HOST = '127.0.0.1'
DATABASE_NAME = 'feedND'
#Create connection to MySQL
cnx = mysql.connector.connect(user=DATABASE_USER, host=DATABASE_HOST, database=DATABASE_NAME)
cursor = cnx.cursor()
class ExampleApp(object):
@cherrypy.expose
def index(self):
#d = {'Subway':427.0, "O'Rourke's Public House":632.0, 'The Mark Dine & Tap':730.0}
#OrderedDict(sorted(d.items(), key=lambda t: t[1]))
result = """
<!DOCTYPE html>
<html>
<head>
<title>FeedND</title>
<style>
ul {
list-style-type: none;
margin: 0;
padding: 0;
overflow: hidden;
}
li {
float: left;
}
a {
display: block;
width: 120px;
background-color: #dddddd;
font-size: 120%;
}
th, td {
padding: 5px;
}
th {
text-align: left;
}
</style>
</head>
<body>
<h1>FeedND</h1>
<ul>
<li><a href="">Orders</a></li>
<li><a href="">Restaurants</a></li>
<li><a href="">Account</a></li>
</ul>
<p></p>
<table>
<tr>
<th>Location</th>
<th>Address</th>
</tr>
"""
#for item in reversed(d.items()):
# result += "<tr><td>"+item[0]+"</td><td>"+str(item[1])+"</tr>"
cursor.execute('select name, address, state from restaurants')
row = cursor.fetchone()
while (cursor is not None) and (row is not None):
result += "<tr><td>"+row[0]+"</td><td>"+row[1]+", "+row[2]+"</tr>"
row = cursor.fetchone()
result += "</table></body></html>"
#Define database variables
cnx.close()
return result
@cherrypy.expose
def showdb(self):
cnx = mysql.connector.connect(user='test', password='mypass',
host='127.0.0.1',
database='testdb')
cursor = cnx.cursor()
query = ("SELECT firstname,lastname,email FROM Invitations")
cursor.execute(query)
info = str()
print cursor
for (firstname, lastname, email) in cursor:
info = info + "Full Name:" + lastname + firstname + "Email: "+email
return info
application = cherrypy.Application(ExampleApp(), None)
| 1,687 | 98 | 23 |
a13de6988b1abf618a24ce034423ff9614e01f1a | 306 | py | Python | xin-era/foundation/python/tricks.py | fan-weiwei/algo-prep | 56514630cfe58d451dd126897459cca81bf1b2af | [
"Apache-2.0"
] | null | null | null | xin-era/foundation/python/tricks.py | fan-weiwei/algo-prep | 56514630cfe58d451dd126897459cca81bf1b2af | [
"Apache-2.0"
] | null | null | null | xin-era/foundation/python/tricks.py | fan-weiwei/algo-prep | 56514630cfe58d451dd126897459cca81bf1b2af | [
"Apache-2.0"
] | null | null | null | import math
@stars
if __name__ == '__main__':
explain('prints out all math functions')
print(dir(math))
| 17 | 44 | 0.568627 | import math
def stars(func):
def inner(*args, **kwargs):
print("*" * 30)
func(*args, **kwargs)
print("*" * 30 + '\n')
return inner
@stars
def explain(string):
print(string)
if __name__ == '__main__':
explain('prints out all math functions')
print(dir(math))
| 146 | 0 | 45 |
4eecd7f199b243a3436be78149ea66118f308fd4 | 454 | py | Python | Ethanyan_mall/Ethanyan_mall/apps/orders/urls.py | Jaylen0829/E-commerce-sites | faa4443c11d1534642c8dc9f8262f818f489c554 | [
"MIT"
] | 17 | 2019-01-22T00:14:40.000Z | 2022-02-03T12:29:49.000Z | Ethanyan_mall/Ethanyan_mall/apps/orders/urls.py | Jaylen0829/E-commerce-sites | faa4443c11d1534642c8dc9f8262f818f489c554 | [
"MIT"
] | 1 | 2020-06-28T15:16:38.000Z | 2020-08-03T15:34:14.000Z | Ethanyan_mall/Ethanyan_mall/apps/orders/urls.py | EthanYan6/E-commerce-sites | faa4443c11d1534642c8dc9f8262f818f489c554 | [
"MIT"
] | 11 | 2020-02-24T00:22:08.000Z | 2022-03-26T17:03:40.000Z | from django.conf.urls import url
from orders import views
urlpatterns = [
url(r'^orders/settlement/$', views.OrderSettlementView.as_view()),
url(r'^orders/$', views.OrdersView.as_view()),
url(r'^orders/(?P<order_id>\d+)/uncommentgoods/$',views.OrdersUnCommentView.as_view()),
url(r'^orders/(?P<order_id>\d+)/comments/$',views.OrdersCommentView.as_view()),
url(r'^skus/(?P<pk>\d+)/comments/$',views.OrdersCommentSkuView.as_view()),
] | 41.272727 | 91 | 0.696035 | from django.conf.urls import url
from orders import views
urlpatterns = [
url(r'^orders/settlement/$', views.OrderSettlementView.as_view()),
url(r'^orders/$', views.OrdersView.as_view()),
url(r'^orders/(?P<order_id>\d+)/uncommentgoods/$',views.OrdersUnCommentView.as_view()),
url(r'^orders/(?P<order_id>\d+)/comments/$',views.OrdersCommentView.as_view()),
url(r'^skus/(?P<pk>\d+)/comments/$',views.OrdersCommentSkuView.as_view()),
] | 0 | 0 | 0 |
5319b7ce8f2eb863da9fa356bf2c15a47b27ed34 | 911 | py | Python | physics/thermo.py | claywahlstrom/pack | 86b70198a4b185611c2ce3d29df99dd01233a6ac | [
"BSD-2-Clause"
] | 2 | 2019-05-04T09:32:15.000Z | 2021-02-08T08:38:23.000Z | physics/thermo.py | claywahlstrom/pack | 86b70198a4b185611c2ce3d29df99dd01233a6ac | [
"BSD-2-Clause"
] | null | null | null | physics/thermo.py | claywahlstrom/pack | 86b70198a4b185611c2ce3d29df99dd01233a6ac | [
"BSD-2-Clause"
] | null | null | null |
"""
thermodynamics
"""
def x(yf, y, yg):
"""Returns the quality of the two-phase mixture"""
if y < yf:
return 0
elif y > yg:
return 1
else:
return (y - yf) / (yg - yf)
if __name__ == '__main__':
from clay.tests import testif
from clay.utils import qualify
single_phase_tests = [
((2, 1, 3), 0),
((2, 4, 3), 1)
]
for test in single_phase_tests:
testif('returns correct quality for single-phase mixture (x: {})'.format(test[1]),
x(*test[0]),
test[1],
name=qualify(x))
two_phase_tests = [
((1, 5, 8), 0.57143),
((0.00079275, 0.01505, 0.04925), 0.29422)
]
for test in two_phase_tests:
testif('returns correct quality for two-phase mixture (x: {})'.format(test[1]),
round(x(*test[0]), 5),
test[1],
name=qualify(x))
| 21.690476 | 90 | 0.514819 |
"""
thermodynamics
"""
def x(yf, y, yg):
"""Returns the quality of the two-phase mixture"""
if y < yf:
return 0
elif y > yg:
return 1
else:
return (y - yf) / (yg - yf)
if __name__ == '__main__':
from clay.tests import testif
from clay.utils import qualify
single_phase_tests = [
((2, 1, 3), 0),
((2, 4, 3), 1)
]
for test in single_phase_tests:
testif('returns correct quality for single-phase mixture (x: {})'.format(test[1]),
x(*test[0]),
test[1],
name=qualify(x))
two_phase_tests = [
((1, 5, 8), 0.57143),
((0.00079275, 0.01505, 0.04925), 0.29422)
]
for test in two_phase_tests:
testif('returns correct quality for two-phase mixture (x: {})'.format(test[1]),
round(x(*test[0]), 5),
test[1],
name=qualify(x))
| 0 | 0 | 0 |
d85afd676288fb28551bd9a4df6bdb18e7886857 | 4,287 | py | Python | src/controller.py | hackingyseguridad/OpenDoor | 279ddf7b9f99caa6039b0b4d67db06c16d31347a | [
"Xnet",
"X11",
"Apache-1.1",
"RSA-MD"
] | null | null | null | src/controller.py | hackingyseguridad/OpenDoor | 279ddf7b9f99caa6039b0b4d67db06c16d31347a | [
"Xnet",
"X11",
"Apache-1.1",
"RSA-MD"
] | null | null | null | src/controller.py | hackingyseguridad/OpenDoor | 279ddf7b9f99caa6039b0b4d67db06c16d31347a | [
"Xnet",
"X11",
"Apache-1.1",
"RSA-MD"
] | null | null | null | # -*- coding: utf-8 -*-
"""
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Development Team: Stanislav WEB
"""
from src.lib import ArgumentsError
from src.lib import BrowserError
from src.lib import PackageError
from src.lib import ReporterError
from src.lib import TplError
from src.lib import args
from src.lib import browser
from src.lib import events
from src.lib import package
from src.lib import reporter
from src.lib import tpl
from . import execution_time
from .exceptions import SrcError
class Controller(object):
"""Controller class"""
def __init__(self):
"""
Init constructor
:raise SrcError
"""
events.terminate()
try:
interpreter = package.check_interpreter()
if interpreter is not True:
raise SrcError(tpl.error(key='unsupported', actual=interpreter.get('actual'),
expected=interpreter.get('expected')))
self.ioargs = args().get_arguments()
except ArgumentsError as e:
raise SrcError(tpl.error(e.message))
@execution_time(log=tpl)
def run(self):
"""
Bootstrap action
:raise SrcError
:return: None
"""
try:
tpl.message(package.banner())
if 'host' in self.ioargs:
getattr(self, 'scan_action')(self.ioargs)
else:
for action in self.ioargs.keys():
if hasattr(self, '{0}_action'.format(action)) and callable(
getattr(self, '{0}_action'.format(action))):
getattr(self, '{func}_action'.format(func=action))()
break
except (SrcError, PackageError, BrowserError, AttributeError) as e:
raise SrcError(tpl.error(e.message))
@staticmethod
def examples_action():
"""
Show examples action
:return: None
"""
tpl.message(package.examples())
@staticmethod
def update_action():
"""
App update action
:raise SrcError
:return: None
"""
try:
tpl.message(package.update())
except (AttributeError, PackageError) as e:
raise SrcError(e)
@staticmethod
def version_action():
"""
Show app version action
:raise SrcError
:return: None
"""
try:
tpl.message(package.version())
except (AttributeError, PackageError) as e:
raise SrcError(e)
@staticmethod
def local_version():
"""
Show app local version
:raise SrcError
:return: None
"""
try:
tpl.message(package.local_version())
except (AttributeError, PackageError) as e:
raise SrcError(e)
@classmethod
def scan_action(cls, params):
"""
URL scan action
:param dict params: console input args
:raise SrcError
:return: None
"""
try:
brows = browser(params)
if True is reporter.is_reported(params.get('host')):
try:
tpl.prompt(key='logged')
except KeyboardInterrupt:
tpl.cancel(key='abort')
if reporter.default is params.get('reports'):
tpl.info(key='use_reports')
brows.ping()
brows.scan()
brows.done()
except (AttributeError, BrowserError, ReporterError, TplError) as e:
raise SrcError(e.message)
except (KeyboardInterrupt, SystemExit):
tpl.cancel(key='abort')
| 27.132911 | 93 | 0.576627 | # -*- coding: utf-8 -*-
"""
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Development Team: Stanislav WEB
"""
from src.lib import ArgumentsError
from src.lib import BrowserError
from src.lib import PackageError
from src.lib import ReporterError
from src.lib import TplError
from src.lib import args
from src.lib import browser
from src.lib import events
from src.lib import package
from src.lib import reporter
from src.lib import tpl
from . import execution_time
from .exceptions import SrcError
class Controller(object):
"""Controller class"""
def __init__(self):
"""
Init constructor
:raise SrcError
"""
events.terminate()
try:
interpreter = package.check_interpreter()
if interpreter is not True:
raise SrcError(tpl.error(key='unsupported', actual=interpreter.get('actual'),
expected=interpreter.get('expected')))
self.ioargs = args().get_arguments()
except ArgumentsError as e:
raise SrcError(tpl.error(e.message))
@execution_time(log=tpl)
def run(self):
"""
Bootstrap action
:raise SrcError
:return: None
"""
try:
tpl.message(package.banner())
if 'host' in self.ioargs:
getattr(self, 'scan_action')(self.ioargs)
else:
for action in self.ioargs.keys():
if hasattr(self, '{0}_action'.format(action)) and callable(
getattr(self, '{0}_action'.format(action))):
getattr(self, '{func}_action'.format(func=action))()
break
except (SrcError, PackageError, BrowserError, AttributeError) as e:
raise SrcError(tpl.error(e.message))
@staticmethod
def examples_action():
"""
Show examples action
:return: None
"""
tpl.message(package.examples())
@staticmethod
def update_action():
"""
App update action
:raise SrcError
:return: None
"""
try:
tpl.message(package.update())
except (AttributeError, PackageError) as e:
raise SrcError(e)
@staticmethod
def version_action():
"""
Show app version action
:raise SrcError
:return: None
"""
try:
tpl.message(package.version())
except (AttributeError, PackageError) as e:
raise SrcError(e)
@staticmethod
def local_version():
"""
Show app local version
:raise SrcError
:return: None
"""
try:
tpl.message(package.local_version())
except (AttributeError, PackageError) as e:
raise SrcError(e)
@classmethod
def scan_action(cls, params):
"""
URL scan action
:param dict params: console input args
:raise SrcError
:return: None
"""
try:
brows = browser(params)
if True is reporter.is_reported(params.get('host')):
try:
tpl.prompt(key='logged')
except KeyboardInterrupt:
tpl.cancel(key='abort')
if reporter.default is params.get('reports'):
tpl.info(key='use_reports')
brows.ping()
brows.scan()
brows.done()
except (AttributeError, BrowserError, ReporterError, TplError) as e:
raise SrcError(e.message)
except (KeyboardInterrupt, SystemExit):
tpl.cancel(key='abort')
| 0 | 0 | 0 |
0f0dd477ef58d03468f0664fbabe017bfccb2929 | 5,911 | py | Python | cfgrib/xarray_store.py | alexamici/cfgrib | 6536825ede61bbc61b7b51b827ec0c41efe9d0ee | [
"Apache-2.0"
] | null | null | null | cfgrib/xarray_store.py | alexamici/cfgrib | 6536825ede61bbc61b7b51b827ec0c41efe9d0ee | [
"Apache-2.0"
] | null | null | null | cfgrib/xarray_store.py | alexamici/cfgrib | 6536825ede61bbc61b7b51b827ec0c41efe9d0ee | [
"Apache-2.0"
] | 1 | 2020-05-18T21:59:22.000Z | 2020-05-18T21:59:22.000Z | #
# Copyright 2017-2018 European Centre for Medium-Range Weather Forecasts (ECMWF).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# Alessandro Amici - B-Open - https://bopen.eu
#
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import attr
from xarray import Variable
from xarray.core import indexing
from xarray.core.utils import FrozenOrderedDict
from xarray.backends.api import open_dataset as _open_dataset
from xarray.backends.common import AbstractDataStore, BackendArray
import cfgrib
FLAVOURS = {
'eccodes': {
'dataset': {
'encode_time': False,
'encode_vertical': False,
'encode_geography': False,
},
},
'ecmwf': {
'variable_map': {
'forecast_reference_time': 'time',
'forecast_period': 'step',
'time': 'valid_time',
'air_pressure': 'level',
'topLevel': 'level',
},
'type_of_level_map': {
'hybrid': 'L{GRIB_hybrid_level_count}',
},
},
'cds': {
'variable_map': {
'number': 'realization',
'forecast_period': 'leadtime',
'air_pressure': 'plev',
'latitude': 'lat',
'longitude': 'lon',
'topLevel': 'level',
},
'type_of_level_map': {
'hybrid': 'L{GRIB_hybrid_level_count}',
},
},
}
@attr.attrs()
| 33.777143 | 97 | 0.640162 | #
# Copyright 2017-2018 European Centre for Medium-Range Weather Forecasts (ECMWF).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# Alessandro Amici - B-Open - https://bopen.eu
#
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import attr
from xarray import Variable
from xarray.core import indexing
from xarray.core.utils import FrozenOrderedDict
from xarray.backends.api import open_dataset as _open_dataset
from xarray.backends.common import AbstractDataStore, BackendArray
import cfgrib
class WrapGrib(BackendArray):
    """Adapt a cfgrib on-disk array to xarray's lazy ``BackendArray`` protocol."""
    def __init__(self, backend_array):
        self.backend_array = backend_array
    def __getattr__(self, item):
        # Delegate everything else (shape, dtype, ...) to the wrapped array.
        return getattr(self.backend_array, item)
    def __getitem__(self, item):
        # Split the indexer into the part the backend supports (outer indexing
        # with at most one vector) and the residual numpy-level indexing.
        key, np_inds = indexing.decompose_indexer(
            item, self.shape, indexing.IndexingSupport.OUTER_1VECTOR)
        array = self.backend_array[key.tuple]
        if len(np_inds.tuple) > 0:
            # Apply the leftover fancy indexing in memory on the loaded data.
            array = indexing.NumpyIndexingAdapter(array)[np_inds]
        return array
# Per-flavour translation tables used when opening a GRIB file:
# - 'dataset': keyword overrides forwarded to cfgrib.Dataset.frompath,
# - 'variable_map': renames from cfgrib names to the flavour's conventions,
# - 'type_of_level_map': name templates for vertical coordinates.
FLAVOURS = {
    'eccodes': {
        # Raw ecCodes view: no CF-style encoding of time/vertical/geography.
        'dataset': {
            'encode_time': False,
            'encode_vertical': False,
            'encode_geography': False,
        },
    },
    'ecmwf': {
        # ECMWF-style short names (e.g. forecast_reference_time -> time).
        'variable_map': {
            'forecast_reference_time': 'time',
            'forecast_period': 'step',
            'time': 'valid_time',
            'air_pressure': 'level',
            'topLevel': 'level',
        },
        'type_of_level_map': {
            # Template is filled from the variable's GRIB attributes.
            'hybrid': 'L{GRIB_hybrid_level_count}',
        },
    },
    'cds': {
        # Climate Data Store naming conventions.
        'variable_map': {
            'number': 'realization',
            'forecast_period': 'leadtime',
            'air_pressure': 'plev',
            'latitude': 'lat',
            'longitude': 'lon',
            'topLevel': 'level',
        },
        'type_of_level_map': {
            'hybrid': 'L{GRIB_hybrid_level_count}',
        },
    },
}
@attr.attrs()
class GribDataStore(AbstractDataStore):
    """xarray data store backed by a ``cfgrib.Dataset``, with flavour renaming."""
    ds = attr.attrib()
    # Maps cfgrib variable/dimension names to flavour-specific names.
    # NOTE(review): default={} is shared across instances; __attrs_post_init__
    # copies it before mutating, so this is only a latent hazard.
    variable_map = attr.attrib(default={})
    # Maps GRIB typeOfLevel values to coordinate-name templates.
    type_of_level_map = attr.attrib(default={})
    @classmethod
    def frompath(cls, path, flavour_name='ecmwf', **kwargs):
        # Alternate constructor: 'dataset' entries of the flavour become
        # cfgrib.Dataset options; explicit kwargs override the flavour defaults.
        flavour = FLAVOURS[flavour_name].copy()
        config = flavour.pop('dataset', {}).copy()
        config.update(kwargs)
        return cls(ds=cfgrib.Dataset.frompath(path, **config), **flavour)
    def __attrs_post_init__(self):
        self.variable_map = self.variable_map.copy()
        for name, var in self.ds.variables.items():
            if self.ds.encode_vertical and 'GRIB_typeOfLevel' in var.attributes:
                type_of_level = var.attributes['GRIB_typeOfLevel']
                coord_name = self.type_of_level_map.get(type_of_level, type_of_level)
                # e.g. 'hybrid' -> 'L{GRIB_hybrid_level_count}' -> 'L91'
                self.variable_map['topLevel'] = coord_name.format(**var.attributes)
    def open_store_variable(self, name, var):
        """Convert one cfgrib variable into an ``xarray.Variable`` (lazy when on disk)."""
        if isinstance(var.data, cfgrib.dataset.OnDiskArray):
            data = indexing.LazilyOuterIndexedArray(WrapGrib(var.data))
        else:
            data = var.data
        dimensions = tuple(self.variable_map.get(dim, dim) for dim in var.dimensions)
        attrs = var.attributes
        # the coordinates attributes need a special treatment
        if 'coordinates' in attrs:
            coordinates = [self.variable_map.get(d, d) for d in attrs['coordinates'].split()]
            attrs['coordinates'] = ' '.join(coordinates)
        encoding = {}
        # save source so __repr__ can detect if it's local or not
        encoding['source'] = self.ds.stream.path
        encoding['original_shape'] = var.data.shape
        return Variable(dimensions, data, attrs, encoding)
    def get_variables(self):
        # Variables keyed by their flavour-mapped names.
        return FrozenOrderedDict((self.variable_map.get(k, k), self.open_store_variable(k, v))
                                 for k, v in self.ds.variables.items())
    def get_attrs(self):
        return FrozenOrderedDict(self.ds.attributes)
    def get_dimensions(self):
        # Dimension sizes keyed by their flavour-mapped names.
        return collections.OrderedDict((self.variable_map.get(d, d), s)
                                       for d, s in self.ds.dimensions.items())
    def get_encoding(self):
        # A size of None marks a dimension as unlimited.
        encoding = {}
        encoding['unlimited_dims'] = {k for k, v in self.ds.dimensions.items() if v is None}
        return encoding
def open_dataset(path, flavour_name='ecmwf', **kwargs):
    """Open *path* as an ``xarray.Dataset`` using the given translation flavour.
    Keyword arguments starting with ``encode_`` are forwarded to the GRIB data
    store; all other keyword arguments go to ``xarray``'s ``open_dataset``.
    """
    store_options = {key: value for key, value in kwargs.items() if key.startswith('encode_')}
    xarray_options = {key: value for key, value in kwargs.items() if not key.startswith('encode_')}
    store = GribDataStore.frompath(path, flavour_name=flavour_name, **store_options)
    return _open_dataset(store, **xarray_options)
def cfgrib2netcdf():
    """Console entry point: convert a GRIB file into a NetCDF file."""
    import argparse
    parser = argparse.ArgumentParser(description='Convert a GRIB file into a NetCDF file.')
    parser.add_argument('input', help='Path to the input GRIB file.')
    parser.add_argument(
        '--flavour_name', default='cds', help='Translation flavour: "cds", "eccodes" or "ecmwf".'
    )
    parser.add_argument(
        '--output', '-o', default='{input}.nc', help='Path to the output file.'
    )
    options = parser.parse_args()
    # Open with the selected flavour, then serialize via xarray's NetCDF writer.
    print('Loading: %r with flavour %r' % (options.input, options.flavour_name))
    dataset = open_dataset(options.input, flavour_name=options.flavour_name)
    target = options.output.format(input=options.input)
    print('Creating:', target)
    dataset.to_netcdf(target)
| 3,423 | 346 | 171 |
21ce880c5bc39254affa9c21f74859fe35d6b7c8 | 1,168 | py | Python | quedadas/migrations/0021_auto_20171115_1613.py | fevsea/meet-Run-Server | 48454a4665f55da019334271641c514df231f177 | [
"MIT"
] | null | null | null | quedadas/migrations/0021_auto_20171115_1613.py | fevsea/meet-Run-Server | 48454a4665f55da019334271641c514df231f177 | [
"MIT"
] | null | null | null | quedadas/migrations/0021_auto_20171115_1613.py | fevsea/meet-Run-Server | 48454a4665f55da019334271641c514df231f177 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-11-15 16:13
from __future__ import unicode_literals
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
| 32.444444 | 114 | 0.603596 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-11-15 16:13
from __future__ import unicode_literals
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: inverts the Meeting<->Tracking relation by
    # dropping Meeting.tracking and adding ForeignKeys on Tracking that point
    # at the Meeting and the user instead (both reachable via 'tracks').
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('quedadas', '0020_auto_20171114_1630'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='meeting',
            name='tracking',
        ),
        migrations.AddField(
            model_name='tracking',
            name='meeting',
            # default=1 backfills existing rows once; not kept as a field default.
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='tracks',
                                    to='quedadas.Meeting'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='tracking',
            name='user',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='tracks',
                                    to=settings.AUTH_USER_MODEL),
            preserve_default=False,
        ),
    ]
| 0 | 923 | 23 |
b6837b5a0422d4a20492350aa069fd47132c7f4c | 382 | py | Python | 20_muke_python_advanced/01_summary/07_download_server_singleThread/scheduler.py | hemuke/python | bc99f2b5aee997083ae31f59a2b33db48c8255f3 | [
"Apache-2.0"
] | null | null | null | 20_muke_python_advanced/01_summary/07_download_server_singleThread/scheduler.py | hemuke/python | bc99f2b5aee997083ae31f59a2b33db48c8255f3 | [
"Apache-2.0"
] | null | null | null | 20_muke_python_advanced/01_summary/07_download_server_singleThread/scheduler.py | hemuke/python | bc99f2b5aee997083ae31f59a2b33db48c8255f3 | [
"Apache-2.0"
] | null | null | null | # -*- encoding=utf-8 -*-
import utils
from modules.downloader import Downloader
class Scheduler:
""" 调度模块
"""
if __name__ == '__main__':
Scheduler().process()
| 17.363636 | 41 | 0.604712 | # -*- encoding=utf-8 -*-
import utils
from modules.downloader import Downloader
class Scheduler:
    """Scheduling module: loads the image URL list and drives the downloader."""
    def __init__(self):
        self.downloader = Downloader()
    def process(self):
        # 1. Load the list of image URLs.
        # 2. Hand the list over to the download module.
        urls = utils.urllist()
        self.downloader.process(urls)
if __name__ == '__main__':
Scheduler().process()
| 176 | 0 | 54 |
a745741dbf59947084387af7c309073e37bfd4f2 | 425 | py | Python | from_book_data_structures/ch01/06.py | alexandrejr45/exercicios_algoritmo | b6af8275ff44e9dacea31e4d0121efd655ba34ca | [
"MIT"
] | null | null | null | from_book_data_structures/ch01/06.py | alexandrejr45/exercicios_algoritmo | b6af8275ff44e9dacea31e4d0121efd655ba34ca | [
"MIT"
] | null | null | null | from_book_data_structures/ch01/06.py | alexandrejr45/exercicios_algoritmo | b6af8275ff44e9dacea31e4d0121efd655ba34ca | [
"MIT"
] | null | null | null | # Write a short Python function that takes a positive integer n and returns
# the sum of the squares of all the odd positive integers smaller than n.
print(odd_squares(9))
print(odd_squares(4002))
print(odd_squares(833))
print(odd_squares(6))
print(odd_squares(112))
| 23.611111 | 75 | 0.684706 | # Write a short Python function that takes a positive integer n and returns
# the sum of the squares of all the odd positive integers smaller than n.
def odd_squares(n: int):
    """Return the sum of squares of all odd positive integers smaller than *n*."""
    # range(1, n, 2) enumerates exactly the odd integers in [1, n).
    return sum(k * k for k in range(1, n, 2))
print(odd_squares(9))
print(odd_squares(4002))
print(odd_squares(833))
print(odd_squares(6))
print(odd_squares(112))
| 132 | 0 | 23 |
658ced6b186c5694b89bb298bd4e5cf2d8877648 | 1,039 | py | Python | experiment/graphs/download_graphs.py | isaacarvestad/edc | 7a45bd121c82480040694ef4f249d9cc049885b9 | [
"BSD-3-Clause"
] | 1 | 2021-01-26T18:36:24.000Z | 2021-01-26T18:36:24.000Z | experiment/graphs/download_graphs.py | isaacarvestad/edc | 7a45bd121c82480040694ef4f249d9cc049885b9 | [
"BSD-3-Clause"
] | null | null | null | experiment/graphs/download_graphs.py | isaacarvestad/edc | 7a45bd121c82480040694ef4f249d9cc049885b9 | [
"BSD-3-Clause"
] | null | null | null | #! /usr/bin/env python3
"""Downloads all graphs used in the experiments to the correct locations.
Query results are sparse matrices. Any matrix which isn't square is removed
since it does not define a graph.
Usage: python3 setyp.py
"""
import ssgetpy
import os
# Maximum results when querying website.
QUERY_LIMIT = 1000
# Max non-zero values in matrix allowed. This roughly corresponds to edges.
NON_ZERO_LIMIT = 200000
# Base location for ssgetpy to place graphs when downloading.
SAVE_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'real.mtx')
groups = [
"DIMACS10",
"Hamm",
"AG-Monien",
"Nasa",
]
graph_entries = []
for g in groups:
graph_entries += search(g, NON_ZERO_LIMIT)
print(f'Graphs found: {len(graph_entries)}')
for e in graph_entries:
e.download(format="MM", destpath=SAVE_PATH, extract=True)
| 25.341463 | 88 | 0.728585 | #! /usr/bin/env python3
"""Downloads all graphs used in the experiments to the correct locations.
Query results are sparse matrices. Any matrix which isn't square is removed
since it does not define a graph.
Usage: python3 setyp.py
"""
import ssgetpy
import os
# Maximum results when querying website.
QUERY_LIMIT = 1000
# Max non-zero values in matrix allowed. This roughly corresponds to edges.
NON_ZERO_LIMIT = 200000
# Base location for ssgetpy to place graphs when downloading.
SAVE_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'real.mtx')
def search(group, max_edges):
    """Query SuiteSparse matrices of *group* with at most *max_edges* non-zeros,
    keeping only square matrices (the ones that define a graph)."""
    candidates = ssgetpy.search(group=group, nzbounds=(None, max_edges), limit=QUERY_LIMIT)
    return [entry for entry in candidates if entry.cols == entry.rows]
# SuiteSparse collection groups to pull graphs from.
groups = [
    "DIMACS10",
    "Hamm",
    "AG-Monien",
    "Nasa",
]
graph_entries = []
for g in groups:
    graph_entries += search(g, NON_ZERO_LIMIT)
print(f'Graphs found: {len(graph_entries)}')
for e in graph_entries:
    # Download each matrix in MatrixMarket format and extract it in place.
    e.download(format="MM", destpath=SAVE_PATH, extract=True)
| 158 | 0 | 23 |
b667451592da463cb1c903926dd375ffad0cc89a | 1,817 | py | Python | samples/core/resource_spec/runtime_resource_request.py | nostro-im/pipelines | 39f5b6b74040abbf4b764cbd5b422d7548723d9e | [
"Apache-2.0"
] | 2,860 | 2018-05-24T04:55:01.000Z | 2022-03-31T13:49:56.000Z | samples/core/resource_spec/runtime_resource_request.py | nostro-im/pipelines | 39f5b6b74040abbf4b764cbd5b422d7548723d9e | [
"Apache-2.0"
] | 7,331 | 2018-05-16T09:03:26.000Z | 2022-03-31T23:22:04.000Z | samples/core/resource_spec/runtime_resource_request.py | nostro-im/pipelines | 39f5b6b74040abbf4b764cbd5b422d7548723d9e | [
"Apache-2.0"
] | 1,359 | 2018-05-15T11:05:41.000Z | 2022-03-31T09:42:09.000Z | # Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import kfp
from kfp import dsl, components
from typing import NamedTuple
@components.create_component_from_func
@components.create_component_from_func
def generate_resource_request() -> NamedTuple('output', [('memory', str), ('cpu', str)]):
'''Returns the memory and cpu request'''
from collections import namedtuple
resource_output = namedtuple('output', ['memory', 'cpu'])
return resource_output('500Mi', '200m')
@dsl.pipeline(
name='Runtime resource request pipeline',
description='An example on how to make resource requests at runtime.'
)
if __name__ == '__main__':
kfp.compiler.Compiler().compile(resource_request_pipeline, __file__ + '.yaml')
| 37.081633 | 89 | 0.734728 | # Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import kfp
from kfp import dsl, components
from typing import NamedTuple
@components.create_component_from_func
def training_op(n: int) -> int:
    """Allocate a list of ``n`` integers (to exercise the memory limit) and return its length."""
    allocated = list(range(n))
    return len(allocated)
@components.create_component_from_func
def generate_resource_request() -> NamedTuple('output', [('memory', str), ('cpu', str)]):
    """Return the runtime resource request as a (memory, cpu) named tuple."""
    import collections
    Output = collections.namedtuple('output', ['memory', 'cpu'])
    return Output(memory='500Mi', cpu='200m')
@dsl.pipeline(
    name='Runtime resource request pipeline',
    description='An example on how to make resource requests at runtime.'
)
def resource_request_pipeline(n: int = 11234567):
    # The first step produces the memory/cpu amounts; the training step then
    # consumes them as its runtime resource limits.
    resource_task = generate_resource_request()
    traning_task = training_op(n)\
        .set_memory_limit(resource_task.outputs['memory'])\
        .set_cpu_limit(resource_task.outputs['cpu'])\
        .set_cpu_request('200m')
    # Disable cache for KFP v1 mode.
    traning_task.execution_options.caching_strategy.max_cache_staleness = 'P0D'
if __name__ == '__main__':
kfp.compiler.Compiler().compile(resource_request_pipeline, __file__ + '.yaml')
| 500 | 0 | 44 |
81448ebc57ce50e51e9368bd89528b3ec3717ffc | 22,813 | py | Python | gluon/gluoncv2/models/deeplabv3.py | naviocean/imgclsmob | f2993d3ce73a2f7ddba05da3891defb08547d504 | [
"MIT"
] | 2,649 | 2018-08-03T14:18:00.000Z | 2022-03-31T08:08:17.000Z | gluon/gluoncv2/models/deeplabv3.py | naviocean/imgclsmob | f2993d3ce73a2f7ddba05da3891defb08547d504 | [
"MIT"
] | 95 | 2018-08-13T01:46:03.000Z | 2022-03-13T08:38:14.000Z | gluon/gluoncv2/models/deeplabv3.py | naviocean/imgclsmob | f2993d3ce73a2f7ddba05da3891defb08547d504 | [
"MIT"
] | 549 | 2018-08-06T08:09:22.000Z | 2022-03-31T08:08:21.000Z | """
DeepLabv3 for image segmentation, implemented in Gluon.
Original paper: 'Rethinking Atrous Convolution for Semantic Image Segmentation,' https://arxiv.org/abs/1706.05587.
"""
__all__ = ['DeepLabv3', 'deeplabv3_resnetd50b_voc', 'deeplabv3_resnetd101b_voc', 'deeplabv3_resnetd152b_voc',
'deeplabv3_resnetd50b_coco', 'deeplabv3_resnetd101b_coco', 'deeplabv3_resnetd152b_coco',
'deeplabv3_resnetd50b_ade20k', 'deeplabv3_resnetd101b_ade20k', 'deeplabv3_resnetd50b_cityscapes',
'deeplabv3_resnetd101b_cityscapes']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from mxnet.gluon.contrib.nn import HybridConcurrent
from .common import conv1x1, conv1x1_block, conv3x3_block
from .resnetd import resnetd50b, resnetd101b, resnetd152b
class DeepLabv3FinalBlock(HybridBlock):
"""
DeepLabv3 final block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bottleneck_factor : int, default 4
Bottleneck factor.
"""
class ASPPAvgBranch(HybridBlock):
"""
ASPP branch with average pooling.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
upscale_out_size : tuple of 2 int or None
Spatial size of output image for the bilinear upsampling operation.
"""
class AtrousSpatialPyramidPooling(HybridBlock):
"""
Atrous Spatial Pyramid Pooling (ASPP) module.
Parameters:
----------
in_channels : int
Number of input channels.
upscale_out_size : tuple of 2 int
Spatial size of the input tensor for the bilinear upsampling operation.
"""
class DeepLabv3(HybridBlock):
"""
DeepLabv3 model from 'Rethinking Atrous Convolution for Semantic Image Segmentation,'
https://arxiv.org/abs/1706.05587.
Parameters:
----------
backbone : nn.Sequential
Feature extractor.
backbone_out_channels : int, default 2048
Number of output channels form feature extractor.
aux : bool, default False
Whether to output an auxiliary result.
fixed_size : bool, default True
Whether to expect fixed spatial size of input image.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (480, 480)
Spatial size of the expected input image.
classes : int, default 21
Number of segmentation classes.
"""
def get_deeplabv3(backbone,
                  classes,
                  aux=False,
                  model_name=None,
                  pretrained=False,
                  ctx=cpu(),
                  root=os.path.join("~", ".mxnet", "models"),
                  **kwargs):
    """
    Create DeepLabv3 model with specific parameters.
    Parameters:
    ----------
    backbone : nn.Sequential
        Feature extractor.
    classes : int
        Number of segmentation classes.
    aux : bool, default False
        Whether to output an auxiliary result.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    DeepLabv3
        The constructed segmentation network.
    """
    net = DeepLabv3(
        backbone=backbone,
        classes=classes,
        aux=aux,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        # ignore_extra tolerates checkpoints that carry the auxiliary head
        # even when this instance was built without it.
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx,
            ignore_extra=True)
    return net
def deeplabv3_resnetd50b_voc(pretrained_backbone=False, classes=21, aux=True, **kwargs):
    """
    DeepLabv3 model on the base of ResNet(D)-50b for Pascal VOC from 'Rethinking Atrous Convolution for Semantic Image
    Segmentation,' https://arxiv.org/abs/1706.05587.
    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 21
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    DeepLabv3
        The constructed segmentation network.
    """
    # ResNet(D)-50b features without the last element; bends=(3,) adds the
    # intermediate output used by the auxiliary head.
    backbone = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features[:-1]
    return get_deeplabv3(backbone=backbone, classes=classes, aux=aux, model_name="deeplabv3_resnetd50b_voc", **kwargs)
def deeplabv3_resnetd101b_voc(pretrained_backbone=False, classes=21, aux=True, **kwargs):
    """
    DeepLabv3 model on the base of ResNet(D)-101b for Pascal VOC from 'Rethinking Atrous Convolution for Semantic Image
    Segmentation,' https://arxiv.org/abs/1706.05587.
    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 21
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    DeepLabv3
        The constructed segmentation network.
    """
    # ResNet(D)-101b features without the last element; bends=(3,) adds the
    # intermediate output used by the auxiliary head.
    backbone = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features[:-1]
    return get_deeplabv3(backbone=backbone, classes=classes, aux=aux, model_name="deeplabv3_resnetd101b_voc", **kwargs)
def deeplabv3_resnetd152b_voc(pretrained_backbone=False, classes=21, aux=True, **kwargs):
    """
    DeepLabv3 model on the base of ResNet(D)-152b for Pascal VOC from 'Rethinking Atrous Convolution for Semantic Image
    Segmentation,' https://arxiv.org/abs/1706.05587.
    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 21
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    DeepLabv3
        The constructed segmentation network.
    """
    # ResNet(D)-152b features without the last element; bends=(3,) adds the
    # intermediate output used by the auxiliary head.
    backbone = resnetd152b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features[:-1]
    return get_deeplabv3(backbone=backbone, classes=classes, aux=aux, model_name="deeplabv3_resnetd152b_voc", **kwargs)
def deeplabv3_resnetd50b_coco(pretrained_backbone=False, classes=21, aux=True, **kwargs):
    """
    DeepLabv3 model on the base of ResNet(D)-50b for COCO from 'Rethinking Atrous Convolution for Semantic Image
    Segmentation,' https://arxiv.org/abs/1706.05587.
    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 21
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    DeepLabv3
        The constructed segmentation network.
    """
    # ResNet(D)-50b features without the last element; bends=(3,) adds the
    # intermediate output used by the auxiliary head.
    backbone = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features[:-1]
    return get_deeplabv3(backbone=backbone, classes=classes, aux=aux, model_name="deeplabv3_resnetd50b_coco", **kwargs)
def deeplabv3_resnetd101b_coco(pretrained_backbone=False, classes=21, aux=True, **kwargs):
    """
    DeepLabv3 model on the base of ResNet(D)-101b for COCO from 'Rethinking Atrous Convolution for Semantic Image
    Segmentation,' https://arxiv.org/abs/1706.05587.
    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 21
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    DeepLabv3
        The constructed segmentation network.
    """
    # ResNet(D)-101b features without the last element; bends=(3,) adds the
    # intermediate output used by the auxiliary head.
    backbone = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features[:-1]
    return get_deeplabv3(backbone=backbone, classes=classes, aux=aux, model_name="deeplabv3_resnetd101b_coco", **kwargs)
def deeplabv3_resnetd152b_coco(pretrained_backbone=False, classes=21, aux=True, **kwargs):
    """
    DeepLabv3 model on the base of ResNet(D)-152b for COCO from 'Rethinking Atrous Convolution for Semantic Image
    Segmentation,' https://arxiv.org/abs/1706.05587.
    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 21
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    DeepLabv3
        The constructed segmentation network.
    """
    # ResNet(D)-152b features without the last element; bends=(3,) adds the
    # intermediate output used by the auxiliary head.
    backbone = resnetd152b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features[:-1]
    return get_deeplabv3(backbone=backbone, classes=classes, aux=aux, model_name="deeplabv3_resnetd152b_coco", **kwargs)
def deeplabv3_resnetd50b_ade20k(pretrained_backbone=False, classes=150, aux=True, **kwargs):
    """
    DeepLabv3 model on the base of ResNet(D)-50b for ADE20K from 'Rethinking Atrous Convolution for Semantic Image
    Segmentation,' https://arxiv.org/abs/1706.05587.
    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 150
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    DeepLabv3
        The constructed segmentation network.
    """
    # ResNet(D)-50b features without the last element; bends=(3,) adds the
    # intermediate output used by the auxiliary head.
    backbone = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features[:-1]
    return get_deeplabv3(backbone=backbone, classes=classes, aux=aux, model_name="deeplabv3_resnetd50b_ade20k",
                         **kwargs)
def deeplabv3_resnetd101b_ade20k(pretrained_backbone=False, classes=150, aux=True, **kwargs):
    """
    DeepLabv3 model on the base of ResNet(D)-101b for ADE20K from 'Rethinking Atrous Convolution for Semantic Image
    Segmentation,' https://arxiv.org/abs/1706.05587.
    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 150
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    DeepLabv3
        The constructed segmentation network.
    """
    # ResNet(D)-101b features without the last element; bends=(3,) adds the
    # intermediate output used by the auxiliary head.
    backbone = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features[:-1]
    return get_deeplabv3(backbone=backbone, classes=classes, aux=aux, model_name="deeplabv3_resnetd101b_ade20k",
                         **kwargs)
def deeplabv3_resnetd50b_cityscapes(pretrained_backbone=False, classes=19, aux=True, **kwargs):
    """
    DeepLabv3 model on the base of ResNet(D)-50b for Cityscapes from 'Rethinking Atrous Convolution for Semantic Image
    Segmentation,' https://arxiv.org/abs/1706.05587.
    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 19
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    DeepLabv3
        The constructed segmentation network.
    """
    # ResNet(D)-50b features without the last element; bends=(3,) adds the
    # intermediate output used by the auxiliary head.
    backbone = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features[:-1]
    return get_deeplabv3(backbone=backbone, classes=classes, aux=aux, model_name="deeplabv3_resnetd50b_cityscapes",
                         **kwargs)
def deeplabv3_resnetd101b_cityscapes(pretrained_backbone=False, classes=19, aux=True, **kwargs):
    """
    DeepLabv3 model on the base of ResNet(D)-101b for Cityscapes from 'Rethinking Atrous Convolution for Semantic Image
    Segmentation,' https://arxiv.org/abs/1706.05587.
    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 19
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    DeepLabv3
        The constructed segmentation network.
    """
    # ResNet(D)-101b features without the last element; bends=(3,) adds the
    # intermediate output used by the auxiliary head.
    backbone = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features[:-1]
    return get_deeplabv3(backbone=backbone, classes=classes, aux=aux, model_name="deeplabv3_resnetd101b_cityscapes",
                         **kwargs)
if __name__ == "__main__":
_test()
| 39.882867 | 120 | 0.653838 | """
DeepLabv3 for image segmentation, implemented in Gluon.
Original paper: 'Rethinking Atrous Convolution for Semantic Image Segmentation,' https://arxiv.org/abs/1706.05587.
"""
__all__ = ['DeepLabv3', 'deeplabv3_resnetd50b_voc', 'deeplabv3_resnetd101b_voc', 'deeplabv3_resnetd152b_voc',
'deeplabv3_resnetd50b_coco', 'deeplabv3_resnetd101b_coco', 'deeplabv3_resnetd152b_coco',
'deeplabv3_resnetd50b_ade20k', 'deeplabv3_resnetd101b_ade20k', 'deeplabv3_resnetd50b_cityscapes',
'deeplabv3_resnetd101b_cityscapes']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from mxnet.gluon.contrib.nn import HybridConcurrent
from .common import conv1x1, conv1x1_block, conv3x3_block
from .resnetd import resnetd50b, resnetd101b, resnetd152b
class DeepLabv3FinalBlock(HybridBlock):
    """
    DeepLabv3 final block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bottleneck_factor : int, default 4
        Bottleneck factor.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bottleneck_factor=4,
                 **kwargs):
        super(DeepLabv3FinalBlock, self).__init__(**kwargs)
        assert (in_channels % bottleneck_factor == 0)
        # Channels are reduced by the bottleneck factor before classification.
        mid_channels = in_channels // bottleneck_factor
        with self.name_scope():
            self.conv1 = conv3x3_block(
                in_channels=in_channels,
                out_channels=mid_channels)
            self.dropout = nn.Dropout(rate=0.1)
            # 1x1 conv acts as the per-pixel classifier (bias, no BN/activation).
            self.conv2 = conv1x1(
                in_channels=mid_channels,
                out_channels=out_channels,
                use_bias=True)
    def hybrid_forward(self, F, x, out_size):
        # 3x3 conv -> dropout -> 1x1 classifier, then bilinear upsampling to
        # the requested (height, width).
        x = self.conv1(x)
        x = self.dropout(x)
        x = self.conv2(x)
        x = F.contrib.BilinearResize2D(x, height=out_size[0], width=out_size[1])
        return x
class ASPPAvgBranch(HybridBlock):
    """
    ASPP branch with average pooling.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    upscale_out_size : tuple of 2 int or None
        Spatial size of output image for the bilinear upsampling operation.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 upscale_out_size,
                 **kwargs):
        super(ASPPAvgBranch, self).__init__(**kwargs)
        self.upscale_out_size = upscale_out_size
        with self.name_scope():
            self.conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels)
    def hybrid_forward(self, F, x):
        # If no fixed size was given, fall back to the runtime spatial size
        # (requires imperative mode, since x.shape is read).
        in_size = self.upscale_out_size if self.upscale_out_size is not None else x.shape[2:]
        # Global average pool to 1x1, project channels, then upsample back.
        x = F.contrib.AdaptiveAvgPooling2D(x, output_size=1)
        x = self.conv(x)
        x = F.contrib.BilinearResize2D(x, height=in_size[0], width=in_size[1])
        return x
class AtrousSpatialPyramidPooling(HybridBlock):
    """
    Atrous Spatial Pyramid Pooling (ASPP) module.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    upscale_out_size : tuple of 2 int
        Spatial size of the input tensor for the bilinear upsampling operation.
    """
    def __init__(self,
                 in_channels,
                 upscale_out_size,
                 **kwargs):
        super(AtrousSpatialPyramidPooling, self).__init__(**kwargs)
        atrous_rates = [12, 24, 36]
        assert (in_channels % 8 == 0)
        mid_channels = in_channels // 8
        # 5 parallel branches (1x1 + three atrous 3x3 + global-average),
        # concatenated along the channel axis before projection.
        project_in_channels = 5 * mid_channels
        with self.name_scope():
            self.branches = HybridConcurrent(axis=1, prefix="")
            self.branches.add(conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels))
            for atrous_rate in atrous_rates:
                # padding == dilation keeps the spatial size unchanged.
                self.branches.add(conv3x3_block(
                    in_channels=in_channels,
                    out_channels=mid_channels,
                    padding=atrous_rate,
                    dilation=atrous_rate))
            self.branches.add(ASPPAvgBranch(
                in_channels=in_channels,
                out_channels=mid_channels,
                upscale_out_size=upscale_out_size))
            self.conv = conv1x1_block(
                in_channels=project_in_channels,
                out_channels=mid_channels)
            self.dropout = nn.Dropout(rate=0.5)
    def hybrid_forward(self, F, x):
        # Concatenated branch outputs -> 1x1 projection -> dropout.
        x = self.branches(x)
        x = self.conv(x)
        x = self.dropout(x)
        return x
class DeepLabv3(HybridBlock):
    """
    DeepLabv3 model from 'Rethinking Atrous Convolution for Semantic Image Segmentation,'
    https://arxiv.org/abs/1706.05587.

    Parameters:
    ----------
    backbone : nn.Sequential
        Feature extractor. Must return a pair ``(x, y)`` where ``x`` feeds
        the main head and ``y`` feeds the optional auxiliary head.
    backbone_out_channels : int, default 2048
        Number of output channels form feature extractor.
    aux : bool, default False
        Whether to output an auxiliary result.
    fixed_size : bool, default True
        Whether to expect fixed spatial size of input image.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (480, 480)
        Spatial size of the expected input image (both sides must be
        divisible by 8, matching the backbone's output stride).
    classes : int, default 21
        Number of segmentation classes.
    """
    def __init__(self,
                 backbone,
                 backbone_out_channels=2048,
                 aux=False,
                 fixed_size=True,
                 in_channels=3,
                 in_size=(480, 480),
                 classes=21,
                 **kwargs):
        super(DeepLabv3, self).__init__(**kwargs)
        assert (in_channels > 0)
        assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0))
        self.in_size = in_size
        self.classes = classes
        self.aux = aux
        self.fixed_size = fixed_size
        with self.name_scope():
            self.backbone = backbone
            # The backbone has an output stride of 8, so the ASPP pooling
            # branch works on an in_size/8 feature map (or a runtime size
            # when fixed_size is False).
            pool_out_size = (self.in_size[0] // 8, self.in_size[1] // 8) if fixed_size else None
            self.pool = AtrousSpatialPyramidPooling(
                in_channels=backbone_out_channels,
                upscale_out_size=pool_out_size)
            pool_out_channels = backbone_out_channels // 8
            self.final_block = DeepLabv3FinalBlock(
                in_channels=pool_out_channels,
                out_channels=classes,
                bottleneck_factor=1)
            if self.aux:
                aux_out_channels = backbone_out_channels // 2
                self.aux_block = DeepLabv3FinalBlock(
                    in_channels=aux_out_channels,
                    out_channels=classes,
                    bottleneck_factor=4)

    def hybrid_forward(self, F, x):
        # Returns the main segmentation map, plus the auxiliary map when
        # aux=True (used as an extra training loss).
        in_size = self.in_size if self.fixed_size else x.shape[2:]
        x, y = self.backbone(x)
        x = self.pool(x)
        x = self.final_block(x, in_size)
        if self.aux:
            y = self.aux_block(y, in_size)
            return x, y
        else:
            return x
def get_deeplabv3(backbone,
                  classes,
                  aux=False,
                  model_name=None,
                  pretrained=False,
                  ctx=cpu(),
                  root=os.path.join("~", ".mxnet", "models"),
                  **kwargs):
    """
    Create DeepLabv3 model with specific parameters.

    Parameters:
    ----------
    backbone : nn.Sequential
        Feature extractor.
    classes : int
        Number of segmentation classes.
    aux : bool, default False
        Whether to output an auxiliary result.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Returns:
    -------
    DeepLabv3
        The constructed network, optionally with pretrained weights loaded.
    """
    net = DeepLabv3(backbone=backbone, classes=classes, aux=aux, **kwargs)
    if pretrained:
        # A valid model name is required to locate the weight file.
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        param_file = get_model_file(
            model_name=model_name,
            local_model_store_dir_path=root)
        # ignore_extra tolerates auxiliary-head parameters that may be
        # present in the checkpoint but absent from this instance.
        net.load_parameters(filename=param_file, ctx=ctx, ignore_extra=True)
    return net
def deeplabv3_resnetd50b_voc(pretrained_backbone=False, classes=21, aux=True, **kwargs):
    """
    DeepLabv3 with a ResNet(D)-50b backbone for Pascal VOC, from 'Rethinking
    Atrous Convolution for Semantic Image Segmentation,'
    https://arxiv.org/abs/1706.05587.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 21
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    **kwargs
        Forwarded to `get_deeplabv3` (e.g. `pretrained`, `ctx`, `root`).
    """
    resnet = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,))
    backbone = resnet.features[:-1]  # drop the final pooling stage
    return get_deeplabv3(backbone=backbone, classes=classes, aux=aux,
                         model_name="deeplabv3_resnetd50b_voc", **kwargs)
def deeplabv3_resnetd101b_voc(pretrained_backbone=False, classes=21, aux=True, **kwargs):
    """
    DeepLabv3 with a ResNet(D)-101b backbone for Pascal VOC, from 'Rethinking
    Atrous Convolution for Semantic Image Segmentation,'
    https://arxiv.org/abs/1706.05587.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 21
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    **kwargs
        Forwarded to `get_deeplabv3` (e.g. `pretrained`, `ctx`, `root`).
    """
    resnet = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,))
    backbone = resnet.features[:-1]  # drop the final pooling stage
    return get_deeplabv3(backbone=backbone, classes=classes, aux=aux,
                         model_name="deeplabv3_resnetd101b_voc", **kwargs)
def deeplabv3_resnetd152b_voc(pretrained_backbone=False, classes=21, aux=True, **kwargs):
    """
    DeepLabv3 with a ResNet(D)-152b backbone for Pascal VOC, from 'Rethinking
    Atrous Convolution for Semantic Image Segmentation,'
    https://arxiv.org/abs/1706.05587.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 21
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    **kwargs
        Forwarded to `get_deeplabv3` (e.g. `pretrained`, `ctx`, `root`).
    """
    resnet = resnetd152b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,))
    backbone = resnet.features[:-1]  # drop the final pooling stage
    return get_deeplabv3(backbone=backbone, classes=classes, aux=aux,
                         model_name="deeplabv3_resnetd152b_voc", **kwargs)
def deeplabv3_resnetd50b_coco(pretrained_backbone=False, classes=21, aux=True, **kwargs):
    """
    DeepLabv3 with a ResNet(D)-50b backbone for COCO, from 'Rethinking
    Atrous Convolution for Semantic Image Segmentation,'
    https://arxiv.org/abs/1706.05587.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 21
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    **kwargs
        Forwarded to `get_deeplabv3` (e.g. `pretrained`, `ctx`, `root`).
    """
    resnet = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,))
    backbone = resnet.features[:-1]  # drop the final pooling stage
    return get_deeplabv3(backbone=backbone, classes=classes, aux=aux,
                         model_name="deeplabv3_resnetd50b_coco", **kwargs)
def deeplabv3_resnetd101b_coco(pretrained_backbone=False, classes=21, aux=True, **kwargs):
    """
    DeepLabv3 with a ResNet(D)-101b backbone for COCO, from 'Rethinking
    Atrous Convolution for Semantic Image Segmentation,'
    https://arxiv.org/abs/1706.05587.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 21
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    **kwargs
        Forwarded to `get_deeplabv3` (e.g. `pretrained`, `ctx`, `root`).
    """
    resnet = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,))
    backbone = resnet.features[:-1]  # drop the final pooling stage
    return get_deeplabv3(backbone=backbone, classes=classes, aux=aux,
                         model_name="deeplabv3_resnetd101b_coco", **kwargs)
def deeplabv3_resnetd152b_coco(pretrained_backbone=False, classes=21, aux=True, **kwargs):
    """
    DeepLabv3 with a ResNet(D)-152b backbone for COCO, from 'Rethinking
    Atrous Convolution for Semantic Image Segmentation,'
    https://arxiv.org/abs/1706.05587.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 21
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    **kwargs
        Forwarded to `get_deeplabv3` (e.g. `pretrained`, `ctx`, `root`).
    """
    resnet = resnetd152b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,))
    backbone = resnet.features[:-1]  # drop the final pooling stage
    return get_deeplabv3(backbone=backbone, classes=classes, aux=aux,
                         model_name="deeplabv3_resnetd152b_coco", **kwargs)
def deeplabv3_resnetd50b_ade20k(pretrained_backbone=False, classes=150, aux=True, **kwargs):
    """
    DeepLabv3 with a ResNet(D)-50b backbone for ADE20K, from 'Rethinking
    Atrous Convolution for Semantic Image Segmentation,'
    https://arxiv.org/abs/1706.05587.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 150
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    **kwargs
        Forwarded to `get_deeplabv3` (e.g. `pretrained`, `ctx`, `root`).
    """
    resnet = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,))
    backbone = resnet.features[:-1]  # drop the final pooling stage
    return get_deeplabv3(backbone=backbone, classes=classes, aux=aux,
                         model_name="deeplabv3_resnetd50b_ade20k", **kwargs)
def deeplabv3_resnetd101b_ade20k(pretrained_backbone=False, classes=150, aux=True, **kwargs):
    """
    DeepLabv3 with a ResNet(D)-101b backbone for ADE20K, from 'Rethinking
    Atrous Convolution for Semantic Image Segmentation,'
    https://arxiv.org/abs/1706.05587.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 150
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    **kwargs
        Forwarded to `get_deeplabv3` (e.g. `pretrained`, `ctx`, `root`).
    """
    resnet = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,))
    backbone = resnet.features[:-1]  # drop the final pooling stage
    return get_deeplabv3(backbone=backbone, classes=classes, aux=aux,
                         model_name="deeplabv3_resnetd101b_ade20k", **kwargs)
def deeplabv3_resnetd50b_cityscapes(pretrained_backbone=False, classes=19, aux=True, **kwargs):
    """
    DeepLabv3 with a ResNet(D)-50b backbone for Cityscapes, from 'Rethinking
    Atrous Convolution for Semantic Image Segmentation,'
    https://arxiv.org/abs/1706.05587.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 19
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    **kwargs
        Forwarded to `get_deeplabv3` (e.g. `pretrained`, `ctx`, `root`).
    """
    resnet = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,))
    backbone = resnet.features[:-1]  # drop the final pooling stage
    return get_deeplabv3(backbone=backbone, classes=classes, aux=aux,
                         model_name="deeplabv3_resnetd50b_cityscapes", **kwargs)
def deeplabv3_resnetd101b_cityscapes(pretrained_backbone=False, classes=19, aux=True, **kwargs):
    """
    DeepLabv3 with a ResNet(D)-101b backbone for Cityscapes, from 'Rethinking
    Atrous Convolution for Semantic Image Segmentation,'
    https://arxiv.org/abs/1706.05587.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 19
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    **kwargs
        Forwarded to `get_deeplabv3` (e.g. `pretrained`, `ctx`, `root`).
    """
    resnet = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,))
    backbone = resnet.features[:-1]  # drop the final pooling stage
    return get_deeplabv3(backbone=backbone, classes=classes, aux=aux,
                         model_name="deeplabv3_resnetd101b_cityscapes", **kwargs)
def _test():
    """Smoke test: build every model variant, check its trainable parameter
    count against a known value, and verify the output shape."""
    import numpy as np
    import mxnet as mx

    in_size = (480, 480)
    aux = False
    pretrained = False

    # (constructor, classes, weight count with aux head, weight count without)
    models = [
        (deeplabv3_resnetd50b_voc, 21, 42127850, 39762645),
        (deeplabv3_resnetd101b_voc, 21, 61119978, 58754773),
        (deeplabv3_resnetd152b_voc, 21, 76763626, 74398421),
        (deeplabv3_resnetd50b_coco, 21, 42127850, 39762645),
        (deeplabv3_resnetd101b_coco, 21, 61119978, 58754773),
        (deeplabv3_resnetd152b_coco, 21, 76763626, 74398421),
        (deeplabv3_resnetd50b_ade20k, 150, 42194156, 39795798),
        (deeplabv3_resnetd101b_ade20k, 150, 61186284, 58787926),
        (deeplabv3_resnetd50b_cityscapes, 19, 42126822, 39762131),
        (deeplabv3_resnetd101b_cityscapes, 19, 61118950, 58754259),
    ]

    for model, classes, aux_weights, plain_weights in models:
        net = model(pretrained=pretrained, in_size=in_size, aux=aux)

        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)

        # net.hybridize()
        # Count only trainable parameters with a resolved shape.
        weight_count = 0
        for param in net.collect_params().values():
            if (param.shape is None) or (not param._differentiable):
                continue
            weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        assert weight_count == (aux_weights if aux else plain_weights)

        # A forward pass must preserve batch size and spatial size, with
        # one output channel per class.
        x = mx.nd.zeros((1, 3, in_size[0], in_size[1]), ctx=ctx)
        ys = net(x)
        y = ys[0] if aux else ys
        assert ((y.shape[0] == x.shape[0]) and (y.shape[1] == classes) and
                (y.shape[2] == x.shape[2]) and (y.shape[3] == x.shape[3]))
# Run the smoke test when this module is executed as a script.
if __name__ == "__main__":
    _test()
| 7,555 | 0 | 235 |
42f7ba0d012fae8fdb13b5a32fc74472e7f94a21 | 2,521 | py | Python | dodo_commands/extra/dodo_docker_commands/docker-build.py | mnieber/dodo-commands | 82330006af2c6739b030ce932ba1ff9078b241ee | [
"MIT"
] | 8 | 2016-12-01T16:45:45.000Z | 2020-05-05T20:56:57.000Z | dodo_commands/extra/dodo_docker_commands/docker-build.py | mnieber/dodo-commands | 82330006af2c6739b030ce932ba1ff9078b241ee | [
"MIT"
] | 75 | 2017-01-29T19:25:45.000Z | 2020-01-28T09:40:47.000Z | dodo_commands/extra/dodo_docker_commands/docker-build.py | mnieber/dodo-commands | 82330006af2c6739b030ce932ba1ff9078b241ee | [
"MIT"
] | 2 | 2017-06-01T09:55:20.000Z | 2017-06-08T14:45:08.000Z | import os
from dodo_commands import CommandError, Dodo
from dodo_commands.framework.util import to_arg_list
if Dodo.is_main(__name__):
args = _args()
Dodo.safe = len(args.extra_dirs) == 0
_copy_extra_dirs(args.build_dir, args.extra_dirs)
try:
Dodo.run(
[
"docker",
"build",
"-t",
args.docker_image,
"-f",
args.docker_file,
*to_arg_list(args.build_args),
".",
],
cwd=args.build_dir,
) # noqa
finally:
_remove_extra_dirs(args.build_dir, args.extra_dirs)
| 33.613333 | 87 | 0.613645 | import os
from dodo_commands import CommandError, Dodo
from dodo_commands.framework.util import to_arg_list
def _args():
    """Parse command-line arguments and resolve the chosen image's settings
    from the /DOCKER_IMAGES section of the Dodo Commands configuration."""
    Dodo.parser.add_argument(
        "name",
        choices=Dodo.get("/DOCKER_IMAGES").keys(),
        help="Key to look up in /DOCKER_IMAGES",
    )
    Dodo.parser.add_argument("build_args", help="Extra args to pass to docker build")
    args = Dodo.parse_args()
    # Per-image configuration; everything except `image` has a default.
    args.build_dir = Dodo.get(
        "/DOCKER_IMAGES/{name}/build_dir".format(name=args.name), "."
    )
    args.docker_file = Dodo.get(
        "/DOCKER_IMAGES/{name}/docker_file".format(name=args.name), "Dockerfile"
    )
    # List of "name=path" entries to copy into the build context.
    args.extra_dirs = Dodo.get(
        "/DOCKER_IMAGES/{name}/extra_dirs".format(name=args.name), []
    )
    args.docker_image = Dodo.get("/DOCKER_IMAGES/{name}/image".format(name=args.name))
    return args
def _copy_extra_dirs(local_dir, extra_dirs):
    """Copy each "name=path" entry of extra_dirs into local_dir/name so the
    docker build context can include directories from outside the build dir.

    Raises CommandError when a target already exists or a source is missing.
    """
    # todo: convert extra dirs config mapping into a list of key=val
    for extra_dir in extra_dirs or []:
        extra_dir_name, extra_dir_path = extra_dir.split("=")
        local_path = os.path.join(local_dir, extra_dir_name)
        if os.path.exists(local_path):
            raise CommandError("Cannot copy to existing path: " + local_path)
        if not os.path.exists(extra_dir_path):
            raise CommandError("Cannot copy from non-existing path: " + extra_dir_path)
        Dodo.run(["cp", "-rf", extra_dir_path, local_path])
        # If the build dir itself lives inside the copied source tree, the
        # copy now contains a nested duplicate of the build dir; remove it
        # to avoid recursive/bloated build contexts.
        if os.path.abspath(local_dir).startswith(os.path.abspath(extra_dir_path)):
            rp = os.path.relpath(local_dir, extra_dir_path)
            dead_path = os.path.join(local_path, rp)
            if os.path.exists(dead_path):
                Dodo.run(["rm", "-rf", dead_path])
def _remove_extra_dirs(local_dir, extra_dirs):
    """Delete the directories that _copy_extra_dirs placed inside local_dir."""
    for entry in extra_dirs or []:
        dir_name, _src_path = entry.split("=")
        Dodo.run(["rm", "-rf", os.path.join(local_dir, dir_name)])
if Dodo.is_main(__name__):
    args = _args()
    # Only mark the command as "safe" (no confirmation prompt) when nothing
    # is copied into the build directory.
    Dodo.safe = len(args.extra_dirs) == 0
    _copy_extra_dirs(args.build_dir, args.extra_dirs)
    try:
        Dodo.run(
            [
                "docker",
                "build",
                "-t",
                args.docker_image,
                "-f",
                args.docker_file,
                *to_arg_list(args.build_args),
                ".",
            ],
            cwd=args.build_dir,
        )  # noqa
    finally:
        # Always clean up the copied directories, even if the build fails.
        _remove_extra_dirs(args.build_dir, args.extra_dirs)
| 1,783 | 0 | 69 |
03abcaec9cc791d47eb2e69bfdfbdadb1f0eea8c | 4,883 | py | Python | tests/function/test_project.py | vaenow/aliyun-datahub-sdk-python | bcb82ad2f558ab34c3b767ad52c667b62c10e37e | [
"Apache-2.0"
] | 1 | 2021-07-07T08:15:19.000Z | 2021-07-07T08:15:19.000Z | tests/function/test_project.py | vaenow/aliyun-datahub-sdk-python | bcb82ad2f558ab34c3b767ad52c667b62c10e37e | [
"Apache-2.0"
] | null | null | null | tests/function/test_project.py | vaenow/aliyun-datahub-sdk-python | bcb82ad2f558ab34c3b767ad52c667b62c10e37e | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import random
import sys
import time
import traceback
from six.moves import configparser
from datahub import DataHub
from datahub.exceptions import ResourceExistException, ResourceNotFoundException, InvalidParameterException
current_path = os.path.split(os.path.realpath(__file__))[0]
root_path = os.path.join(current_path, '../..')
configer = configparser.ConfigParser()
configer.read(os.path.join(current_path, '../datahub.ini'))
access_id = configer.get('datahub', 'access_id')
access_key = configer.get('datahub', 'access_key')
endpoint = configer.get('datahub', 'endpoint')
print("=======================================")
print("access_id: %s" % access_id)
print("access_key: %s" % access_key)
print("endpoint: %s" % endpoint)
print("=======================================\n\n")
if not access_id or not access_key or not endpoint:
print("[access_id, access_key, endpoint] must be set in datahub.ini!")
sys.exit(-1)
dh = DataHub(access_id, access_key, endpoint)
# run directly
if __name__ == '__main__':
test = TestProject()
test.test_list_project()
test.test_create_and_delete_project()
test.test_create_invalid_project()
test.test_get_unexist_project()
test.test_get_exist_project()
| 33.675862 | 107 | 0.649191 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import random
import sys
import time
import traceback
from six.moves import configparser
from datahub import DataHub
from datahub.exceptions import ResourceExistException, ResourceNotFoundException, InvalidParameterException
current_path = os.path.split(os.path.realpath(__file__))[0]
root_path = os.path.join(current_path, '../..')
configer = configparser.ConfigParser()
configer.read(os.path.join(current_path, '../datahub.ini'))
access_id = configer.get('datahub', 'access_id')
access_key = configer.get('datahub', 'access_key')
endpoint = configer.get('datahub', 'endpoint')
print("=======================================")
print("access_id: %s" % access_id)
print("access_key: %s" % access_key)
print("endpoint: %s" % endpoint)
print("=======================================\n\n")
if not access_id or not access_key or not endpoint:
print("[access_id, access_key, endpoint] must be set in datahub.ini!")
sys.exit(-1)
dh = DataHub(access_id, access_key, endpoint)
class TestProject:
    """Functional tests for DataHub project CRUD operations.

    These tests talk to the live DataHub endpoint configured in datahub.ini;
    each test uses a timestamp-based project name and cleans up after itself.
    """

    def test_list_project(self):
        """A freshly created project must appear in list_project()."""
        project_name_1 = "project_test_p%d_0" % int(time.time())
        try:
            dh.create_project(project_name_1, '')
            result = dh.list_project()
            print(result)
            assert project_name_1 in result.project_names
        except ResourceExistException:
            # Name collision with a leftover project; treat as non-fatal.
            pass
        except Exception:
            print(traceback.format_exc())
            sys.exit(-1)
        finally:
            dh.delete_project(project_name_1)

    def test_create_and_delete_project(self):
        """Creating and then deleting a project must succeed without error."""
        project_name_2 = "project_test_p%d_2" % int(time.time())
        try:
            dh.create_project(project_name_2, "comment_test")
        except ResourceExistException:
            pass
        except Exception:
            print(traceback.format_exc())
            sys.exit(-1)
        finally:
            dh.delete_project(project_name_2)

    def test_create_invalid_project(self):
        """Invalid project names (empty, bad leading char, too short/long)
        must be rejected with InvalidParameterException."""
        invalid_project_names = ["", "1invalid", "_invalid", "!invalid", "in",
                                 "invalidinvalidinvalidinvalidinvalidinvalidinvalidinvalid"]
        for invalid_project_name in invalid_project_names:
            try:
                dh.create_project(invalid_project_name, '')
            except InvalidParameterException:
                pass
            else:
                raise Exception('Create success with invalid project name!')

    def test_get_exist_project(self):
        """get_project() on an existing project returns its metadata."""
        project_name_3 = "project_test_p%d_3" % int(time.time())
        try:
            dh.create_project(project_name_3, 'comment_test')
            project_1 = dh.get_project(project_name_3)
            assert project_1.project_name == project_name_3
            assert project_1.comment == 'comment_test'
            assert project_1.create_time > 0
            assert project_1.last_modify_time > 0
        except ResourceExistException:
            pass
        except Exception:
            print(traceback.format_exc())
            sys.exit(-1)
        finally:
            dh.delete_project(project_name_3)

    def test_get_unexist_project(self):
        """get_project() on a non-existent project must raise
        ResourceNotFoundException."""
        unexist_project_name = "unexist_project_test_p%d" % random.randint(1000, 9999)
        results = dh.list_project()
        print(results)
        unexist = True
        # try to find an unexist project name and test
        for i in range(0, 10):
            if unexist_project_name in results.project_names:
                unexist = False
                unexist_project_name = "unexist_project_test_p%d" % random.randint(1000, 9999)
            else:
                unexist = True
                break
        if unexist:
            try:
                dh.get_project(unexist_project_name)
            except ResourceNotFoundException:
                pass
# Run the functional tests sequentially when executed directly as a script
# (no pytest runner required).
if __name__ == '__main__':
    test = TestProject()
    test.test_list_project()
    test.test_create_and_delete_project()
    test.test_create_invalid_project()
    test.test_get_unexist_project()
    test.test_get_exist_project()
| 2,651 | -3 | 158 |
a948f3237a28cadd19394dc7e7a272bddff7c57e | 712 | py | Python | sqlite tut.py | Olateetoms/billingSystem | f9e4c77645fdcad80761fc6f717387aa91fe92d6 | [
"bzip2-1.0.6"
] | null | null | null | sqlite tut.py | Olateetoms/billingSystem | f9e4c77645fdcad80761fc6f717387aa91fe92d6 | [
"bzip2-1.0.6"
] | null | null | null | sqlite tut.py | Olateetoms/billingSystem | f9e4c77645fdcad80761fc6f717387aa91fe92d6 | [
"bzip2-1.0.6"
] | null | null | null | import sqlite3
#connection obj that represent our database with in memory database to create a file called employee database file
conn = sqlite3.connect(':memory:') #so it can start from scratch, if you dont want to keep deleting a database over and over..you add :memory:
#to create a cursor to start running sql command
c = conn.cursor()
#c.execute("""CREATE TABLE employees (
#first text,
#last text,
#pay integer
#)""")
#to start adding data into the database
c.execute("INSERT INTO employees VALUES ('Corey', 'real', 500)")
conn.commit()
c.execute("SELECT * FROM employees WHERE last='real'")
print (c.fetchall())
conn.commit()
conn.close()
import sqlite3

# Connection object representing the database. ':memory:' gives a fresh,
# empty in-memory database each run, so the schema must be (re)created
# every time before any rows can be inserted.
conn = sqlite3.connect(':memory:')

# Cursor used to run SQL commands.
c = conn.cursor()

# BUG FIX: this CREATE TABLE was commented out; with an in-memory database
# the table never exists, so the INSERT below raised
# sqlite3.OperationalError: no such table: employees.
c.execute("""CREATE TABLE employees (
            first text,
            last text,
            pay integer
            )""")

# Add a row to the database.
c.execute("INSERT INTO employees VALUES ('Corey', 'real', 500)")
conn.commit()

# Query it back.
c.execute("SELECT * FROM employees WHERE last='real'")
rows = c.fetchall()
print(rows)

conn.commit()
conn.close()
| 0 | 0 | 0 |
4329c4002060adac4b9358813536451b914ca285 | 6,818 | py | Python | test/test_util.py | jwodder/check-wheel-contents | c8282c79114e2e3f0cd9c29834b7b178c03413dc | [
"MIT"
] | 115 | 2020-04-01T20:54:48.000Z | 2022-03-10T14:56:41.000Z | test/test_util.py | jwodder/check-wheel-contents | c8282c79114e2e3f0cd9c29834b7b178c03413dc | [
"MIT"
] | 21 | 2020-04-09T11:01:46.000Z | 2021-07-09T11:45:09.000Z | test/test_util.py | jwodder/check-wheel-contents | c8282c79114e2e3f0cd9c29834b7b178c03413dc | [
"MIT"
] | 8 | 2020-04-02T21:32:00.000Z | 2021-11-13T11:10:30.000Z | import pytest
from check_wheel_contents.errors import WheelValidationError
from check_wheel_contents.util import (
comma_split,
find_wheel_dirs,
is_data_dir,
is_dist_info_dir,
is_stubs_dir,
pymodule_basename,
)
@pytest.mark.parametrize(
"filename,expected",
[
("foo.py", "foo"),
("FOO.PY", None),
("foo.pyc", None),
("foo.pyo", None),
(".py", None),
("py", None),
("not-an-identifier.py", "not-an-identifier"),
("def.py", "def"),
("extra.ext.py", "extra.ext"),
("foo.cpython-38-x86_64-linux-gnu.so", "foo"),
("graph.cpython-37m-darwin.so", "graph"),
("foo.cp38-win_amd64.pyd", "foo"),
("foo.cp38-win32.pyd", "foo"),
("foo.so", "foo"),
("foo.pyd", "foo"),
("_ffi.abi3.so", "_ffi"),
],
)
@pytest.mark.parametrize(
"sin,lout",
[
("", []),
(" ", []),
(",", []),
(" , ", []),
(" , , ", []),
("foo", ["foo"]),
("foo,bar", ["foo", "bar"]),
("foo, bar", ["foo", "bar"]),
("foo ,bar", ["foo", "bar"]),
(" foo , bar ", ["foo", "bar"]),
(" foo , , bar ", ["foo", "bar"]),
("foo,,bar", ["foo", "bar"]),
("foo bar", ["foo bar"]),
(",foo", ["foo"]),
("foo,", ["foo"]),
],
)
@pytest.mark.parametrize(
"name,expected",
[
("somepackage-1.0.0.dist-info", True),
("somepackage.dist-info", False),
("somepackage-1.0.0-1.dist-info", False),
("somepackage-1.0.0.data", False),
("SOME_._PaCkAgE-0.dist-info", True),
("foo-1!2+local.dist-info", True),
("foo-1_2_local.dist-info", True),
(".dist-info", False),
],
)
@pytest.mark.parametrize(
"name,expected",
[
("somepackage-1.0.0.data", True),
("somepackage.data", False),
("somepackage-1.0.0-1.data", False),
("somepackage-1.0.0.dist-info", False),
("SOME_._PaCkAgE-0.data", True),
("foo-1!2+local.data", True),
("foo-1_2_local.data", True),
(".data", False),
],
)
@pytest.mark.parametrize(
"name,expected",
[
("foo-stubs", True),
("foo-stub", False),
("foo-STUBS", False),
("-stubs", False),
("def-stubs", False),
("has-hyphen-stubs", False),
("has.period-stubs", False),
],
)
@pytest.mark.parametrize(
"namelist,project,version,expected",
[
(
[
"foo.py",
"foo-1.0.dist-info/WHEEL",
"foo-1.0.dist-info/RECORD",
],
"foo",
"1.0",
("foo-1.0.dist-info", None),
),
(
[
"foo.py",
"foo-1.0.dist-info/WHEEL",
"foo-1.0.dist-info/RECORD",
"foo-1.0.data/scripts/bar",
],
"foo",
"1.0",
("foo-1.0.dist-info", "foo-1.0.data"),
),
(
[
"foo.py",
"FOO-1.0.0.dist-info/WHEEL",
"FOO-1.0.0.dist-info/RECORD",
"foo-1.data/scripts/bar",
],
"foo",
"1.0",
("FOO-1.0.0.dist-info", "foo-1.data"),
),
(
[
"foo.py",
"FOO-1.0_1.dist-info/WHEEL",
"FOO-1.0_1.dist-info/RECORD",
],
"foo",
"1.0.post1",
("FOO-1.0_1.dist-info", None),
),
],
)
@pytest.mark.parametrize(
"namelist,project,version,msg",
[
(
[
"foo.py",
"foo-1.0.dist/WHEEL",
],
"foo",
"1.0",
"No .dist-info directory in wheel",
),
(
[
"foo.py",
"bar-1.0.dist-info/WHEEL",
],
"foo",
"1.0",
"Project & version of wheel's .dist-info directory do not match wheel"
" name: 'bar-1.0.dist-info'",
),
(
[
"foo.py",
"foo-2.0.dist-info/WHEEL",
],
"foo",
"1.0",
"Project & version of wheel's .dist-info directory do not match wheel"
" name: 'foo-2.0.dist-info'",
),
(
[
"foo.py",
"foo-1.0.dist-info/WHEEL",
"bar-2.0.dist-info/RECORD",
],
"foo",
"1.0",
"Wheel contains multiple .dist-info directories",
),
(
[
"foo.py",
"FOO-1.0.0.dist-info/WHEEL",
"foo-1.dist-info/RECORD",
],
"foo",
"1.0",
"Wheel contains multiple .dist-info directories",
),
(
["foo.py", ".dist-info/WHEEL"],
"foo",
"1.0",
"No .dist-info directory in wheel",
),
(
[
"foo.py",
"foo-1.0.dist-info/WHEEL",
"foo-1.0.data/scripts/bar",
"FOO-1.data/headers/foo.h",
],
"foo",
"1.0",
"Wheel contains multiple .data directories",
),
(
[
"foo.py",
"foo-1.0.dist-info/WHEEL",
"bar-1.0.data/scripts/bar",
],
"foo",
"1.0",
"Project & version of wheel's .data directory do not match"
" wheel name: 'bar-1.0.data'",
),
(
[
"foo.py",
"foo-1.0.dist-info/WHEEL",
"foo-2.0.data/scripts/bar",
],
"foo",
"1.0",
"Project & version of wheel's .data directory do not match"
" wheel name: 'foo-2.0.data'",
),
],
)
| 26.223077 | 82 | 0.429891 | import pytest
from check_wheel_contents.errors import WheelValidationError
from check_wheel_contents.util import (
comma_split,
find_wheel_dirs,
is_data_dir,
is_dist_info_dir,
is_stubs_dir,
pymodule_basename,
)
@pytest.mark.parametrize(
"filename,expected",
[
("foo.py", "foo"),
("FOO.PY", None),
("foo.pyc", None),
("foo.pyo", None),
(".py", None),
("py", None),
("not-an-identifier.py", "not-an-identifier"),
("def.py", "def"),
("extra.ext.py", "extra.ext"),
("foo.cpython-38-x86_64-linux-gnu.so", "foo"),
("graph.cpython-37m-darwin.so", "graph"),
("foo.cp38-win_amd64.pyd", "foo"),
("foo.cp38-win32.pyd", "foo"),
("foo.so", "foo"),
("foo.pyd", "foo"),
("_ffi.abi3.so", "_ffi"),
],
)
def test_pymodule_basename(filename, expected):
    """Each candidate filename maps to its importable module stem, or None."""
    stem = pymodule_basename(filename)
    assert stem == expected
@pytest.mark.parametrize(
"sin,lout",
[
("", []),
(" ", []),
(",", []),
(" , ", []),
(" , , ", []),
("foo", ["foo"]),
("foo,bar", ["foo", "bar"]),
("foo, bar", ["foo", "bar"]),
("foo ,bar", ["foo", "bar"]),
(" foo , bar ", ["foo", "bar"]),
(" foo , , bar ", ["foo", "bar"]),
("foo,,bar", ["foo", "bar"]),
("foo bar", ["foo bar"]),
(",foo", ["foo"]),
("foo,", ["foo"]),
],
)
def test_comma_split(sin, lout):
    """Comma-splitting trims surrounding whitespace and drops empty segments."""
    pieces = comma_split(sin)
    assert pieces == lout
@pytest.mark.parametrize(
"name,expected",
[
("somepackage-1.0.0.dist-info", True),
("somepackage.dist-info", False),
("somepackage-1.0.0-1.dist-info", False),
("somepackage-1.0.0.data", False),
("SOME_._PaCkAgE-0.dist-info", True),
("foo-1!2+local.dist-info", True),
("foo-1_2_local.dist-info", True),
(".dist-info", False),
],
)
def test_is_dist_info_dir(name, expected):
    """Only well-formed ``<project>-<version>.dist-info`` names are accepted."""
    verdict = is_dist_info_dir(name)
    assert verdict is expected
@pytest.mark.parametrize(
"name,expected",
[
("somepackage-1.0.0.data", True),
("somepackage.data", False),
("somepackage-1.0.0-1.data", False),
("somepackage-1.0.0.dist-info", False),
("SOME_._PaCkAgE-0.data", True),
("foo-1!2+local.data", True),
("foo-1_2_local.data", True),
(".data", False),
],
)
def test_is_data_dir(name, expected):
    """Only well-formed ``<project>-<version>.data`` names are accepted."""
    verdict = is_data_dir(name)
    assert verdict is expected
@pytest.mark.parametrize(
"name,expected",
[
("foo-stubs", True),
("foo-stub", False),
("foo-STUBS", False),
("-stubs", False),
("def-stubs", False),
("has-hyphen-stubs", False),
("has.period-stubs", False),
],
)
def test_is_stubs_dir(name, expected):
    """A stubs dir is a valid Python identifier followed by ``-stubs``."""
    verdict = is_stubs_dir(name)
    assert verdict is expected
@pytest.mark.parametrize(
"namelist,project,version,expected",
[
(
[
"foo.py",
"foo-1.0.dist-info/WHEEL",
"foo-1.0.dist-info/RECORD",
],
"foo",
"1.0",
("foo-1.0.dist-info", None),
),
(
[
"foo.py",
"foo-1.0.dist-info/WHEEL",
"foo-1.0.dist-info/RECORD",
"foo-1.0.data/scripts/bar",
],
"foo",
"1.0",
("foo-1.0.dist-info", "foo-1.0.data"),
),
(
[
"foo.py",
"FOO-1.0.0.dist-info/WHEEL",
"FOO-1.0.0.dist-info/RECORD",
"foo-1.data/scripts/bar",
],
"foo",
"1.0",
("FOO-1.0.0.dist-info", "foo-1.data"),
),
(
[
"foo.py",
"FOO-1.0_1.dist-info/WHEEL",
"FOO-1.0_1.dist-info/RECORD",
],
"foo",
"1.0.post1",
("FOO-1.0_1.dist-info", None),
),
],
)
def test_find_wheel_dirs(namelist, project, version, expected):
    """A valid namelist yields the ``(dist_info_dir, data_dir)`` pair."""
    dirs = find_wheel_dirs(namelist, project, version)
    assert dirs == expected
@pytest.mark.parametrize(
"namelist,project,version,msg",
[
(
[
"foo.py",
"foo-1.0.dist/WHEEL",
],
"foo",
"1.0",
"No .dist-info directory in wheel",
),
(
[
"foo.py",
"bar-1.0.dist-info/WHEEL",
],
"foo",
"1.0",
"Project & version of wheel's .dist-info directory do not match wheel"
" name: 'bar-1.0.dist-info'",
),
(
[
"foo.py",
"foo-2.0.dist-info/WHEEL",
],
"foo",
"1.0",
"Project & version of wheel's .dist-info directory do not match wheel"
" name: 'foo-2.0.dist-info'",
),
(
[
"foo.py",
"foo-1.0.dist-info/WHEEL",
"bar-2.0.dist-info/RECORD",
],
"foo",
"1.0",
"Wheel contains multiple .dist-info directories",
),
(
[
"foo.py",
"FOO-1.0.0.dist-info/WHEEL",
"foo-1.dist-info/RECORD",
],
"foo",
"1.0",
"Wheel contains multiple .dist-info directories",
),
(
["foo.py", ".dist-info/WHEEL"],
"foo",
"1.0",
"No .dist-info directory in wheel",
),
(
[
"foo.py",
"foo-1.0.dist-info/WHEEL",
"foo-1.0.data/scripts/bar",
"FOO-1.data/headers/foo.h",
],
"foo",
"1.0",
"Wheel contains multiple .data directories",
),
(
[
"foo.py",
"foo-1.0.dist-info/WHEEL",
"bar-1.0.data/scripts/bar",
],
"foo",
"1.0",
"Project & version of wheel's .data directory do not match"
" wheel name: 'bar-1.0.data'",
),
(
[
"foo.py",
"foo-1.0.dist-info/WHEEL",
"foo-2.0.data/scripts/bar",
],
"foo",
"1.0",
"Project & version of wheel's .data directory do not match"
" wheel name: 'foo-2.0.data'",
),
],
)
def test_find_wheel_dirs_error(namelist, project, version, msg):
    """Malformed namelists raise WheelValidationError with the exact message."""
    with pytest.raises(WheelValidationError) as err:
        find_wheel_dirs(namelist, project, version)
    assert str(err.value) == msg
| 605 | 0 | 154 |
85db824e5e870124edc7d516245585cd93da9d5d | 453 | py | Python | setup.py | BiobankLab/pycdhit | feb6b702d0820d4eca890cecaca62709bd341c69 | [
"Apache-2.0"
] | null | null | null | setup.py | BiobankLab/pycdhit | feb6b702d0820d4eca890cecaca62709bd341c69 | [
"Apache-2.0"
] | null | null | null | setup.py | BiobankLab/pycdhit | feb6b702d0820d4eca890cecaca62709bd341c69 | [
"Apache-2.0"
] | null | null | null | from setuptools import setup
# Distribution metadata for the "pycdhit" package (CD-HIT result analysis).
setup(name='pycdhit',
      version='0.0.1',
      description='CDHIT results analysis tool set',
      # Homepage placeholder kept commented out (from the setuptools example).
      #url='http://github.com/storborg/funniest',
      author='Blazej Marciniak',
      author_email='blazejmarciniak@gmail.com',
      license='Apache 2.0',
      packages=['pycdhit'],
      # Runtime dependencies: scientific-Python stack for stats and plotting.
      install_requires=[
          'scipy', 'numpy', 'pandas', 'matplotlib'
      ],
      scripts=[],
      # Install unzipped rather than as a zipped egg.
      zip_safe=False)
| 28.3125 | 53 | 0.593819 | from setuptools import setup
# Distribution metadata for the "pycdhit" package (CD-HIT result analysis).
setup(name='pycdhit',
      version='0.0.1',
      description='CDHIT results analysis tool set',
      # Homepage placeholder kept commented out (from the setuptools example).
      #url='http://github.com/storborg/funniest',
      author='Blazej Marciniak',
      author_email='blazejmarciniak@gmail.com',
      license='Apache 2.0',
      packages=['pycdhit'],
      # Runtime dependencies: scientific-Python stack for stats and plotting.
      install_requires=[
          'scipy', 'numpy', 'pandas', 'matplotlib'
      ],
      scripts=[],
      # Install unzipped rather than as a zipped egg.
      zip_safe=False)
| 0 | 0 | 0 |
e36d76255d485f302ade235b4c136882fb149ab1 | 1,368 | py | Python | docs/examples/filters/f_sphinx/__init__.py | BusyJay/sokoban | a7fac324e9ee725c7954016d368d799ca2a7c47c | [
"MIT"
] | 1 | 2018-07-08T06:12:02.000Z | 2018-07-08T06:12:02.000Z | docs/examples/filters/f_sphinx/__init__.py | BusyJay/sokoban | a7fac324e9ee725c7954016d368d799ca2a7c47c | [
"MIT"
] | null | null | null | docs/examples/filters/f_sphinx/__init__.py | BusyJay/sokoban | a7fac324e9ee725c7954016d368d799ca2a7c47c | [
"MIT"
] | null | null | null | """A filter that can parse sphinx docs
"""
__author__ = 'jay'
__version__ = (0, 0, 1, 'alpha', 0)
catalog = ['read']
# Configuration-form schema consumed by the hosting application's plugin UI:
# each entry maps a field name to its widget metadata (label, input type,
# default value, help text).
form = {
    # Interpreter flavour used to run Sphinx.
    "lang" : {
        "verbose": "Language",
        "require": False,
        "type": "select",
        "choices": [["python2", "Python 2"], ["python3", "Python 3"]],
        "value": "python2",
    },
    # Commit pattern that triggers a rebuild; empty means rebuild every commit.
    "trigger_pattern": {
        "verbose": "Trigger Pattern",
        "require": False,
        "helper_text": "Sphinx will regenerate docs once trigger is matched."
        " Empty means regenerate docs every commit.",
    },
    # Where the generated documentation is found after the build.
    "docs_root": {
        "verbose": "Documentation Root",
        "helper_text": "Where the generated docs locate."
    },
    # Directory the build command is executed in.
    "working_dir": {
        "verbose": "Working Directory",
        "require": False,
        "helper_text": "Path to working tree that build command run in.",
    },
    # Command that builds the docs; empty means the docs are pre-built.
    "build_command": {
        "verbose": "Build Command",
        "require": False,
        "type": "textarea",
        "helper_text": "Command that builds docs. Empty means docs are"
        " already there",
    },
    # Whether build failures should be tolerated.
    "ignore_errors": {
        "verbose": "Ignore Errors",
        "type": "checkbox",
        "value": False,
    },
}
from main import Filter
__all__ = ['Filter']
| 22.8 | 77 | 0.537281 | """A filter that can parse sphinx docs
"""
__author__ = 'jay'
__version__ = (0, 0, 1, 'alpha', 0)
catalog = ['read']
# Configuration-form schema consumed by the hosting application's plugin UI:
# each entry maps a field name to its widget metadata (label, input type,
# default value, help text).
form = {
    # Interpreter flavour used to run Sphinx.
    "lang" : {
        "verbose": "Language",
        "require": False,
        "type": "select",
        "choices": [["python2", "Python 2"], ["python3", "Python 3"]],
        "value": "python2",
    },
    # Commit pattern that triggers a rebuild; empty means rebuild every commit.
    "trigger_pattern": {
        "verbose": "Trigger Pattern",
        "require": False,
        "helper_text": "Sphinx will regenerate docs once trigger is matched."
        " Empty means regenerate docs every commit.",
    },
    # Where the generated documentation is found after the build.
    "docs_root": {
        "verbose": "Documentation Root",
        "helper_text": "Where the generated docs locate."
    },
    # Directory the build command is executed in.
    "working_dir": {
        "verbose": "Working Directory",
        "require": False,
        "helper_text": "Path to working tree that build command run in.",
    },
    # Command that builds the docs; empty means the docs are pre-built.
    "build_command": {
        "verbose": "Build Command",
        "require": False,
        "type": "textarea",
        "helper_text": "Command that builds docs. Empty means docs are"
        " already there",
    },
    # Whether build failures should be tolerated.
    "ignore_errors": {
        "verbose": "Ignore Errors",
        "type": "checkbox",
        "value": False,
    },
}
def on_install(db):
    """Plugin lifecycle hook run at install time; intentionally a no-op.

    ``db`` is presumably the host application's database handle -- unused
    here because this filter needs no persistent state (TODO confirm).
    """
    pass
def on_upgrade(db, pre):
    """Plugin lifecycle hook run when upgrading; intentionally a no-op.

    ``db`` is presumably the host's database handle and ``pre`` the
    previously installed version -- both unused (TODO confirm semantics).
    """
    pass
def on_uninstall(db):
    """Plugin lifecycle hook run at uninstall time; intentionally a no-op.

    ``db`` is presumably the host's database handle -- unused, as there is
    no state to clean up (TODO confirm).
    """
    pass
from main import Filter
__all__ = ['Filter']
| 28 | 0 | 69 |
9c3ba2b5275e6cdda80be92f9bd65056d0f611d8 | 29,649 | py | Python | appsite/home/migrations/0001_initial.py | markussitzmann/chembience-home | 1addcd26d1168a099cf7ef715425f3e10ab5221f | [
"BSD-3-Clause"
] | null | null | null | appsite/home/migrations/0001_initial.py | markussitzmann/chembience-home | 1addcd26d1168a099cf7ef715425f3e10ab5221f | [
"BSD-3-Clause"
] | null | null | null | appsite/home/migrations/0001_initial.py | markussitzmann/chembience-home | 1addcd26d1168a099cf7ef715425f3e10ab5221f | [
"BSD-3-Clause"
] | null | null | null | # Generated by Django 2.2.4 on 2019-08-20 20:44
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
import wagtail.contrib.table_block.blocks
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.embeds.blocks
import wagtail.images.blocks
| 71.271635 | 2,131 | 0.630443 | # Generated by Django 2.2.4 on 2019-08-20 20:44
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
import wagtail.contrib.table_block.blocks
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.embeds.blocks
import wagtail.images.blocks
class Migration(migrations.Migration):
initial = True
dependencies = [
('wagtailimages', '0001_squashed_0021'),
('wagtailcore', '0041_group_collection_permissions_verbose_name_plural'),
]
operations = [
migrations.CreateModel(
name='ActionButtons',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('small', models.BooleanField(default=False, verbose_name='Small Buttons')),
('fit', models.BooleanField(default=False, verbose_name='Fit Buttons')),
('stacked', models.BooleanField(default=False, verbose_name='Stacked Buttons')),
],
options={
'verbose_name': 'Action Button',
'verbose_name_plural': 'Action Buttons',
},
),
migrations.CreateModel(
name='Button',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('url', models.URLField(blank=True, default='', max_length=255)),
('size', models.CharField(blank=True, choices=[('', 'Default'), ('small', 'Small'), ('large', 'Large')], default='', max_length=25)),
('icon', models.CharField(blank=True, default='', max_length=25)),
('primary', models.BooleanField(default=False)),
],
options={
'verbose_name': 'Button',
},
),
migrations.CreateModel(
name='ColorOptions',
fields=[
('color_id', models.AutoField(primary_key=True, serialize=False)),
('color', models.CharField(blank=True, choices=[('color0', 'color 0'), ('color1', 'color 1'), ('color2', 'color 2'), ('color3', 'color 3'), ('color4', 'color 4'), ('color5', 'color 5'), ('color6', 'color 6'), ('color7', 'color 7')], default='color0', max_length=30, null=True)),
('invert', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='ContentAlignmentOptions',
fields=[
('alignment_id', models.AutoField(primary_key=True, serialize=False)),
('alignment', models.CharField(blank=True, choices=[('content-align-left', 'left'), ('content-align-center', 'center'), ('content-align-right', 'right')], default='content-align-left', max_length=30, null=True, verbose_name='Alignment')),
],
),
migrations.CreateModel(
name='ContentOrientationOptions',
fields=[
('orientation_id', models.AutoField(primary_key=True, serialize=False)),
('orientation', models.CharField(choices=[('orient-left', 'left'), ('orient-center', 'center'), ('orient-right', 'right')], default='orient-left', max_length=30, verbose_name='Orientation')),
],
),
migrations.CreateModel(
name='FooterPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('text', wagtail.core.fields.RichTextField(blank=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='IconButtons',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
],
options={
'verbose_name': 'Icon Button',
'verbose_name_plural': 'Icon Buttons',
},
),
migrations.CreateModel(
name='ImagePositionOptions',
fields=[
('ip_id', models.AutoField(primary_key=True, serialize=False)),
('image_position', models.CharField(blank=True, choices=[('image-position-left', 'left'), ('image-position-center', 'center'), ('image-position-right', 'right')], default='image-position-left', max_length=30, null=True, verbose_name='Image Position')),
],
),
migrations.CreateModel(
name='OnloadContentFadeOptions',
fields=[
('onload_content_fade_id', models.AutoField(primary_key=True, serialize=False)),
('onload_content_fade', models.CharField(blank=True, choices=[(None, 'none'), ('onload-content-fade-up', 'up'), ('onload-content-fade-down', 'down'), ('onload-content-fade-left', 'left'), ('onload-content-fade-right', 'right'), ('onload-content-fade-in', 'in')], default=None, max_length=30, null=True, verbose_name='Onload Fade')),
],
),
migrations.CreateModel(
name='OnloadImageFadeOptions',
fields=[
('onload_image_fade_id', models.AutoField(primary_key=True, serialize=False)),
('onload_image_fade', models.CharField(blank=True, choices=[(None, 'none'), ('onload-image-fade-up', 'up'), ('onload-image-fade-down', 'down'), ('onload-image-fade-left', 'left'), ('onload-image-fade-right', 'right'), ('onload-image-fade-in', 'in')], default=None, max_length=30, null=True, verbose_name='Image Fade')),
],
),
migrations.CreateModel(
name='OnscrollContentFadeOptions',
fields=[
('onscroll_content_fade_id', models.AutoField(primary_key=True, serialize=False)),
('onscroll_content_fade', models.CharField(blank=True, choices=[(None, 'none'), ('onscroll-content-fade-up', 'up'), ('onscroll-content-fade-down', 'down'), ('onscroll-content-fade-left', 'left'), ('onscroll-content-fade-right', 'right'), ('onscroll-content-fade-in', 'in')], default=None, max_length=30, null=True, verbose_name='Content Fade')),
],
),
migrations.CreateModel(
name='OnscrollImageFadeOptions',
fields=[
('onscroll_image_fade_id', models.AutoField(primary_key=True, serialize=False)),
('onscroll_image_fade', models.CharField(blank=True, choices=[(None, 'none'), ('onscroll-image-fade-up', 'up'), ('onscroll-image-fade-down', 'down'), ('onscroll-image-fade-left', 'left'), ('onscroll-image-fade-right', 'right'), ('onscroll-image-fade-in', 'in')], default=None, max_length=30, null=True, verbose_name='Image Fade')),
],
),
migrations.CreateModel(
name='ScreenOptions',
fields=[
('screen_id', models.AutoField(primary_key=True, serialize=False)),
('screen', models.CharField(blank=True, choices=[(None, 'none'), ('fullscreen', 'full screen'), ('halfscreen', 'half screen')], default='none', max_length=30, null=True)),
],
),
migrations.CreateModel(
name='SectionPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('header', models.CharField(max_length=255)),
('content', wagtail.core.fields.StreamField([('heading_block', wagtail.core.blocks.StructBlock([('separator', wagtail.core.blocks.BooleanBlock(default=False, required=False)), ('heading_text', wagtail.core.blocks.CharBlock(classname='title', required=True)), ('size', wagtail.core.blocks.ChoiceBlock(blank=True, choices=[('h1', 'H1'), ('h2', 'H2'), ('h3', 'H3'), ('h4', 'H4'), ('h5', 'H5'), ('h6', 'H6')], required=False))])), ('paragraph_block', wagtail.core.blocks.RichTextBlock(icon='fa-paragraph', template='home/blocks/paragraph_block.html')), ('image_block', wagtail.core.blocks.StructBlock([('separator', wagtail.core.blocks.BooleanBlock(default=False, required=False)), ('image', wagtail.images.blocks.ImageChooserBlock(required=True)), ('caption', wagtail.core.blocks.CharBlock(required=False)), ('attribution', wagtail.core.blocks.CharBlock(required=False))])), ('block_quote', wagtail.core.blocks.StructBlock([('separator', wagtail.core.blocks.BooleanBlock(default=False, required=False)), ('text', wagtail.core.blocks.TextBlock()), ('attribute_name', wagtail.core.blocks.CharBlock(blank=True, label='e.g. 
Mary Berry', required=False))])), ('embed_block', wagtail.embeds.blocks.EmbedBlock(help_text='Insert an embed URL e.g https://www.youtube.com/embed/SGJFWirQ3ks', icon='fa-s15', template='home/blocks/embed_block.html')), ('table', wagtail.contrib.table_block.blocks.TableBlock(default_table_options={'autoColumnSize': False, 'colHeaders': False, 'contextMenu': True, 'editor': 'text', 'height': 108, 'minSpareRows': 0, 'renderer': 'text', 'rowHeaders': False, 'startCols': 3, 'startRows': 3, 'stretchH': 'all'})), ('code', wagtail.core.blocks.StructBlock([('language', wagtail.core.blocks.ChoiceBlock(choices=[('bash', 'Bash/Shell'), ('css', 'CSS'), ('diff', 'diff'), ('html', 'HTML'), ('javascript', 'Javascript'), ('json', 'JSON'), ('python', 'Python'), ('scss', 'SCSS'), ('yaml', 'YAML')], help_text='Coding language', identifier='language', label='Language')), ('code', wagtail.core.blocks.TextBlock(identifier='code', label='Code'))], label='Code'))], blank=True, verbose_name='Page Content')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='SizeOptions',
fields=[
('size_id', models.AutoField(primary_key=True, serialize=False)),
('size', models.CharField(choices=[('small', 'Small'), ('medium', 'Medium'), ('big', 'Big')], default='medium', max_length=10)),
],
),
migrations.CreateModel(
name='SpotlightIndexPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='StyleOptions',
fields=[
('style_id', models.AutoField(primary_key=True, serialize=False)),
('style', models.CharField(choices=[('style1', 'No. 1'), ('style2', 'No. 2'), ('style3', 'No. 3'), ('style4', 'No. 4'), ('style5', 'No. 5'), ('style6', 'No. 6'), ('style7', 'No. 7')], default='style0', max_length=30)),
],
),
migrations.CreateModel(
name='StylingBase',
fields=[
('name', models.CharField(max_length=30, primary_key=True, serialize=False)),
],
),
migrations.CreateModel(
name='BannerStyling',
fields=[
('screenoptions_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, to='home.ScreenOptions')),
('onscrollimagefadeoptions_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, to='home.OnscrollImageFadeOptions')),
('onscrollcontentfadeoptions_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, to='home.OnscrollContentFadeOptions')),
('onloadimagefadeoptions_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, to='home.OnloadImageFadeOptions')),
('onloadcontentfadeoptions_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, to='home.OnloadContentFadeOptions')),
('imagepositionoptions_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, to='home.ImagePositionOptions')),
('contentalignmentoptions_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, to='home.ContentAlignmentOptions')),
('contentorientationoptions_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, to='home.ContentOrientationOptions')),
('coloroptions_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, to='home.ColorOptions')),
('styleoptions_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, to='home.StyleOptions')),
('stylingbase_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='home.StylingBase')),
],
options={
'verbose_name': 'Banner Styling',
},
bases=('home.stylingbase', 'home.styleoptions', 'home.coloroptions', 'home.contentorientationoptions', 'home.contentalignmentoptions', 'home.imagepositionoptions', 'home.onloadcontentfadeoptions', 'home.onloadimagefadeoptions', 'home.onscrollcontentfadeoptions', 'home.onscrollimagefadeoptions', 'home.screenoptions'),
),
migrations.CreateModel(
name='FooterStyling',
fields=[
('coloroptions_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, to='home.ColorOptions')),
('stylingbase_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='home.StylingBase')),
],
options={
'verbose_name': 'Footer Styling',
},
bases=('home.stylingbase', 'home.coloroptions'),
),
migrations.CreateModel(
name='GalleryStyling',
fields=[
('sizeoptions_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, to='home.SizeOptions')),
('coloroptions_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, to='home.ColorOptions')),
('styleoptions_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, to='home.StyleOptions')),
('stylingbase_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='home.StylingBase')),
('lightbox_button_text', models.CharField(blank=True, max_length=32, null=True)),
('onload_fade_in', models.BooleanField(default=False, verbose_name='Fade In on Load')),
('onscroll_fade_in', models.BooleanField(default=False, verbose_name='Fade In on Scroll')),
],
options={
'verbose_name': 'Gallery Styling',
},
bases=('home.stylingbase', 'home.styleoptions', 'home.coloroptions', 'home.sizeoptions'),
),
migrations.CreateModel(
name='ItemStyling',
fields=[
('sizeoptions_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, to='home.SizeOptions')),
('coloroptions_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, to='home.ColorOptions')),
('styleoptions_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, to='home.StyleOptions')),
('stylingbase_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='home.StylingBase')),
('onload_fade_in', models.BooleanField(default=False, verbose_name='Fade In on Load')),
('onscroll_fade_in', models.BooleanField(default=False, verbose_name='Fade In on Scroll')),
],
options={
'verbose_name': 'Item Styling',
},
bases=('home.stylingbase', 'home.styleoptions', 'home.coloroptions', 'home.sizeoptions'),
),
migrations.CreateModel(
name='SectionStyling',
fields=[
('coloroptions_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, to='home.ColorOptions')),
('stylingbase_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='home.StylingBase')),
],
options={
'verbose_name': 'Section Styling',
},
bases=('home.stylingbase', 'home.coloroptions'),
),
migrations.CreateModel(
name='SpotlightStyling',
fields=[
('screenoptions_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, to='home.ScreenOptions')),
('onscrollimagefadeoptions_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, to='home.OnscrollImageFadeOptions')),
('onscrollcontentfadeoptions_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, to='home.OnscrollContentFadeOptions')),
('onloadimagefadeoptions_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, to='home.OnloadImageFadeOptions')),
('onloadcontentfadeoptions_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, to='home.OnloadContentFadeOptions')),
('imagepositionoptions_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, to='home.ImagePositionOptions')),
('contentalignmentoptions_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, to='home.ContentAlignmentOptions')),
('contentorientationoptions_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, to='home.ContentOrientationOptions')),
('coloroptions_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, to='home.ColorOptions')),
('styleoptions_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, to='home.StyleOptions')),
('stylingbase_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='home.StylingBase')),
],
options={
'verbose_name': 'Spotlight Styling',
},
bases=('home.stylingbase', 'home.styleoptions', 'home.coloroptions', 'home.contentorientationoptions', 'home.contentalignmentoptions', 'home.imagepositionoptions', 'home.onloadcontentfadeoptions', 'home.onloadimagefadeoptions', 'home.onscrollcontentfadeoptions', 'home.onscrollimagefadeoptions', 'home.screenoptions'),
),
migrations.CreateModel(
name='StreamPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('content', wagtail.core.fields.StreamField([('banner', wagtail.core.blocks.PageChooserBlock(blank=True, null=True, page_type=['home.BannerPage'])), ('section_index', wagtail.core.blocks.PageChooserBlock(blank=True, null=True, page_type=['home.SectionIndexPage'])), ('gallery', wagtail.core.blocks.PageChooserBlock(blank=True, null=True, page_type=['home.GalleryPage'])), ('item_index', wagtail.core.blocks.PageChooserBlock(blank=True, null=True, page_type=['home.ItemIndexPage'])), ('spotlight_index', wagtail.core.blocks.PageChooserBlock(blank=True, null=True, page_type=['home.SpotlightIndexPage']))])),
('footer', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='home.FooterPage')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='ItemPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('icon', models.CharField(blank=True, default='', max_length=30)),
('headline', models.CharField(blank=True, max_length=255, null=True)),
('text', models.TextField(blank=True)),
('actions', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='home.ActionButtons')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='IconButton',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('button', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='home.Button')),
('icon', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='buttons', to='home.IconButtons')),
],
options={
'verbose_name': 'Icon Button',
},
),
migrations.CreateModel(
name='GalleryArticlePage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('headline', models.CharField(blank=True, max_length=255, null=True)),
('text', models.TextField(blank=True, help_text='Text to describe the page')),
('actions', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='home.ActionButtons')),
('image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.AddField(
model_name='footerpage',
name='icons',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='home.IconButtons'),
),
migrations.CreateModel(
name='ActionButton',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('action', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='buttons', to='home.ActionButtons')),
('button', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='home.Button')),
],
options={
'verbose_name': 'Action Button',
},
),
migrations.CreateModel(
name='SpotlightPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('content', wagtail.core.fields.RichTextField(blank=True)),
('actions', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='home.ActionButtons')),
('image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
('styling_options', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='home.SpotlightStyling')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='SectionIndexPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('headline', models.CharField(blank=True, max_length=255, null=True)),
('introduction', models.TextField(blank=True, help_text='Text to describe the page')),
('styling_options', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='home.SectionStyling')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='ItemIndexPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('headline', models.CharField(blank=True, max_length=255, null=True)),
('introduction', models.TextField(blank=True, help_text='Text to describe the page')),
('styling_options', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='home.ItemStyling')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='GalleryPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('headline', models.CharField(blank=True, max_length=255, null=True)),
('introduction', models.TextField(blank=True, help_text='Text to describe the page')),
('styling_options', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='home.GalleryStyling')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.AddField(
model_name='footerpage',
name='styling_options',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='home.FooterStyling'),
),
migrations.CreateModel(
name='BannerPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('headline', models.CharField(max_length=127)),
('major', wagtail.core.fields.RichTextField(blank=True, verbose_name='Major Text')),
('minor', wagtail.core.fields.RichTextField(blank=True, verbose_name='Minor Text')),
('actions', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='home.ActionButtons')),
('image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
('styling_options', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='home.BannerStyling')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
]
| 0 | 29,321 | 23 |
8946c562e5d165e5237379b727f59e9924d369e6 | 5,571 | py | Python | src/tests/python_tests/vector_tests.py | Whitemane/fluid-engine-dev | 93c3e942182cd73d54b74b7c2a283854e79911be | [
"MIT"
] | 1 | 2018-04-16T13:09:03.000Z | 2018-04-16T13:09:03.000Z | src/tests/python_tests/vector_tests.py | kentbarber/fluid-engine-dev | fb2256badb80c04702db536b63b14754699038ca | [
"MIT"
] | null | null | null | src/tests/python_tests/vector_tests.py | kentbarber/fluid-engine-dev | fb2256badb80c04702db536b63b14754699038ca | [
"MIT"
] | null | null | null | """
Copyright (c) 2018 Doyub Kim
I am making my contributions/submissions to this project solely in my personal
capacity and am not conveying any rights to any intellectual property of any
third parties.
"""
import pyjet
import unittest
# NOTE(review): `main` is not defined in this filtered copy of the module
# (its definition was removed by the dataset filter), so executing this
# copy as a script would raise NameError.  The complete copy below defines it.
if __name__ == '__main__':
    main()
| 28.279188 | 78 | 0.531323 | """
Copyright (c) 2018 Doyub Kim
I am making my contributions/submissions to this project solely in my personal
capacity and am not conveying any rights to any intellectual property of any
third parties.
"""
import pyjet
import unittest
class Vector2FTest(unittest.TestCase):
    """Tests for the single-precision 2-D vector type ``pyjet.Vector2F``."""

    def testInit(self):
        """Default, positional, and keyword construction."""
        # Default construction zero-initializes both components.
        zero = pyjet.Vector2F()
        self.assertEqual((zero.x, zero.y), (0.0, 0.0))
        # Positional and keyword construction must agree.
        positional = pyjet.Vector2F(1, 2)
        self.assertEqual((positional.x, positional.y), (1.0, 2.0))
        keyword = pyjet.Vector2F(y=2, x=1)
        self.assertEqual((keyword.x, keyword.y), (1.0, 2.0))

    def testGetters(self):
        """Indexed reads map [0] -> x and [1] -> y."""
        vec = pyjet.Vector2F(1, 2)
        for index, expected in enumerate((1, 2)):
            self.assertEqual(vec[index], expected)

    def testSetters(self):
        """Indexed writes map [0] -> x and [1] -> y."""
        vec = pyjet.Vector2F(1, 2)
        for index, value in enumerate((4, 5)):
            vec[index] = value
        for index, expected in enumerate((4, 5)):
            self.assertEqual(vec[index], expected)

    def testCalc(self):
        """Component-wise +, -, *, / between two vectors."""
        lhs = pyjet.Vector2F(1, 2)
        rhs = pyjet.Vector2F(4, 6)
        total = lhs + rhs
        self.assertEqual((total.x, total.y), (5.0, 8.0))
        diff = lhs - rhs
        self.assertEqual((diff.x, diff.y), (-3.0, -4.0))
        prod = lhs * rhs
        self.assertEqual((prod.x, prod.y), (4.0, 12.0))
        quot = lhs / rhs
        # Quotients are not exactly representable; compare approximately.
        self.assertAlmostEqual(quot.x, 1.0 / 4.0)
        self.assertAlmostEqual(quot.y, 1.0 / 3.0)
class Vector2DTest(unittest.TestCase):
    """Tests for the double-precision 2-D vector type ``pyjet.Vector2D``."""
    def testInit(self):
        """Default, positional, and keyword construction."""
        a = pyjet.Vector2D()
        self.assertEqual(a.x, 0.0)
        self.assertEqual(a.y, 0.0)
        b = pyjet.Vector2D(1, 2)
        self.assertEqual(b.x, 1.0)
        self.assertEqual(b.y, 2.0)
        c = pyjet.Vector2D(y=2, x=1)
        self.assertEqual(c.x, 1.0)
        self.assertEqual(c.y, 2.0)
    def testGetters(self):
        """Indexed reads map [0] -> x and [1] -> y."""
        a = pyjet.Vector2D(1, 2)
        self.assertEqual(a[0], 1)
        self.assertEqual(a[1], 2)
    def testSetters(self):
        """Indexed writes map [0] -> x and [1] -> y."""
        a = pyjet.Vector2D(1, 2)
        a[0] = 4; a[1] = 5;
        self.assertEqual(a[0], 4)
        self.assertEqual(a[1], 5)
    def testCalc(self):
        """Component-wise +, -, *, / between two vectors."""
        a = pyjet.Vector2D(1, 2)
        b = pyjet.Vector2D(4, 6)
        c = a + b
        self.assertEqual(c.x, 5.0)
        self.assertEqual(c.y, 8.0)
        c = a - b
        self.assertEqual(c.x, -3.0)
        self.assertEqual(c.y, -4.0)
        c = a * b
        self.assertEqual(c.x, 4.0)
        self.assertEqual(c.y, 12.0)
        c = a / b
        # Quotients are not exactly representable; compare approximately.
        self.assertAlmostEqual(c.x, 1.0 / 4.0)
        self.assertAlmostEqual(c.y, 1.0 / 3.0)
class Vector3FTest(unittest.TestCase):
    """Tests for the single-precision 3-D vector type ``pyjet.Vector3F``."""
    def testInit(self):
        """Default, positional, and keyword construction."""
        a = pyjet.Vector3F()
        self.assertEqual(a.x, 0.0)
        self.assertEqual(a.y, 0.0)
        self.assertEqual(a.z, 0.0)
        b = pyjet.Vector3F(1, 2, 3)
        self.assertEqual(b.x, 1.0)
        self.assertEqual(b.y, 2.0)
        self.assertEqual(b.z, 3.0)
        c = pyjet.Vector3F(y=2, x=1, z=3)
        self.assertEqual(c.x, 1.0)
        self.assertEqual(c.y, 2.0)
        self.assertEqual(c.z, 3.0)
    def testGetters(self):
        """Indexed reads map [0]/[1]/[2] to x/y/z."""
        a = pyjet.Vector3F(1, 2, 3)
        self.assertEqual(a[0], 1)
        self.assertEqual(a[1], 2)
        self.assertEqual(a[2], 3)
    def testSetters(self):
        """Indexed writes map [0]/[1]/[2] to x/y/z."""
        a = pyjet.Vector3F(1, 2, 3)
        a[0] = 4; a[1] = 5; a[2] = 6
        self.assertEqual(a[0], 4)
        self.assertEqual(a[1], 5)
        self.assertEqual(a[2], 6)
    def testCalc(self):
        """Component-wise +, -, *, / between two vectors."""
        a = pyjet.Vector3F(1, 2, 3)
        b = pyjet.Vector3F(4, 6, 8)
        c = a + b
        self.assertEqual(c.x, 5.0)
        self.assertEqual(c.y, 8.0)
        self.assertEqual(c.z, 11.0)
        c = a - b
        self.assertEqual(c.x, -3.0)
        self.assertEqual(c.y, -4.0)
        self.assertEqual(c.z, -5.0)
        c = a * b
        self.assertEqual(c.x, 4.0)
        self.assertEqual(c.y, 12.0)
        self.assertEqual(c.z, 24.0)
        c = a / b
        # Quotients are not exactly representable; compare approximately.
        self.assertAlmostEqual(c.x, 1.0 / 4.0)
        self.assertAlmostEqual(c.y, 1.0 / 3.0)
        self.assertAlmostEqual(c.z, 3.0 / 8.0)
class Vector3DTest(unittest.TestCase):
    """Tests for the double-precision 3-D vector type ``pyjet.Vector3D``."""

    def testInit(self):
        """Default, positional, and keyword construction."""
        zero = pyjet.Vector3D()
        self.assertEqual((zero.x, zero.y, zero.z), (0.0, 0.0, 0.0))
        positional = pyjet.Vector3D(1, 2, 3)
        self.assertEqual((positional.x, positional.y, positional.z),
                         (1.0, 2.0, 3.0))
        keyword = pyjet.Vector3D(y=2, x=1, z=3)
        self.assertEqual((keyword.x, keyword.y, keyword.z), (1.0, 2.0, 3.0))

    def testGetters(self):
        """Indexed reads map [0]/[1]/[2] to x/y/z."""
        vec = pyjet.Vector3D(1, 2, 3)
        for index, expected in enumerate((1, 2, 3)):
            self.assertEqual(vec[index], expected)

    def testSetters(self):
        """Indexed writes map [0]/[1]/[2] to x/y/z."""
        vec = pyjet.Vector3D(1, 2, 3)
        for index, value in enumerate((4, 5, 6)):
            vec[index] = value
        for index, expected in enumerate((4, 5, 6)):
            self.assertEqual(vec[index], expected)

    def testCalc(self):
        """Component-wise +, -, *, / between two vectors."""
        lhs = pyjet.Vector3D(1, 2, 3)
        rhs = pyjet.Vector3D(4, 6, 8)
        total = lhs + rhs
        self.assertEqual((total.x, total.y, total.z), (5.0, 8.0, 11.0))
        diff = lhs - rhs
        self.assertEqual((diff.x, diff.y, diff.z), (-3.0, -4.0, -5.0))
        prod = lhs * rhs
        self.assertEqual((prod.x, prod.y, prod.z), (4.0, 12.0, 24.0))
        quot = lhs / rhs
        # Quotients are not exactly representable; compare approximately.
        self.assertAlmostEqual(quot.x, 1.0 / 4.0)
        self.assertAlmostEqual(quot.y, 1.0 / 3.0)
        self.assertAlmostEqual(quot.z, 3.0 / 8.0)
def main():
    """Entry point: silence pyjet's logger, then run the whole test suite."""
    pyjet.Logging.mute()
    unittest.main()
if __name__ == '__main__':
    main()
| 4,677 | 68 | 543 |
eaea7ea729999bf8be7a0ffb27f39b2820743069 | 587 | py | Python | Coursera_HW/Week_3/task_6.py | IsFelix/ML-HLS_python_course_fall21 | 3119482d2951feb85d8a7819d2beaac947bca5db | [
"MIT"
] | null | null | null | Coursera_HW/Week_3/task_6.py | IsFelix/ML-HLS_python_course_fall21 | 3119482d2951feb85d8a7819d2beaac947bca5db | [
"MIT"
] | null | null | null | Coursera_HW/Week_3/task_6.py | IsFelix/ML-HLS_python_course_fall21 | 3119482d2951feb85d8a7819d2beaac947bca5db | [
"MIT"
] | null | null | null | """
Процентная ставка по вкладу составляет P процентов годовых, которые
прибавляются к сумме вклада. Вклад составляет X рублей Y копеек.
Определите размер вклада через год. При решении этой задачи нельзя
пользоваться условными инструкциями и циклами.
Формат ввода
Программа получает на вход целые числа P, X, Y.
Формат вывода
Программа должна вывести два числа: величину вклада через год
в рублях и копейках. Дробная часть копеек отбрасывается.
"""
p, x, y = float(input()), float(input()), float(input())
res = (100 * x + y) * (100 + p) / 100
print(int(res // 100), int(res % 100))
| 30.894737 | 67 | 0.744463 | """
Процентная ставка по вкладу составляет P процентов годовых, которые
прибавляются к сумме вклада. Вклад составляет X рублей Y копеек.
Определите размер вклада через год. При решении этой задачи нельзя
пользоваться условными инструкциями и циклами.
Формат ввода
Программа получает на вход целые числа P, X, Y.
Формат вывода
Программа должна вывести два числа: величину вклада через год
в рублях и копейках. Дробная часть копеек отбрасывается.
"""
p, x, y = float(input()), float(input()), float(input())
res = (100 * x + y) * (100 + p) / 100
print(int(res // 100), int(res % 100))
| 0 | 0 | 0 |
8151da9fee178ce4ff62b10ab49d6836eeeb2ba2 | 9,978 | py | Python | services/ui_backend_service/api/admin.py | valayDave/metaflow-service | 65e19aef268e9e707522ee0695fd4ebaee42aa69 | [
"Apache-2.0"
] | 103 | 2019-12-04T04:41:08.000Z | 2022-03-29T16:20:45.000Z | services/ui_backend_service/api/admin.py | Netflix/metaflow-service | a065407c822018ccd5ebab764bb43e0a9e121024 | [
"Apache-2.0"
] | 42 | 2019-12-16T23:15:44.000Z | 2022-02-18T17:33:32.000Z | services/ui_backend_service/api/admin.py | valayDave/metaflow-service | 65e19aef268e9e707522ee0695fd4ebaee42aa69 | [
"Apache-2.0"
] | 36 | 2019-12-12T17:46:46.000Z | 2022-01-21T04:53:24.000Z | import os
import hashlib
import asyncio
from aiohttp import web
from multidict import MultiDict
from services.utils import (METADATA_SERVICE_HEADER, METADATA_SERVICE_VERSION,
SERVICE_BUILD_TIMESTAMP, SERVICE_COMMIT_HASH,
web_response)
from .utils import get_json_from_env
UI_SERVICE_VERSION = "{metadata_v}-{timestamp}-{commit}".format(
metadata_v=METADATA_SERVICE_VERSION,
timestamp=SERVICE_BUILD_TIMESTAMP or "",
commit=SERVICE_COMMIT_HASH or ""
)
class AdminApi(object):
"""
Provides administrative routes for the UI Service,
such as health checks, version info and custom navigation links.
"""
async def version(self, request):
"""
---
description: Returns the version of the metadata service
tags:
- Admin
produces:
- 'text/plain'
responses:
"200":
description: successful operation. Return the version number
"405":
description: invalid HTTP Method
"""
return web.Response(text=str(UI_SERVICE_VERSION))
async def ping(self, request):
"""
---
description: This end-point allow to test that service is up.
tags:
- Admin
produces:
- 'text/plain'
responses:
"202":
description: successful operation. Return "pong" text
"405":
description: invalid HTTP Method
"""
return web.Response(text="pong", headers=MultiDict(
{METADATA_SERVICE_HEADER: METADATA_SERVICE_VERSION}))
async def links(self, request):
"""
---
description: Provides custom navigation links for UI.
tags:
- Admin
produces:
- 'application/json'
responses:
"200":
description: Returns the custom navigation links for UI
schema:
$ref: '#/definitions/ResponsesLinkList'
"405":
description: invalid HTTP Method
"""
return web_response(status=200, body=self.navigation_links)
async def get_notifications(self, request):
"""
---
description: Provides System Notifications for the UI
tags:
- Admin
produces:
- 'application/json'
responses:
"200":
description: Returns list of active system notification
schema:
$ref: '#/definitions/ResponsesNotificationList'
"405":
description: invalid HTTP Method
"""
processed_notifications = []
for notification in self.notifications:
try:
if "message" not in notification:
continue
# Created at is required and "start" is used by default if not value provided
# Notification will be ignored if both "created" and "start" are missing
created = notification.get("created", notification.get("start", None))
if not created:
continue
processed_notifications.append({
"id": notification.get("id", hashlib.sha1(
str(notification).encode('utf-8')).hexdigest()),
"type": notification.get("type", "info"),
"contentType": notification.get("contentType", "text"),
"message": notification.get("message", ""),
"url": notification.get("url", None),
"urlText": notification.get("urlText", None),
"created": created,
"start": notification.get("start", None),
"end": notification.get("end", None)
})
except:
pass
# Filter notifications based on query parameters
# Supports eq,ne.lt,le,gt,ge operators for all the fields
return web_response(status=200, body=list(
filter(filter_notifications, processed_notifications)))
async def status(self, request):
"""
---
description: Display system status information, such as cache
tags:
- Admin
produces:
- 'application/json'
responses:
"200":
description: Return system status information, such as cache
"405":
description: invalid HTTP Method
"""
cache_status = {}
for store in [self.cache_store.artifact_cache, self.cache_store.dag_cache, self.cache_store.log_cache]:
try:
# Use client ping to verify communcation, True = ok
await store.cache.ping()
ping = True
except Exception as ex:
ping = str(ex)
try:
# Use Check -action to verify Cache communication, True = ok
await store.cache.request_and_return([store.cache.check()], None)
check = True
except Exception as ex:
check = str(ex)
# Extract list of worker subprocesses
worker_list = []
cache_server_pid = store.cache._proc.pid if store.cache._proc else None
if cache_server_pid:
try:
proc = await asyncio.create_subprocess_shell(
"pgrep -P {}".format(cache_server_pid),
stdout=asyncio.subprocess.PIPE)
stdout, _ = await proc.communicate()
if stdout:
pids = stdout.decode().splitlines()
proc = await asyncio.create_subprocess_shell(
"ps -p {} -o pid,%cpu,%mem,stime,time,command".format(",".join(pids)),
stdout=asyncio.subprocess.PIPE)
stdout, _ = await proc.communicate()
worker_list = stdout.decode().splitlines()
except Exception as ex:
worker_list = str(ex)
else:
worker_list = "Unable to get cache server pid"
# Extract current cache data usage in bytes
current_size = 0
try:
cache_data_path = os.path.abspath(store.cache._root)
proc = await asyncio.create_subprocess_shell(
"du -s {} | cut -f1".format(cache_data_path),
stdout=asyncio.subprocess.PIPE)
stdout, _ = await proc.communicate()
if stdout:
current_size = int(stdout.decode())
except Exception as ex:
current_size = str(ex)
cache_status[store.__class__.__name__] = {
"restart_requested": store.cache._restart_requested,
"is_alive": store.cache._is_alive,
"pending_requests": list(store.cache.pending_requests),
"root": store.cache._root,
"prev_is_alive": store.cache._prev_is_alive,
"action_classes": list(map(lambda cls: cls.__name__, store.cache._action_classes)),
"max_actions": store.cache._max_actions,
"max_size": store.cache._max_size,
"current_size": current_size,
"ping": ping,
"check_action": check,
"proc": {
"pid": store.cache._proc.pid,
"returncode": store.cache._proc.returncode,
} if store.cache._proc else None,
"workers": worker_list
}
return web_response(status=200, body={
"cache": cache_status
})
| 36.683824 | 111 | 0.523852 | import os
import hashlib
import asyncio
from aiohttp import web
from multidict import MultiDict
from services.utils import (METADATA_SERVICE_HEADER, METADATA_SERVICE_VERSION,
SERVICE_BUILD_TIMESTAMP, SERVICE_COMMIT_HASH,
web_response)
from .utils import get_json_from_env
# Version string exposed by the UI service:
# "<metadata-service-version>-<build-timestamp>-<commit-hash>"; the timestamp
# and commit fall back to empty strings when not baked into the build.
UI_SERVICE_VERSION = "{metadata_v}-{timestamp}-{commit}".format(
    metadata_v=METADATA_SERVICE_VERSION,
    timestamp=SERVICE_BUILD_TIMESTAMP or "",
    commit=SERVICE_COMMIT_HASH or ""
)
class AdminApi(object):
    """
    Provides administrative routes for the UI Service,
    such as health checks, version info and custom navigation links.
    """

    def __init__(self, app, cache_store):
        """Attach the admin endpoints to *app*.

        :param app: aiohttp application whose router receives the routes.
        :param cache_store: container of the artifact/dag/log caches that
            the /status endpoint introspects.
        """
        self.cache_store = cache_store

        app.router.add_route("GET", "/ping", self.ping)
        app.router.add_route("GET", "/version", self.version)
        app.router.add_route("GET", "/links", self.links)
        app.router.add_route("GET", "/notifications", self.get_notifications)
        app.router.add_route("GET", "/status", self.status)

        defaults = [
            {"href": 'https://docs.metaflow.org/', "label": 'Documentation'},
            {"href": 'http://chat.metaflow.org/', "label": 'Help'}
        ]

        # Both lists are environment-overridable (JSON payloads); fall back
        # to no notifications and the default quicklinks when unset.
        self.notifications = _get_notifications_from_env() or []
        self.navigation_links = _get_links_from_env() or defaults

    async def version(self, request):
        """
        ---
        description: Returns the version of the metadata service
        tags:
        - Admin
        produces:
        - 'text/plain'
        responses:
            "200":
                description: successful operation. Return the version number
            "405":
                description: invalid HTTP Method
        """
        return web.Response(text=str(UI_SERVICE_VERSION))

    async def ping(self, request):
        """
        ---
        description: This end-point allow to test that service is up.
        tags:
        - Admin
        produces:
        - 'text/plain'
        responses:
            "202":
                description: successful operation. Return "pong" text
            "405":
                description: invalid HTTP Method
        """
        return web.Response(text="pong", headers=MultiDict(
            {METADATA_SERVICE_HEADER: METADATA_SERVICE_VERSION}))

    async def links(self, request):
        """
        ---
        description: Provides custom navigation links for UI.
        tags:
        - Admin
        produces:
        - 'application/json'
        responses:
            "200":
                description: Returns the custom navigation links for UI
                schema:
                    $ref: '#/definitions/ResponsesLinkList'
            "405":
                description: invalid HTTP Method
        """
        return web_response(status=200, body=self.navigation_links)

    async def get_notifications(self, request):
        """
        ---
        description: Provides System Notifications for the UI
        tags:
        - Admin
        produces:
        - 'application/json'
        responses:
            "200":
                description: Returns list of active system notification
                schema:
                    $ref: '#/definitions/ResponsesNotificationList'
            "405":
                description: invalid HTTP Method
        """
        processed_notifications = []
        for notification in self.notifications:
            try:
                if "message" not in notification:
                    continue
                # "created" is required; "start" is used as its default.
                # The notification is ignored if both are missing.
                created = notification.get("created", notification.get("start", None))
                if not created:
                    continue
                processed_notifications.append({
                    # Derive a stable id from the content when none is given.
                    "id": notification.get("id", hashlib.sha1(
                        str(notification).encode('utf-8')).hexdigest()),
                    "type": notification.get("type", "info"),
                    "contentType": notification.get("contentType", "text"),
                    "message": notification.get("message", ""),
                    "url": notification.get("url", None),
                    "urlText": notification.get("urlText", None),
                    "created": created,
                    "start": notification.get("start", None),
                    "end": notification.get("end", None)
                })
            except Exception:
                # Skip malformed notification entries instead of failing the
                # whole request.  Was a bare `except:`, which would also have
                # swallowed SystemExit/KeyboardInterrupt.
                pass

        # Filter notifications based on query parameters.
        # Supports eq,ne,lt,le,gt,ge operators for all the fields.
        def filter_notifications(notification):
            """Return True when *notification* matches every query filter.

            Filters look like ``?field:op=value`` (``op`` defaults to "eq");
            the compare value is cast to the type of the notification field.
            Unknown operators and conversion errors leave the notification in.
            """
            comp_operators = {
                "eq": lambda a, b: a == b,
                "ne": lambda a, b: a != b,
                "lt": lambda a, b: a < b,
                "le": lambda a, b: a <= b,
                "gt": lambda a, b: a > b,
                "ge": lambda a, b: a >= b,
            }
            try:
                for q in request.query.keys():
                    if ":" in q:
                        field, op = q.split(":", 1)
                    else:
                        field, op = q, "eq"
                    # Make sure compare operator is supported, otherwise ignore.
                    # Compare value is typecasted to match field type.
                    if op in comp_operators:
                        field_val = notification.get(field, None)
                        if not field_val:
                            continue
                        comp_val = type(field_val)(request.query.get(q, None))
                        if not comp_val:
                            continue
                        if not comp_operators[op](field_val, comp_val):
                            return False
            except Exception:
                # On any filtering error, treat the notification as a match.
                # Was a bare `except:`; narrowed to Exception.
                pass
            return True

        return web_response(status=200, body=list(
            filter(filter_notifications, processed_notifications)))

    async def status(self, request):
        """
        ---
        description: Display system status information, such as cache
        tags:
        - Admin
        produces:
        - 'application/json'
        responses:
            "200":
                description: Return system status information, such as cache
            "405":
                description: invalid HTTP Method
        """
        cache_status = {}
        for store in [self.cache_store.artifact_cache, self.cache_store.dag_cache, self.cache_store.log_cache]:
            try:
                # Use client ping to verify communication, True = ok
                await store.cache.ping()
                ping = True
            except Exception as ex:
                ping = str(ex)

            try:
                # Use Check -action to verify Cache communication, True = ok
                await store.cache.request_and_return([store.cache.check()], None)
                check = True
            except Exception as ex:
                check = str(ex)

            # Extract list of worker subprocesses
            worker_list = []
            cache_server_pid = store.cache._proc.pid if store.cache._proc else None
            if cache_server_pid:
                try:
                    proc = await asyncio.create_subprocess_shell(
                        "pgrep -P {}".format(cache_server_pid),
                        stdout=asyncio.subprocess.PIPE)
                    stdout, _ = await proc.communicate()
                    if stdout:
                        pids = stdout.decode().splitlines()
                        proc = await asyncio.create_subprocess_shell(
                            "ps -p {} -o pid,%cpu,%mem,stime,time,command".format(",".join(pids)),
                            stdout=asyncio.subprocess.PIPE)
                        stdout, _ = await proc.communicate()
                        worker_list = stdout.decode().splitlines()
                except Exception as ex:
                    worker_list = str(ex)
            else:
                worker_list = "Unable to get cache server pid"

            # Extract current cache data usage.
            # NOTE(review): `du -s` reports disk blocks (typically KiB), not
            # bytes — confirm the intended unit of "current_size".
            current_size = 0
            try:
                cache_data_path = os.path.abspath(store.cache._root)
                proc = await asyncio.create_subprocess_shell(
                    "du -s {} | cut -f1".format(cache_data_path),
                    stdout=asyncio.subprocess.PIPE)
                stdout, _ = await proc.communicate()
                if stdout:
                    current_size = int(stdout.decode())
            except Exception as ex:
                current_size = str(ex)

            cache_status[store.__class__.__name__] = {
                "restart_requested": store.cache._restart_requested,
                "is_alive": store.cache._is_alive,
                "pending_requests": list(store.cache.pending_requests),
                "root": store.cache._root,
                "prev_is_alive": store.cache._prev_is_alive,
                "action_classes": list(map(lambda cls: cls.__name__, store.cache._action_classes)),
                "max_actions": store.cache._max_actions,
                "max_size": store.cache._max_size,
                "current_size": current_size,
                "ping": ping,
                "check_action": check,
                "proc": {
                    "pid": store.cache._proc.pid,
                    "returncode": store.cache._proc.returncode,
                } if store.cache._proc else None,
                "workers": worker_list
            }

        return web_response(status=200, body={
            "cache": cache_status
        })
def _get_links_from_env():
    """Load the custom navigation links from the CUSTOM_QUICKLINKS env var,
    parsed as JSON by ``get_json_from_env``."""
    return get_json_from_env("CUSTOM_QUICKLINKS")
def _get_notifications_from_env():
    """Load system notifications from the NOTIFICATIONS env var,
    parsed as JSON by ``get_json_from_env``."""
    return get_json_from_env("NOTIFICATIONS")
| 2,009 | 0 | 103 |
8f36c7b0dfc50ee5aae2dda913c606bfc9d665fe | 4,301 | py | Python | tests/test_parser.py | CodeYellowBV/django-tally | a705821050da912fb8dabd56c41c040ea0a00a21 | [
"MIT"
] | null | null | null | tests/test_parser.py | CodeYellowBV/django-tally | a705821050da912fb8dabd56c41c040ea0a00a21 | [
"MIT"
] | null | null | null | tests/test_parser.py | CodeYellowBV/django-tally | a705821050da912fb8dabd56c41c040ea0a00a21 | [
"MIT"
] | null | null | null | from unittest import TestCase
from django_tally.user_def.lang import parse, KW, serialize
from django_tally.user_def.lang.parser import parse_tokens
source = """
(do
(defn fib (n)
(if (in '(0 1) n)
1
(+ (fib (- n 1)) (fib (- n 2)))))
(fib 10)
[1 2 3]
{1 2 3}
#{1 2 3}
#[1 2 3]
^foo)
"""
body = [
[
KW('do'),
[
KW('defn'), KW('fib'), [KW('n')],
[
KW('if'), [KW('in'), [KW('quote'), [0, 1]], KW('n')],
1,
[
KW('+'),
[KW('fib'), [KW('-'), KW('n'), 1]],
[KW('fib'), [KW('-'), KW('n'), 2]],
],
],
],
[KW('fib'), 10],
[KW('list'), 1, 2, 3],
[KW('tuple'), 1, 2, 3],
[KW('dict'), 1, 2, 3],
[KW('set'), 1, 2, 3],
[KW('quote'), [KW('unquote'), KW('foo')]],
]
]
| 28.673333 | 78 | 0.485003 | from unittest import TestCase
from django_tally.user_def.lang import parse, KW, serialize
from django_tally.user_def.lang.parser import parse_tokens
source = """
(do
(defn fib (n)
(if (in '(0 1) n)
1
(+ (fib (- n 1)) (fib (- n 2)))))
(fib 10)
[1 2 3]
{1 2 3}
#{1 2 3}
#[1 2 3]
^foo)
"""
body = [
[
KW('do'),
[
KW('defn'), KW('fib'), [KW('n')],
[
KW('if'), [KW('in'), [KW('quote'), [0, 1]], KW('n')],
1,
[
KW('+'),
[KW('fib'), [KW('-'), KW('n'), 1]],
[KW('fib'), [KW('-'), KW('n'), 2]],
],
],
],
[KW('fib'), 10],
[KW('list'), 1, 2, 3],
[KW('tuple'), 1, 2, 3],
[KW('dict'), 1, 2, 3],
[KW('set'), 1, 2, 3],
[KW('quote'), [KW('unquote'), KW('foo')]],
]
]
class ParserTest(TestCase):
    """Tests for the user-defined language parser and serializer."""
    def test_parse(self):
        """The sample program parses into the expected expression trees."""
        self.assertEqual(list(parse(source)), body)
    def test_serialize(self):
        """Serializing an expression tree and re-parsing it round-trips."""
        self.assertEqual(list(parse(serialize(body, many=True))), body)
    def test_intermediate_error(self):
        """An invalid token before valid input raises ValueError."""
        with self.assertRaises(ValueError) as cm:
            list(parse('% foo'))
        self.assertEqual(str(cm.exception), 'Invalid body: %')
    def test_final_error(self):
        """An invalid token after valid input raises ValueError."""
        with self.assertRaises(ValueError) as cm:
            list(parse('foo %'))
        self.assertEqual(str(cm.exception), 'Invalid body: %')
    def test_unexpected_closing_paren(self):
        """A stray closing parenthesis raises ValueError."""
        with self.assertRaises(ValueError) as cm:
            list(parse(')'))
        self.assertEqual(str(cm.exception), 'Unexpected )')
    def test_unexpected_eof(self):
        """An unterminated expression raises ValueError."""
        with self.assertRaises(ValueError) as cm:
            list(parse('('))
        self.assertEqual(str(cm.exception), 'Unexpected EOF')
    def test_invalid_tokens(self):
        """Parsing an empty token stream raises ValueError."""
        with self.assertRaises(ValueError) as cm:
            list(parse_tokens([]))
        self.assertEqual(str(cm.exception), 'Expected more tokens')
    def test_parse_float(self):
        """Float literals parse to Python floats."""
        self.assertEqual(next(parse('0.123')), 0.123)
    def test_parse_string(self):
        """String literals support escaped quote/newline/tab/CR."""
        self.assertEqual(next(parse('"foobar\\"\\n\\t\\r"')), 'foobar"\n\t\r')
    def test_serialize_string(self):
        """Serializing a string escapes quote/newline/tab/CR."""
        self.assertEqual(serialize('foobar"\n\t\r'), '"foobar\\"\\n\\t\\r"')
    def test_parse_operators(self):
        """Operator symbols each parse to the matching keyword."""
        self.assertEqual(
            list(parse('* / + - = != <= >= < > ->')),
            [
                KW('*'), KW('/'), KW('+'), KW('-'), KW('='), KW('!='),
                KW('<='), KW('>='), KW('<'), KW('>'), KW('->'),
            ],
        )
    def test_parse_constants(self):
        """null/true/false parse to None/True/False."""
        self.assertEqual(
            list(parse('null true false')),
            [None, True, False],
        )
    def test_serialize_constants(self):
        """None/True/False serialize back to null/true/false."""
        self.assertEqual(
            serialize([None, True, False]),
            '(null true false)',
        )
    def test_parse_spread(self):
        """The & spread operator expands to an into_list expression."""
        self.assertEqual(
            list(parse('[&foo 1 2 3 &bar 4 5 6 &baz]'))[0],
            [
                KW('into_list'),
                KW('foo'),
                [KW('list'), 1, 2, 3],
                KW('bar'),
                [KW('list'), 4, 5, 6],
                KW('baz'),
            ],
        )
    def test_serialize_spread(self):
        """into_list expressions serialize back to & spread syntax."""
        self.assertEqual(
            serialize([
                KW('into_list'),
                [KW('list'), 1, 2, 3],
                KW('foo'),
                [KW('list'), 4, 5],
                KW('bar'),
            ]),
            '[1 2 3 &foo 4 5 &bar]',
        )
    def test_parse_unexpected_token(self):
        """An unknown token type raises ValueError naming the token."""
        with self.assertRaises(ValueError) as cm:
            list(parse_tokens([('FOO', '')]))
        self.assertEqual(
            str(cm.exception),
            'Unexpected token FOO',
        )
    def test_parse_col_unexpected_end(self):
        """An unterminated collection literal raises ValueError."""
        with self.assertRaises(ValueError) as cm:
            list(parse_tokens([('LIST_OPEN', '[')]))
        self.assertEqual(str(cm.exception), 'Expected more tokens')
    def test_parse_col_spread_without_expression(self):
        """A spread operator with no following expression raises ValueError."""
        with self.assertRaises(ValueError) as cm:
            list(parse('[&]'))
        self.assertEqual(str(cm.exception), 'Expected expression to spread')
| 2,861 | 6 | 509 |
37f7fb1c17a2999f23e072c756c3cc3feae54407 | 4,854 | py | Python | tests/test_memberships.py | indigo-ag/pgbedrock | 58e46f98da72e81ae0c2f69463aa6e97ea7b8936 | [
"Apache-2.0"
] | 295 | 2018-03-22T15:08:45.000Z | 2022-03-24T05:06:30.000Z | tests/test_memberships.py | indigo-ag/pgbedrock | 58e46f98da72e81ae0c2f69463aa6e97ea7b8936 | [
"Apache-2.0"
] | 54 | 2018-03-27T16:06:36.000Z | 2022-01-21T14:25:27.000Z | tests/test_memberships.py | indigo-ag/pgbedrock | 58e46f98da72e81ae0c2f69463aa6e97ea7b8936 | [
"Apache-2.0"
] | 34 | 2018-03-29T01:05:58.000Z | 2022-01-08T03:17:59.000Z | from conftest import run_setup_sql
from pgbedrock import memberships as memb
from pgbedrock import attributes
ROLE1 = 'charlie'
ROLE2 = 'barney'
ROLE3 = 'wacko'
DESIRED_GROUP1 = 'desired_group1'
DESIRED_GROUP2 = 'desired_group2'
CURRENT_GROUP1 = 'current_group1'
CURRENT_GROUP2 = 'current_group2'
Q_HAS_ROLE = "SELECT pg_has_role('{}', '{}', 'member')"
DUMMY = 'foo'
@run_setup_sql([
attributes.Q_CREATE_ROLE.format(ROLE1),
attributes.Q_CREATE_ROLE.format(ROLE2),
attributes.Q_CREATE_ROLE.format(ROLE3),
attributes.Q_CREATE_ROLE.format(CURRENT_GROUP1),
attributes.Q_CREATE_ROLE.format(DESIRED_GROUP1),
attributes.Q_CREATE_ROLE.format(DESIRED_GROUP2),
attributes.Q_ALTER_ROLE.format(ROLE1, 'SUPERUSER'),
memb.Q_GRANT_MEMBERSHIP.format(CURRENT_GROUP1, ROLE3),
])
def test_analyze_memberships(cursor):
"""
Test:
* one superuser (to make sure they don't get evaluated)
* two users, both of which will be removed from a group and added to a group
"""
spec = {
ROLE1: {'member_of': [DESIRED_GROUP1]},
ROLE2: {'member_of': [DESIRED_GROUP1, DESIRED_GROUP2]},
ROLE3: {'member_of': [DESIRED_GROUP1]}
}
expected = set([
memb.SKIP_SUPERUSER_MEMBERSHIPS_MSG.format(ROLE1),
memb.Q_GRANT_MEMBERSHIP.format(DESIRED_GROUP1, ROLE2),
memb.Q_GRANT_MEMBERSHIP.format(DESIRED_GROUP2, ROLE2),
memb.Q_GRANT_MEMBERSHIP.format(DESIRED_GROUP1, ROLE3),
memb.Q_REVOKE_MEMBERSHIP.format(CURRENT_GROUP1, ROLE3),
])
actual = memb.analyze_memberships(spec, cursor, verbose=False)
assert set(actual) == expected
| 38.220472 | 93 | 0.716522 | from conftest import run_setup_sql
from pgbedrock import memberships as memb
from pgbedrock import attributes
# Role and group names shared by the fixtures below.
ROLE1 = 'charlie'
ROLE2 = 'barney'
ROLE3 = 'wacko'
DESIRED_GROUP1 = 'desired_group1'
DESIRED_GROUP2 = 'desired_group2'
CURRENT_GROUP1 = 'current_group1'
CURRENT_GROUP2 = 'current_group2'
# Query template for checking role membership, and a placeholder used where
# an argument's value is irrelevant to the test.
Q_HAS_ROLE = "SELECT pg_has_role('{}', '{}', 'member')"
DUMMY = 'foo'
# Create the three roles plus the current/desired groups, make ROLE1 a
# superuser, and pre-grant ROLE3 membership in CURRENT_GROUP1.
@run_setup_sql([
    attributes.Q_CREATE_ROLE.format(ROLE1),
    attributes.Q_CREATE_ROLE.format(ROLE2),
    attributes.Q_CREATE_ROLE.format(ROLE3),
    attributes.Q_CREATE_ROLE.format(CURRENT_GROUP1),
    attributes.Q_CREATE_ROLE.format(DESIRED_GROUP1),
    attributes.Q_CREATE_ROLE.format(DESIRED_GROUP2),
    attributes.Q_ALTER_ROLE.format(ROLE1, 'SUPERUSER'),
    memb.Q_GRANT_MEMBERSHIP.format(CURRENT_GROUP1, ROLE3),
])
def test_analyze_memberships(cursor):
    """
    Test:
        * one superuser (to make sure they don't get evaluated)
        * two users, both of which will be removed from a group and added to a group
    """
    spec = {
        ROLE1: {'member_of': [DESIRED_GROUP1]},
        ROLE2: {'member_of': [DESIRED_GROUP1, DESIRED_GROUP2]},
        ROLE3: {'member_of': [DESIRED_GROUP1]}
    }
    expected = set([
        memb.SKIP_SUPERUSER_MEMBERSHIPS_MSG.format(ROLE1),
        memb.Q_GRANT_MEMBERSHIP.format(DESIRED_GROUP1, ROLE2),
        memb.Q_GRANT_MEMBERSHIP.format(DESIRED_GROUP2, ROLE2),
        memb.Q_GRANT_MEMBERSHIP.format(DESIRED_GROUP1, ROLE3),
        memb.Q_REVOKE_MEMBERSHIP.format(CURRENT_GROUP1, ROLE3),
    ])
    actual = memb.analyze_memberships(spec, cursor, verbose=False)
    assert set(actual) == expected
def test_analyze_no_desired_memberships_none_current(mockdbcontext):
    """No current and no desired memberships yields no SQL."""
    mockdbcontext.is_superuser = lambda x: False
    mockdbcontext.get_role_memberships = lambda x: set()
    memberships_ = set()
    actual = memb.MembershipAnalyzer(ROLE1, spec_memberships=memberships_,
                                     dbcontext=mockdbcontext).analyze()
    assert actual == []
def test_analyze_none_current_some_desired(mockdbcontext):
    """Desired-only memberships produce one GRANT per desired group."""
    mockdbcontext.is_superuser = lambda x: False
    mockdbcontext.get_role_memberships = lambda x: set()
    desired_groups = set([DESIRED_GROUP1, DESIRED_GROUP2])
    expected = set([
        memb.Q_GRANT_MEMBERSHIP.format(DESIRED_GROUP1, ROLE1),
        memb.Q_GRANT_MEMBERSHIP.format(DESIRED_GROUP2, ROLE1),
    ])
    actual = memb.MembershipAnalyzer(ROLE1, spec_memberships=desired_groups,
                                     dbcontext=mockdbcontext).analyze()
    assert set(actual) == expected
def test_analyze_some_current_none_desired(mockdbcontext):
    """Current-only memberships produce one REVOKE per current group."""
    mockdbcontext.is_superuser = lambda x: False
    mockdbcontext.get_role_memberships = lambda x: set([CURRENT_GROUP1, CURRENT_GROUP2])
    desired_groups = set()
    expected = set([
        memb.Q_REVOKE_MEMBERSHIP.format(CURRENT_GROUP1, ROLE1),
        memb.Q_REVOKE_MEMBERSHIP.format(CURRENT_GROUP2, ROLE1),
    ])
    actual = memb.MembershipAnalyzer(ROLE1, spec_memberships=desired_groups,
                                     dbcontext=mockdbcontext).analyze()
    assert set(actual) == expected
def test_analyze_some_current_some_desired(mockdbcontext):
    """Mixed state: GRANT missing groups, REVOKE undesired ones, keep overlap."""
    mockdbcontext.is_superuser = lambda x: False
    mockdbcontext.get_role_memberships = lambda x: set([DESIRED_GROUP1, CURRENT_GROUP1,
                                                        CURRENT_GROUP2])
    desired_groups = set([DESIRED_GROUP1, DESIRED_GROUP2])
    expected = set([
        memb.Q_GRANT_MEMBERSHIP.format(DESIRED_GROUP2, ROLE1),
        memb.Q_REVOKE_MEMBERSHIP.format(CURRENT_GROUP1, ROLE1),
        memb.Q_REVOKE_MEMBERSHIP.format(CURRENT_GROUP2, ROLE1),
    ])
    actual = memb.MembershipAnalyzer(ROLE1, spec_memberships=desired_groups,
                                     dbcontext=mockdbcontext).analyze()
    assert set(actual) == expected
def test_analyze_skip_superuser(mockdbcontext):
    """Superusers are skipped entirely, with only an informative message."""
    mockdbcontext.is_superuser = lambda x: True
    expected = [memb.SKIP_SUPERUSER_MEMBERSHIPS_MSG.format(ROLE2)]
    actual = memb.MembershipAnalyzer(ROLE2, spec_memberships=DUMMY,
                                     dbcontext=mockdbcontext).analyze()
    assert actual == expected
def test_grant_membership(mockdbcontext):
    """grant_membership queues the expected GRANT statement."""
    mockdbcontext.is_superuser = lambda x: False
    memconf = memb.MembershipAnalyzer(ROLE1, spec_memberships=DUMMY, dbcontext=mockdbcontext)
    memconf.grant_membership(DESIRED_GROUP1)
    assert memconf.sql_to_run == [memb.Q_GRANT_MEMBERSHIP.format(DESIRED_GROUP1, ROLE1)]
def test_revoke_membership(mockdbcontext):
    """revoke_membership queues the expected REVOKE statement."""
    mockdbcontext.is_superuser = lambda x: False
    memconf = memb.MembershipAnalyzer(ROLE1, spec_memberships=DUMMY, dbcontext=mockdbcontext)
    memconf.revoke_membership(CURRENT_GROUP1)
    assert memconf.sql_to_run == [memb.Q_REVOKE_MEMBERSHIP.format(CURRENT_GROUP1, ROLE1)]
| 3,053 | 0 | 161 |
361ee0181936dd37e60aef8c4721f06e7ee22ec6 | 255 | py | Python | accounts/apps.py | GunnerJnr/stream-three-final-project | 87aa24b64c7c3e0af8117f2c565d6b56c591964c | [
"BSD-3-Clause"
] | 1 | 2017-10-10T14:55:55.000Z | 2017-10-10T14:55:55.000Z | accounts/apps.py | GunnerJnr/stream-three-final-project | 87aa24b64c7c3e0af8117f2c565d6b56c591964c | [
"BSD-3-Clause"
] | 9 | 2019-10-22T08:11:56.000Z | 2022-03-11T23:26:11.000Z | accounts/apps.py | GunnerJnr/stream-three-final-project | 87aa24b64c7c3e0af8117f2c565d6b56c591964c | [
"BSD-3-Clause"
] | 3 | 2017-10-26T02:13:10.000Z | 2018-05-31T16:04:20.000Z | """
Apps.py:
"""
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class AccountsConfig(AppConfig):
    """Django AppConfig for the ``accounts`` application."""
    # Dotted path of the application; used by Django's app registry.
    name = 'accounts'
| 15.9375 | 39 | 0.658824 | """
Apps.py:
"""
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class AccountsConfig(AppConfig):
    """Django AppConfig for the ``accounts`` application."""
    # Dotted path of the application; used by Django's app registry.
    name = 'accounts'
| 0 | 0 | 0 |
4864337d19c3fe235787df36f490760e262f39db | 98 | py | Python | api/views/index.py | cderwin/maps | 0146260935a749679396022b6d2b1d90b6df2539 | [
"MIT"
] | null | null | null | api/views/index.py | cderwin/maps | 0146260935a749679396022b6d2b1d90b6df2539 | [
"MIT"
] | 7 | 2016-02-09T07:18:48.000Z | 2016-02-09T07:25:40.000Z | api/views/index.py | cderwin/maps | 0146260935a749679396022b6d2b1d90b6df2539 | [
"MIT"
] | null | null | null | from flask import render_template
| 19.6 | 40 | 0.785714 | from flask import render_template
def handle_request():
    """Render the site's landing page."""
    template_name = 'index.html'
    return render_template(template_name)
| 41 | 0 | 23 |
02e9c8ec20674049b5cba7e119a2025b0c715b1c | 5,394 | py | Python | mysite/settings.py | jordanbroberts/python-myBlog | 09f373b1e7a4ce804b688d3bc18f894107119d7c | [
"MIT"
] | null | null | null | mysite/settings.py | jordanbroberts/python-myBlog | 09f373b1e7a4ce804b688d3bc18f894107119d7c | [
"MIT"
] | 5 | 2020-06-05T18:48:19.000Z | 2021-09-08T00:02:06.000Z | mysite/settings.py | jordanbroberts/python-myBlog | 09f373b1e7a4ce804b688d3bc18f894107119d7c | [
"MIT"
] | null | null | null | """
Django settings for {{ project_name }} project on Heroku. For more info, see:
https://github.com/heroku/heroku-django-template
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
import dj_database_url
import django_heroku
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "key"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
# Disable Django's own staticfiles handling in favour of WhiteNoise, for
# greater consistency between gunicorn and `./manage.py runserver`. See:
# http://whitenoise.evans.io/en/stable/django.html#using-whitenoise-in-development
'taggit',
'taggit_templatetags2',
'tinymce',
'blog',
'whitenoise.runserver_nostatic',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
'debug': DEBUG,
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
TINYMCE_INCLUDE_JQUERY = True
BROWSER_SPELLCHECKER = True
TINYMCE_DEFAULT_CONFIG = {
'height': 360,
'width': 1120,
'cleanup_on_startup': True,
'custom_undo_redo_levels': 20,
'selector': 'textarea',
'theme': 'modern',
'plugins': '''
paste textcolor save link image media preview codesample contextmenu
table code lists fullscreen insertdatetime nonbreaking
contextmenu directionality searchreplace wordcount visualblocks
visualchars code fullscreen autolink lists charmap print hr
anchor pagebreak
''',
'toolbar1': '''
fullscreen preview bold italic underline | fontselect,
fontsizeselect | forecolor backcolor | alignleft alignright |
aligncenter alignjustify | indent outdent | bullist numlist table |
| link image media | codesample |
''',
'toolbar2': '''
visualblocks visualchars |
charmap hr pagebreak nonbreaking anchor | code |
''',
'contextmenu': 'formats | link image',
'menubar': True,
'statusbar': True,
'codesample_dialog_height':500,
'codesample_dialog_width':300,
}
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Change 'default' database configuration with $DATABASE_URL.
DATABASES['default'].update(dj_database_url.config(conn_max_age=500, ssl_require=True))
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles' )
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'blog', 'static'),
]
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# Activate Django-Heroku.
django_heroku.settings(locals())
| 30.822857 | 91 | 0.693178 | """
Django settings for {{ project_name }} project on Heroku. For more info, see:
https://github.com/heroku/heroku-django-template
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
import dj_database_url
import django_heroku
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "key"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
# Disable Django's own staticfiles handling in favour of WhiteNoise, for
# greater consistency between gunicorn and `./manage.py runserver`. See:
# http://whitenoise.evans.io/en/stable/django.html#using-whitenoise-in-development
'taggit',
'taggit_templatetags2',
'tinymce',
'blog',
'whitenoise.runserver_nostatic',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
'debug': DEBUG,
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
TINYMCE_INCLUDE_JQUERY = True
BROWSER_SPELLCHECKER = True
TINYMCE_DEFAULT_CONFIG = {
'height': 360,
'width': 1120,
'cleanup_on_startup': True,
'custom_undo_redo_levels': 20,
'selector': 'textarea',
'theme': 'modern',
'plugins': '''
paste textcolor save link image media preview codesample contextmenu
table code lists fullscreen insertdatetime nonbreaking
contextmenu directionality searchreplace wordcount visualblocks
visualchars code fullscreen autolink lists charmap print hr
anchor pagebreak
''',
'toolbar1': '''
fullscreen preview bold italic underline | fontselect,
fontsizeselect | forecolor backcolor | alignleft alignright |
aligncenter alignjustify | indent outdent | bullist numlist table |
| link image media | codesample |
''',
'toolbar2': '''
visualblocks visualchars |
charmap hr pagebreak nonbreaking anchor | code |
''',
'contextmenu': 'formats | link image',
'menubar': True,
'statusbar': True,
'codesample_dialog_height':500,
'codesample_dialog_width':300,
}
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Change 'default' database configuration with $DATABASE_URL.
DATABASES['default'].update(dj_database_url.config(conn_max_age=500, ssl_require=True))
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles' )
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'blog', 'static'),
]
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# Activate Django-Heroku.
django_heroku.settings(locals())
| 0 | 0 | 0 |
45b881345d216339f86ace48c6f247fa127a6ea6 | 684 | py | Python | main.py | NaKolenke/kolenka-backend | b8bf0a49b424e088b02bfeb59b7a083e59973b88 | [
"MIT"
] | null | null | null | main.py | NaKolenke/kolenka-backend | b8bf0a49b424e088b02bfeb59b7a083e59973b88 | [
"MIT"
] | null | null | null | main.py | NaKolenke/kolenka-backend | b8bf0a49b424e088b02bfeb59b7a083e59973b88 | [
"MIT"
] | null | null | null | import sys
from src.model.migrations import migrate_schema
if __name__ == "__main__":
    # Tiny CLI dispatcher on sys.argv[1]:
    #   migrate         -> create the app, then run schema migrations
    #   convert <type>  -> run the named data converter
    # No argument prints usage help.
    if len(sys.argv) == 1:
        print(
            """Specify argument.
Possible commands:
migrate
convert users
convert blogs
For launching flask server see README.md"""
        )
    elif sys.argv[1] == "migrate":
        # Imported lazily so printing usage does not pull in the app stack.
        from src import create_app
        from src.model import db
        create_app()
        migrate_schema(db.get_database())
    elif sys.argv[1] == "convert":
        if len(sys.argv) == 3:
            from converters import convert
            convert(sys.argv[2])
        else:
            print("Use convert <type>")
    else:
        print("Unrecognized command")
| 22.8 | 47 | 0.596491 | import sys
from src.model.migrations import migrate_schema
if __name__ == "__main__":
    # Tiny CLI dispatcher on sys.argv[1]:
    #   migrate         -> create the app, then run schema migrations
    #   convert <type>  -> run the named data converter
    # No argument prints usage help.
    if len(sys.argv) == 1:
        print(
            """Specify argument.
Possible commands:
migrate
convert users
convert blogs
For launching flask server see README.md"""
        )
    elif sys.argv[1] == "migrate":
        # Imported lazily so printing usage does not pull in the app stack.
        from src import create_app
        from src.model import db
        create_app()
        migrate_schema(db.get_database())
    elif sys.argv[1] == "convert":
        if len(sys.argv) == 3:
            from converters import convert
            convert(sys.argv[2])
        else:
            print("Use convert <type>")
    else:
        print("Unrecognized command")
| 0 | 0 | 0 |
84952999aa38e354caf57eee97a391a84b7d9c5b | 1,935 | py | Python | minimax_agent.py | williampark73/snake | 919f8f2f1a2b60cdc5f1aa7d612836b108e6eb6e | [
"MIT"
] | null | null | null | minimax_agent.py | williampark73/snake | 919f8f2f1a2b60cdc5f1aa7d612836b108e6eb6e | [
"MIT"
] | null | null | null | minimax_agent.py | williampark73/snake | 919f8f2f1a2b60cdc5f1aa7d612836b108e6eb6e | [
"MIT"
] | null | null | null | import random
from curses import *
from random import randint
depth = 5
| 28.043478 | 138 | 0.717313 | import random
from curses import *
from random import randint
depth = 5
def minimax_agent_first_index(game, state):
    """Choose a move for player 1 via minimax with search depth 1."""
    return minimax_agent(game, state, 1, 1)
def minimax_agent_second_index(game, state):
    """Choose a move for player 2 via minimax with search depth 5."""
    return minimax_agent(game, state, 2, 5)
def get_valid(current_dir, actions):
    """Filter *actions* so the snake cannot reverse onto itself.

    Removes (in place) the direction opposite to ``current_dir`` from
    ``actions`` and returns the same list.

    Fixes two latent crashes in the original: ``remove`` was left unbound
    (NameError) when ``current_dir`` was not one of the four arrow keys,
    and ``list.remove`` raised ValueError when the opposite key was not
    present in ``actions``.  Both cases are now a no-op.
    """
    opposite = {
        KEY_RIGHT: KEY_LEFT,
        KEY_LEFT: KEY_RIGHT,
        KEY_UP: KEY_DOWN,
        KEY_DOWN: KEY_UP,
    }
    reverse = opposite.get(current_dir)
    if reverse in actions:
        actions.remove(reverse)
    return actions
def minimax_agent(game, state, agent_index, depth):
    """Pick the highest-scoring legal action for ``agent_index`` via minimax.

    ``state`` is the packed tuple used throughout this module; indices read
    here: [0] curses window, [2] per-snake current directions, [5] 1-based
    index of the snake to move.  Ties between equally scored actions are
    broken uniformly at random.

    Note: the ``depth`` parameter shadows the module-level ``depth`` constant.
    """
    win = state[0]
    current_dir = state[2][state[5]-1]
    # NOTE(review): getch() is called for its side effect only — presumably
    # to service/drain curses input during search; confirm against the game loop.
    win.getch()
    actions = get_valid(current_dir, game.actions())
    scores = [minimax_value(game, game.successor(state, action, False), agent_index, 3 - agent_index, depth) for action in actions]
    best_score = max(scores)
    best_indices = [index for index in range(len(scores)) if scores[index] == best_score]
    chosen_index = random.choice(best_indices)
    return actions[chosen_index]
def minimax_value(game, state, maximizing_agent, agent_index, depth):
    """Recursive minimax evaluation of ``state`` for ``maximizing_agent``.

    Terminal states score 0 (draw) or +/- infinity; at depth 0 the heuristic
    is 100 * the moving snake's score minus the squared distance from its
    head to the food.  ``3 - agent_index`` alternates between agents 1 and 2.
    """
    if game.is_end(state)[0]:
        winner = game.is_end(state)[1]
        if winner == 0:
            return 0
        elif winner == maximizing_agent:
            # NOTE(review): -inf when winner == maximizing_agent looks inverted;
            # verify is_end()'s "winner" semantics (it may report the crashed snake).
            return -float('inf')
        else:
            return float('inf')
    if depth == 0:
        # Heuristic leaf value: reward score, penalize distance to the food.
        food = state[4]
        snake = state[1][state[5]-1]
        return 100*state[3][state[5]-1] -((snake[0][0] - food[0])**2 + (snake[0][1] - food[1])**2)
    current_dir = state[2][state[5]-1]
    actions = get_valid(current_dir, game.actions())
    # Rebinding `state` below only affects the local name; save/restore is a no-op.
    save_state = state
    if state[5] == maximizing_agent:
        values = [minimax_value(game, game.successor(state, action, False), maximizing_agent, 3 - agent_index, depth - 1) for action in actions]
        state = save_state
        return max(values)
    else:
        values = [minimax_value(game, game.successor(state, action, False), maximizing_agent, 3 - agent_index, depth - 1) for action in actions]
        state = save_state
        return min(values)
| 1,745 | 0 | 115 |
fab2bdc56cfbd1639c744a47a03bd1a1ec58e5fa | 1,252 | py | Python | src/magic_config/interfaces.py | mogaiskii/magic_config | c638ffebf5174600ef9632f923da9a35820ade3f | [
"MIT"
] | null | null | null | src/magic_config/interfaces.py | mogaiskii/magic_config | c638ffebf5174600ef9632f923da9a35820ade3f | [
"MIT"
] | null | null | null | src/magic_config/interfaces.py | mogaiskii/magic_config | c638ffebf5174600ef9632f923da9a35820ade3f | [
"MIT"
] | null | null | null | """
Module for resolving circular imports via "interfaces"
"""
import abc
| 18.686567 | 54 | 0.589457 | """
Module for resolving circular imports via "interfaces"
"""
import abc
class AbstractSettingsManager(abc.ABC):
    """Interface for the object that owns setting loaders and fields."""
    # Return all configured loaders.
    @abc.abstractmethod
    def get_loaders(self): ...
    # Return the primary (default) loader.
    @abc.abstractmethod
    def get_main_loader(self): ...
    # Return the declared setting fields.
    @abc.abstractmethod
    def get_fields(self): ...
class AbstractSettingField(abc.ABC):
    """Empty abstract base used as the common type for setting fields."""
    pass
class AbstractLoader(abc.ABC):
    """Interface for backends that read/write setting values by field name."""
    @abc.abstractmethod
    def get_value(self, field_name, **kwargs):
        """Return the stored value for a field.

        :param field_name: field meta name
        """
        ...
    @abc.abstractmethod
    def set_value(self, field_name, value, **kwargs):
        """Store a new value for a field.

        :param field_name: field meta name
        :param value: new value
        """
        ...
    @abc.abstractmethod
    def get_key(self, field_name, **kwargs):
        """Return the loader's lookup key for a field.

        :param field_name: field meta name
        """
        ...
    # True when this loader only supports reading values.
    @abc.abstractmethod
    def is_readonly(self) -> bool: ...
class AbstractSettingType(abc.ABC):
    """Interface for converting setting values to and from their string form."""
    # Parse a raw string into a Python value.
    @classmethod
    @abc.abstractmethod
    def parse(cls, str_value): ...
    # Serialize a Python value back to its string form.
    @classmethod
    @abc.abstractmethod
    def serialize(cls, value): ...
    @classmethod
    @abc.abstractmethod
    def cast(cls, value):
        """
        Cast a Python value to this setting type.
        """
        ...
| 49 | 1,032 | 92 |
c2e50a9278dba12f403557d94b131a7ea2985242 | 349 | py | Python | Recursion/index_of_first_occurance_of_number_index_way.py | shubh2ds/DSA_Python | bf7dbb5560494e9d18b58e05ba9a51b97e25b332 | [
"MIT"
] | 1 | 2021-10-15T11:23:36.000Z | 2021-10-15T11:23:36.000Z | Recursion/index_of_first_occurance_of_number_index_way.py | shubh2ds/Data-Structures-and-Algorithms-Python | bf7dbb5560494e9d18b58e05ba9a51b97e25b332 | [
"MIT"
] | null | null | null | Recursion/index_of_first_occurance_of_number_index_way.py | shubh2ds/Data-Structures-and-Algorithms-Python | bf7dbb5560494e9d18b58e05ba9a51b97e25b332 | [
"MIT"
] | null | null | null | #To complete
# Main
from sys import setrecursionlimit
setrecursionlimit(11000)
n=int(input())
arr=list(int(i) for i in input().strip().split(' '))
x=int(input())
idx=0
print(firstIndex(arr, x, idx))
| 20.529412 | 52 | 0.661891 | #To complete
def firstIndex(arr, x, idx):
    """Return the index of the first occurrence of ``x`` in ``arr`` at or
    after position ``idx``, or -1 when it never occurs (recursive search)."""
    if not arr or idx == len(arr):
        return -1
    return idx if arr[idx] == x else firstIndex(arr, x, idx + 1)
# Main
# Reads: array length n (read but otherwise unused — the array line is split
# directly), the space-separated array, and the target value, then prints the
# first index of the target.  The recursion limit is raised to cover long arrays.
from sys import setrecursionlimit
setrecursionlimit(11000)
n=int(input())
arr=list(int(i) for i in input().strip().split(' '))
x=int(input())
idx=0
print(firstIndex(arr, x, idx))
| 127 | 0 | 22 |
863b3659a041f8fe2d390beef85ff57c3459ec80 | 752 | py | Python | test/appcontrollers.py | minixalpha/WatchTips | 4e7402741c98fcd1888a01b712a5f9e0aa0689ac | [
"BSD-3-Clause"
] | 1 | 2017-10-25T17:08:48.000Z | 2017-10-25T17:08:48.000Z | test/appcontrollers.py | minixalpha/WatchTips | 4e7402741c98fcd1888a01b712a5f9e0aa0689ac | [
"BSD-3-Clause"
] | null | null | null | test/appcontrollers.py | minixalpha/WatchTips | 4e7402741c98fcd1888a01b712a5f9e0aa0689ac | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
#coding: utf-8
import watchtipstest
import unittest
from index import app
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(AppController)
watchtipstest.main(suite)
| 25.066667 | 70 | 0.632979 | #!/usr/bin/env python
#coding: utf-8
import watchtipstest
import unittest
from index import app
class AppController(unittest.TestCase):
    """Smoke tests for the web application's page routing."""
    def testPageView(self):
        """Every known page URL should answer 200 OK to a GET."""
        url_ok = ['/', '/watch/2', '/register', '/login']
        method = 'GET'
        status_ok = '200 OK'
        for url in url_ok:
            response = app.request(url, method=method)
            self.assertEqual(response.status, status_ok)
    def testNotFound(self):
        """An unknown URL should answer 404 Not Found."""
        url = '/8'
        method = 'GET'
        status = '404 Not Found'
        response = app.request(url, method=method)
        self.assertEqual(response.status, status)
if __name__ == '__main__':
    # Run only this TestCase through the project's custom test runner.
    suite = unittest.TestLoader().loadTestsFromTestCase(AppController)
    watchtipstest.main(suite)
| 430 | 18 | 76 |
bf7f51474bbee7250caea9b9edf201151ad17d06 | 767 | py | Python | bf2c.py | ikorin24/bf2c | 0eea26ebaaf25d8f372ea18ce6cff3db2701a080 | [
"Apache-2.0"
] | null | null | null | bf2c.py | ikorin24/bf2c | 0eea26ebaaf25d8f372ea18ce6cff3db2701a080 | [
"Apache-2.0"
] | null | null | null | bf2c.py | ikorin24/bf2c | 0eea26ebaaf25d8f372ea18ce6cff3db2701a080 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import sys
if __name__ == '__main__':
main()
| 16.673913 | 52 | 0.485007 | #!/usr/bin/env python
import sys
def main():
    """Translate the Brainfuck source file named in ``sys.argv[1]`` to C.

    The generated program is printed to stdout; exits with status 1 when no
    filename is given.
    """
    if len(sys.argv) < 2:
        sys.exit(1)
    filename = sys.argv[1]
    # One C statement per Brainfuck instruction; any other character is ignored.
    dic = {
        '+': '(*ptr)++;',
        '-': '(*ptr)--;',
        '>': 'ptr++;',
        '<': 'ptr--;',
        '[': 'while(*ptr){',
        ']': '}',
        ',': '*ptr=getchar();',
        '.': 'putchar(*ptr);',
    }
    # C prologue: allocate and zero a 1 MiB tape, point ptr at its start.
    start = '\
#include <stdio.h>\n\
#include <stdlib.h>\n\
int main(){\n\
const int len = 1024 * 1024;\n\
unsigned char *mem = (unsigned char*)malloc(len);\n\
for(int i = 0;i < len;i++){ mem[i] = 0; }\n\
unsigned char *ptr = mem;'
    # C epilogue: release the tape and return.
    end = '\n\
free(mem);\n\
return 0;\n\
}'
    print(start)
    with open(filename) as f:
        for line in f:
            for char in line:
                if char in dic:
                    print(dic[char])
    print(end)
if __name__ == '__main__':
    # Script entry point.
    main()
| 671 | 0 | 23 |
1c5539703c18ce91d32669c0a2c6aa22b4ce5ad2 | 2,193 | py | Python | max-flow-network-analyzer/source/test_network_generator.py | raisingdibar/course-projects | 78fc44a8d015446ab07ee9a42b5d7672dfbd51e8 | [
"Unlicense"
] | 1 | 2018-04-19T11:45:29.000Z | 2018-04-19T11:45:29.000Z | max-flow-network-analyzer/source/test_network_generator.py | raisingdibar/course-projects | 78fc44a8d015446ab07ee9a42b5d7672dfbd51e8 | [
"Unlicense"
] | null | null | null | max-flow-network-analyzer/source/test_network_generator.py | raisingdibar/course-projects | 78fc44a8d015446ab07ee9a42b5d7672dfbd51e8 | [
"Unlicense"
] | null | null | null | from random import *
import copy
| 45.6875 | 126 | 0.577747 | from random import *
import copy
class test_network_generator:
    """Randomly generated flow network for exercising a max-flow analyzer.

    ``vertices`` maps each vertex id to its adjacency list; ``edges`` holds
    ``(u, v, flow, capacity)`` tuples with flow initialised to 0.  Vertex 1
    and the highest-numbered vertex are wired last with their edges oriented
    out of 1 and into the last vertex, consistent with source/sink roles.
    """
    def __init__(self):
        """Build a random graph of 7-15 vertices with capacities in 5..20."""
        self.vertices = {} # Will hold a dictionary of all vertices and the vertices they are connected to
        self.edges = [] # Each edge will be represented by tuples
        # The first two numbers are the vertexes that are connected by an edge
        # The third number is the current flow through that edge, and the fourth is the capacity for that edge
        # Randomly pick an upper bound on the number of vertices in the graph.
        randomVertices = randint(7,15)
        # This creates a dictionary of keys representing a random amount of vertices between 7 and 15
        # At this step, the vertices will have no values
        for i in range(2,randomVertices):
            self.vertices[i] = []
        # Interior vertices get 1-3 random neighbours; duplicate or self
        # connections are simply skipped rather than retried.
        for vertex in self.vertices:
            if randomVertices < 9:
                randomEdges = randint(1, 2)
            else:
                randomEdges = randint(2, 3)
            for x in range(randomEdges):
                randomConnect = randint(2, randomVertices-1)
                if (randomConnect in self.vertices[vertex] or randomConnect == vertex):
                    continue
                self.vertices[vertex].append(randomConnect)
                self.vertices[randomConnect].append(vertex)
                edge = (vertex, randomConnect, 0, randint(5, 20))
                self.edges.append(edge)
        # Wire the source (1) and sink (randomVertices) last, retrying until a
        # fresh neighbour is found so each gets 2-3 distinct connections.
        final = [1, randomVertices]
        for vertex in final:
            self.vertices[vertex] = []
            randomEdges = randint(2,3)
            for x in range(randomEdges):
                randomConnect = randint(2, randomVertices-1)
                while (randomConnect in self.vertices[vertex]):
                    randomConnect = randint(2, randomVertices-1)
                self.vertices[vertex].append(randomConnect)
                self.vertices[randomConnect].append(vertex)
                if (vertex == 1):
                    edge = (vertex, randomConnect, 0, randint(5, 20))
                else:
                    edge = (randomConnect, vertex, 0, randint(5, 20))
                self.edges.append(edge)
| 2,103 | 8 | 49 |
1937fc68d47cdf4d841a33cfd82360ed50daa7ef | 16,564 | py | Python | astm/mapping.py | Iskander1b/python-astm | 606a77407e59c2f2dd12d65a7b2d2e3c141ad8d9 | [
"BSD-3-Clause"
] | 38 | 2015-06-11T06:43:02.000Z | 2022-03-01T18:21:07.000Z | astm/mapping.py | Iskander1b/python-astm | 606a77407e59c2f2dd12d65a7b2d2e3c141ad8d9 | [
"BSD-3-Clause"
] | 7 | 2016-08-12T10:16:34.000Z | 2021-02-11T15:43:34.000Z | astm/mapping.py | Iskander1b/python-astm | 606a77407e59c2f2dd12d65a7b2d2e3c141ad8d9 | [
"BSD-3-Clause"
] | 39 | 2015-08-10T16:49:33.000Z | 2021-12-26T10:27:07.000Z | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
import datetime
import decimal
import inspect
import time
import warnings
from operator import itemgetter
from itertools import islice
try:
from itertools import izip_longest
except ImportError: # Python 3
from itertools import zip_longest as izip_longest
from .compat import basestring, unicode, long
class Field(object):
"""Base mapping field class."""
_MappingProxy = MetaMapping('_MappingProxy', (object,), {}) # Python 3 workaround
class Record(Mapping):
"""ASTM record mapping class."""
class Component(Mapping):
"""ASTM component mapping class."""
class TextField(Field):
"""Mapping field for string values."""
class ConstantField(Field):
"""Mapping field for constant values.
>>> class Record(Mapping):
... type = ConstantField(default='S')
>>> rec = Record()
>>> rec.type
'S'
>>> rec.type = 'W'
Traceback (most recent call last):
...
ValueError: Field changing not allowed
"""
class IntegerField(Field):
"""Mapping field for integer values."""
class DecimalField(Field):
"""Mapping field for decimal values."""
class DateField(Field):
"""Mapping field for storing date/time values."""
format = '%Y%m%d'
class TimeField(Field):
"""Mapping field for storing times."""
format = '%H%M%S'
class DateTimeField(Field):
"""Mapping field for storing date/time values."""
format = '%Y%m%d%H%M%S'
class SetField(Field):
"""Mapping field for predefined set of values."""
class ComponentField(Field):
"""Mapping field for storing record component."""
class RepeatedComponentField(Field):
"""Mapping field for storing list of record components."""
# update docstrings from list
for name, obj in inspect.getmembers(Proxy):
if getattr(list, name, None) is None\
or name in ['__module__', '__doc__']:
continue
if not inspect.isfunction(obj):
continue
obj.__doc__ = getattr(list, name).__doc__
del name, obj
class NotUsedField(Field):
"""Mapping field for value that should be used. Acts as placeholder.
On attempt to assign something to it raises :exc:`UserWarning` and rejects
assigned value."""
| 31.610687 | 81 | 0.579087 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
import datetime
import decimal
import inspect
import time
import warnings
from operator import itemgetter
from itertools import islice
try:
from itertools import izip_longest
except ImportError: # Python 3
from itertools import zip_longest as izip_longest
from .compat import basestring, unicode, long
def make_string(value):
    """Return *value* coerced to a text (unicode) string; bytes are UTF-8."""
    if isinstance(value, bytes):
        return unicode(value, 'utf-8')
    if isinstance(value, unicode):
        return value
    return unicode(value)
class Field(object):
    """Base mapping field class.

    Implemented as a data descriptor: attribute reads decode the raw value
    kept in the owner's ``_data`` dict, writes encode and store it.

    :param name: key under which the value is stored in ``instance._data``;
                 filled in from the attribute name by ``MetaMapping`` when
                 omitted.
    :param default: default value, or a zero-argument callable producing one,
                    used when no value is stored.
    :param required: marks the field mandatory for ``Mapping.to_astm()``.
    :param length: maximum accepted encoded string length; ``None`` disables
                   the check.
    """
    def __init__(self, name=None, default=None, required=False, length=None):
        self.name = name
        self.default = default
        self.required = required
        self.length = length
    def __get__(self, instance, owner):
        # Class-level access returns the descriptor itself.
        if instance is None:
            return self
        value = instance._data.get(self.name)
        if value is not None:
            value = self._get_value(value)
        elif self.default is not None:
            default = self.default
            # Callable defaults are evaluated lazily on each access.
            if hasattr(default, '__call__'):
                default = default()
            value = default
        return value
    def __set__(self, instance, value):
        # None is stored as-is; everything else goes through _set_value().
        if value is not None:
            value = self._set_value(value)
        instance._data[self.name] = value
    def _get_value(self, value):
        # Decode hook (stored value -> Python value); subclasses override.
        return value
    def _set_value(self, value):
        # Encode hook (Python value -> stored string); enforces max length.
        value = make_string(value)
        if self.length is not None and len(value) > self.length:
            raise ValueError('Field %r value is too long (max %d, got %d)'
                             '' % (self.name, self.length, len(value)))
        return value
class MetaMapping(type):
    """Metaclass collecting ``Field`` descriptors into an ordered ``_fields``.

    Builds a list of ``(attrname, field)`` pairs, inheriting fields from base
    classes; a subclass redefining a field overrides it in place, preserving
    the original position in the order.
    """
    def __new__(mcs, name, bases, d):
        fields = []
        names = []
        def merge_fields(items):
            # Merge (name, field) pairs into `fields`, overriding in place.
            for name, field in items:
                if field.name is None:
                    # Default the storage key to the attribute name.
                    field.name = name
                if name not in names:
                    fields.append((name, field))
                    names.append(name)
                else:
                    fields[names.index(name)] = (name, field)
        for base in bases:
            if hasattr(base, '_fields'):
                merge_fields(base._fields)
        merge_fields([(k, v) for k, v in d.items() if isinstance(v, Field)])
        if '_fields' not in d:
            d['_fields'] = fields
        else:
            # An explicitly declared _fields list wins over discovered fields.
            merge_fields(d['_fields'])
            d['_fields'] = fields
        return super(MetaMapping, mcs).__new__(mcs, name, bases, d)
_MappingProxy = MetaMapping('_MappingProxy', (object,), {}) # Python 3 workaround
class Mapping(_MappingProxy):
    """Base class for ASTM mappings: an ordered, dict-backed record.

    Field values may be given positionally (in ``_fields`` order) and/or by
    keyword; unknown keywords raise ``ValueError``.  Instances behave like a
    fixed-length sequence of their field values.
    """
    def __init__(self, *args, **kwargs):
        fieldnames = map(itemgetter(0), self._fields)
        # Pair positional args with field names; missing ones become None.
        values = dict(izip_longest(fieldnames, args))
        values.update(kwargs)
        self._data = {}
        for attrname, field in self._fields:
            attrval = values.pop(attrname, None)
            if attrval is None:
                # Trigger the descriptor so defaults are applied and stored.
                setattr(self, attrname, getattr(self, attrname))
            else:
                setattr(self, attrname, attrval)
        if values:
            raise ValueError('Unexpected kwargs found: %r' % values)
    @classmethod
    def build(cls, *a):
        """Create a subclass from the given fields; every field must be named."""
        fields = []
        newcls = type('Generic' + cls.__name__, (cls,), {})
        for field in a:
            if field.name is None:
                raise ValueError('Name is required for ordered fields.')
            setattr(newcls, field.name, field)
            fields.append((field.name, field))
        newcls._fields = fields
        return newcls
    def __getitem__(self, key):
        return self.values()[key]
    def __setitem__(self, key, value):
        setattr(self, self._fields[key][0], value)
    def __delitem__(self, key):
        # "Deleting" an item resets the field's stored value to None.
        self._data[self._fields[key][0]] = None
    def __iter__(self):
        return iter(self.values())
    def __contains__(self, item):
        return item in self.values()
    def __len__(self):
        # One _data entry per field once __init__ has run.
        return len(self._data)
    def __eq__(self, other):
        # Equal when `other` yields the same values in field order.
        if len(self) != len(other):
            return False
        for key, value in zip(self.keys(), other):
            if getattr(self, key) != value:
                return False
        return True
    def __ne__(self, other):
        return not (self == other)
    def __repr__(self):
        return '%s(%s)' % (self.__class__.__name__,
                           ', '.join('%s=%r' % (key, value)
                                     for key, value in self.items()))
    def keys(self):
        """Return field names in declaration order."""
        return [key for key, field in self._fields]
    def values(self):
        """Return decoded field values in declaration order."""
        return [getattr(self, key) for key in self.keys()]
    def items(self):
        """Return ``(name, value)`` pairs in declaration order."""
        return [(key, getattr(self, key)) for key, field in self._fields]
    def to_astm(self):
        """Return the record as nested lists of raw values for ASTM encoding.

        Nested mappings and lists of mappings are expanded recursively.
        Raises ``ValueError`` when a field marked ``required`` is None.
        """
        def values(obj):
            for key, field in obj._fields:
                value = obj._data[key]
                if isinstance(value, Mapping):
                    yield list(values(value))
                elif isinstance(value, list):
                    stack = []
                    for item in value:
                        if isinstance(item, Mapping):
                            stack.append(list(values(item)))
                        else:
                            stack.append(item)
                    yield stack
                elif value is None and field.required:
                    raise ValueError('Field %r value should not be None' % key)
                else:
                    yield value
        return list(values(self))
class Record(Mapping):
    """ASTM record mapping class; concrete record types subclass this."""
class Component(Mapping):
    """ASTM component mapping class; concrete components subclass this."""
class TextField(Field):
    """Mapping field for string values."""
    def _set_value(self, value):
        # Accept only text; length checking is deferred to the base class.
        if isinstance(value, basestring):
            return super(TextField, self)._set_value(value)
        raise TypeError('String value expected, got %r' % value)
class ConstantField(Field):
    """Mapping field for constant values.

    >>> class Record(Mapping):
    ...     type = ConstantField(default='S')
    >>> rec = Record()
    >>> rec.type
    'S'
    >>> rec.type = 'W'
    Traceback (most recent call last):
    ...
    ValueError: Field changing not allowed
    """
    # NOTE(review): the shared default Field() instance is reused across
    # ConstantFields; harmless as long as it stays stateless here.
    def __init__(self, name=None, default=None, field=Field()):
        # Always required; `default` is the one and only accepted value.
        super(ConstantField, self).__init__(name, default, True, None)
        self.field = field
        self.required = True
        if self.default is None:
            raise ValueError('Constant value should be defined')
    def _get_value(self, value):
        # Reads always yield the constant, whatever raw value is stored.
        return self.default
    def _set_value(self, value):
        # Decode via the wrapped field, then accept only the constant itself.
        value = self.field._get_value(value)
        if self.default != value:
            raise ValueError('Field changing not allowed: got %r, accepts %r'
                             '' % (value, self.default))
        return super(ConstantField, self)._set_value(value)
class IntegerField(Field):
    """Mapping field for integer values."""
    def _get_value(self, value):
        return int(value)
    def _set_value(self, value):
        # Coerce non-integers through _get_value(); reject what int() rejects.
        coerced = value
        if not isinstance(coerced, (int, long)):
            try:
                coerced = self._get_value(coerced)
            except Exception:
                raise TypeError('Integer value expected, got %r' % value)
        return super(IntegerField, self)._set_value(coerced)
class DecimalField(Field):
    """Mapping field for decimal values."""
    def _get_value(self, value):
        return decimal.Decimal(value)
    def _set_value(self, value):
        # Only numeric inputs are accepted; everything else is a type error.
        if isinstance(value, (int, long, float, decimal.Decimal)):
            return super(DecimalField, self)._set_value(value)
        raise TypeError('Decimal value expected, got %r' % value)
class DateField(Field):
    """Mapping field for storing date/time values."""
    format = '%Y%m%d'
    def _get_value(self, value):
        return datetime.datetime.strptime(value, self.format)
    def _set_value(self, value):
        # Strings are parsed first; anything else must already be a date.
        moment = self._get_value(value) if isinstance(value, basestring) else value
        if not isinstance(moment, (datetime.datetime, datetime.date)):
            raise TypeError('Datetime value expected, got %r' % moment)
        return moment.strftime(self.format)
class TimeField(Field):
    """Mapping field for storing times."""
    format = '%H%M%S'
    def _get_value(self, value):
        # Strings are parsed into datetime.time; other values pass through.
        if isinstance(value, basestring):
            try:
                value = value.split('.', 1)[0] # strip out microseconds
                # strptime fields [3:6] are (hour, minute, second).
                value = datetime.time(*time.strptime(value, self.format)[3:6])
            except ValueError:
                raise ValueError('Value %r does not match format %s'
                                 '' % (value, self.format))
        return value
    def _set_value(self, value):
        if isinstance(value, basestring):
            value = self._get_value(value)
        if not isinstance(value, (datetime.datetime, datetime.time)):
            raise TypeError('Datetime value expected, got %r' % value)
        if isinstance(value, datetime.datetime):
            value = value.time()
        # Microseconds are dropped: the wire format has second resolution.
        return value.replace(microsecond=0).strftime(self.format)
class DateTimeField(Field):
    """Mapping field for storing date/time values."""

    format = '%Y%m%d%H%M%S'

    def _get_value(self, value):
        # Parse the stored string back into a datetime object.
        return datetime.datetime.strptime(value, self.format)

    def _set_value(self, value):
        # Accept either an already-formatted string (re-parsed first)
        # or a real date/datetime object.
        parsed = self._get_value(value) if isinstance(value, basestring) else value
        if isinstance(parsed, (datetime.datetime, datetime.date)):
            return parsed.strftime(self.format)
        raise TypeError('Datetime value expected, got %r' % parsed)
class SetField(Field):
    """Mapping field for predefined set of values.

    Values are normalized and stored via an inner ``field`` instance;
    only values contained in ``values`` are accepted on assignment.
    """
    def __init__(self, name=None, default=None,
                 required=False, length=None,
                 values=None, field=Field()):
        # NOTE(review): ``field=Field()`` is evaluated once at definition
        # time, so every SetField created without an explicit ``field``
        # shares the same Field instance — confirm Field is stateless
        # before relying on this.
        super(SetField, self).__init__(name, default, required, length)
        self.field = field
        # ``values and set(values) or set([])``: falls back to an empty
        # set when ``values`` is None or empty.
        self.values = values and set(values) or set([])
    def _get_value(self, value):
        # Delegate conversion of the stored form to the inner field.
        return self.field._get_value(value)
    def _set_value(self, value):
        # Normalize through the inner field first so that membership is
        # checked against the canonical (converted) value.
        value = self.field._get_value(value)
        if value not in self.values:
            raise ValueError('Unexpectable value %r' % value)
        return self.field._set_value(value)
class ComponentField(Field):
    """Mapping field for storing record component."""

    def __init__(self, mapping, name=None, default=None):
        self.mapping = mapping
        # Fall back to an empty component when no default was given.
        super(ComponentField, self).__init__(name, default or mapping())

    def _get_value(self, value):
        # dicts become keyword arguments; ready-made components pass
        # through; anything else is unpacked positionally.
        if isinstance(value, dict):
            return self.mapping(**value)
        if isinstance(value, self.mapping):
            return value
        return self.mapping(*value)

    def _set_value(self, value):
        if isinstance(value, dict):
            return self.mapping(**value)
        if isinstance(value, self.mapping):
            return value
        # A bare string stands for a single positional argument (it must
        # not be iterated character by character).
        if isinstance(value, basestring):
            return self.mapping(value)
        return self.mapping(*value)
class RepeatedComponentField(Field):
    """Mapping field for storing list of record components.

    Reads return a :class:`Proxy` that wraps the raw stored list and
    converts items through the inner :class:`ComponentField` on access;
    writes convert every item to its stored form.
    """
    def __init__(self, field, name=None, default=None):
        # Accept either a ready ComponentField or a Mapping subclass,
        # which gets wrapped into one.
        if isinstance(field, ComponentField):
            self.field = field
        else:
            assert isinstance(field, type) and issubclass(field, Mapping)
            self.field = ComponentField(field)
        default = default or []
        super(RepeatedComponentField, self).__init__(name, default)
    class Proxy(list):
        """List-like view over the raw stored sequence.

        ``self.list`` is the *stored* representation; item access and
        mutation go through ``self.field`` so callers always see
        converted component objects.
        """
        def __init__(self, seq, field):
            # Keep a reference to the underlying stored list so that
            # mutations propagate back to the owning document.
            list.__init__(self, seq)
            self.list = seq
            self.field = field
        def _to_list(self):
            # Converted snapshot, used for comparisons and count().
            return [list(self.field._get_value(item)) for item in self.list]
        def __add__(self, other):
            obj = type(self)(self.list, self.field)
            obj.extend(other)
            return obj
        def __iadd__(self, other):
            self.extend(other)
            return self
        def __mul__(self, other):
            return type(self)(self.list * other, self.field)
        def __imul__(self, other):
            self.list *= other
            return self
        # Comparisons operate on the converted values, not the raw list.
        def __lt__(self, other):
            return self._to_list() < other
        def __le__(self, other):
            return self._to_list() <= other
        def __eq__(self, other):
            return self._to_list() == other
        def __ne__(self, other):
            return self._to_list() != other
        def __ge__(self, other):
            return self._to_list() >= other
        def __gt__(self, other):
            return self._to_list() > other
        def __repr__(self):
            return '<ListProxy %s %r>' % (self.list, list(self))
        def __str__(self):
            return str(self.list)
        def __unicode__(self):
            # Python 2 only.
            return unicode(self.list)
        def __delitem__(self, index):
            del self.list[index]
        def __getitem__(self, index):
            # Convert on read.
            return self.field._get_value(self.list[index])
        def __setitem__(self, index, value):
            # Convert on write.
            self.list[index] = self.field._set_value(value)
        # __delslice__/__getslice__/__setslice__ are Python 2 only; on
        # Python 3 slicing would go through __getitem__ et al. instead.
        def __delslice__(self, i, j):
            del self.list[i:j]
        def __getslice__(self, i, j):
            return self.__class__(self.list[i:j], self.field)
        def __setslice__(self, i, j, seq):
            self.list[i:j] = [self.field._set_value(v) for v in seq]
        def __contains__(self, value):
            # Compare converted items so membership matches __eq__.
            for item in self:
                if item == value:
                    return True
            return False
        def __iter__(self):
            # Index-based iteration so each item is converted via
            # __getitem__.
            for index in range(len(self)):
                yield self[index]
        def __len__(self):
            return len(self.list)
        def __nonzero__(self):
            # Python 2 truth value (would be __bool__ on Python 3).
            return bool(self.list)
        def __reduce__(self):
            # Pickle as the plain underlying list.
            return self.list.__reduce__()
        def __reduce_ex__(self, *args, **kwargs):
            return self.list.__reduce_ex__(*args, **kwargs)
        def append(self, item):
            self.list.append(self.field._set_value(item))
        def count(self, value):
            return self._to_list().count(value)
        def extend(self, other):
            self.list.extend([self.field._set_value(i) for i in other])
        def index(self, value, start=None, stop=None):
            start = start or 0
            for idx, item in enumerate(islice(self, start, stop)):
                if item == value:
                    return idx + start
            else:
                # Loop exhausted without a match.
                raise ValueError('%r not in list' % value)
        def insert(self, index, object):
            self.list.insert(index, self.field._set_value(object))
        def remove(self, value):
            for item in self:
                if item == value:
                    return self.list.remove(value)
            raise ValueError('Value %r not in list' % value)
        def pop(self, index=-1):
            return self.field._get_value(self.list.pop(index))
        def sort(self, cmp=None, key=None, reverse=False):
            # Sorting the proxy in place would desynchronize it from the
            # stored representation, so it is deliberately unsupported.
            raise NotImplementedError('In place sorting not allowed.')
    # update docstrings from list
    # (runs at class-creation time: copies list's docstrings onto the
    # matching Proxy methods so help() reads like the builtin.)
    for name, obj in inspect.getmembers(Proxy):
        if getattr(list, name, None) is None\
                or name in ['__module__', '__doc__']:
            continue
        if not inspect.isfunction(obj):
            continue
        obj.__doc__ = getattr(list, name).__doc__
    del name, obj
    def _get_value(self, value):
        # Wrap the raw stored list in a converting proxy.
        return self.Proxy(value, self.field)
    def _set_value(self, value):
        # Convert every item to its stored form.
        return [self.field._set_value(item) for item in value]
class NotUsedField(Field):
    """Mapping field for value that should be used. Acts as placeholder.

    Reads always yield ``None``; any assignment is discarded after
    emitting a :exc:`UserWarning`.
    """

    def __init__(self, name=None):
        super(NotUsedField, self).__init__(name)

    def _get_value(self, value):
        # Nothing is ever stored, so nothing is ever read.
        return None

    def _set_value(self, value):
        # Warn so the author notices the dead assignment, then drop it.
        warnings.warn(
            'Field %r is not used, any assignments are omitted' % self.name,
            UserWarning)
        return None
| 11,657 | 430 | 2,032 |
6b575eb58b808764e5984acf25dc85e83ca8068b | 1,977 | py | Python | pyiArduinoI2Cmotor/examples/FindReducer.py | tremaru/pyiArduinoI2Cmotor | 5e83c4ab1beaf6041663f4399028a5d3f5fa3f56 | [
"MIT"
] | null | null | null | pyiArduinoI2Cmotor/examples/FindReducer.py | tremaru/pyiArduinoI2Cmotor | 5e83c4ab1beaf6041663f4399028a5d3f5fa3f56 | [
"MIT"
] | null | null | null | pyiArduinoI2Cmotor/examples/FindReducer.py | tremaru/pyiArduinoI2Cmotor | 5e83c4ab1beaf6041663f4399028a5d3f5fa3f56 | [
"MIT"
] | null | null | null | # ОПРЕДЕЛЕНИЕ РЕАЛЬНОГО ПЕРЕДАТОЧНОГО ОТНОШЕНИЯ: #
# ВЫПОЛНИТЕ СЛЕДУЮЩИЕ ДЕЙСТВИЯ: #
# После старта скрипта, в мониторе будут появляться значения 0.00
# При вращении ротора мотора (в любую сторону), эти значения будут увеличиваться.
# Поворачивайте вручную ротор мотора до тех пор пока вал редуктора не повернётся на 1 полный оборот.
# В мониторе появится значение равное передаточному отношению редуктора. Оно может быть не целым!
# Для большей точности советуем поворачивать ротор мотора до тех пор, пока вал редуктора не повернётся 10 раз, и разделить полученное значение на 10.
#
from pyiArduinoI2Cmotor import * # Подключаем библиотеку для работы с мотором I2C-flash.
mot = pyiArduinoI2Cmotor(0x09) # Объявляем объект mot для работы с функциями и методами библиотеки pyiArduinoI2Cmotor, указывая адрес модуля на шине I2C.
# Если объявить объект без указания адреса (mot = pyiArduinoI2Cmotor ), то адрес будет найден автоматически.
mot.begin() # Инициируем работу с мотором.
mot.delSum() # Сбрасываем количество совершённых оборотов вала.
mot.setReducer(1.0) # Указываем передаточное отношение редуктора как 1:1.
#mot.setMagnet(7) # Укажите реальное количество магнитных полюсов одной полярности, кольцевого магнита установленного на роторе мотора.
# Для определения этого значения воспользуйтесь примером библиотеки FindMagnet.
#
while True: #
print( mot.getSum(MOT_REV) ) # Выводим количество полных оборотов вала.
sleep(.2) #
| 94.142857 | 176 | 0.584219 | # ОПРЕДЕЛЕНИЕ РЕАЛЬНОГО ПЕРЕДАТОЧНОГО ОТНОШЕНИЯ: #
# ВЫПОЛНИТЕ СЛЕДУЮЩИЕ ДЕЙСТВИЯ: #
# После старта скрипта, в мониторе будут появляться значения 0.00
# При вращении ротора мотора (в любую сторону), эти значения будут увеличиваться.
# Поворачивайте вручную ротор мотора до тех пор пока вал редуктора не повернётся на 1 полный оборот.
# В мониторе появится значение равное передаточному отношению редуктора. Оно может быть не целым!
# Для большей точности советуем поворачивать ротор мотора до тех пор, пока вал редуктора не повернётся 10 раз, и разделить полученное значение на 10.
#
from pyiArduinoI2Cmotor import * # Подключаем библиотеку для работы с мотором I2C-flash.
mot = pyiArduinoI2Cmotor(0x09) # Объявляем объект mot для работы с функциями и методами библиотеки pyiArduinoI2Cmotor, указывая адрес модуля на шине I2C.
# Если объявить объект без указания адреса (mot = pyiArduinoI2Cmotor ), то адрес будет найден автоматически.
mot.begin() # Инициируем работу с мотором.
mot.delSum() # Сбрасываем количество совершённых оборотов вала.
mot.setReducer(1.0) # Указываем передаточное отношение редуктора как 1:1.
#mot.setMagnet(7) # Укажите реальное количество магнитных полюсов одной полярности, кольцевого магнита установленного на роторе мотора.
# Для определения этого значения воспользуйтесь примером библиотеки FindMagnet.
#
while True: #
print( mot.getSum(MOT_REV) ) # Выводим количество полных оборотов вала.
sleep(.2) #
| 0 | 0 | 0 |
64caf2af26a1109c0a92c0164fb189e1eb42f571 | 3,602 | py | Python | seqrep/utils.py | MIR-MU/seqrep | dbce45572af9c87b10d6f33f0e3a1b05cea77f86 | [
"MIT"
] | 9 | 2022-01-13T20:44:51.000Z | 2022-02-17T13:49:33.000Z | seqrep/utils.py | MIR-MU/seqrep | dbce45572af9c87b10d6f33f0e3a1b05cea77f86 | [
"MIT"
] | 1 | 2021-09-30T08:56:50.000Z | 2022-01-06T11:35:05.000Z | seqrep/utils.py | MIR-MU/seqrep | dbce45572af9c87b10d6f33f0e3a1b05cea77f86 | [
"MIT"
] | null | null | null | """
Utils
This file provides an implementation of helping classes and functions.
"""
import abc
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
try:
import cPickle as pickle
except ModuleNotFoundError:
import pickle
class Picklable:
    """
    Simple class for saving (and loading) functionality using pickle.
    """

    def _resolve_filename(self, name: str, concat: bool) -> str:
        # Shared filename resolution for save()/load(): default to the
        # class name, optionally prefix a given name with it.
        if name is None:
            return self.__class__.__name__
        if concat:
            return self.__class__.__name__ + "_" + name
        return name

    def save(self, name: str = None, concat: bool = False):
        """
        Save object using pickle.

        Parameters
        ----------
        name: str
            The filename for the saved object.
        concat: bool
            Whether to add the class name to the file name.
        """
        with open(self._resolve_filename(name, concat), "wb") as output:
            # -1 selects the highest available pickle protocol.
            pickle.dump(self, output, -1)

    def load(self, name: str = None, concat: bool = False):
        """
        Load object using pickle.

        Parameters
        ----------
        name: str
            The filename for the loaded object.
        concat: bool
            Whether to add the class name to the file name.

        Returns
        -------
        The unpickled object read from the resolved filename.
        """
        # Renamed the file handle: the original shadowed the builtin
        # ``input``; filename logic is also shared with save() now.
        with open(self._resolve_filename(name, concat), "rb") as handle:
            return pickle.load(handle)
class Visualizable(abc.ABC):
"""
A simple abstract class requiring the implementation of a visualize function.
"""
@abc.abstractmethod
def visualize(self):
"""
This function visualize the outputs or state of the object.
"""
raise NotImplementedError
def visualize_labels(
labels, title="Visualization of labels", mode: str = "lines"
) -> None:
"""
Plot labels.
Parameters
----------
title: str
Title of the plot.
mode: str
Determines the drawing mode for this scatter trace. If
the provided `mode` includes "text" then the `text`
elements appear at the coordinates. Otherwise, the
`text` elements appear on hover. If there are less than
20 points and the trace is not stacked then the default
is "lines+markers". Otherwise, "lines".
"""
if len(labels.shape) == 1:
labels = pd.DataFrame(labels, columns=["labels"])
fig = go.Figure()
fig.update_layout(title=title)
fig.update_yaxes(title_text="labels")
for i in range(labels.shape[1]):
fig.add_trace(
go.Scatter(
x=labels.index,
y=labels.iloc[:, i],
name=labels.columns[i],
mode=mode,
)
)
fig.show()
def visualize_data(
X, y, downprojector=None, title: str = "Visualization of data"
) -> None:
"""
Plot data in 2D.
Parameters
----------
X : iterable
Training data.
y : iterable
Training targets.
downprojector : callable, default=None
Data downprojection method for visualization.
title: str
Title of the plot.
"""
if downprojector is not None:
embedding = downprojector.fit_transform(X)
else:
embedding = X.iloc[:, :2].values
data = pd.DataFrame(embedding, columns=["X Value", "Y Value"], index=X.index)
data["Category"] = y
fig = px.scatter(
data,
x=data.columns[0],
y=data.columns[1],
color=data["Category"],
hover_name=data.index,
)
fig.update_layout(title=title)
fig.show()
| 24.337838 | 81 | 0.575236 | """
Utils
This file provides an implementation of helping classes and functions.
"""
import abc
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
try:
import cPickle as pickle
except ModuleNotFoundError:
import pickle
class Picklable:
"""
Simple class for saving (and loading) functionality using pickle.
"""
def save(self, name: str = None, concat: bool = False):
"""
Save object using pickle.
Parameters
----------
name: str
The filename for the saved object.
concat: bool
Whether to add the class name to the file name.
"""
if name is None:
name = self.__class__.__name__
if concat:
name = self.__class__.__name__ + "_" + name
with open(name, "wb") as output:
pickle.dump(self, output, -1)
def load(self, name: str = None, concat: bool = False):
"""
Load object using pickle.
Parameters
----------
name: str
The filename for the loaded object.
concat: bool
Whether to add the class name to the file name.
"""
if name is None:
name = self.__class__.__name__
if concat:
name = self.__class__.__name__ + "_" + name
with open(name, "rb") as input:
return pickle.load(input)
class Visualizable(abc.ABC):
"""
A simple abstract class requiring the implementation of a visualize function.
"""
@abc.abstractmethod
def visualize(self):
"""
This function visualize the outputs or state of the object.
"""
raise NotImplementedError
def visualize_labels(
labels, title="Visualization of labels", mode: str = "lines"
) -> None:
"""
Plot labels.
Parameters
----------
title: str
Title of the plot.
mode: str
Determines the drawing mode for this scatter trace. If
the provided `mode` includes "text" then the `text`
elements appear at the coordinates. Otherwise, the
`text` elements appear on hover. If there are less than
20 points and the trace is not stacked then the default
is "lines+markers". Otherwise, "lines".
"""
if len(labels.shape) == 1:
labels = pd.DataFrame(labels, columns=["labels"])
fig = go.Figure()
fig.update_layout(title=title)
fig.update_yaxes(title_text="labels")
for i in range(labels.shape[1]):
fig.add_trace(
go.Scatter(
x=labels.index,
y=labels.iloc[:, i],
name=labels.columns[i],
mode=mode,
)
)
fig.show()
def visualize_data(
X, y, downprojector=None, title: str = "Visualization of data"
) -> None:
"""
Plot data in 2D.
Parameters
----------
X : iterable
Training data.
y : iterable
Training targets.
downprojector : callable, default=None
Data downprojection method for visualization.
title: str
Title of the plot.
"""
if downprojector is not None:
embedding = downprojector.fit_transform(X)
else:
embedding = X.iloc[:, :2].values
data = pd.DataFrame(embedding, columns=["X Value", "Y Value"], index=X.index)
data["Category"] = y
fig = px.scatter(
data,
x=data.columns[0],
y=data.columns[1],
color=data["Category"],
hover_name=data.index,
)
fig.update_layout(title=title)
fig.show()
| 0 | 0 | 0 |
9cc5f17b33da001e38327d295fb357f33fec2ffb | 230 | py | Python | test/config_test.py | roadt/scrapybot | c4588f3d2c354a464f67f7199933775bc7a859e0 | [
"MIT"
] | null | null | null | test/config_test.py | roadt/scrapybot | c4588f3d2c354a464f67f7199933775bc7a859e0 | [
"MIT"
] | null | null | null | test/config_test.py | roadt/scrapybot | c4588f3d2c354a464f67f7199933775bc7a859e0 | [
"MIT"
] | 1 | 2015-09-11T05:59:42.000Z | 2015-09-11T05:59:42.000Z |
import os
import unittest
from test import fixture
from .config import yaml_config
| 11.5 | 40 | 0.695652 |
import os
import unittest
from test import fixture
from .config import yaml_config
class ConfigTestCase(unittest.TestCase):
def test_config(self):
cfg = yaml_config()
self.assertTrue('zdic' in cfg)
| 68 | 19 | 50 |
0b316cf907e087dd4432a553fcc2cdebb767c078 | 1,697 | py | Python | db_preparation/addDecoyDB.py | HKU-BAL/MegaPath-Nano | 55e0ac3687aaac35c012c71970a5966a60d7d380 | [
"BSD-3-Clause"
] | 13 | 2020-09-16T13:55:41.000Z | 2022-02-09T14:34:41.000Z | db_preparation/addDecoyDB.py | HKU-BAL/MegaPath-Nano | 55e0ac3687aaac35c012c71970a5966a60d7d380 | [
"BSD-3-Clause"
] | 1 | 2021-01-05T02:47:30.000Z | 2021-01-05T08:51:05.000Z | db_preparation/addDecoyDB.py | HKU-BAL/MegaPath-Nano | 55e0ac3687aaac35c012c71970a5966a60d7d380 | [
"BSD-3-Clause"
] | 3 | 2020-10-31T02:19:39.000Z | 2021-09-07T11:47:44.000Z | import argparse
from shutil import copyfile
import os
from Bio import SeqIO
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Add fasta to decoy db')
parser.add_argument('--decoy_fasta', required=True)
cwd=os.path.dirname(os.path.realpath(__file__))
NANO_DIR=os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
parser.add_argument('--assembly_dir', default=f'{NANO_DIR}/genomes' )
parser.add_argument('--config_folder', help='Config file folder', default=f'{NANO_DIR}/config' )
FLAGS = parser.parse_args()
decoy_name=os.path.basename(os.path.splitext(FLAGS.decoy_fasta)[0])
path= f'{FLAGS.assembly_dir}/refseq/plasmid/{decoy_name}.fa'
copyfile(FLAGS.decoy_fasta,path)
with open(f'{FLAGS.config_folder}/plasmid.genome_set' ,'a') as config_plasmid:
config_plasmid.write(f'{decoy_name}\n')
assemblyLengthWriter = open(f'{FLAGS.assembly_dir}/assembly_length' , 'a')
assemblyPathWriter = open(f'{FLAGS.assembly_dir}/assembly_path' , 'a')
assemblyTaxidWriter = open(f'{FLAGS.assembly_dir}/assembly_tax_id' , 'a')
sequenceSummaryWriter = open(f'{FLAGS.assembly_dir}/sequence_summary' , 'a')
totalLength=0
with open(path, 'rt') as fi:
for record in SeqIO.parse(fi, 'fasta'):
totalLength += len(record)
sequenceSummaryWriter.write(f"{record.id}\t{len(record)}\t{decoy_name}\n")
assemblyLengthWriter.write(f"{decoy_name}\t{totalLength}\n")
assemblyPathWriter.write(f"{decoy_name}\t{path}\n")
arbitrary_taxid='35'
assemblyTaxidWriter.write(f"{decoy_name}\t1000000099\t1000000099\t1000000001\t{arbitrary_taxid}\n")
| 48.485714 | 104 | 0.705362 | import argparse
from shutil import copyfile
import os
from Bio import SeqIO
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Add fasta to decoy db')
parser.add_argument('--decoy_fasta', required=True)
cwd=os.path.dirname(os.path.realpath(__file__))
NANO_DIR=os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
parser.add_argument('--assembly_dir', default=f'{NANO_DIR}/genomes' )
parser.add_argument('--config_folder', help='Config file folder', default=f'{NANO_DIR}/config' )
FLAGS = parser.parse_args()
decoy_name=os.path.basename(os.path.splitext(FLAGS.decoy_fasta)[0])
path= f'{FLAGS.assembly_dir}/refseq/plasmid/{decoy_name}.fa'
copyfile(FLAGS.decoy_fasta,path)
with open(f'{FLAGS.config_folder}/plasmid.genome_set' ,'a') as config_plasmid:
config_plasmid.write(f'{decoy_name}\n')
assemblyLengthWriter = open(f'{FLAGS.assembly_dir}/assembly_length' , 'a')
assemblyPathWriter = open(f'{FLAGS.assembly_dir}/assembly_path' , 'a')
assemblyTaxidWriter = open(f'{FLAGS.assembly_dir}/assembly_tax_id' , 'a')
sequenceSummaryWriter = open(f'{FLAGS.assembly_dir}/sequence_summary' , 'a')
totalLength=0
with open(path, 'rt') as fi:
for record in SeqIO.parse(fi, 'fasta'):
totalLength += len(record)
sequenceSummaryWriter.write(f"{record.id}\t{len(record)}\t{decoy_name}\n")
assemblyLengthWriter.write(f"{decoy_name}\t{totalLength}\n")
assemblyPathWriter.write(f"{decoy_name}\t{path}\n")
arbitrary_taxid='35'
assemblyTaxidWriter.write(f"{decoy_name}\t1000000099\t1000000099\t1000000001\t{arbitrary_taxid}\n")
| 0 | 0 | 0 |
8350dbae96089f704886f9f722cfa49600a22df8 | 176 | py | Python | tarn/__init__.py | neuro-ml/tarn | 07bf61d5ebec69cd5f44ef4766b774769281613f | [
"Apache-2.0"
] | null | null | null | tarn/__init__.py | neuro-ml/tarn | 07bf61d5ebec69cd5f44ef4766b774769281613f | [
"Apache-2.0"
] | null | null | null | tarn/__init__.py | neuro-ml/tarn | 07bf61d5ebec69cd5f44ef4766b774769281613f | [
"Apache-2.0"
] | null | null | null | from .__version__ import __version__
from .interface import WriteError, RemoteStorage, StorageLevel
from .exceptions import *
from .local import *
from .ssh import SSHLocation
| 29.333333 | 62 | 0.823864 | from .__version__ import __version__
from .interface import WriteError, RemoteStorage, StorageLevel
from .exceptions import *
from .local import *
from .ssh import SSHLocation
| 0 | 0 | 0 |
519396f2d83a92e220252a97680e8da6731a2521 | 113 | py | Python | megashop/urls.py | EugeniyMalakhov/megashara | 50ee8d7c73447fcbeac22483f2659032fc213176 | [
"MIT"
] | null | null | null | megashop/urls.py | EugeniyMalakhov/megashara | 50ee8d7c73447fcbeac22483f2659032fc213176 | [
"MIT"
] | null | null | null | megashop/urls.py | EugeniyMalakhov/megashara | 50ee8d7c73447fcbeac22483f2659032fc213176 | [
"MIT"
] | null | null | null | from django.conf.urls import url, include
urlpatterns = [
url(r'^$', 'megashop.views.home', name='home'),
]
| 18.833333 | 51 | 0.654867 | from django.conf.urls import url, include
urlpatterns = [
url(r'^$', 'megashop.views.home', name='home'),
]
| 0 | 0 | 0 |
60a39e06a5e70ee773808a42c6ba191ecc311416 | 269 | py | Python | finapp-be/config/config.py | mauroseb/finapp-be | a9b696bf1505e0975a273360ebc03228940ca2bf | [
"MIT"
] | null | null | null | finapp-be/config/config.py | mauroseb/finapp-be | a9b696bf1505e0975a273360ebc03228940ca2bf | [
"MIT"
] | null | null | null | finapp-be/config/config.py | mauroseb/finapp-be | a9b696bf1505e0975a273360ebc03228940ca2bf | [
"MIT"
] | null | null | null | class Config(object):
"""Config class for quick test"""
DEBUG = False
TICKER_LIST = ['AMZN', 'DIS', 'FOX', 'GE', 'GILD', 'GOOGL', 'HPE', 'HLT', 'HFC', 'INFO', 'IBM', 'JPM', 'LKQ', 'MSFT', 'MSCI', 'NDAQ', 'NFLX', 'NVDA', 'PLTR', 'RH', 'SPCE', 'YUM', 'ZTS']
| 44.833333 | 189 | 0.520446 | class Config(object):
"""Config class for quick test"""
DEBUG = False
TICKER_LIST = ['AMZN', 'DIS', 'FOX', 'GE', 'GILD', 'GOOGL', 'HPE', 'HLT', 'HFC', 'INFO', 'IBM', 'JPM', 'LKQ', 'MSFT', 'MSCI', 'NDAQ', 'NFLX', 'NVDA', 'PLTR', 'RH', 'SPCE', 'YUM', 'ZTS']
| 0 | 0 | 0 |
2ed6ffc3d7cd8d78362ae851e58d4a8706ff894b | 849 | py | Python | 2015/06/fc_2015_06_09.py | mfwarren/FreeCoding | 58ac87f35ad2004a3514782556762ee0ed72c39a | [
"MIT"
] | null | null | null | 2015/06/fc_2015_06_09.py | mfwarren/FreeCoding | 58ac87f35ad2004a3514782556762ee0ed72c39a | [
"MIT"
] | 1 | 2015-04-27T01:43:45.000Z | 2015-04-27T01:43:45.000Z | 2015/06/fc_2015_06_09.py | mfwarren/FreeCoding | 58ac87f35ad2004a3514782556762ee0ed72c39a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# imports go here
import time as time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.cluster import AgglomerativeClustering
#
# Free Coding session for 2015-06-09
# Written by Matt Warren
#
lena = sp.misc.lena()
X = np.reshape(lena, (-1, 1))
connectivity = grid_to_graph(*lena.shape)
st = time.time()
n_clusters = 15 # number of regions
ward = AgglomerativeClustering(n_clusters=n_clusters, linkage='ward', connectivity=connectivity).fit(X)
label = np.reshape(ward.labels_, lena.shape)
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(n_clusters):
plt.contour(label == l, contours=1,
colors=[plt.cm.spectral(l / float(n_clusters)), ])
plt.xticks(())
plt.yticks(())
plt.show()
| 24.970588 | 103 | 0.730271 | #!/usr/bin/env python3
# imports go here
import time as time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.cluster import AgglomerativeClustering
#
# Free Coding session for 2015-06-09
# Written by Matt Warren
#
lena = sp.misc.lena()
X = np.reshape(lena, (-1, 1))
connectivity = grid_to_graph(*lena.shape)
st = time.time()
n_clusters = 15 # number of regions
ward = AgglomerativeClustering(n_clusters=n_clusters, linkage='ward', connectivity=connectivity).fit(X)
label = np.reshape(ward.labels_, lena.shape)
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(n_clusters):
plt.contour(label == l, contours=1,
colors=[plt.cm.spectral(l / float(n_clusters)), ])
plt.xticks(())
plt.yticks(())
plt.show()
| 0 | 0 | 0 |
91136de8a3b063093fe1b10e44cc7f4385cdeb2c | 12,493 | py | Python | tests/app/dao/test_organisation_dao.py | alphagov/notify-notifications-api | e604385e0cf4c2ab8c6451b7120ceb196cce21b5 | [
"MIT"
] | 51 | 2016-04-03T23:36:17.000Z | 2022-03-21T20:04:52.000Z | tests/app/dao/test_organisation_dao.py | alphagov/notify-notifications-api | e604385e0cf4c2ab8c6451b7120ceb196cce21b5 | [
"MIT"
] | 1,335 | 2015-12-15T14:28:50.000Z | 2022-03-30T16:24:27.000Z | tests/app/dao/test_organisation_dao.py | alphagov/notify-notifications-api | e604385e0cf4c2ab8c6451b7120ceb196cce21b5 | [
"MIT"
] | 30 | 2016-01-08T19:05:32.000Z | 2021-12-20T16:37:23.000Z | import datetime
import uuid
import pytest
from sqlalchemy.exc import IntegrityError, SQLAlchemyError
from app import db
from app.dao.organisation_dao import (
dao_add_service_to_organisation,
dao_add_user_to_organisation,
dao_get_organisation_by_email_address,
dao_get_organisation_by_id,
dao_get_organisation_by_service_id,
dao_get_organisation_services,
dao_get_organisations,
dao_get_users_for_organisation,
dao_update_organisation,
)
from app.models import Organisation, Service
from tests.app.db import (
create_domain,
create_email_branding,
create_letter_branding,
create_organisation,
create_service,
create_user,
)
@pytest.mark.parametrize('domain_list, expected_domains', (
(['abc', 'def'], {'abc', 'def'}),
(['ABC', 'DEF'], {'abc', 'def'}),
([], set()),
(None, {'123', '456'}),
pytest.param(
['abc', 'ABC'], {'abc'},
marks=pytest.mark.xfail(raises=IntegrityError)
),
))
@pytest.mark.parametrize('domain, expected_org', (
('unknown.gov.uk', False),
('example.gov.uk', True),
))
| 34.606648 | 106 | 0.780437 | import datetime
import uuid
import pytest
from sqlalchemy.exc import IntegrityError, SQLAlchemyError
from app import db
from app.dao.organisation_dao import (
dao_add_service_to_organisation,
dao_add_user_to_organisation,
dao_get_organisation_by_email_address,
dao_get_organisation_by_id,
dao_get_organisation_by_service_id,
dao_get_organisation_services,
dao_get_organisations,
dao_get_users_for_organisation,
dao_update_organisation,
)
from app.models import Organisation, Service
from tests.app.db import (
create_domain,
create_email_branding,
create_letter_branding,
create_organisation,
create_service,
create_user,
)
def test_get_organisations_gets_all_organisations_alphabetically_with_active_organisations_first(
notify_db_session
):
m_active_org = create_organisation(name='m_active_organisation')
z_inactive_org = create_organisation(name='z_inactive_organisation', active=False)
a_inactive_org = create_organisation(name='a_inactive_organisation', active=False)
z_active_org = create_organisation(name='z_active_organisation')
a_active_org = create_organisation(name='a_active_organisation')
organisations = dao_get_organisations()
assert len(organisations) == 5
assert organisations[0] == a_active_org
assert organisations[1] == m_active_org
assert organisations[2] == z_active_org
assert organisations[3] == a_inactive_org
assert organisations[4] == z_inactive_org
def test_get_organisation_by_id_gets_correct_organisation(notify_db_session):
organisation = create_organisation()
organisation_from_db = dao_get_organisation_by_id(organisation.id)
assert organisation_from_db == organisation
def test_update_organisation(notify_db_session):
create_organisation()
organisation = Organisation.query.one()
user = create_user()
email_branding = create_email_branding()
letter_branding = create_letter_branding()
data = {
'name': 'new name',
"crown": True,
"organisation_type": 'local',
"agreement_signed": True,
"agreement_signed_at": datetime.datetime.utcnow(),
"agreement_signed_by_id": user.id,
"agreement_signed_version": 999.99,
"letter_branding_id": letter_branding.id,
"email_branding_id": email_branding.id,
}
for attribute, value in data.items():
assert getattr(organisation, attribute) != value
assert organisation.updated_at is None
dao_update_organisation(organisation.id, **data)
organisation = Organisation.query.one()
for attribute, value in data.items():
assert getattr(organisation, attribute) == value
assert organisation.updated_at
@pytest.mark.parametrize('domain_list, expected_domains', (
(['abc', 'def'], {'abc', 'def'}),
(['ABC', 'DEF'], {'abc', 'def'}),
([], set()),
(None, {'123', '456'}),
pytest.param(
['abc', 'ABC'], {'abc'},
marks=pytest.mark.xfail(raises=IntegrityError)
),
))
def test_update_organisation_domains_lowercases(
notify_db_session,
domain_list,
expected_domains,
):
create_organisation()
organisation = Organisation.query.one()
# Seed some domains
dao_update_organisation(organisation.id, domains=['123', '456'])
# This should overwrite the seeded domains
dao_update_organisation(organisation.id, domains=domain_list)
assert {domain.domain for domain in organisation.domains} == expected_domains
def test_update_organisation_does_not_update_the_service_if_certain_attributes_not_provided(
sample_service,
sample_organisation,
):
email_branding = create_email_branding()
letter_branding = create_letter_branding()
sample_service.organisation_type = 'local'
sample_organisation.organisation_type = 'central'
sample_organisation.email_branding = email_branding
sample_organisation.letter_branding = letter_branding
sample_organisation.services.append(sample_service)
db.session.commit()
assert sample_organisation.name == 'sample organisation'
dao_update_organisation(sample_organisation.id, name='updated org name')
assert sample_organisation.name == 'updated org name'
assert sample_organisation.organisation_type == 'central'
assert sample_service.organisation_type == 'local'
assert sample_organisation.email_branding == email_branding
assert sample_service.email_branding is None
assert sample_organisation.letter_branding == letter_branding
assert sample_service.letter_branding is None
def test_update_organisation_updates_the_service_org_type_if_org_type_is_provided(
sample_service,
sample_organisation,
):
sample_service.organisation_type = 'local'
sample_organisation.organisation_type = 'local'
sample_organisation.services.append(sample_service)
db.session.commit()
dao_update_organisation(sample_organisation.id, organisation_type='central')
assert sample_organisation.organisation_type == 'central'
assert sample_service.organisation_type == 'central'
assert Service.get_history_model().query.filter_by(
id=sample_service.id,
version=2
).one().organisation_type == 'central'
def test_update_organisation_updates_the_service_branding_if_branding_is_provided(
sample_service,
sample_organisation,
):
email_branding = create_email_branding()
letter_branding = create_letter_branding()
sample_organisation.services.append(sample_service)
db.session.commit()
dao_update_organisation(sample_organisation.id, email_branding_id=email_branding.id)
dao_update_organisation(sample_organisation.id, letter_branding_id=letter_branding.id)
assert sample_organisation.email_branding == email_branding
assert sample_organisation.letter_branding == letter_branding
assert sample_service.email_branding == email_branding
assert sample_service.letter_branding == letter_branding
def test_update_organisation_does_not_override_service_branding(
sample_service,
sample_organisation,
):
email_branding = create_email_branding()
custom_email_branding = create_email_branding(name='custom')
letter_branding = create_letter_branding()
custom_letter_branding = create_letter_branding(name='custom', filename='custom')
sample_service.email_branding = custom_email_branding
sample_service.letter_branding = custom_letter_branding
sample_organisation.services.append(sample_service)
db.session.commit()
dao_update_organisation(sample_organisation.id, email_branding_id=email_branding.id)
dao_update_organisation(sample_organisation.id, letter_branding_id=letter_branding.id)
assert sample_organisation.email_branding == email_branding
assert sample_organisation.letter_branding == letter_branding
assert sample_service.email_branding == custom_email_branding
assert sample_service.letter_branding == custom_letter_branding
def test_update_organisation_updates_services_with_new_crown_type(
    sample_service,
    sample_organisation
):
    """Changing the organisation's crown flag should propagate to its services."""
    sample_organisation.services.append(sample_service)
    db.session.commit()
    assert Service.query.get(sample_service.id).crown
    dao_update_organisation(sample_organisation.id, crown=False)
    assert not Service.query.get(sample_service.id).crown
def test_add_service_to_organisation(sample_service, sample_organisation):
    """Adding a service copies the organisation's type and crown onto the service."""
    assert sample_organisation.services == []
    sample_service.organisation_type = "central"
    sample_organisation.organisation_type = "local"
    sample_organisation.crown = False
    dao_add_service_to_organisation(sample_service, sample_organisation.id)
    assert len(sample_organisation.services) == 1
    assert sample_organisation.services[0].id == sample_service.id
    assert sample_service.organisation_type == sample_organisation.organisation_type
    assert sample_service.crown == sample_organisation.crown
    # A new history row (version 2) is written carrying the inherited org type.
    assert Service.get_history_model().query.filter_by(
        id=sample_service.id,
        version=2
    ).one().organisation_type == sample_organisation.organisation_type
    assert sample_service.organisation_id == sample_organisation.id
def test_get_organisation_services(sample_service, sample_organisation):
    """Only services attached to the queried organisation are returned."""
    another_service = create_service(service_name='service 2')
    another_org = create_organisation()
    dao_add_service_to_organisation(sample_service, sample_organisation.id)
    dao_add_service_to_organisation(another_service, sample_organisation.id)
    org_services = dao_get_organisation_services(sample_organisation.id)
    other_org_services = dao_get_organisation_services(another_org.id)
    assert [sample_service.name, another_service.name] == sorted([s.name for s in org_services])
    # The other organisation has no services attached.
    assert not other_org_services
def test_get_organisation_by_service_id(sample_service, sample_organisation):
    """Each service resolves to exactly the organisation it was added to."""
    another_service = create_service(service_name='service 2')
    another_org = create_organisation()
    dao_add_service_to_organisation(sample_service, sample_organisation.id)
    dao_add_service_to_organisation(another_service, another_org.id)
    organisation_1 = dao_get_organisation_by_service_id(sample_service.id)
    organisation_2 = dao_get_organisation_by_service_id(another_service.id)
    assert organisation_1 == sample_organisation
    assert organisation_2 == another_org
def test_dao_get_users_for_organisation(sample_organisation):
    """All users added to the organisation are returned, in a deterministic order."""
    first = create_user(email='first@invited.com')
    second = create_user(email='another@invited.com')
    dao_add_user_to_organisation(organisation_id=sample_organisation.id, user_id=first.id)
    dao_add_user_to_organisation(organisation_id=sample_organisation.id, user_id=second.id)
    results = dao_get_users_for_organisation(organisation_id=sample_organisation.id)
    assert len(results) == 2
    assert results[0] == first
    assert results[1] == second
def test_dao_get_users_for_organisation_returns_empty_list(sample_organisation):
    """An organisation with no members yields an empty result set."""
    results = dao_get_users_for_organisation(organisation_id=sample_organisation.id)
    assert len(results) == 0
def test_dao_get_users_for_organisation_only_returns_active_users(sample_organisation):
    """Users whose state is 'inactive' are filtered out of the member list."""
    first = create_user(email='first@invited.com')
    second = create_user(email='another@invited.com')
    dao_add_user_to_organisation(organisation_id=sample_organisation.id, user_id=first.id)
    dao_add_user_to_organisation(organisation_id=sample_organisation.id, user_id=second.id)
    # Deactivate the second user before querying.
    second.state = 'inactive'
    results = dao_get_users_for_organisation(organisation_id=sample_organisation.id)
    assert len(results) == 1
    assert results[0] == first
def test_add_user_to_organisation_returns_user(sample_organisation):
    """The DAO returns the user object with the new membership attached."""
    org_user = create_user()
    assert not org_user.organisations
    added_user = dao_add_user_to_organisation(organisation_id=sample_organisation.id, user_id=org_user.id)
    assert len(added_user.organisations) == 1
    assert added_user.organisations[0] == sample_organisation
def test_add_user_to_organisation_when_user_does_not_exist(sample_organisation):
    """A random, unknown user id raises a SQLAlchemy error."""
    with pytest.raises(expected_exception=SQLAlchemyError):
        dao_add_user_to_organisation(organisation_id=sample_organisation.id, user_id=uuid.uuid4())
def test_add_user_to_organisation_when_organisation_does_not_exist(sample_user):
    """A random, unknown organisation id raises a SQLAlchemy error."""
    with pytest.raises(expected_exception=SQLAlchemyError):
        dao_add_user_to_organisation(organisation_id=uuid.uuid4(), user_id=sample_user.id)
@pytest.mark.parametrize('domain, expected_org', (
    ('unknown.gov.uk', False),
    ('example.gov.uk', True),
))
def test_get_organisation_by_email_address(
    domain,
    expected_org,
    notify_db_session
):
    """Look an organisation up by the domain part of an email address."""
    org = create_organisation()
    create_domain('example.gov.uk', org.id)
    create_domain('test.gov.uk', org.id)
    # A second organisation with different domains must not interfere.
    another_org = create_organisation(name='Another')
    create_domain('cabinet-office.gov.uk', another_org.id)
    create_domain('cabinetoffice.gov.uk', another_org.id)
    found_org = dao_get_organisation_by_email_address('test@{}'.format(domain))
    if expected_org:
        assert found_org is org
    else:
        assert found_org is None
def test_get_organisation_by_email_address_ignores_gsi_gov_uk(notify_db_session):
    """A legacy `.gsi.gov.uk` address should match the bare `.gov.uk` domain."""
    org = create_organisation()
    create_domain('example.gov.uk', org.id)
    found_org = dao_get_organisation_by_email_address('test_gsi_address@example.gsi.gov.uk')
    assert org == found_org
| 10,915 | 0 | 458 |
6cd25eacec4bbf34b5da320753489717c6c1b39b | 649 | py | Python | Action/interpreter.py | lpdswing/DesignPatternsPython | 6f4b8416b0624ecda0cca3dadd306f938da9a118 | [
"Apache-2.0"
] | 3 | 2020-07-21T11:43:22.000Z | 2020-07-21T11:56:50.000Z | Action/interpreter.py | lpdswing/DesignPatternsPython | 6f4b8416b0624ecda0cca3dadd306f938da9a118 | [
"Apache-2.0"
] | null | null | null | Action/interpreter.py | lpdswing/DesignPatternsPython | 6f4b8416b0624ecda0cca3dadd306f938da9a118 | [
"Apache-2.0"
] | null | null | null | # 解释器模式
class AbstractExpression:
'''抽象解释器'''
class TerminalExpression(AbstractExpression):
'''继承抽象解释器,具体解释器终端'''
if __name__ == '__main__':
context = Context()
context.name = 'alex'
arrs = [NotTerminalExpression(),TerminalExpression(),TerminalExpression()]
for entry in arrs:
entry.interpreter(context)
| 19.088235 | 78 | 0.659476 | # 解释器模式
class AbstractExpression:
    '''Abstract expression: the common interpreter interface.'''
    def interpreter(self, context):
        # Subclasses override this to interpret the given context.
        pass
class TerminalExpression(AbstractExpression):
    '''Concrete terminal interpreter, inheriting the abstract interpreter.'''
    def interpreter(self, context):
        # Message text is user-facing output and intentionally left as-is.
        print(f'终端解释器:{context}')
class NotTerminalExpression(AbstractExpression):
    '''Concrete non-terminal interpreter.'''
    def interpreter(self, context):
        # Message text is user-facing output and intentionally left as-is.
        print(f'非终端解释器:{context}')
class Context:
    '''Shared state passed to every expression's interpreter.'''
    def __init__(self):
        # Starts empty; callers assign a meaningful name before interpretation.
        self.name = ''
if __name__ == '__main__':
    # Demo: run one non-terminal and two terminal interpreters over one context.
    context = Context()
    context.name = 'alex'
    arrs = [NotTerminalExpression(),TerminalExpression(),TerminalExpression()]
    for entry in arrs:
        entry.interpreter(context)
91253aa69a78411799ddedcfc052a48d3b3e5bc0 | 190 | py | Python | src/sweetrpg_db/exceptions.py | sweetrpg/db | efd4fbea8eeb387b3ab6cb5cf2581cdb9c454b90 | [
"MIT"
] | null | null | null | src/sweetrpg_db/exceptions.py | sweetrpg/db | efd4fbea8eeb387b3ab6cb5cf2581cdb9c454b90 | [
"MIT"
] | 11 | 2021-09-27T05:02:27.000Z | 2022-03-21T04:21:58.000Z | src/sweetrpg_db/exceptions.py | sweetrpg/db | efd4fbea8eeb387b3ab6cb5cf2581cdb9c454b90 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
__author__ = "Paul Schifferer <dm@sweetrpg.com>"
"""Common exceptions.
"""
class ObjectNotFound(Exception):
    """An exception for objects not found."""
    # Marker exception; carries no extra state beyond the message.
    pass
| 17.272727 | 48 | 0.652632 | # -*- coding: utf-8 -*-
__author__ = "Paul Schifferer <dm@sweetrpg.com>"
"""Common exceptions.
"""
class ObjectNotFound(Exception):
    """Raised when a requested object cannot be found."""
| 0 | 0 | 0 |
4b616493893bbac3330367b82b1e9dc10fbe6c37 | 415 | py | Python | python/sprint0/c.py | Chiga2030/algorithms-templates | 6e0ee665e2420c77a3d56d153f67dcfab60be622 | [
"MIT"
] | 38 | 2021-09-14T08:22:32.000Z | 2022-03-15T08:52:20.000Z | python/sprint0/c.py | Chiga2030/algorithms-templates | 6e0ee665e2420c77a3d56d153f67dcfab60be622 | [
"MIT"
] | null | null | null | python/sprint0/c.py | Chiga2030/algorithms-templates | 6e0ee665e2420c77a3d56d153f67dcfab60be622 | [
"MIT"
] | 38 | 2021-10-04T14:06:35.000Z | 2022-03-31T12:24:04.000Z | from typing import List, Tuple
arr, window_size = read_input()
print(" ".join(map(str, moving_average(arr, window_size))))
| 27.666667 | 68 | 0.66988 | from typing import List, Tuple
def moving_average(arr: List[int], window_size: int) -> List[float]:
    """Return the simple moving average of each ``window_size``-long window of *arr*.

    The original template left this as a ``pass`` stub, so the caller below
    crashed mapping over ``None``. A running window sum is maintained so the
    whole pass is O(len(arr)) instead of re-summing every window.

    :param arr: sequence of integers to average over
    :param window_size: size of the sliding window (must be positive)
    :return: list of window averages; empty if the window does not fit
    """
    if window_size <= 0 or window_size > len(arr):
        return []
    window_sum = sum(arr[:window_size])
    averages = [window_sum / window_size]
    for i in range(window_size, len(arr)):
        # Slide the window: add the entering element, drop the leaving one.
        window_sum += arr[i] - arr[i - window_size]
        averages.append(window_sum / window_size)
    return averages
def read_input() -> Tuple[List[int], int]:
    """Read the array and the window size from stdin.

    Input format: array length on line 1, the array on line 2,
    the window size on line 3.
    """
    n = int(input())  # array length; read to consume the line, not used afterwards
    arr = list(map(int, input().strip().split()))
    window_size = int(input())
    return arr, window_size
# Read the input, compute the moving averages and print them space-separated.
arr, window_size = read_input()
print(" ".join(map(str, moving_average(arr, window_size))))
| 273 | 0 | 46 |
eb5de1b5d6b00b937a8eeb7d9de731867c138ffe | 14,779 | py | Python | simpa/core/simulation_modules/optical_simulation_module/optical_forward_model_mcx_adapter.py | jgroehl/simpa | e56f0802e5a8555ee8bb139dd4f776025e7e9267 | [
"MIT"
] | 1 | 2021-11-12T22:45:06.000Z | 2021-11-12T22:45:06.000Z | simpa/core/simulation_modules/optical_simulation_module/optical_forward_model_mcx_adapter.py | jgroehl/simpa | e56f0802e5a8555ee8bb139dd4f776025e7e9267 | [
"MIT"
] | null | null | null | simpa/core/simulation_modules/optical_simulation_module/optical_forward_model_mcx_adapter.py | jgroehl/simpa | e56f0802e5a8555ee8bb139dd4f776025e7e9267 | [
"MIT"
] | null | null | null | # SPDX-FileCopyrightText: 2021 Division of Intelligent Medical Systems, DKFZ
# SPDX-FileCopyrightText: 2021 Janek Groehl
# SPDX-License-Identifier: MIT
import numpy as np
import struct
import subprocess
from simpa.utils import Tags, Settings
from simpa.core.simulation_modules.optical_simulation_module import OpticalForwardModuleBase
from simpa.core.device_digital_twins import PhotoacousticDevice
from simpa.core.device_digital_twins.illumination_geometries.illumination_geometry_base import IlluminationGeometryBase
import json
import os
import gc
from typing import List, Dict, Tuple, Union
class MCXAdapter(OpticalForwardModuleBase):
"""
This class implements a bridge to the mcx framework to integrate mcx into SIMPA. This adapter only allows for
computation of fluence, for computations of diffuse reflectance, take a look at `simpa.ReflectanceMcxAdapter`
.. note::
MCX is a GPU-enabled Monte-Carlo model simulation of photon transport in tissue:
Fang, Qianqian, and David A. Boas. "Monte Carlo simulation of photon migration in 3D
turbid media accelerated by graphics processing units."
Optics express 17.22 (2009): 20178-20190.
"""
    def __init__(self, global_settings: Settings):
        """
        initializes MCX-specific configuration and clean-up instances

        :param global_settings: global settings used during simulations
        """
        super(MCXAdapter, self).__init__(global_settings=global_settings)
        self.mcx_json_config_file = None  # path of the generated MCX .json config
        self.mcx_volumetric_data_file = None  # path of the MCX .mc2 output, set in get_mcx_settings
        self.frames = None  # number of simulated time frames, computed in get_mcx_settings
        self.mcx_output_suffixes = {'mcx_volumetric_data_file': '.mc2'}  # attribute name -> output file suffix
    def define_settings_and_execute(self,
                                    _illumination_geometry,
                                    _assumed_anisotropy):
        """
        Defines the settings and runs mcx for a single illumination geometry.

        :param _illumination_geometry: illumination geometry to simulate
        :param _assumed_anisotropy: anisotropy value assumed for the whole volume
        :return: the fluence volume read back from the MCX output
        """
        settings_dict = self.get_mcx_settings(illumination_geometry=_illumination_geometry,
                                              assumed_anisotropy=_assumed_anisotropy)
        self.generate_mcx_json_input(settings_dict=settings_dict)
        # run the simulation
        cmd = self.get_command()
        self.run_mcx(cmd)
        # Read output
        return self.read_mcx_output()[Tags.DATA_FIELD_FLUENCE]
    def forward_model(self,
                      absorption_cm: np.ndarray,
                      scattering_cm: np.ndarray,
                      anisotropy: np.ndarray,
                      illumination_geometry: Union[IlluminationGeometryBase, PhotoacousticDevice]) -> Dict:
        """
        runs the MCX simulations. Binary file containing scattering and absorption volumes is temporarily created as
        input for MCX. A JSON serializable file containing the configuration required by MCX is also generated.
        The set of flags parsed to MCX is built based on the Tags declared in `self.component_settings`, the results
        from MCX are used to populate an instance of Dict and returned.

        :param absorption_cm: array containing the absorption of the tissue in `cm` units
        :param scattering_cm: array containing the scattering of the tissue in `cm` units
        :param anisotropy: array containing the anisotropy of the volume defined by `absorption_cm` and `scattering_cm`
        :param illumination_geometry: an instance of `IlluminationGeometryBase` defining the illumination geometry,
            or a list of such instances (fluences are then averaged over all of them)
        :return: `Dict` containing the results of optical simulations, the keys in this dictionary-like object
            depend on the Tags defined in `self.component_settings`
        """
        if Tags.MCX_ASSUMED_ANISOTROPY in self.component_settings:
            _assumed_anisotropy = self.component_settings[Tags.MCX_ASSUMED_ANISOTROPY]
        else:
            # Fallback anisotropy used when none is configured.
            _assumed_anisotropy = 0.9
        self.generate_mcx_bin_input(absorption_cm=absorption_cm,
                                    scattering_cm=scattering_cm,
                                    anisotropy=anisotropy,
                                    assumed_anisotropy=_assumed_anisotropy)
        fluence = None
        if isinstance(illumination_geometry, list):
            # per convention this list has at least two elements
            fluence = self.define_settings_and_execute(illumination_geometry[0], _assumed_anisotropy)
            for idx in range(1, len(illumination_geometry)):
                # we already looked at the 0th element, so go from 1 to n-1
                fluence += self.define_settings_and_execute(illumination_geometry[idx], _assumed_anisotropy)
            # Average the accumulated fluence over all illumination geometries.
            fluence = fluence / len(illumination_geometry)
        else:
            fluence = self.define_settings_and_execute(illumination_geometry, _assumed_anisotropy)
        # Drop struct's internal format cache built up by the large pack/unpack calls.
        struct._clearcache()
        # clean temporary files
        self.remove_mcx_output()
        return {Tags.DATA_FIELD_FLUENCE: fluence}
    def generate_mcx_json_input(self, settings_dict: Dict) -> None:
        """
        generates JSON serializable file with settings needed by MCX to run simulations.

        :param settings_dict: dictionary to be saved as .json
        :return: None
        """
        tmp_json_filename = self.global_settings[Tags.SIMULATION_PATH] + "/" + \
                            self.global_settings[Tags.VOLUME_NAME] + ".json"
        self.mcx_json_config_file = tmp_json_filename
        # Register the config file so remove_mcx_output() can delete it afterwards.
        self.temporary_output_files.append(tmp_json_filename)
        with open(tmp_json_filename, "w") as json_file:
            json.dump(settings_dict, json_file, indent="\t")
def get_mcx_settings(self,
illumination_geometry: IlluminationGeometryBase,
assumed_anisotropy: np.ndarray,
**kwargs) -> Dict:
"""
generates MCX-specific settings for simulations based on Tags in `self.global_settings` and
`self.component_settings` . Among others, it defines the volume type, dimensions and path to binary file.
:param illumination_geometry: and instance of `IlluminationGeometryBase` defining the illumination geometry
:param assumed_anisotropy:
:param kwargs: dummy, used for class inheritance
:return: dictionary with settings to be used by MCX
"""
mcx_volumetric_data_file = self.global_settings[Tags.SIMULATION_PATH] + "/" + \
self.global_settings[Tags.VOLUME_NAME] + "_output"
for name, suffix in self.mcx_output_suffixes.items():
self.__setattr__(name, mcx_volumetric_data_file + suffix)
self.temporary_output_files.append(mcx_volumetric_data_file + suffix)
if Tags.TIME_STEP and Tags.TOTAL_TIME in self.component_settings:
dt = self.component_settings[Tags.TIME_STEP]
time = self.component_settings[Tags.TOTAL_TIME]
else:
time = 5e-09
dt = 5e-09
self.frames = int(time / dt)
source = illumination_geometry.get_mcx_illuminator_definition(self.global_settings)
settings_dict = {
"Session": {
"ID": mcx_volumetric_data_file,
"DoAutoThread": 1,
"Photons": self.component_settings[Tags.OPTICAL_MODEL_NUMBER_PHOTONS],
"DoMismatch": 0
},
"Forward": {
"T0": 0,
"T1": time,
"Dt": dt
},
"Optode": {
"Source": source
},
"Domain": {
"OriginType": 0,
"LengthUnit": self.global_settings[Tags.SPACING_MM],
"Media": [
{
"mua": 0,
"mus": 0,
"g": 1,
"n": 1
},
{
"mua": 1,
"mus": 1,
"g": assumed_anisotropy,
"n": 1
}
],
"MediaFormat": "muamus_float",
"Dim": [self.nx, self.ny, self.nz],
"VolumeFile": self.global_settings[Tags.SIMULATION_PATH] + "/" +
self.global_settings[Tags.VOLUME_NAME] + ".bin"
}}
if Tags.MCX_SEED not in self.component_settings:
if Tags.RANDOM_SEED in self.global_settings:
settings_dict["Session"]["RNGSeed"] = self.global_settings[Tags.RANDOM_SEED]
else:
settings_dict["Session"]["RNGSeed"] = self.component_settings[Tags.MCX_SEED]
return settings_dict
def get_command(self) -> List:
"""
generates list of commands to be parse to MCX in a subprocess
:return: list of MCX commands
"""
cmd = list()
cmd.append(self.component_settings[Tags.OPTICAL_MODEL_BINARY_PATH])
cmd.append("-f")
cmd.append(self.mcx_json_config_file)
cmd.append("-O")
cmd.append("F")
return cmd
@staticmethod
def run_mcx(cmd: List) -> None:
"""
runs subprocess calling MCX with the flags built with `self.get_command`. Rises a `RuntimeError` if the code
exit of the subprocess is not 0.
:param cmd: list defining command to parse to `subprocess.run`
:return: None
"""
results = None
try:
results = subprocess.run(cmd)
except:
raise RuntimeError(f"MCX failed to run: {cmd}, results: {results}")
    def generate_mcx_bin_input(self,
                               absorption_cm: np.ndarray,
                               scattering_cm: np.ndarray,
                               anisotropy: np.ndarray,
                               assumed_anisotropy: np.ndarray) -> None:
        """
        generates binary file containing volume scattering and absorption as input for MCX

        :param absorption_cm: Absorption in units of per centimeter
        :param scattering_cm: Scattering in units of per centimeter
        :param anisotropy: Dimensionless scattering anisotropy
        :param assumed_anisotropy: single anisotropy value assumed for the whole volume
        :return: None
        """
        absorption_mm, scattering_mm = self.pre_process_volumes(**{'absorption_cm': absorption_cm,
                                                                   'scattering_cm': scattering_cm,
                                                                   'anisotropy': anisotropy,
                                                                   'assumed_anisotropy': assumed_anisotropy})
        # Stack the two property volumes so a single flattening pass serializes both.
        op_array = np.asarray([absorption_mm, scattering_mm])
        [_, self.nx, self.ny, self.nz] = np.shape(op_array)
        # create a binary of the volume
        # Column-major ("F") flattening interleaves absorption/scattering per voxel
        # (matching the "muamus_float" media format declared in get_mcx_settings).
        optical_properties_list = list(np.reshape(op_array, op_array.size, "F"))
        # Aggressively drop the large intermediates to bound peak memory usage.
        del absorption_cm, absorption_mm, scattering_cm, scattering_mm, op_array
        gc.collect()
        mcx_input = struct.pack("f" * len(optical_properties_list), *optical_properties_list)
        del optical_properties_list
        gc.collect()
        tmp_input_path = self.global_settings[Tags.SIMULATION_PATH] + "/" + \
                         self.global_settings[Tags.VOLUME_NAME] + ".bin"
        # Register the binary volume so remove_mcx_output() can delete it afterwards.
        self.temporary_output_files.append(tmp_input_path)
        with open(tmp_input_path, "wb") as input_file:
            input_file.write(mcx_input)
        del mcx_input, input_file
        struct._clearcache()
        gc.collect()
    def read_mcx_output(self, **kwargs) -> Dict:
        """
        reads the temporary output generated with MCX

        :param kwargs: dummy, used for class inheritance compatibility
        :return: `Dict` instance containing the MCX output
        """
        with open(self.mcx_volumetric_data_file, 'rb') as f:
            data = f.read()
        # The .mc2 file is a raw stream of 32-bit floats.
        data = struct.unpack('%df' % (len(data) / 4), data)
        # Reshape assuming column-major (Fortran) layout of the flat float stream.
        fluence = np.asarray(data).reshape([self.nx, self.ny, self.nz, self.frames], order='F')
        fluence *= 100  # Convert from J/mm^2 to J/cm^2
        if np.shape(fluence)[3] == 1:
            # Single time frame: drop the trailing time axis.
            fluence = np.squeeze(fluence, 3)
        results = dict()
        results[Tags.DATA_FIELD_FLUENCE] = fluence
        return results
def remove_mcx_output(self) -> None:
"""
deletes temporary MCX output files from the file system
:return: None
"""
for f in self.temporary_output_files:
if os.path.isfile(f):
os.remove(f)
    def pre_process_volumes(self, **kwargs) -> Tuple:
        """
        pre-process volumes before running simulations with MCX. The volumes are transformed to `mm` units

        :param kwargs: dictionary containing at least the keys `scattering_cm, absorption_cm, anisotropy` and
            `assumed_anisotropy`
        :return: `Tuple` of volumes after transformation
        """
        # Thin hook around volumes_to_mm — presumably an override point for subclasses.
        return self.volumes_to_mm(**kwargs)
@staticmethod
def volumes_to_mm(**kwargs) -> Tuple:
"""
transforms volumes into `mm` units
:param kwargs: dictionary containing at least the keys `scattering_cm, absorption_cm, anisotropy` and
`assumed_anisotropy`
:return: `Tuple` of volumes after transformation
"""
scattering_cm = kwargs.get('scattering_cm')
absorption_cm = kwargs.get('absorption_cm')
absorption_mm = absorption_cm / 10
scattering_mm = scattering_cm / 10
# FIXME Currently, mcx only accepts a single value for the anisotropy.
# In order to use the correct reduced scattering coefficient throughout the simulation,
# we adjust the scattering parameter to be more accurate in the diffuse regime.
# This will lead to errors, especially in the quasi-ballistic regime.
given_reduced_scattering = (scattering_mm * (1 - kwargs.get('anisotropy')))
# If the anisotropy is 1, all scattering is forward scattering which is equal to no scattering at all
if kwargs.get("assumed_anisotropy") == 1:
scattering_mm = given_reduced_scattering * 0
else:
scattering_mm = given_reduced_scattering / (1 - kwargs.get('assumed_anisotropy'))
scattering_mm[scattering_mm < 1e-10] = 1e-10
return absorption_mm, scattering_mm
@staticmethod
def post_process_volumes(**kwargs) -> Tuple:
"""
post-processes volumes after MCX simulations. Dummy function implemented for compatibility with inherited
classes
:param kwargs: dictionary containing at least the key `volumes` to be transformed
:return:
"""
arrays = kwargs.get('arrays')
return tuple(a for a in arrays)
| 43.21345 | 119 | 0.616145 | # SPDX-FileCopyrightText: 2021 Division of Intelligent Medical Systems, DKFZ
# SPDX-FileCopyrightText: 2021 Janek Groehl
# SPDX-License-Identifier: MIT
import numpy as np
import struct
import subprocess
from simpa.utils import Tags, Settings
from simpa.core.simulation_modules.optical_simulation_module import OpticalForwardModuleBase
from simpa.core.device_digital_twins import PhotoacousticDevice
from simpa.core.device_digital_twins.illumination_geometries.illumination_geometry_base import IlluminationGeometryBase
import json
import os
import gc
from typing import List, Dict, Tuple, Union
class MCXAdapter(OpticalForwardModuleBase):
"""
This class implements a bridge to the mcx framework to integrate mcx into SIMPA. This adapter only allows for
computation of fluence, for computations of diffuse reflectance, take a look at `simpa.ReflectanceMcxAdapter`
.. note::
MCX is a GPU-enabled Monte-Carlo model simulation of photon transport in tissue:
Fang, Qianqian, and David A. Boas. "Monte Carlo simulation of photon migration in 3D
turbid media accelerated by graphics processing units."
Optics express 17.22 (2009): 20178-20190.
"""
def __init__(self, global_settings: Settings):
"""
initializes MCX-specific configuration and clean-up instances
:param global_settings: global settings used during simulations
"""
super(MCXAdapter, self).__init__(global_settings=global_settings)
self.mcx_json_config_file = None
self.mcx_volumetric_data_file = None
self.frames = None
self.mcx_output_suffixes = {'mcx_volumetric_data_file': '.mc2'}
def define_settings_and_execute(self,
_illumination_geometry,
_assumed_anisotropy):
"""
Defines the settigs and runs mcx
"""
settings_dict = self.get_mcx_settings(illumination_geometry=_illumination_geometry,
assumed_anisotropy=_assumed_anisotropy)
self.generate_mcx_json_input(settings_dict=settings_dict)
# run the simulation
cmd = self.get_command()
self.run_mcx(cmd)
# Read output
return self.read_mcx_output()[Tags.DATA_FIELD_FLUENCE]
def forward_model(self,
absorption_cm: np.ndarray,
scattering_cm: np.ndarray,
anisotropy: np.ndarray,
illumination_geometry: Union[IlluminationGeometryBase, PhotoacousticDevice]) -> Dict:
"""
runs the MCX simulations. Binary file containing scattering and absorption volumes is temporarily created as
input for MCX. A JSON serializable file containing the configuration required by MCx is also generated.
The set of flags parsed to MCX is built based on the Tags declared in `self.component_settings`, the results
from MCX are used to populate an instance of Dict and returned.
:param absorption_cm: array containing the absorption of the tissue in `cm` units
:param scattering_cm: array containing the scattering of the tissue in `cm` units
:param anisotropy: array containing the anisotropy of the volume defined by `absorption_cm` and `scattering_cm`
:param illumination_geometry: and instance of `IlluminationGeometryBase` defining the illumination geometry
:return: `Dict` containing the results of optical simulations, the keys in this dictionary-like object
depend on the Tags defined in `self.component_settings`
"""
if Tags.MCX_ASSUMED_ANISOTROPY in self.component_settings:
_assumed_anisotropy = self.component_settings[Tags.MCX_ASSUMED_ANISOTROPY]
else:
_assumed_anisotropy = 0.9
self.generate_mcx_bin_input(absorption_cm=absorption_cm,
scattering_cm=scattering_cm,
anisotropy=anisotropy,
assumed_anisotropy=_assumed_anisotropy)
fluence = None
if isinstance(illumination_geometry, list):
# per convention this list has at least two elements
fluence = self.define_settings_and_execute(illumination_geometry[0], _assumed_anisotropy)
for idx in range(1, len(illumination_geometry)):
# we already looked at the 0th element, so go from 1 to n-1
fluence += self.define_settings_and_execute(illumination_geometry[idx], _assumed_anisotropy)
fluence = fluence / len(illumination_geometry)
else:
fluence = self.define_settings_and_execute(illumination_geometry, _assumed_anisotropy)
struct._clearcache()
# clean temporary files
self.remove_mcx_output()
return {Tags.DATA_FIELD_FLUENCE: fluence}
def generate_mcx_json_input(self, settings_dict: Dict) -> None:
"""
generates JSON serializable file with settings needed by MCX to run simulations.
:param settings_dict: dictionary to be saved as .json
:return: None
"""
tmp_json_filename = self.global_settings[Tags.SIMULATION_PATH] + "/" + \
self.global_settings[Tags.VOLUME_NAME] + ".json"
self.mcx_json_config_file = tmp_json_filename
self.temporary_output_files.append(tmp_json_filename)
with open(tmp_json_filename, "w") as json_file:
json.dump(settings_dict, json_file, indent="\t")
def get_mcx_settings(self,
illumination_geometry: IlluminationGeometryBase,
assumed_anisotropy: np.ndarray,
**kwargs) -> Dict:
"""
generates MCX-specific settings for simulations based on Tags in `self.global_settings` and
`self.component_settings` . Among others, it defines the volume type, dimensions and path to binary file.
:param illumination_geometry: and instance of `IlluminationGeometryBase` defining the illumination geometry
:param assumed_anisotropy:
:param kwargs: dummy, used for class inheritance
:return: dictionary with settings to be used by MCX
"""
mcx_volumetric_data_file = self.global_settings[Tags.SIMULATION_PATH] + "/" + \
self.global_settings[Tags.VOLUME_NAME] + "_output"
for name, suffix in self.mcx_output_suffixes.items():
self.__setattr__(name, mcx_volumetric_data_file + suffix)
self.temporary_output_files.append(mcx_volumetric_data_file + suffix)
if Tags.TIME_STEP and Tags.TOTAL_TIME in self.component_settings:
dt = self.component_settings[Tags.TIME_STEP]
time = self.component_settings[Tags.TOTAL_TIME]
else:
time = 5e-09
dt = 5e-09
self.frames = int(time / dt)
source = illumination_geometry.get_mcx_illuminator_definition(self.global_settings)
settings_dict = {
"Session": {
"ID": mcx_volumetric_data_file,
"DoAutoThread": 1,
"Photons": self.component_settings[Tags.OPTICAL_MODEL_NUMBER_PHOTONS],
"DoMismatch": 0
},
"Forward": {
"T0": 0,
"T1": time,
"Dt": dt
},
"Optode": {
"Source": source
},
"Domain": {
"OriginType": 0,
"LengthUnit": self.global_settings[Tags.SPACING_MM],
"Media": [
{
"mua": 0,
"mus": 0,
"g": 1,
"n": 1
},
{
"mua": 1,
"mus": 1,
"g": assumed_anisotropy,
"n": 1
}
],
"MediaFormat": "muamus_float",
"Dim": [self.nx, self.ny, self.nz],
"VolumeFile": self.global_settings[Tags.SIMULATION_PATH] + "/" +
self.global_settings[Tags.VOLUME_NAME] + ".bin"
}}
if Tags.MCX_SEED not in self.component_settings:
if Tags.RANDOM_SEED in self.global_settings:
settings_dict["Session"]["RNGSeed"] = self.global_settings[Tags.RANDOM_SEED]
else:
settings_dict["Session"]["RNGSeed"] = self.component_settings[Tags.MCX_SEED]
return settings_dict
def get_command(self) -> List:
"""
generates list of commands to be parse to MCX in a subprocess
:return: list of MCX commands
"""
cmd = list()
cmd.append(self.component_settings[Tags.OPTICAL_MODEL_BINARY_PATH])
cmd.append("-f")
cmd.append(self.mcx_json_config_file)
cmd.append("-O")
cmd.append("F")
return cmd
@staticmethod
def run_mcx(cmd: List) -> None:
"""
runs subprocess calling MCX with the flags built with `self.get_command`. Rises a `RuntimeError` if the code
exit of the subprocess is not 0.
:param cmd: list defining command to parse to `subprocess.run`
:return: None
"""
results = None
try:
results = subprocess.run(cmd)
except:
raise RuntimeError(f"MCX failed to run: {cmd}, results: {results}")
def generate_mcx_bin_input(self,
absorption_cm: np.ndarray,
scattering_cm: np.ndarray,
anisotropy: np.ndarray,
assumed_anisotropy: np.ndarray) -> None:
"""
generates binary file containing volume scattering and absorption as input for MCX
:param absorption_cm: Absorption in units of per centimeter
:param scattering_cm: Scattering in units of per centimeter
:param anisotropy: Dimensionless scattering anisotropy
:param assumed_anisotropy:
:return: None
"""
absorption_mm, scattering_mm = self.pre_process_volumes(**{'absorption_cm': absorption_cm,
'scattering_cm': scattering_cm,
'anisotropy': anisotropy,
'assumed_anisotropy': assumed_anisotropy})
op_array = np.asarray([absorption_mm, scattering_mm])
[_, self.nx, self.ny, self.nz] = np.shape(op_array)
# create a binary of the volume
optical_properties_list = list(np.reshape(op_array, op_array.size, "F"))
del absorption_cm, absorption_mm, scattering_cm, scattering_mm, op_array
gc.collect()
mcx_input = struct.pack("f" * len(optical_properties_list), *optical_properties_list)
del optical_properties_list
gc.collect()
tmp_input_path = self.global_settings[Tags.SIMULATION_PATH] + "/" + \
self.global_settings[Tags.VOLUME_NAME] + ".bin"
self.temporary_output_files.append(tmp_input_path)
with open(tmp_input_path, "wb") as input_file:
input_file.write(mcx_input)
del mcx_input, input_file
struct._clearcache()
gc.collect()
def read_mcx_output(self, **kwargs) -> Dict:
"""
reads the temporary output generated with MCX
:param kwargs: dummy, used for class inheritance compatibility
:return: `Dict` instance containing the MCX output
"""
with open(self.mcx_volumetric_data_file, 'rb') as f:
data = f.read()
data = struct.unpack('%df' % (len(data) / 4), data)
fluence = np.asarray(data).reshape([self.nx, self.ny, self.nz, self.frames], order='F')
fluence *= 100 # Convert from J/mm^2 to J/cm^2
if np.shape(fluence)[3] == 1:
fluence = np.squeeze(fluence, 3)
results = dict()
results[Tags.DATA_FIELD_FLUENCE] = fluence
return results
def remove_mcx_output(self) -> None:
"""
deletes temporary MCX output files from the file system
:return: None
"""
for f in self.temporary_output_files:
if os.path.isfile(f):
os.remove(f)
def pre_process_volumes(self, **kwargs) -> Tuple:
"""
pre-process volumes before running simulations with MCX. The volumes are transformed to `mm` units
:param kwargs: dictionary containing at least the keys `scattering_cm, absorption_cm, anisotropy` and
`assumed_anisotropy`
:return: `Tuple` of volumes after transformation
"""
return self.volumes_to_mm(**kwargs)
@staticmethod
def volumes_to_mm(**kwargs) -> Tuple:
"""
transforms volumes into `mm` units
:param kwargs: dictionary containing at least the keys `scattering_cm, absorption_cm, anisotropy` and
`assumed_anisotropy`
:return: `Tuple` of volumes after transformation
"""
scattering_cm = kwargs.get('scattering_cm')
absorption_cm = kwargs.get('absorption_cm')
absorption_mm = absorption_cm / 10
scattering_mm = scattering_cm / 10
# FIXME Currently, mcx only accepts a single value for the anisotropy.
# In order to use the correct reduced scattering coefficient throughout the simulation,
# we adjust the scattering parameter to be more accurate in the diffuse regime.
# This will lead to errors, especially in the quasi-ballistic regime.
given_reduced_scattering = (scattering_mm * (1 - kwargs.get('anisotropy')))
# If the anisotropy is 1, all scattering is forward scattering which is equal to no scattering at all
if kwargs.get("assumed_anisotropy") == 1:
scattering_mm = given_reduced_scattering * 0
else:
scattering_mm = given_reduced_scattering / (1 - kwargs.get('assumed_anisotropy'))
scattering_mm[scattering_mm < 1e-10] = 1e-10
return absorption_mm, scattering_mm
@staticmethod
def post_process_volumes(**kwargs) -> Tuple:
"""
post-processes volumes after MCX simulations. Dummy function implemented for compatibility with inherited
classes
:param kwargs: dictionary containing at least the key `volumes` to be transformed
:return:
"""
arrays = kwargs.get('arrays')
return tuple(a for a in arrays)
| 0 | 0 | 0 |
62a88ee1cea48215d3c7df22a522cd9c67672444 | 136 | py | Python | Python/CodeForces Solutions/1-500/71A.py | 7namansharma/Comp-Prog | b760ef9b4173e6d5851dc63cc92a8e935baf60ed | [
"MIT"
] | null | null | null | Python/CodeForces Solutions/1-500/71A.py | 7namansharma/Comp-Prog | b760ef9b4173e6d5851dc63cc92a8e935baf60ed | [
"MIT"
] | null | null | null | Python/CodeForces Solutions/1-500/71A.py | 7namansharma/Comp-Prog | b760ef9b4173e6d5851dc63cc92a8e935baf60ed | [
"MIT"
] | null | null | null | for i in range(int(input())):
n = input()
if len(n)>10:
print(n[0], len(n)-2, n[-1], sep="")
else:
print(n)
| 19.428571 | 44 | 0.441176 | for i in range(int(input())):
n = input()
if len(n)>10:
print(n[0], len(n)-2, n[-1], sep="")
else:
print(n)
| 0 | 0 | 0 |
e89a350d85b8cab3877c7dc4a2a8b2b77a88e09c | 3,107 | py | Python | setup.py | qinzzz/forte | 39a106667efd7d0c72dbc44f3cf060a49d742697 | [
"Apache-2.0"
] | null | null | null | setup.py | qinzzz/forte | 39a106667efd7d0c72dbc44f3cf060a49d742697 | [
"Apache-2.0"
] | null | null | null | setup.py | qinzzz/forte | 39a106667efd7d0c72dbc44f3cf060a49d742697 | [
"Apache-2.0"
] | null | null | null | import sys
from pathlib import Path
import os
import setuptools
# Long description for PyPI comes straight from the README. The encoding is
# pinned so builds do not depend on the machine's locale.
long_description = (Path(__file__).parent / "README.md").read_text(encoding="utf-8")

if sys.version_info < (3, 6):
    sys.exit("Python>=3.6 is required by Forte.")

# Read VERSION out of forte/version.py without importing the (not yet
# installed) package itself.
VERSION_VAR = "VERSION"
version = {}
with open(
    os.path.join(os.path.dirname(os.path.abspath(__file__)), "forte/version.py"),
    encoding="utf-8",
) as fp:
    exec(fp.read(), version)
if VERSION_VAR not in version or not version[VERSION_VAR]:
    raise ValueError(
        f"Cannot find {VERSION_VAR} in forte/version.py. Please make sure that "
        f"{VERSION_VAR} is correctly defined and formatted in forte/version.py."
    )
# All packaging metadata lives in this single call; the version is sourced from
# forte/version.py above.
setuptools.setup(
    name="forte",
    version=version[VERSION_VAR],
    url="https://github.com/asyml/forte",
    description="Forte is extensible framework for building composable and "
    "modularized NLP workflows.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    license="Apache License Version 2.0",
    packages=setuptools.find_namespace_packages(
        include=["ft.*", "ftx.*", "forte"]
    ),
    include_package_data=True,
    platforms="any",
    # Core runtime dependencies; heavyweight ML stacks are kept in extras below.
    install_requires=[
        "sortedcontainers>=2.1.0",
        "numpy>=1.16.6",
        "jsonpickle>=1.4",
        "pyyaml>=5.4",
        "smart-open>=1.8.4",
        "typed_astunparse>=2.1.4",
        "funcsigs>=1.0.2",
        "typed_ast>=1.5.0",
        "jsonschema>=3.0.2",
        'typing>=3.7.4;python_version<"3.5"',
        "typing-inspect>=0.6.0",
        'dataclasses~=0.7;python_version<"3.7"',
        'importlib-resources>=5.1.4;python_version<"3.7"',
        "asyml-utilities",
    ],
    # Optional feature sets, installable as e.g. `pip install forte[models]`.
    extras_require={
        "data_aug": [
            "transformers>=4.15.0",
            "nltk",
            "texar-pytorch>=0.1.4",
            "requests",
        ],
        "ir": ["texar-pytorch>=0.1.4", "tensorflow>=1.15.0"],
        "remote": ["fastapi>=0.65.2", "uvicorn>=0.14.0", "requests"],
        "audio_ext": ["soundfile>=0.10.3"],
        "stave": ["stave>=0.0.1.dev12"],
        "models": [
            "torch>=1.1.0",
            "torchtext==0.4.0",
            "tqdm>=4.36.1",
            "texar-pytorch>=0.1.4",
            "tensorflow>=1.15.0",
        ],
        "test": [
            "ddt",
            "testfixtures",
            "testbook",
            "termcolor",
            "transformers>=4.15.0",
            "nltk",
        ],
        "wikipedia": ["rdflib==4.2.2"],
        # transformers 4.10.0 will break the translation model we used here
        "nlp": ["texar-pytorch>=0.1.4"],
        "extractor": ["texar-pytorch>=0.1.4"],
    },
    # Exposes the ontology code generator as a console command.
    entry_points={
        "console_scripts": [
            "generate_ontology = forte.command_line.generate_ontology.__main__:main"
        ]
    },
    classifiers=[
        "Intended Audience :: Developers",
        "Intended Audience :: Education",
        "Intended Audience :: Science/Research",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
    ],
)
| 31.07 | 84 | 0.562601 | import sys
from pathlib import Path
import os
import setuptools
long_description = (Path(__file__).parent / "README.md").read_text()
if sys.version_info < (3, 6):
sys.exit("Python>=3.6 is required by Forte.")
VERSION_VAR = "VERSION"
version = {}
with open(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "forte/version.py")
) as fp:
exec(fp.read(), version)
if VERSION_VAR not in version or not version[VERSION_VAR]:
raise ValueError(
f"Cannot find {VERSION_VAR} in forte/version.py. Please make sure that "
f"{VERSION_VAR} is correctly defined and formatted in forte/version.py."
)
setuptools.setup(
name="forte",
version=version[VERSION_VAR],
url="https://github.com/asyml/forte",
description="Forte is extensible framework for building composable and "
"modularized NLP workflows.",
long_description=long_description,
long_description_content_type="text/markdown",
license="Apache License Version 2.0",
packages=setuptools.find_namespace_packages(
include=["ft.*", "ftx.*", "forte"]
),
include_package_data=True,
platforms="any",
install_requires=[
"sortedcontainers>=2.1.0",
"numpy>=1.16.6",
"jsonpickle>=1.4",
"pyyaml>=5.4",
"smart-open>=1.8.4",
"typed_astunparse>=2.1.4",
"funcsigs>=1.0.2",
"typed_ast>=1.5.0",
"jsonschema>=3.0.2",
'typing>=3.7.4;python_version<"3.5"',
"typing-inspect>=0.6.0",
'dataclasses~=0.7;python_version<"3.7"',
'importlib-resources>=5.1.4;python_version<"3.7"',
"asyml-utilities",
],
extras_require={
"data_aug": [
"transformers>=4.15.0",
"nltk",
"texar-pytorch>=0.1.4",
"requests",
],
"ir": ["texar-pytorch>=0.1.4", "tensorflow>=1.15.0"],
"remote": ["fastapi>=0.65.2", "uvicorn>=0.14.0", "requests"],
"audio_ext": ["soundfile>=0.10.3"],
"stave": ["stave>=0.0.1.dev12"],
"models": [
"torch>=1.1.0",
"torchtext==0.4.0",
"tqdm>=4.36.1",
"texar-pytorch>=0.1.4",
"tensorflow>=1.15.0",
],
"test": [
"ddt",
"testfixtures",
"testbook",
"termcolor",
"transformers>=4.15.0",
"nltk",
],
"wikipedia": ["rdflib==4.2.2"],
# transformers 4.10.0 will break the translation model we used here
"nlp": ["texar-pytorch>=0.1.4"],
"extractor": ["texar-pytorch>=0.1.4"],
},
entry_points={
"console_scripts": [
"generate_ontology = forte.command_line.generate_ontology.__main__:main"
]
},
classifiers=[
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
)
| 0 | 0 | 0 |
ae888740f1b547b002ac3469210c056f2816ef74 | 1,498 | py | Python | bot.py | PiranavS/Protbot | c09dac4be4348f7c6c5ed497cc8a2c56e180f53b | [
"MIT"
] | null | null | null | bot.py | PiranavS/Protbot | c09dac4be4348f7c6c5ed497cc8a2c56e180f53b | [
"MIT"
] | null | null | null | bot.py | PiranavS/Protbot | c09dac4be4348f7c6c5ed497cc8a2c56e180f53b | [
"MIT"
] | null | null | null | import discord
from discord.ext import commands
from googleapiclient.discovery import build
youtube=build('youtube','v3',developerKey='')
client=commands.Bot(command_prefix = ">")
@client.event
@client.event
#await message.channel.send(message.content)
@client.command()
@client.command()
@client.command()
@client.command(aliases=["mb","maari boys"])
@client.command()
@client.command()
client.run("")
| 26.280702 | 86 | 0.700935 | import discord
from discord.ext import commands
from googleapiclient.discovery import build
youtube=build('youtube','v3',developerKey='')
client=commands.Bot(command_prefix = ">")
@client.event
async def on_ready():
    # Gateway callback: fired once the bot has connected and is ready.
    print("Let's kick ass")
@client.event
async def on_message(message):
    # Raw message hook: only reacts to ">ytstat <channel>" and posts that
    # YouTube channel's public statistics back to the same channel.
    # NOTE(review): overriding on_message without forwarding to the command
    # processor may prevent the @client.command handlers below from firing —
    # confirm against the discord.py docs.
    if message.content.startswith('>ytstat') :
        # everything after ">ytstat " (8 chars) is treated as the channel username
        chl=message.content[8:len(message.content):1]
        request=youtube.channels().list(part="statistics", forUsername=chl)
        response=request.execute()
        viewers=response['items'][0]['statistics']['viewCount'] + ' viewers'
        subscribers=response['items'][0]['statistics']['subscriberCount'] + ' subscribers'
        videos=response['items'][0]['statistics']['videoCount'] + ' videos'
        await message.channel.send(chl)
        await message.channel.send(viewers)
        await message.channel.send(subscribers)
        await message.channel.send(videos)
    #await message.channel.send(message.content)
# Chat commands (prefix ">"); each replies with a fixed message unless noted.
@client.command()
async def hello(ctx):
    # >hello -> canned greeting
    await ctx.send("hi")
@client.command()
async def thanks(ctx):
    # >thanks -> canned acknowledgement
    await ctx.send("Welcome")
@client.command()
async def arigato(ctx):
    # >arigato -> canned reply
    await ctx.send("Do itashimashite")
@client.command(aliases=["mb","maari boys"])
async def maariboys(ctx):
    # >maariboys (aliases: >mb, ">maari boys") -> canned reply
    await ctx.send("World's Greatest")
@client.command()
async def clear(ctx, amount=1):
    # >clear [amount] -> bulk-deletes `amount` messages from the channel
    await ctx.channel.purge(limit=amount)
@client.command()
async def ping(ctx):
    # >ping -> reports the client latency in milliseconds
    await ctx.send(f"Bot latency: {client.latency*1000} ms")
client.run("")
| 865 | 0 | 184 |
7a99bc863f2d42f188708751c45a4d80a7214073 | 102 | py | Python | contrib/Research/nlp/bert/BERT_tf_Soapeggpain/script/e2e_func_node/tools/performance/performanceAnalysis_1p/test.py | Huawei-Ascend/modelzoo | df51ed9c1d6dbde1deef63f2a037a369f8554406 | [
"Apache-2.0"
] | null | null | null | contrib/Research/nlp/bert/BERT_tf_Soapeggpain/script/e2e_func_node/tools/performance/performanceAnalysis_1p/test.py | Huawei-Ascend/modelzoo | df51ed9c1d6dbde1deef63f2a037a369f8554406 | [
"Apache-2.0"
] | 3 | 2021-03-31T20:15:40.000Z | 2022-02-09T23:50:46.000Z | contrib/Research/nlp/bert/BERT_tf_Soapeggpain/script/e2e_func_node/tools/performance/performanceAnalysis_1p/test.py | Huawei-Ascend/modelzoo | df51ed9c1d6dbde1deef63f2a037a369f8554406 | [
"Apache-2.0"
] | null | null | null | import xlrd
import numpy as np
import sys
import math
a = 3
b = 7
mean = round( a / b,2 )
print(mean)
| 11.333333 | 23 | 0.676471 | import xlrd
import numpy as np
import sys
import math
a = 3
b = 7
mean = round( a / b,2 )
print(mean)
| 0 | 0 | 0 |
58ab960f369a7f7afb7e8972fe79234f1a6e4f7c | 1,507 | py | Python | tvseries/tests/test_forms.py | abnerpc/flask_tutorial | a7c02f9f2afb9727ff4b12df2a3b1d4c90ac5e4a | [
"MIT"
] | 7 | 2016-12-27T01:20:39.000Z | 2019-06-20T12:50:23.000Z | tvseries/tests/test_forms.py | rafaelhenrique/flask_tutorial | eb8f1fcc4b1b442c6bfe7657cc83f8e4c678e6b9 | [
"MIT"
] | 6 | 2018-10-25T21:22:20.000Z | 2019-05-19T16:03:35.000Z | tvseries/tests/test_forms.py | rafaelhenrique/flask_tutorial | eb8f1fcc4b1b442c6bfe7657cc83f8e4c678e6b9 | [
"MIT"
] | 6 | 2018-04-15T01:49:55.000Z | 2018-10-27T15:09:46.000Z | from datetime import date
import pytest
from tvseries.core.forms import TVSerieForm
from tvseries.config import TestConfig
@pytest.mark.usefixtures('client_class')
| 38.641026 | 73 | 0.608494 | from datetime import date
import pytest
from tvseries.core.forms import TVSerieForm
from tvseries.config import TestConfig
@pytest.mark.usefixtures('client_class')
class TestCoreForms:
    """Form-level tests for TVSerieForm, run with the 'client_class' fixture applied."""
    @pytest.fixture
    def app(self):
        # Build a fresh application configured for testing; the local import
        # defers loading the app factory until the fixture actually runs.
        from tvseries import create_app
        app = create_app(TestConfig)
        return app
    def test_form_valid(self):
        """A fully populated form (Game of Thrones sample data) must validate."""
        description = (
            "Há muito tempo, em um tempo esquecido, uma força "
            "destruiu o equilíbrio das estações. Em uma terra "
            "onde os verões podem durar vários anos e o inverno "
            "toda uma vida, as reivindicações e as forças sobrenaturais "
            "correm as portas do Reino dos Sete Reinos. A irmandade "
            "da Patrulha da Noite busca proteger o reino de cada "
            "criatura que pode vir de lá da Muralha, mas já não tem "
            "os recursos necessários para garantir a segurança de "
            "todos. Depois de um verão de dez anos, um inverno "
            "rigoroso promete chegar com um futuro mais sombrio. "
            "Enquanto isso, conspirações e rivalidades correm no jogo "
            "político pela disputa do Trono de Ferro, o símbolo do "
            "poder absoluto."
        )
        form_serie = TVSerieForm(name="Game of Thrones",
                                 description=description,
                                 author="George R.R. Martin",
                                 year=date(2011, 1, 1))
        assert form_serie.validate()
| 1,264 | 73 | 22 |
b875c79cb37234ac905904d44dde1cdd687086b2 | 10,039 | py | Python | poet.py | brucewuquant/POET | d93b5e048a95a4d7c1c2e8d10a45251f5edd4c4f | [
"Apache-2.0"
] | 1 | 2022-02-25T02:52:05.000Z | 2022-02-25T02:52:05.000Z | poet.py | dizhouwu/POET | d93b5e048a95a4d7c1c2e8d10a45251f5edd4c4f | [
"Apache-2.0"
] | null | null | null | poet.py | dizhouwu/POET | d93b5e048a95a4d7c1c2e8d10a45251f5edd4c4f | [
"Apache-2.0"
] | null | null | null | import numpy as np
import numba
import scipy
def estimate_nfactor_act(X, C=1):
    """
    Estimate the number of factors for a data matrix X (n observations x p variables).

    Thresholds the bias-corrected eigenvalues of the sample correlation matrix
    (ACT estimator, https://arxiv.org/abs/1909.10710):
    K = #{ adjusted eigenvalues > 1 + C * sqrt(p / (n - 1)) }.
    """
    n, p = X.shape
    # Eigenvalues of the sample correlation matrix, ordered large to small.
    evals = np.linalg.eigvalsh(np.corrcoef(X.T))[::-1]
    # Bias-correct the leading p - 1 eigenvalues.
    evals_adj = np.empty(p - 1)
    for j in range(p - 1):
        m_j = (
            np.sum(1.0 / (evals[j + 1:] - evals[j]))
            + 4.0 / (evals[j + 1] - evals[j])
        ) / (p - j)
        rho = (p - j) / (n - 1)
        evals_adj[j] = -1.0 / (rho * m_j - (1 - rho) / evals[j])
    # Largest index whose adjusted eigenvalue clears the threshold, as a 1-based count.
    threshold = 1.0 + np.sqrt(p / (n - 1)) * C
    return np.where(evals_adj > threshold)[0][-1] + 1
def sign(x):
    """
    Sign of ``x``: 1 if positive, -1 if negative.

    Falsy inputs (0, 0.0, ...) are returned unchanged, preserving the
    short-circuit behaviour of the original ``x and ...`` lambda, so
    ``sign(0) == 0``. Defined as a ``def`` instead of an assigned lambda (PEP 8).
    """
    return (1 if x > 0 else -1) if x else x
def POET(Y, K=-np.inf, C=-np.inf, thres='soft', matrix='cor'):
    """
    Estimate a large covariance matrix in an approximate factor model by
    thresholding principal orthogonal complements (POET).

    Y: p by n matrix of raw data (p = dimensionality, n = sample size). It is
        recommended that Y is de-meaned, i.e., each row has zero mean.
    K: number of factors. K=0 thresholds the sample covariance directly;
        if omitted, K is estimated via estimate_nfactor_act.
    C: float, the positive constant for thresholding. C=0.5 performs quite well
        for soft thresholding; if omitted, it is chosen via POETCmin.
    thres: str, 'soft', 'hard' or 'scad' thresholding rule.
    matrix: 'cor' thresholds the error correlation matrix and transforms back to
        a covariance matrix; 'vad' thresholds the error covariance directly.
    Return: DotDict with
        SigmaY: estimated p by p covariance matrix of y_t
        SigmaU: estimated p by p covariance matrix of u_t
        factors: estimated unobservable factors, K by n
        loadings: estimated factor loadings, p by K
    Returns None (after printing a hint) when K cannot be estimated.
    """
    if K == -np.inf:
        try:
            K = estimate_nfactor_act(Y)
        except IndexError:
            print("ill-formed matrix Y, provide K with suggestion (K>0 and K<8)")
            return
    # Work on row-demeaned data: p features x n observations.
    p, n = Y.shape
    Y = Y - Y.mean(axis=1)[:, np.newaxis]
    if K > 0:
        # PCA via the n x n Gram matrix; eigenvectors (largest first) give factors.
        Dd, V = np.linalg.eigh(Y.T @ Y)
        Dd = Dd[::-1]
        V = np.flip(V, axis=1)
        F = np.sqrt(n) * V[:, :K]       # factors, n by K
        LamPCA = Y @ F / n              # loadings, p by K
        uhat = Y - LamPCA @ F.T         # idiosyncratic residuals, p by n
        Lowrank = LamPCA @ LamPCA.T
        rate = 1 / np.sqrt(p) + np.sqrt((np.log(p)) / n)
    else:
        # K == 0: Sigma_y itself is assumed sparse; there is no low-rank part.
        uhat = Y
        rate = np.sqrt((np.log(p)) / n)
        Lowrank = np.zeros([p, p])
        F = np.zeros([n, 0])            # keep 'factors'/'loadings' well defined
        LamPCA = np.zeros([p, 0])       # (the original raised NameError for K=0)
    SuPCA = uhat @ uhat.T / n
    SuDiag = np.diag(np.diag(SuPCA))
    if matrix == 'cor':
        # Standardise to a correlation matrix before thresholding.
        R = np.linalg.inv(SuDiag ** (1 / 2)) @ SuPCA @ np.linalg.inv(SuDiag ** (1 / 2))
    if matrix == 'vad':
        R = SuPCA
    if C == -np.inf:
        C = POETCmin(Y, K, thres, matrix) + 0.1
    # Entry-wise adaptive thresholds: lambda_ij = C * rate * sd(uhat_i * uhat_j).
    uu = np.zeros([p, p, n])
    roottheta = np.zeros([p, p])
    lambda_ = np.zeros([p, p])
    for i in range(p):
        for j in range(i + 1):  # symmetric: fill lower triangle, mirror upper
            uu[i, j, :] = uhat[i, :] * uhat[j, :]
            roottheta[i, j] = np.std(uu[i, j, :], ddof=1)
            lambda_[i, j] = roottheta[i, j] * rate * C
            lambda_[j, i] = lambda_[i, j]
    Rthresh = np.zeros([p, p])
    if thres == 'soft':
        for i in range(p):
            for j in range(i + 1):
                if np.abs(R[i, j]) < lambda_[i, j] and j < i:
                    Rthresh[i, j] = 0
                elif j == i:
                    # diagonal entries are never thresholded
                    Rthresh[i, j] = R[i, j]
                else:
                    Rthresh[i, j] = sign(R[i, j]) * (abs(R[i, j]) - lambda_[i, j])
                Rthresh[j, i] = Rthresh[i, j]
    elif thres == 'hard':
        for i in range(p):
            for j in range(i + 1):
                if np.abs(R[i, j]) < lambda_[i, j] and j < i:
                    Rthresh[i, j] = 0
                else:
                    Rthresh[i, j] = R[i, j]
                Rthresh[j, i] = Rthresh[i, j]
    elif thres == 'scad':
        for i in range(p):
            for j in range(i + 1):
                if j == i:
                    Rthresh[i, j] = R[i, j]
                elif abs(R[i, j]) < lambda_[i, j]:
                    # fixed misplaced parenthesis: original tested abs(R < lambda),
                    # i.e. the absolute value of a boolean
                    Rthresh[i, j] = 0
                elif abs(R[i, j]) < 2 * lambda_[i, j]:
                    Rthresh[i, j] = sign(R[i, j]) * (abs(R[i, j]) - lambda_[i, j])
                elif abs(R[i, j]) < 3.7 * lambda_[i, j]:
                    Rthresh[i, j] = ((3.7 - 1) * R[i, j] - sign(R[i, j]) * 3.7 * lambda_[i, j]) / (3.7 - 2)
                else:
                    Rthresh[i, j] = R[i, j]
                Rthresh[j, i] = Rthresh[i, j]
    SigmaU = np.zeros([p, p])
    if matrix == 'cor':
        # Transform back to covariance scale: D^{1/2} @ Rthresh @ D^{1/2}.
        # The original used elementwise '*' for the second product which, D being
        # diagonal, zeroed every off-diagonal entry of SigmaU.
        SigmaU = SuDiag ** (1 / 2) @ Rthresh @ SuDiag ** (1 / 2)
    if matrix == 'vad':
        SigmaU = Rthresh
    SigmaY = SigmaU + Lowrank
    return DotDict({'SigmaU': SigmaU,
                    'SigmaY': SigmaY,
                    'factors': F.T,
                    'loadings': LamPCA})
def POETCmin(Y, K, thres, matrix):
    """
    This function is for determining the minimum constant in the threshold that
    guarantees the positive definiteness of the POET estimator.

    Searches for a sign change of C -> min eigenvalue of SigmaU(C) on [-50, 50]
    and returns max(0, root); returns 0 when no sign change is detected.
    """
    # Local import: a bare `import scipy` (as at the top of this file) does not
    # make the scipy.optimize submodule available.
    from scipy import optimize

    p, n = Y.shape

    def mineig(Y, K, C, thres, matrix):
        # Smallest eigenvalue of the thresholded error covariance for constant C
        # (eigvalsh: SigmaU is symmetric by construction).
        SigmaU = POET(Y, K, C, thres, matrix).SigmaU
        return np.linalg.eigvalsh(SigmaU).min()

    def f(x):
        return mineig(Y, K, x, thres, matrix)

    if f(50) * f(-50) < 0:
        # brentq needs exactly the bracketing guaranteed by the sign check above.
        # The original fsolve call passed the whole start array [-50, 50] into the
        # scalar objective and returned an array, making max(0, roots) ambiguous.
        root = optimize.brentq(f, -50, 50)
        result = max(0, root)
    else:
        result = 0
    return result
def POETKhat(Y):
    """
    This function is for calculating the optimal number of factors in an approximate factor model.

    Evaluates two families of information criteria on the p x n matrix Y: the
    Hallin & Liska subsample-stability method (K1HL, K2HL) and the 'BN'
    (Bai & Ng-style) criteria on the full sample (K1BN, K2BN).

    NOTE(review): K1BN/K2BN are 0-based argmin indices while rhat is shifted by
    +1 before K1HL/K2HL are read off — confirm the BN estimates are not off by
    one relative to the HL ones.
    """
    p, n = Y.shape
    Y = Y- Y.mean(axis=1)[:, np.newaxis]
    #Hallin and Liska method
    # Grid of 100 penalty-scaling constants and `re` nested subsamples of growing size.
    c=np.arange(0.05, 5.05,0.05)
    re=20
    rmax=10
    IC=np.zeros([2,re,rmax,100])
    gT1HL, gT2HL, pi, ni=np.ones(20),np.ones(20),np.ones(20),np.ones(20)
    for i in range(re): #generate the subsets, "re" of them
        pi[i]=min(i*np.floor(p/re)+min(p,5),p)
        ni[i]=min(i*np.floor(n/re)+min(n,5),n)
        if i==re-1:
            pi[i]=p
            ni[i]=n
        Yi=Y[:int(pi[i]),:int(ni[i])]
        frob=np.zeros(rmax)
        penal=np.zeros(rmax)
        for k in range(min(int(pi[i]),int(ni[i]),rmax)):
            # PCA with k+1 factors on the subsample; frob[k] is the mean squared residual.
            Dd, V = np.linalg.eigh(Yi.T @ Yi)
            Dd = Dd[::-1]
            V = np.flip(V,axis=1)
            F = V[:,:k+1]
            LamPCA = Yi @ F / ni[i]
            uhat = Yi - LamPCA @ (F.T) # pi by ni
            frob[k]=sum(np.diag(uhat @ (uhat.T)))/(pi[i]*ni[i])
            # Two HL penalty shapes, scaled by each constant c[l] below.
            gT1HL[i]=np.log((pi[i]*ni[i])/(pi[i]+ni[i]))*(pi[i]+ni[i])/(pi[i]*ni[i])
            gT2HL[i]=np.log(min(pi[i],ni[i]))*(pi[i]+ni[i])/(pi[i]*ni[i])
            for l in range(100): # only fills in the ICs up to k, which may be <rmax
                IC[0,i,k,l]=np.log(frob[k])+c[l]*(k+1)*gT1HL[i]
                IC[1,i,k,l]=np.log(frob[k])+c[l]*(k+1)*gT2HL[i]
    # Minimising factor count per subsample and constant (1-based after the += 1).
    rhat=np.zeros([2,re,100])
    for i in range(re):
        for l in range(100):
            m=min(pi[i],ni[i],rmax)
            temp1=np.argmin(IC[0,i,:int(m),l])
            rhat[0,i,l]=temp1
            temp2=np.argmin(IC[1,i,:int(m),l])
            rhat[1,i,l]=temp2
    rhat+=1
    # Pick the first constant whose estimate is stable (zero spread) across subsamples.
    sc1, sc2 = np.zeros(100), np.zeros(100)
    for l in range(100):
        sc1[l] = np.std(rhat[0,:,l],ddof=1)
        sc2[l] = np.std(rhat[1,:,l],ddof=1)
    c1vec=np.where(sc1==0)
    ctemp1=c1vec[0][0]
    c1=c[ctemp1]
    K1HL=rhat[0,0,ctemp1]
    c2vec=np.where(sc2==0)
    ctemp2=c2vec[0][0]
    c2=c[ctemp2]
    K2HL=rhat[1,0,ctemp2]
    # BN-style criteria on the full sample with a fixed constant c = 1.
    c=1
    rmax=10
    IC=np.zeros([2,rmax])
    frob, penal = np.zeros(rmax), np.zeros(rmax)
    for k in range(rmax):
        Dd, V = np.linalg.eigh(Y.T @ Y)
        Dd = Dd[::-1]
        V = np.flip(V,axis=1)
        F = V[:,:k+1]
        LamPCA = Y @ F / n
        uhat = Y - LamPCA @ (F.T) # p by n
        frob[k]=sum(np.diag(uhat @ uhat.T))/(p*n)
        gT1BN=np.log(np.log((p*n))/(p+n))*(p+n)/(p*n)
        gT2BN=np.log(min(p,n))*(p+n)/(p*n)
        IC[0,k]=np.log(frob[k]) +(k+1)*gT1BN
        IC[1,k]=np.log(frob[k]) +(k+1)*gT2BN
    K1BN = np.argmin(IC[0,:])
    K2BN = np.argmin(IC[1,:])
    result = DotDict({"K1HL":K1HL,"K2HL":K2HL,"K1BN":K1BN,"K2BN":K2BN,"IC":IC})
    return result
if __name__ == "__main__":
    # Smoke test: run POET on a small symmetric 4x4 input with a fixed number of
    # factors (K=3), soft thresholding on the covariance ('vad') scale and C=0.5.
    mat=np.array([
    [0.8841665, -0.2017119 , 0.7010793 ,-0.8378639],
    [-0.2017119, 2.2415674 ,-0.9365252 , 1.8725689],
    [ 0.7010793 ,-0.9365252 , 1.7681529 ,-0.6699727],
    [-0.8378639 ,1.8725689, -0.6699727 , 2.5185530],
    ])
    a =POET(mat,K=3,C=0.5, thres='soft', matrix='vad')
| 32.383871 | 222 | 0.513199 | import numpy as np
import numba
import scipy
class DotDict(dict):
    """
    A ``dict`` whose keys are also readable/writable as attributes.

    Nested plain dictionaries are wrapped recursively, so for
    ``d = DotDict({'a': {'b': 1}})`` both ``d['a']['b']`` and ``d.a.b`` work.
    Attribute names that collide with ``dict``'s callable API (``keys``,
    ``update``, ...) are rejected with ``AttributeError``.
    """
    def __init__(self, *args, **kwargs):
        super(DotDict, self).__init__(*args, **kwargs)
        # Attribute namespace and mapping share one storage: setting a key
        # exposes it as an attribute and vice versa.
        self.__dict__ = self
        for name in list(self):
            self[name] = self.__from_nested_dict(self[name])

    def __setattr__(self, name, value):
        dict_member = getattr(dict, name, None)
        if callable(dict_member):
            # Refuse to shadow dict's own methods.
            raise AttributeError(f'Attempting to override dict method: {name}')
        super().__setattr__(name, value)

    @classmethod
    def __from_nested_dict(cls, data):
        # Recursively convert plain dicts (and only dicts) into cls instances.
        if not isinstance(data, dict):
            return data
        return cls({k: cls.__from_nested_dict(v) for k, v in data.items()})
def estimate_nfactor_act(X, C=1):
    """
    Estimate the number of factors for a data matrix X (n observations x p variables).

    Thresholds the bias-corrected eigenvalues of the sample correlation matrix
    (ACT estimator, https://arxiv.org/abs/1909.10710):
    K = #{ adjusted eigenvalues > 1 + C * sqrt(p / (n - 1)) }.
    """
    n, p = X.shape
    # Eigenvalues of the sample correlation matrix, ordered large to small.
    evals = np.linalg.eigvalsh(np.corrcoef(X.T))[::-1]
    # Bias-correct the leading p - 1 eigenvalues.
    evals_adj = np.empty(p - 1)
    for j in range(p - 1):
        m_j = (
            np.sum(1.0 / (evals[j + 1:] - evals[j]))
            + 4.0 / (evals[j + 1] - evals[j])
        ) / (p - j)
        rho = (p - j) / (n - 1)
        evals_adj[j] = -1.0 / (rho * m_j - (1 - rho) / evals[j])
    # Largest index whose adjusted eigenvalue clears the threshold, as a 1-based count.
    threshold = 1.0 + np.sqrt(p / (n - 1)) * C
    return np.where(evals_adj > threshold)[0][-1] + 1
def sign(x):
    """
    Sign of ``x``: 1 if positive, -1 if negative.

    Falsy inputs (0, 0.0, ...) are returned unchanged, preserving the
    short-circuit behaviour of the original ``x and ...`` lambda, so
    ``sign(0) == 0``. Defined as a ``def`` instead of an assigned lambda (PEP 8).
    """
    return (1 if x > 0 else -1) if x else x
def _estimate_K(Y):
    """Factor-count heuristic: floor of the average of the four POETKhat estimates, plus one."""
    khat = POETKhat(Y)
    avg = (khat.K1HL + khat.K2HL + khat.K1BN + khat.K2BN) * 0.25
    return np.floor(avg) + 1
def POET(Y, K=-np.inf, C=-np.inf, thres='soft', matrix='cor'):
    """
    Estimate a large covariance matrix in an approximate factor model by
    thresholding principal orthogonal complements (POET).

    Y: p by n matrix of raw data (p = dimensionality, n = sample size). It is
        recommended that Y is de-meaned, i.e., each row has zero mean.
    K: number of factors. K=0 thresholds the sample covariance directly;
        if omitted, K is estimated via estimate_nfactor_act.
    C: float, the positive constant for thresholding. C=0.5 performs quite well
        for soft thresholding; if omitted, it is chosen via POETCmin.
    thres: str, 'soft', 'hard' or 'scad' thresholding rule.
    matrix: 'cor' thresholds the error correlation matrix and transforms back to
        a covariance matrix; 'vad' thresholds the error covariance directly.
    Return: DotDict with
        SigmaY: estimated p by p covariance matrix of y_t
        SigmaU: estimated p by p covariance matrix of u_t
        factors: estimated unobservable factors, K by n
        loadings: estimated factor loadings, p by K
    Returns None (after printing a hint) when K cannot be estimated.
    """
    if K == -np.inf:
        try:
            K = estimate_nfactor_act(Y)
        except IndexError:
            print("ill-formed matrix Y, provide K with suggestion (K>0 and K<8)")
            return
    # Work on row-demeaned data: p features x n observations.
    p, n = Y.shape
    Y = Y - Y.mean(axis=1)[:, np.newaxis]
    if K > 0:
        # PCA via the n x n Gram matrix; eigenvectors (largest first) give factors.
        Dd, V = np.linalg.eigh(Y.T @ Y)
        Dd = Dd[::-1]
        V = np.flip(V, axis=1)
        F = np.sqrt(n) * V[:, :K]       # factors, n by K
        LamPCA = Y @ F / n              # loadings, p by K
        uhat = Y - LamPCA @ F.T         # idiosyncratic residuals, p by n
        Lowrank = LamPCA @ LamPCA.T
        rate = 1 / np.sqrt(p) + np.sqrt((np.log(p)) / n)
    else:
        # K == 0: Sigma_y itself is assumed sparse; there is no low-rank part.
        uhat = Y
        rate = np.sqrt((np.log(p)) / n)
        Lowrank = np.zeros([p, p])
        F = np.zeros([n, 0])            # keep 'factors'/'loadings' well defined
        LamPCA = np.zeros([p, 0])       # (the original raised NameError for K=0)
    SuPCA = uhat @ uhat.T / n
    SuDiag = np.diag(np.diag(SuPCA))
    if matrix == 'cor':
        # Standardise to a correlation matrix before thresholding.
        R = np.linalg.inv(SuDiag ** (1 / 2)) @ SuPCA @ np.linalg.inv(SuDiag ** (1 / 2))
    if matrix == 'vad':
        R = SuPCA
    if C == -np.inf:
        C = POETCmin(Y, K, thres, matrix) + 0.1
    # Entry-wise adaptive thresholds: lambda_ij = C * rate * sd(uhat_i * uhat_j).
    uu = np.zeros([p, p, n])
    roottheta = np.zeros([p, p])
    lambda_ = np.zeros([p, p])
    for i in range(p):
        for j in range(i + 1):  # symmetric: fill lower triangle, mirror upper
            uu[i, j, :] = uhat[i, :] * uhat[j, :]
            roottheta[i, j] = np.std(uu[i, j, :], ddof=1)
            lambda_[i, j] = roottheta[i, j] * rate * C
            lambda_[j, i] = lambda_[i, j]
    Rthresh = np.zeros([p, p])
    if thres == 'soft':
        for i in range(p):
            for j in range(i + 1):
                if np.abs(R[i, j]) < lambda_[i, j] and j < i:
                    Rthresh[i, j] = 0
                elif j == i:
                    # diagonal entries are never thresholded
                    Rthresh[i, j] = R[i, j]
                else:
                    Rthresh[i, j] = sign(R[i, j]) * (abs(R[i, j]) - lambda_[i, j])
                Rthresh[j, i] = Rthresh[i, j]
    elif thres == 'hard':
        for i in range(p):
            for j in range(i + 1):
                if np.abs(R[i, j]) < lambda_[i, j] and j < i:
                    Rthresh[i, j] = 0
                else:
                    Rthresh[i, j] = R[i, j]
                Rthresh[j, i] = Rthresh[i, j]
    elif thres == 'scad':
        for i in range(p):
            for j in range(i + 1):
                if j == i:
                    Rthresh[i, j] = R[i, j]
                elif abs(R[i, j]) < lambda_[i, j]:
                    # fixed misplaced parenthesis: original tested abs(R < lambda),
                    # i.e. the absolute value of a boolean
                    Rthresh[i, j] = 0
                elif abs(R[i, j]) < 2 * lambda_[i, j]:
                    Rthresh[i, j] = sign(R[i, j]) * (abs(R[i, j]) - lambda_[i, j])
                elif abs(R[i, j]) < 3.7 * lambda_[i, j]:
                    Rthresh[i, j] = ((3.7 - 1) * R[i, j] - sign(R[i, j]) * 3.7 * lambda_[i, j]) / (3.7 - 2)
                else:
                    Rthresh[i, j] = R[i, j]
                Rthresh[j, i] = Rthresh[i, j]
    SigmaU = np.zeros([p, p])
    if matrix == 'cor':
        # Transform back to covariance scale: D^{1/2} @ Rthresh @ D^{1/2}.
        # The original used elementwise '*' for the second product which, D being
        # diagonal, zeroed every off-diagonal entry of SigmaU.
        SigmaU = SuDiag ** (1 / 2) @ Rthresh @ SuDiag ** (1 / 2)
    if matrix == 'vad':
        SigmaU = Rthresh
    SigmaY = SigmaU + Lowrank
    return DotDict({'SigmaU': SigmaU,
                    'SigmaY': SigmaY,
                    'factors': F.T,
                    'loadings': LamPCA})
def POETCmin(Y,K,thres,matrix):
"""
This function is for determining the minimum constant in the threshold that guarantees the positive
definiteness of the POET estimator.
"""
p, n = Y.shape
def mineig(Y,K,C,thres,matrix):
SigmaU = POET(Y,K,C,thres,matrix).SigmaU
f = min(np.linalg.eigvals(SigmaU))
return f
def f(x):
return mineig(Y,K,x,thres,matrix)
if f(50)*f(-50)<0:
roots = scipy.optimize.fsolve(f,[-50,50])
result = max(0,roots)
else:
result = 0
return result
def POETKhat(Y):
    """
    This function is for calculating the optimal number of factors in an approximate factor model.

    Evaluates two families of information criteria on the p x n matrix Y: the
    Hallin & Liska subsample-stability method (K1HL, K2HL) and the 'BN'
    (Bai & Ng-style) criteria on the full sample (K1BN, K2BN).

    NOTE(review): K1BN/K2BN are 0-based argmin indices while rhat is shifted by
    +1 before K1HL/K2HL are read off — confirm the BN estimates are not off by
    one relative to the HL ones.
    """
    p, n = Y.shape
    Y = Y- Y.mean(axis=1)[:, np.newaxis]
    #Hallin and Liska method
    # Grid of 100 penalty-scaling constants and `re` nested subsamples of growing size.
    c=np.arange(0.05, 5.05,0.05)
    re=20
    rmax=10
    IC=np.zeros([2,re,rmax,100])
    gT1HL, gT2HL, pi, ni=np.ones(20),np.ones(20),np.ones(20),np.ones(20)
    for i in range(re): #generate the subsets, "re" of them
        pi[i]=min(i*np.floor(p/re)+min(p,5),p)
        ni[i]=min(i*np.floor(n/re)+min(n,5),n)
        if i==re-1:
            pi[i]=p
            ni[i]=n
        Yi=Y[:int(pi[i]),:int(ni[i])]
        frob=np.zeros(rmax)
        penal=np.zeros(rmax)
        for k in range(min(int(pi[i]),int(ni[i]),rmax)):
            # PCA with k+1 factors on the subsample; frob[k] is the mean squared residual.
            Dd, V = np.linalg.eigh(Yi.T @ Yi)
            Dd = Dd[::-1]
            V = np.flip(V,axis=1)
            F = V[:,:k+1]
            LamPCA = Yi @ F / ni[i]
            uhat = Yi - LamPCA @ (F.T) # pi by ni
            frob[k]=sum(np.diag(uhat @ (uhat.T)))/(pi[i]*ni[i])
            # Two HL penalty shapes, scaled by each constant c[l] below.
            gT1HL[i]=np.log((pi[i]*ni[i])/(pi[i]+ni[i]))*(pi[i]+ni[i])/(pi[i]*ni[i])
            gT2HL[i]=np.log(min(pi[i],ni[i]))*(pi[i]+ni[i])/(pi[i]*ni[i])
            for l in range(100): # only fills in the ICs up to k, which may be <rmax
                IC[0,i,k,l]=np.log(frob[k])+c[l]*(k+1)*gT1HL[i]
                IC[1,i,k,l]=np.log(frob[k])+c[l]*(k+1)*gT2HL[i]
    # Minimising factor count per subsample and constant (1-based after the += 1).
    rhat=np.zeros([2,re,100])
    for i in range(re):
        for l in range(100):
            m=min(pi[i],ni[i],rmax)
            temp1=np.argmin(IC[0,i,:int(m),l])
            rhat[0,i,l]=temp1
            temp2=np.argmin(IC[1,i,:int(m),l])
            rhat[1,i,l]=temp2
    rhat+=1
    # Pick the first constant whose estimate is stable (zero spread) across subsamples.
    sc1, sc2 = np.zeros(100), np.zeros(100)
    for l in range(100):
        sc1[l] = np.std(rhat[0,:,l],ddof=1)
        sc2[l] = np.std(rhat[1,:,l],ddof=1)
    c1vec=np.where(sc1==0)
    ctemp1=c1vec[0][0]
    c1=c[ctemp1]
    K1HL=rhat[0,0,ctemp1]
    c2vec=np.where(sc2==0)
    ctemp2=c2vec[0][0]
    c2=c[ctemp2]
    K2HL=rhat[1,0,ctemp2]
    # BN-style criteria on the full sample with a fixed constant c = 1.
    c=1
    rmax=10
    IC=np.zeros([2,rmax])
    frob, penal = np.zeros(rmax), np.zeros(rmax)
    for k in range(rmax):
        Dd, V = np.linalg.eigh(Y.T @ Y)
        Dd = Dd[::-1]
        V = np.flip(V,axis=1)
        F = V[:,:k+1]
        LamPCA = Y @ F / n
        uhat = Y - LamPCA @ (F.T) # p by n
        frob[k]=sum(np.diag(uhat @ uhat.T))/(p*n)
        gT1BN=np.log(np.log((p*n))/(p+n))*(p+n)/(p*n)
        gT2BN=np.log(min(p,n))*(p+n)/(p*n)
        IC[0,k]=np.log(frob[k]) +(k+1)*gT1BN
        IC[1,k]=np.log(frob[k]) +(k+1)*gT2BN
    K1BN = np.argmin(IC[0,:])
    K2BN = np.argmin(IC[1,:])
    result = DotDict({"K1HL":K1HL,"K2HL":K2HL,"K1BN":K1BN,"K2BN":K2BN,"IC":IC})
    return result
if __name__ == "__main__":
    # Smoke test: run POET on a small symmetric 4x4 input with a fixed number of
    # factors (K=3), soft thresholding on the covariance ('vad') scale and C=0.5.
    mat=np.array([
    [0.8841665, -0.2017119 , 0.7010793 ,-0.8378639],
    [-0.2017119, 2.2415674 ,-0.9365252 , 1.8725689],
    [ 0.7010793 ,-0.9365252 , 1.7681529 ,-0.6699727],
    [-0.8378639 ,1.8725689, -0.6699727 , 2.5185530],
    ])
    a =POET(mat,K=3,C=0.5, thres='soft', matrix='vad')
| 932 | 103 | 105 |
e6ae6236f01c9013093adb4bb5bee3dcafd38785 | 4,439 | py | Python | llap-server/src/main/resources/templates.py | bubblesly/hive | 83e52d3c22471f153a8e7ca1392ae500850fbbc2 | [
"Apache-2.0"
] | 4 | 2019-06-05T18:08:19.000Z | 2021-06-07T10:36:49.000Z | llap-server/src/main/resources/templates.py | bubblesly/hive | 83e52d3c22471f153a8e7ca1392ae500850fbbc2 | [
"Apache-2.0"
] | 62 | 2020-04-23T15:01:26.000Z | 2022-01-25T17:27:48.000Z | llap-server/src/main/resources/templates.py | bubblesly/hive | 83e52d3c22471f153a8e7ca1392ae500850fbbc2 | [
"Apache-2.0"
] | 21 | 2016-07-28T13:17:32.000Z | 2022-02-25T10:46:20.000Z | metainfo = """<?xml version="1.0"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<metainfo>
<schemaVersion>2.0</schemaVersion>
<application>
<name>LLAP</name>
<comment>LLAP is a daemon service that works with a cache and works on SQL constructs.</comment>
<version>%(version)s</version>
<exportedConfigs>None</exportedConfigs>
<exportGroups>
<exportGroup>
<name>Servers</name>
<exports>
<export>
<name>instances</name>
<value>${LLAP_HOST}:${site.global.listen_port}</value>
</export>
</exports>
</exportGroup>
</exportGroups>
<components>
<component>
<name>LLAP</name>
<category>MASTER</category>
<compExports>Servers-instances</compExports>
<commandScript>
<script>scripts/llap.py</script>
<scriptType>PYTHON</scriptType>
</commandScript>
</component>
</components>
<osSpecifics>
<osSpecific>
<osType>any</osType>
<packages>
<package>
<type>tarball</type>
<name>files/llap-%(version)s.tar.gz</name>
</package>
</packages>
</osSpecific>
</osSpecifics>
</application>
</metainfo>
"""
appConfig = """
{
"schema": "http://example.org/specification/v2.0.0",
"metadata": {
},
"global": {
"application.def": ".slider/package/LLAP/llap-%(version)s.zip",
"java_home": "%(java_home)s",
"site.global.app_user": "yarn",
"site.global.app_root": "${AGENT_WORK_ROOT}/app/install/",
"site.global.app_tmp_dir": "${AGENT_WORK_ROOT}/tmp/",
"site.global.app_logger": "%(daemon_logger)s",
"site.global.app_log_level": "%(daemon_loglevel)s",
"site.global.additional_cp": "%(hadoop_home)s",
"site.global.daemon_args": "%(daemon_args)s",
"site.global.library_path": "%(hadoop_home)s/lib/native",
"site.global.memory_val": "%(heap)d",
"site.global.pid_file": "${AGENT_WORK_ROOT}/app/run/llap-daemon.pid",
"internal.chaos.monkey.probability.amlaunchfailure": "0",
"internal.chaos.monkey.probability.containerfailure": "%(monkey_percentage)d",
"internal.chaos.monkey.interval.seconds": "%(monkey_interval)d",
"internal.chaos.monkey.enabled": "%(monkey_enabled)s"%(slider_appconfig_global_append)s
},
"components": {
"slider-appmaster": {
"jvm.heapsize": "%(slider_am_jvm_heapsize)dM",
"slider.hdfs.keytab.dir": "%(slider_keytab_dir)s",
"slider.am.login.keytab.name": "%(slider_keytab)s",
"slider.keytab.principal.name": "%(slider_principal)s"
}
}
}
"""
resources = """
{
"schema" : "http://example.org/specification/v2.0.0",
"metadata" : {
},
"global" : {
"yarn.log.include.patterns": ".*\\\\.done"
},
"components": {
"slider-appmaster": {
"yarn.memory": "%(slider.am.container.mb)d",
"yarn.component.instances": "1"
},
"LLAP": {
"yarn.role.priority": "1",
"yarn.component.instances": "%(instances)d",
"yarn.resource.normalization.enabled": "false",
"yarn.memory": "%(container.mb)d",
"yarn.component.placement.policy" : "%(placement)d"
}
}
}
"""
# placement policy "4" is a bit-mask
# only bit set is Slider PlacementPolicy.ANTI_AFFINITY_REQUIRED(4)
runner = """
#!/bin/bash -e
BASEDIR=$(dirname $0)
slider stop %(name)s --wait 10 || slider stop %(name)s --force --wait 30
slider destroy %(name)s --force || slider destroy %(name)s
slider install-package --name LLAP --package $BASEDIR/llap-%(version)s.zip --replacepkg
slider create %(name)s --resources $BASEDIR/resources.json --template $BASEDIR/appConfig.json %(queue.string)s
"""
| 32.639706 | 110 | 0.64519 | metainfo = """<?xml version="1.0"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<metainfo>
<schemaVersion>2.0</schemaVersion>
<application>
<name>LLAP</name>
<comment>LLAP is a daemon service that works with a cache and works on SQL constructs.</comment>
<version>%(version)s</version>
<exportedConfigs>None</exportedConfigs>
<exportGroups>
<exportGroup>
<name>Servers</name>
<exports>
<export>
<name>instances</name>
<value>${LLAP_HOST}:${site.global.listen_port}</value>
</export>
</exports>
</exportGroup>
</exportGroups>
<components>
<component>
<name>LLAP</name>
<category>MASTER</category>
<compExports>Servers-instances</compExports>
<commandScript>
<script>scripts/llap.py</script>
<scriptType>PYTHON</scriptType>
</commandScript>
</component>
</components>
<osSpecifics>
<osSpecific>
<osType>any</osType>
<packages>
<package>
<type>tarball</type>
<name>files/llap-%(version)s.tar.gz</name>
</package>
</packages>
</osSpecific>
</osSpecifics>
</application>
</metainfo>
"""
appConfig = """
{
"schema": "http://example.org/specification/v2.0.0",
"metadata": {
},
"global": {
"application.def": ".slider/package/LLAP/llap-%(version)s.zip",
"java_home": "%(java_home)s",
"site.global.app_user": "yarn",
"site.global.app_root": "${AGENT_WORK_ROOT}/app/install/",
"site.global.app_tmp_dir": "${AGENT_WORK_ROOT}/tmp/",
"site.global.app_logger": "%(daemon_logger)s",
"site.global.app_log_level": "%(daemon_loglevel)s",
"site.global.additional_cp": "%(hadoop_home)s",
"site.global.daemon_args": "%(daemon_args)s",
"site.global.library_path": "%(hadoop_home)s/lib/native",
"site.global.memory_val": "%(heap)d",
"site.global.pid_file": "${AGENT_WORK_ROOT}/app/run/llap-daemon.pid",
"internal.chaos.monkey.probability.amlaunchfailure": "0",
"internal.chaos.monkey.probability.containerfailure": "%(monkey_percentage)d",
"internal.chaos.monkey.interval.seconds": "%(monkey_interval)d",
"internal.chaos.monkey.enabled": "%(monkey_enabled)s"%(slider_appconfig_global_append)s
},
"components": {
"slider-appmaster": {
"jvm.heapsize": "%(slider_am_jvm_heapsize)dM",
"slider.hdfs.keytab.dir": "%(slider_keytab_dir)s",
"slider.am.login.keytab.name": "%(slider_keytab)s",
"slider.keytab.principal.name": "%(slider_principal)s"
}
}
}
"""
resources = """
{
"schema" : "http://example.org/specification/v2.0.0",
"metadata" : {
},
"global" : {
"yarn.log.include.patterns": ".*\\\\.done"
},
"components": {
"slider-appmaster": {
"yarn.memory": "%(slider.am.container.mb)d",
"yarn.component.instances": "1"
},
"LLAP": {
"yarn.role.priority": "1",
"yarn.component.instances": "%(instances)d",
"yarn.resource.normalization.enabled": "false",
"yarn.memory": "%(container.mb)d",
"yarn.component.placement.policy" : "%(placement)d"
}
}
}
"""
# placement policy "4" is a bit-mask
# only bit set is Slider PlacementPolicy.ANTI_AFFINITY_REQUIRED(4)
runner = """
#!/bin/bash -e
BASEDIR=$(dirname $0)
slider stop %(name)s --wait 10 || slider stop %(name)s --force --wait 30
slider destroy %(name)s --force || slider destroy %(name)s
slider install-package --name LLAP --package $BASEDIR/llap-%(version)s.zip --replacepkg
slider create %(name)s --resources $BASEDIR/resources.json --template $BASEDIR/appConfig.json %(queue.string)s
"""
| 0 | 0 | 0 |
462527f37af964ae83884faef922b78773288e6e | 1,017 | py | Python | tests/test_operations.py | WildCard65/validoot | 7a5e15498205c6da8bd09735808e8b4921d664ce | [
"MIT"
] | null | null | null | tests/test_operations.py | WildCard65/validoot | 7a5e15498205c6da8bd09735808e8b4921d664ce | [
"MIT"
] | null | null | null | tests/test_operations.py | WildCard65/validoot | 7a5e15498205c6da8bd09735808e8b4921d664ce | [
"MIT"
] | 1 | 2020-04-27T18:06:55.000Z | 2020-04-27T18:06:55.000Z | import pytest
from validoot.operations import And, Or
from validoot.exceptions import ValidationError
| 20.755102 | 55 | 0.639135 | import pytest
from validoot.operations import And, Or
from validoot.exceptions import ValidationError
def tt(v):
    """Stub validator that accepts every value."""
    return True
def ff(v):
    """Stub validator that rejects every value."""
    return False
def test_And():
    """And(...) is truthy only when every clause accepts the value."""
    all_pass = And(tt, tt)
    one_fails = And(tt, ff)
    assert all_pass(1)
    assert not one_fails(1)
def test_And_bad_arg():
    """And rejects clauses that are not callable."""
    bad = object()
    with pytest.raises(TypeError):
        And(bad, tt)
def test_Or():
    """Or(...) is truthy when at least one clause accepts the value."""
    some_pass = Or(ff, tt)
    none_pass = Or(ff, ff)
    assert some_pass(1)
    assert not none_pass(1)
def test_Or_bad_arg():
    """Or rejects clauses that are not callable."""
    bad = object()
    with pytest.raises(TypeError):
        Or(bad, tt)
def test_And_And():
    """Chaining via _and appends the new clause to the tuple."""
    chained = And(ff, tt)._and(tt)
    assert chained.clauses == (ff, tt, tt)
def test_And_And_bad_arg():
    """_and rejects operands that are not callable."""
    combined = And(ff, tt)
    with pytest.raises(TypeError):
        combined._and(object()).clauses
def test_Or_Or():
    """Chaining via _or appends the new clause to the tuple."""
    chained = Or(ff, tt)._or(tt)
    assert chained.clauses == (ff, tt, tt)
def test_Or_Or_bad_arg():
    """_or rejects operands that are not callable."""
    combined = Or(ff, tt)
    with pytest.raises(TypeError):
        combined._or(object()).clauses
def test_And_Or():
    """An And expression extended with _or behaves like a disjunction."""
    truthy = And(ff, tt)._or(tt)
    falsy = And(ff, tt)._or(ff)
    assert truthy(1)
    assert not falsy(1)
def test_Or_And():
    """An Or expression extended with _and behaves like a conjunction."""
    truthy = Or(ff, tt)._and(tt)
    falsy = Or(ff, tt)._and(ff)
    assert truthy(1)
    assert not falsy(1)
| 638 | 0 | 276 |
23e1ebdd60289e4d82ff1c4d62c75ea340b51248 | 4,262 | py | Python | plotting_scripts/plot_cov.py | grahamgower/island_thrush_scripts | 494be57780155979374776e3729d6c11e34cdbfc | [
"MIT"
] | null | null | null | plotting_scripts/plot_cov.py | grahamgower/island_thrush_scripts | 494be57780155979374776e3729d6c11e34cdbfc | [
"MIT"
] | null | null | null | plotting_scripts/plot_cov.py | grahamgower/island_thrush_scripts | 494be57780155979374776e3729d6c11e34cdbfc | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import csv
import numpy as np
from sklearn import decomposition
import matplotlib
import matplotlib.cm
matplotlib.use("Agg") # don't try to use $DISPLAY
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.gridspec as gridspec
from circledist import circledist
import matplotlib_style
if __name__ == "__main__":
args = parse_args()
loc = parse_locations(args.csv_file)
indlist = parse_list(args.ind_file)
rmidx = [i for i, ind in enumerate(indlist) if ind not in loc]
for i in rmidx:
print(f"{indlist[i]} has no location")
lats = [loc[ind][0] for ind in indlist if ind in loc]
lons = [loc[ind][1] for ind in indlist if ind in loc]
ref_lat, ref_lon = 30, 120
dists = [
circledist(ref_lon, ref_lat, loc[ind][1], loc[ind][0])
for ind in indlist
if ind in loc
]
n_pcs = 6
C = np.loadtxt(args.cov_file)
pca = decomposition.PCA(n_components=n_pcs)
pc = pca.fit_transform(C)
pdf = PdfPages(args.out_file)
fig_w, fig_h = plt.figaspect(9.0 / 16.0)
cmap = matplotlib.cm.get_cmap("plasma")
distnorm = matplotlib.colors.Normalize(vmin=np.min(dists), vmax=np.max(dists))
for pc_i in range(n_pcs - 1):
fig1 = plt.figure(figsize=(fig_w, fig_h))
gs1 = gridspec.GridSpec(1, 1)
ax1 = fig1.add_subplot(gs1[0])
x = np.delete(pc[:, pc_i], rmidx)
y = np.delete(pc[:, pc_i + 1], rmidx)
ax1.scatter(
x,
y,
s=50,
marker="o",
alpha=1,
lw=1,
# edgecolor=cmap(latnorm(lats)),
# edgecolor=cmap(lonnorm(lons)),
facecolor=cmap(distnorm(dists)),
# facecolor="none",
)
for i in rmidx:
ax1.scatter(pc[i, pc_i], pc[i, pc_i + 1], s=50, marker="x", c="black")
ax1.set_xlabel(f"PC{pc_i+1}")
ax1.set_ylabel(f"PC{pc_i+2}")
cb = fig1.colorbar(matplotlib.cm.ScalarMappable(norm=distnorm, cmap=cmap))
cb.ax.get_yaxis().labelpad = 15
cb.ax.set_ylabel("Distance from 120$^\circ$E, 30$^\circ$N", rotation=270)
fig1.tight_layout()
pdf.savefig(figure=fig1)
fig1 = plt.figure(figsize=(fig_w, fig_h))
gs1 = gridspec.GridSpec(1, 1)
ax1 = fig1.add_subplot(gs1[0])
ax1.bar(list(range(1, n_pcs + 1)), pca.explained_variance_)
ax1.set_xlabel("Principal component")
ax1.set_ylabel("Percentage variance explained")
ax1.set_title(
"Scree plot (total variance explained: {:.2f}\%)".format(
np.sum(pca.explained_variance_)
)
)
fig1.tight_layout()
pdf.savefig(figure=fig1)
pdf.close()
| 28.413333 | 87 | 0.587518 | #!/usr/bin/env python3
import csv
import numpy as np
from sklearn import decomposition
import matplotlib
import matplotlib.cm
matplotlib.use("Agg") # don't try to use $DISPLAY
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.gridspec as gridspec
from circledist import circledist
import matplotlib_style
def parse_locations(fn):
loc = {}
with open(fn) as f:
reader = csv.DictReader(f)
for row in reader:
# taxid = row["Species"]
indid = row["Museum Number"]
try:
lat = float(row["Lat"])
except ValueError:
lat = None
try:
lon = float(row["Long"])
except ValueError:
lon = None
# Adjust the lat/lon to avoid negative numbers and give sensible
# distances between individuals.
if lat is not None:
lat = lat + 180
if lon is not None:
lon = lon if lon > 0 else 360 + lon
loc[indid] = (lat, lon)
return loc
def parse_list(fn):
    """Read *fn* and return its lines with trailing whitespace stripped."""
    with open(fn) as handle:
        return [line.rstrip() for line in handle]
def parse_args():
    """Build and evaluate the command-line interface for this script."""
    import argparse

    parser = argparse.ArgumentParser(description="Plot PCA from covariance file.")
    # (dest, metavar, help) for each positional argument, in order.
    positionals = [
        ("cov_file", "file.cov", "Covariance matrix, as output by PCAngsd."),
        ("ind_file", "ind.txt",
         "Order of individuals in the cov file, with IDs matching the csv."),
        ("csv_file", "file.csv", "Metadata, with IDs and Lat/Lon columns."),
        ("out_file", "out.pdf", "output plot"),
    ]
    for dest, meta, text in positionals:
        parser.add_argument(dest, metavar=meta, help=text)
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
    loc = parse_locations(args.csv_file)
    indlist = parse_list(args.ind_file)
    # Individuals without coordinates: reported here and later drawn as
    # black crosses instead of coloured dots.
    rmidx = [i for i, ind in enumerate(indlist) if ind not in loc]
    for i in rmidx:
        print(f"{indlist[i]} has no location")
    # lats/lons are currently unused except by the commented-out edgecolor
    # options inside the scatter call below.
    lats = [loc[ind][0] for ind in indlist if ind in loc]
    lons = [loc[ind][1] for ind in indlist if ind in loc]
    # NOTE(review): loc stores shifted coordinates (lat+180, lon in [0,360));
    # the "120E / 30N" colourbar label presumably refers to this shifted
    # frame — confirm against circledist's expected inputs.
    ref_lat, ref_lon = 30, 120
    dists = [
        circledist(ref_lon, ref_lat, loc[ind][1], loc[ind][0])
        for ind in indlist
        if ind in loc
    ]
    # PCA on the covariance matrix; keep the first six components.
    n_pcs = 6
    C = np.loadtxt(args.cov_file)
    pca = decomposition.PCA(n_components=n_pcs)
    pc = pca.fit_transform(C)
    pdf = PdfPages(args.out_file)
    fig_w, fig_h = plt.figaspect(9.0 / 16.0)
    # Points are coloured by great-circle distance from the reference point.
    cmap = matplotlib.cm.get_cmap("plasma")
    distnorm = matplotlib.colors.Normalize(vmin=np.min(dists), vmax=np.max(dists))
    # One scatter page per consecutive pair of PCs (PC1/PC2, PC2/PC3, ...).
    for pc_i in range(n_pcs - 1):
        fig1 = plt.figure(figsize=(fig_w, fig_h))
        gs1 = gridspec.GridSpec(1, 1)
        ax1 = fig1.add_subplot(gs1[0])
        # Drop individuals with no location from the coloured scatter.
        x = np.delete(pc[:, pc_i], rmidx)
        y = np.delete(pc[:, pc_i + 1], rmidx)
        ax1.scatter(
            x,
            y,
            s=50,
            marker="o",
            alpha=1,
            lw=1,
            # edgecolor=cmap(latnorm(lats)),
            # edgecolor=cmap(lonnorm(lons)),
            facecolor=cmap(distnorm(dists)),
            # facecolor="none",
        )
        # Location-less individuals are overplotted as black crosses.
        for i in rmidx:
            ax1.scatter(pc[i, pc_i], pc[i, pc_i + 1], s=50, marker="x", c="black")
        ax1.set_xlabel(f"PC{pc_i+1}")
        ax1.set_ylabel(f"PC{pc_i+2}")
        cb = fig1.colorbar(matplotlib.cm.ScalarMappable(norm=distnorm, cmap=cmap))
        cb.ax.get_yaxis().labelpad = 15
        cb.ax.set_ylabel("Distance from 120$^\circ$E, 30$^\circ$N", rotation=270)
        fig1.tight_layout()
        pdf.savefig(figure=fig1)
    # Final page: scree plot of the explained variance per component.
    fig1 = plt.figure(figsize=(fig_w, fig_h))
    gs1 = gridspec.GridSpec(1, 1)
    ax1 = fig1.add_subplot(gs1[0])
    ax1.bar(list(range(1, n_pcs + 1)), pca.explained_variance_)
    ax1.set_xlabel("Principal component")
    ax1.set_ylabel("Percentage variance explained")
    ax1.set_title(
        "Scree plot (total variance explained: {:.2f}\%)".format(
            np.sum(pca.explained_variance_)
        )
    )
    fig1.tight_layout()
    pdf.savefig(figure=fig1)
    pdf.close()
| 1,441 | 0 | 69 |
9de98025d2b5a2eb00b2e35ba0719f41819be4e8 | 2,900 | py | Python | HarmoAIServer/datastruc/config.py | Vector-7/HarmoAIComposorModule | 51ad553cf3f730e6038bf75d4f1ccf9a1ebd6264 | [
"Apache-2.0"
] | null | null | null | HarmoAIServer/datastruc/config.py | Vector-7/HarmoAIComposorModule | 51ad553cf3f730e6038bf75d4f1ccf9a1ebd6264 | [
"Apache-2.0"
] | null | null | null | HarmoAIServer/datastruc/config.py | Vector-7/HarmoAIComposorModule | 51ad553cf3f730e6038bf75d4f1ccf9a1ebd6264 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright SweetCase Project, Re_Coma(Ha Jeong Hyun). All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
import os.path
import json
# Config Name Labels
"""
서버 설정 파일로 필요시
추가 가능
"""
class ConfigNameLabels(Enum):
"""
서버를 설정하기 위한 Config.json의 key들
HOST: Redis 서버의 주소
PORT: Redis Server를 접속하기 위한 포트
PSWD: Redis Server를 접속하기 위한 패스워드
MAX_QUEUE_SIZE: 각 시스템의 큐의 최대 크기를 설정
MAX_NOTE_SIZE: AI 시스템이 최대로 작곡할 수 잇는 노트의 갯수(TODO 연구 필요)
SERIAL: 연결관련 확인용 인증코드
MODELS: AI 모델이 저장되어 있는 위치들(장르에 따라 다름)
TMP_DIR = AI 작곡을 마치고 클라이언트로 보내기 전에 임시로 저장되는 파일의 위치
"""
HOST = "host"
PORT = "port"
PSWD = "pswd"
MAX_QUEUE_SIZE = "max_queue_size"
MAX_NOTE_SIZE = "max_note_size"
USE_GPU_VALUE = "use_gpu_value"
SERIAL = "serial"
MODELS = "models"
TMP_DIR = "tmp_dir"
| 34.52381 | 86 | 0.631379 | # -*- coding: utf-8 -*-
# Copyright SweetCase Project, Re_Coma(Ha Jeong Hyun). All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
import os.path
import json
# Config Name Labels
"""
서버 설정 파일로 필요시
추가 가능
"""
class ConfigNameLabels(Enum):
    """Keys of the server's config.json file.

    HOST: address of the Redis server
    PORT: port used to connect to the Redis server
    PSWD: password used to connect to the Redis server
    MAX_QUEUE_SIZE: maximum queue size of each subsystem
    MAX_NOTE_SIZE: maximum number of notes the AI can compose
        (TODO: needs further research)
    USE_GPU_VALUE: GPU-usage setting — not described in the original
        documentation; presumably a flag/threshold controlling GPU use,
        confirm against the server code
    SERIAL: authentication code used to verify connections
    MODELS: locations of the stored AI models (differ by genre)
    TMP_DIR: directory where a finished composition is stored temporarily
        before being sent to the client
    """
    HOST = "host"
    PORT = "port"
    PSWD = "pswd"
    MAX_QUEUE_SIZE = "max_queue_size"
    MAX_NOTE_SIZE = "max_note_size"
    USE_GPU_VALUE = "use_gpu_value"
    SERIAL = "serial"
    MODELS = "models"
    TMP_DIR = "tmp_dir"
class Config:
    """Server configuration loaded from a JSON file.

    Reads every key declared in ConfigNameLabels and exposes it as an
    attribute.  Model locations are exposed through ``DLMap``, a dict
    mapping a 1-based index to a model path (following the insertion
    order of the "models" object in the JSON file).

    Raises:
        TypeError: if *configRoot* is not a string.
        FileNotFoundError: if *configRoot* does not name an existing file.
        KeyError / json.JSONDecodeError: if the file is malformed or a
            required key is missing (propagated unchanged; the original
            ``except Exception as e: raise e`` wrapper was a no-op).
    """

    def __init__(self, configRoot):
        # Validate the argument type and that the file exists.
        if not isinstance(configRoot, str):
            raise TypeError("Config Root must be string")
        if not os.path.isfile(configRoot):
            raise FileNotFoundError("config file is not exist")
        # Load the JSON settings; parse errors propagate to the caller.
        with open(configRoot, 'r') as configFile:
            jsonDataSet = json.load(configFile)
        self.host = jsonDataSet[ConfigNameLabels.HOST.value]
        self.port = jsonDataSet[ConfigNameLabels.PORT.value]
        self.pswd = jsonDataSet[ConfigNameLabels.PSWD.value]
        self.maxQueueSize = jsonDataSet[ConfigNameLabels.MAX_QUEUE_SIZE.value]
        self.maxNoteSize = jsonDataSet[ConfigNameLabels.MAX_NOTE_SIZE.value]
        self.useGpuValue = jsonDataSet[ConfigNameLabels.USE_GPU_VALUE.value]
        self.serial = jsonDataSet[ConfigNameLabels.SERIAL.value]
        self.tmpDir = jsonDataSet[ConfigNameLabels.TMP_DIR.value]
        # Assign 1-based indices to the model file roots (replaces the
        # original hand-rolled counter loop).
        models = jsonDataSet[ConfigNameLabels.MODELS.value]
        self.DLMap = {idx: models[key] for idx, key in enumerate(models, start=1)}
        # NOTE: the (misspelled) attribute name "fileRooot" is part of the
        # public interface and is kept for backward compatibility.
        self.fileRooot = configRoot
| 1,452 | -8 | 49 |
2bab6367564f55340a0966cedd212054c2d4e201 | 221 | py | Python | enums.py | Tomvictor/category_tree | 4f15f1b38677d364c6e67029172e2b6dcd32f988 | [
"MIT"
] | null | null | null | enums.py | Tomvictor/category_tree | 4f15f1b38677d364c6e67029172e2b6dcd32f988 | [
"MIT"
] | null | null | null | enums.py | Tomvictor/category_tree | 4f15f1b38677d364c6e67029172e2b6dcd32f988 | [
"MIT"
] | null | null | null | import enum
| 13 | 32 | 0.59276 | import enum
class CategoryEnum(enum.Enum):
GROUP = 0
USER = 1
OFFICE = 2
TASK = 3
SUB_TASK = 4
FIXTURE = 5
PROJECT = 6
class TimeFactorEnum(enum.Enum):
EFFECTIVE = 0
CUMULATIVE = 1
| 0 | 161 | 46 |
73111ae41f7f2d2e5947cf711b523b93998217b9 | 33,176 | py | Python | onmyoji_win.py | AlanRuijia/onmyoji | 74f6de5f41a7800b728f23b7a67fd6a2d9eb2cc7 | [
"MIT"
] | null | null | null | onmyoji_win.py | AlanRuijia/onmyoji | 74f6de5f41a7800b728f23b7a67fd6a2d9eb2cc7 | [
"MIT"
] | null | null | null | onmyoji_win.py | AlanRuijia/onmyoji | 74f6de5f41a7800b728f23b7a67fd6a2d9eb2cc7 | [
"MIT"
] | null | null | null | # -*-coding:utf-8-*-
import time
import datetime
import os
import random
import shelve
import threading
from queue import Queue
import win32api, win32gui, win32con, win32com.client
from ctypes import *
from PIL import ImageGrab, Image as PLI_Image, ImageTk
from tkinter import *
from tkinter import ttk
import tkinter.messagebox as messagebox
from tkinter.scrolledtext import ScrolledText
# Build the GUI, then enter the Tk main loop.
app = Application()
# Hide the console window — skipped when the script is launched with the
# literal argument "test" (e.g. "python onmyoji_win.py test").
try:
    test = sys.argv[1]
except IndexError:
    test = False
if test == 'test':
    pass
else:
    # Locate this process's console window and hide it.
    whnd = windll.kernel32.GetConsoleWindow()
    if whnd:
        windll.user32.ShowWindow(whnd, 0)
        windll.kernel32.CloseHandle(whnd)
app.master.title('就你破势多')  # user-facing window title, kept verbatim
app.init_window_place(app.master, 1.1, 4)
app.mainloop()
| 40.458537 | 111 | 0.561219 | # -*-coding:utf-8-*-
import time
import datetime
import os
import random
import shelve
import threading
from queue import Queue
import win32api, win32gui, win32con, win32com.client
from ctypes import *
from PIL import ImageGrab, Image as PLI_Image, ImageTk
from tkinter import *
from tkinter import ttk
import tkinter.messagebox as messagebox
from tkinter.scrolledtext import ScrolledText
class GameController:
    """Automates Onmyoji battles by sampling the game window and sending
    mouse events.

    All screen regions are precomputed in __init__ as (left, top, right,
    bottom) tuples; "sampling" rects are grabbed with ImageGrab and
    compared against stored perceptual-hash constants to recognise game
    states.  The ``queue`` passed to the phase methods delivers start/stop
    commands from the GUI thread (1 = keep running, 0 = stop).
    """

    def __init__(self, hwnd, scaling):
        # Get the game window rectangle.
        self.hwnd = hwnd
        self.left, self.top, self.right, self.bottom = win32gui.GetWindowRect(self.hwnd)
        self.client_rect = win32gui.GetClientRect(self.hwnd)
        self.width = self.client_rect[2]
        self.height = self.client_rect[3]
        # Get the game picture (client area) in screen coordinates.
        self.left, self.top = win32gui.ClientToScreen(self.hwnd, (0, 0))
        self.right, self.bottom = win32gui.ClientToScreen(self.hwnd, (self.width, self.height))
        # Window coordinates after applying the display scaling factor.
        self.ltrb = list(map(lambda x: x * scaling, [self.left, self.top, self.right, self.bottom]))
        self.scaling_width = self.width * scaling
        self.scaling_height = self.height * scaling
        # "Challenge" button coordinates.
        self.chllg_btn = (round(self.left + self.width * 0.695),
                          round(self.top + self.height * 0.67),
                          round(self.left + self.width * 0.785),
                          round(self.top + self.height * 0.73))
        # "Start battle" button coordinates.
        self.fght_btn = (round(self.left + self.width * 0.75),
                         round(self.top + self.height * 0.82),
                         round(self.left + self.width * 0.87),
                         round(self.top + self.height * 0.88))
        # Sampling rect of the exit-battle button.
        self.exit_btn = (round(self.ltrb[0] + self.scaling_width * 0.014),
                         round(self.ltrb[1] + self.scaling_height * 0.0245),
                         round(self.ltrb[0] + self.scaling_width * 0.0415),
                         round(self.ltrb[1] + self.scaling_height * 0.074))
        # Hash of the exit-battle button.
        self.exit_btn_hash = '1ff83ffc3ffe3ffe007e001f001f019f079e1ffe7fff7ffe1ff8078001800000'
        # Sampling rect of the settlement (reward) detection area.
        self.settle_area = (round(self.ltrb[0] + self.scaling_width * 0.42),
                            round(self.ltrb[1] + self.scaling_height * 0.82),
                            round(self.ltrb[0] + self.scaling_width * 0.58),
                            round(self.ltrb[1] + self.scaling_height * 0.86))
        # Hash of the settlement detection area.
        self.settle_area_hash = '4f3f672f600fa01fb03ff03ff07df874f171d170c170c970c320c020c000c000'
        # Sampling rect used to recognise the solo-run screen.
        self.single_intf = (round(self.ltrb[0] + self.scaling_width * 0.45),
                            round(self.ltrb[1] + self.scaling_height * 0.1),
                            round(self.ltrb[0] + self.scaling_width * 0.58),
                            round(self.ltrb[1] + self.scaling_height * 0.18))
        self.single_hash = '000000000000000000186e1836387ebc7ebc7eb86ed897fc0000ffffffffffff'
        # Sampling rect used to recognise the team-up screen.
        self.form_team_intf = (round(self.ltrb[0] + self.scaling_width * 0.12),
                               round(self.ltrb[1] + self.scaling_height * 0.8),
                               round(self.ltrb[0] + self.scaling_width * 0.24),
                               round(self.ltrb[1] + self.scaling_height * 0.88))
        # Hash of the team-up screen.
        self.form_team_hash = '7ffeffffffffffffcd33cd33c823c923cd93c901e577ffffffff7ffe00000000'
        # Sampling rect of team slot 1.
        self.form_team1 = (round(self.ltrb[0] + self.scaling_width * 0.2),
                           round(self.ltrb[1] + self.scaling_height * 0.4),
                           round(self.ltrb[0] + self.scaling_width * 0.28),
                           round(self.ltrb[1] + self.scaling_height * 0.53))
        # Sampling rect of team slot 2.
        self.form_team2 = (round(self.ltrb[0] + self.scaling_width * 0.46),
                           round(self.ltrb[1] + self.scaling_height * 0.4),
                           round(self.ltrb[0] + self.scaling_width * 0.54),
                           round(self.ltrb[1] + self.scaling_height * 0.53))
        # Sampling rect of team slot 3.
        self.form_team3 = (round(self.ltrb[0] + self.scaling_width * 0.76),
                           round(self.ltrb[1] + self.scaling_height * 0.4),
                           round(self.ltrb[0] + self.scaling_width * 0.84),
                           round(self.ltrb[1] + self.scaling_height * 0.53))
        # Sampling rect of the "tap the screen to continue" caption.
        self.notice_area = (round(self.ltrb[2] * 0.40),
                            round(self.ltrb[3] * 0.90),
                            round(self.ltrb[2] * 0.60),
                            round(self.ltrb[3] * 0.97))
        # Blank area near the right edge clicked to advance the reward screen.
        self.blank_area = (round(self.left + self.width * 0.86),
                           round(self.top + self.height * 0.23),
                           round(self.left + self.width * 0.95),
                           round(self.top + self.height * 0.7))
        # Hash of an empty team slot.
        self.form_team_blank_hash = 'ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff'
        # Sampling rect of the bounty (wanted-quest) invitation screen.
        self.offer_intf = (round(self.ltrb[0] + self.scaling_width * 0.4),
                           round(self.ltrb[1] + self.scaling_height * 0.2),
                           round(self.ltrb[0] + self.scaling_width * 0.6),
                           round(self.ltrb[1] + self.scaling_height * 0.28))
        # Hash of the bounty invitation screen.
        self.offer_hash = 'ffffffffffff3fff35fde004200020000004040420100064247037f7ffffffff'
        # Bounty "accept" button coordinates.
        self.accept = (round(self.left + self.width * 0.66),
                       round(self.top + self.height * 0.6))
        # Bounty "decline" button coordinates.
        self.denied = (round(self.left + self.width * 0.66),
                       round(self.top + self.height * 0.74))
        # State flag: 1 = running, 0 = stopped (updated from the queue).
        self._running = 1

    @staticmethod
    def click_left_cur(counts=1):
        """Left-click at the current cursor position *counts* times,
        with a short fixed delay plus random jitter between clicks."""
        for o in range(counts):
            win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP | win32con.MOUSEEVENTF_LEFTDOWN, 0, 0, 0, 0)
            time.sleep(0.1)
            time.sleep(0.1*random.random())

    @staticmethod
    def click_right_cur():
        """Right-click once at the current cursor position."""
        win32api.mouse_event(win32con.MOUSEEVENTF_RIGHTUP | win32con.MOUSEEVENTF_RIGHTDOWN, 0, 0, 0, 0)

    @staticmethod
    def move_curpos(x, y):
        """Move the cursor to (x, y), sleeping a random interval before
        and after to look less machine-like."""
        time.sleep(0.5*random.random())
        windll.user32.SetCursorPos(x, y)
        time.sleep(0.5*random.random())

    @staticmethod
    def get_curpos():
        """Return the current cursor position as (x, y)."""
        return win32gui.GetCursorPos()

    @staticmethod
    def get_hash(img):
        """Return a 64-hex-digit average-hash of *img* (16x16 grayscale)."""
        img = img.resize((16, 16), PLI_Image.ANTIALIAS).convert('L')
        avg = sum(list(img.getdata())) / 256  # average pixel value
        # Compare each pixel against the average: '1' if >= avg, else '0'.
        s = ''.join(map(lambda x: '0' if x < avg else '1', img.getdata()))
        return ''.join(map(lambda j: '%x' % int(s[j:j + 4], 2), range(0, 256, 4)))

    @staticmethod
    def hamming(hash1, hash2, n=20):
        """Return True when the two equal-length hash strings differ in at
        most *n* character positions."""
        result = False
        assert len(hash1) == len(hash2)
        # print(sum(ch1 != ch2 for ch1, ch2 in zip(hash1, hash2)))
        if sum(ch1 != ch2 for ch1, ch2 in zip(hash1, hash2)) <= n:
            result = True
        return result

    def form_team_phase(self, mode, fight_num, queue):
        """Handle the pre-battle phase for the given *mode*:
        '单刷' (solo) clicks Challenge; '司机' (driver) waits for the team-up
        screen, waits until *fight_num* slots are filled, then clicks
        Start battle; '乘客' (passenger) does nothing."""
        if mode == '单刷':
            # Move to the Challenge button and click, adding a random
            # offset within the button rect on every move.
            xrandom = int(random.uniform(0, self.chllg_btn[2] - self.chllg_btn[0]))
            yrandom = int(random.uniform(0, self.chllg_btn[3] - self.chllg_btn[1]))
            self.move_curpos(self.chllg_btn[0] + xrandom, self.chllg_btn[1] + yrandom)
            time.sleep(0.5)
            self.click_left_cur()
            return
        elif mode == '司机':
            # Wait until the team-up screen is detected.
            while True:
                if not queue.empty():
                    self._running = queue.get()
                if self._running == 1:
                    catch_img = ImageGrab.grab(self.form_team_intf)
                    if self.hamming(self.get_hash(catch_img), self.form_team_hash, 30):
                        break
                    time.sleep(0.5)
                    time.sleep(0.3*random.random())
                elif self._running == 0:
                    return
            # Count filled team slots; click Start only when the member
            # count matches the expected fight_num.
            while True:
                if not queue.empty():
                    self._running = queue.get()
                if self._running == 1:
                    num = 0
                    for i in [self.form_team1, self.form_team2, self.form_team3]:
                        catch_img = ImageGrab.grab(i)
                        # self.get_hash(catch_img)
                        if not self.hamming(self.get_hash(catch_img), self.form_team_blank_hash, 10):
                            num = num + 1
                    if num == fight_num:
                        break
                    time.sleep(0.5)
                    time.sleep(0.3*random.random())
                elif self._running == 0:
                    return
            # Move to the Start battle button (random offset) and click.
            xrandom = int(random.uniform(0, self.fght_btn[2] - self.fght_btn[0]))
            yrandom = int(random.uniform(0, self.fght_btn[3] - self.fght_btn[1]))
            self.move_curpos(self.fght_btn[0] + xrandom, self.fght_btn[1] + yrandom)
            time.sleep(0.5)
            time.sleep(0.3*random.random())
            self.click_left_cur()
        elif mode == '乘客':
            return

    def wait_fight_finish_phase(self, clear_time, queue):
        """Sleep through *clear_time* seconds of expected battle, then poll
        until the exit-battle button disappears (battle over)."""
        t = 0
        while t < clear_time:
            if not queue.empty():
                self._running = queue.get()
            if self._running == 1:
                time.sleep(1)
                t = t + 1
                # print(t)
            elif self._running == 0:
                break
        while True:
            if not queue.empty():
                self._running = queue.get()
            if self._running == 1:
                catch_img = ImageGrab.grab(self.exit_btn)
                # catch_img.save('fight.jpg', 'jpeg')
                # When the exit-battle button disappears, the fight is over.
                if self.hamming(self.get_hash(catch_img), self.exit_btn_hash, 30):
                    pass
                else:
                    break
            elif self._running == 0:
                return
            time.sleep(0.5)

    def settle_phase(self, queue):
        """Click through the post-battle reward (settlement) screen until
        the reward daruma appears and then disappears again."""
        for xx in range(0, 10):
            if not queue.empty():
                self._running = queue.get()
            if self._running == 1:
                # When the camera pan ends and the reward daruma shows up,
                # we are on the settlement screen.
                catch_img = ImageGrab.grab(self.settle_area)
                # catch_img.save('%s.jpg' % xx, 'jpeg')
                if self.hamming(self.get_hash(catch_img), self.settle_area_hash, 20):
                    break
                else:
                    # Move to a random point along the right edge and click
                    # a random 2-3 times.
                    xrandom = int(random.uniform(0, self.blank_area[2] - self.blank_area[0]))
                    yrandom = int(random.uniform(0, self.blank_area[3] - self.blank_area[1]))
                    self.move_curpos(self.blank_area[0] + xrandom, self.blank_area[1] + yrandom)
                    self.click_left_cur(int(random.uniform(2, 4)))
            elif self._running == 0:
                break
            time.sleep(0.5)
        for xx in range(0, 10):
            if not queue.empty():
                self._running = queue.get()
            if self._running == 1:
                catch_img = ImageGrab.grab(self.settle_area)
                # When the reward daruma disappears, settlement is done.
                if not self.hamming(self.get_hash(catch_img), self.settle_area_hash, 20):
                    break
                else:
                    # Keep clicking random points along the right edge
                    # until settlement finishes.
                    xrandom = int(random.uniform(0, self.blank_area[2] - self.blank_area[0]))
                    yrandom = int(random.uniform(0, self.blank_area[3] - self.blank_area[1]))
                    self.move_curpos(self.blank_area[0] + xrandom, self.blank_area[1] + yrandom)
                    time.sleep(random.random())
                    self.click_left_cur(int(random.uniform(2, 4)))
            elif self._running == 0:
                break
            time.sleep(0.5)
        time.sleep(0.3*random.random())

    def check_offer(self, offer_mode, queue):
        """Watch for the bounty invitation popup and answer it according to
        *offer_mode* ("接受" = accept, "拒绝" = decline)."""
        while True:
            if not queue.empty():
                self._running = queue.get()
            if self._running == 1:
                catch_img = ImageGrab.grab(self.offer_intf)
                if self.hamming(self.get_hash(catch_img), self.offer_hash, 30):
                    if offer_mode == "接受":
                        self.move_curpos(self.accept[0], self.accept[1])
                    elif offer_mode == "拒绝":
                        self.move_curpos(self.denied[0], self.denied[1])
                    self.click_left_cur()
                    time.sleep(1.3)
            elif self._running == 0:
                return
class Application(Frame):
def __init__(self, master=None):
self.warning = '【封号防止】\n' + \
'请尽量在自己的日常刷魂时间使用\n' + \
'请不要长时间连续使用,任何使你看起来明显违背人类正常作息规律的行为,很容易会被鬼使黑盯上\n' + \
'当你离开了常在城市,请不要使用,这会被认为是找了代练\n' + \
'点到为止,贪婪是万恶之源\n'
self.label = r'阴阳师-网易游戏'
self.hwnd = None
self.shell = None
if not self.info_get():
self.scaling = 1
self.clear_time = 35
self.fight = None
self.timing_value = None
# 控件初始化
Frame.__init__(self, master)
self.pack()
self.frame1 = Frame(self)
self.frame1.pack()
self.frame2 = Frame(self)
self.frame2.pack()
self.label_scaling = Label(self.frame1)
self.var_scaling = StringVar(self.frame1)
self.entry_scaling = Entry(self.frame1)
self.button_scaling_explain = Button(self.frame1)
self.label_mode = Label(self.frame1)
self.var_mode = StringVar(self.frame1)
self.listbox_mode = ttk.Combobox(self.frame1)
self.button_mode_explain = Button(self.frame1)
self.label_member = Label(self.frame1)
self.var_member = IntVar()
self.radio1 = Radiobutton(self.frame1)
self.radio2 = Radiobutton(self.frame1)
self.label_clear_time = Label(self.frame1)
self.var_clear_time = StringVar(self.frame1)
self.entry_clear_time = Entry(self.frame1)
self.button_clear_time_explain = Button(self.frame1)
self.label_offer = Label(self.frame1)
self.var_offer_mode = StringVar(self.frame1)
self.listbox_offer_mode = ttk.Combobox(self.frame1)
self.label_timing_mode = Label(self.frame1)
self.var_timing_mode = StringVar(self.frame1)
self.listbox_timing_mode = ttk.Combobox(self.frame1)
self.var_timing_value = StringVar(self.frame1)
self.entry_timing_value = Entry(self.frame1)
self.entry_test = Entry(self.frame1)
self.test_btn = Button(self.frame1)
self.start_ctn = Button(self.frame2)
self.stop_ctn = Button(self.frame2)
self.info_box = ScrolledText(self.frame2)
self.queue = Queue(maxsize=1)
self._running = 1
self.create_main()
@staticmethod
def check_hwnd(label):
# 获取游戏窗口句柄
hwnd = win32gui.FindWindow(None, label)
if hwnd:
return hwnd
else:
print('游戏没有运行')
return False
@staticmethod
def init_window_place(root, x, y):
screenwidth = root.winfo_screenwidth()
screenheight = root.winfo_screenheight()
root.resizable(False, False)
root.update_idletasks()
root.deiconify()
width = root.winfo_width()
height = root.winfo_height()
size = '%dx%d+%d+%d' % (width, height, (screenwidth - width) / x, (screenheight - height) / y)
root.geometry(size)
def jump_window(self):
# 跳转到游戏窗口
win32gui.SetForegroundWindow(self.hwnd)
win32gui.PostMessage(self.hwnd, win32con.WM_SYSCOMMAND, win32con.SC_RESTORE, 0)
def get_scaling(self):
var = self.entry_scaling.get()
try:
var = float(var)
except ValueError:
messagebox.showinfo(title='提示', message='缩放倍率只能为数字')
return False
if var > 2:
messagebox.showinfo(title='提示', message='缩放倍率过高')
return False
return var
def get_clear_time(self):
var = self.var_clear_time.get()
try:
var = float(var)
except ValueError:
messagebox.showinfo(title='提示', message='平均通关时间只能为数字')
return False
if var <= 5:
messagebox.showinfo(title='提示', message='平均通关时间不能小于5')
return False
return var
def get_timimg(self):
if self.listbox_timing_mode.get() == '无':
return True
var = self.var_timing_value.get()
try:
var = float(var)
except ValueError:
messagebox.showinfo(title='提示', message='预定结束只能填入数字')
return False
if var < 1:
messagebox.showinfo(title='提示', message='数字过小,无法执行')
return False
return var
@staticmethod
def time_format(second):
try:
second = int(second)
except ValueError:
return second
if second > 60:
m, s = divmod(second, 60)
h, m = divmod(m, 60)
return ':'.join((str(h).zfill(2), str(m).zfill(2), str(s).zfill(2)))
else:
return second
def info_get(self):
try:
with shelve.open('mysetting.db') as data:
setting_data = data['setting']
self.scaling = setting_data['scaling']
self.clear_time = setting_data['clear_time']
except KeyError:
return False
return True
def info_save(self):
with shelve.open('mysetting.db') as data:
setting_data = dict()
setting_data['scaling'] = self.var_scaling.get()
setting_data['clear_time'] = self.var_clear_time.get()
data['setting'] = setting_data
def turn_radio_on(self, *args):
type(args)
var = self.listbox_mode.get()
if var == '司机':
self.radio1.configure(state='active')
self.radio2.configure(state='active')
else:
self.radio1.configure(state='disabled')
self.radio2.configure(state='disabled')
def turn_entry_on(self, *args):
type(args)
var = self.listbox_timing_mode.get()
if var == '定时[分钟]' or var == '场数':
self.entry_timing_value.configure(state='normal')
else:
self.entry_timing_value.configure(state='disabled')
def fight_start(self):
self.scaling = self.get_scaling()
if not self.scaling:
return False
self.clear_time = self.get_clear_time()
if not self.clear_time:
return False
self.timing_value = self.get_timimg()
if not self.timing_value:
return False
self.info_save()
# 获取游戏窗口句柄
self.hwnd = self.check_hwnd(self.label)
if not self.hwnd:
messagebox.showinfo(title='提示', message='游戏没有运行')
return False
self.shell = win32com.client.Dispatch("WScript.Shell")
# self.shell.SendKeys('%')
self.jump_window()
time.sleep(0.5)
self.fight = GameController(self.hwnd, self.scaling)
thread1 = threading.Thread(target=self.fight_thread, name='fight_thread')
thread2 = threading.Thread(target=self.offer_thread, name='offer_thread')
# 将线程状态、队列内容置为1
self._running = 1
if self.queue.empty():
self.queue.put(1)
else:
self.queue.get()
self.queue.put(1)
self.start_ctn.configure(state='disabled')
self.stop_ctn.configure(state='active')
thread1.start()
thread2.start()
def fight_thread(self):
self.jump_window()
if not self.queue.empty():
self.queue.get()
self.info_box.mark_set('insert', END)
self.info_box.insert('insert', str(self.warning) + '\n', 'RED')
self.info_box.tag_config('RED', foreground='red')
var = '[%s]挂机开始' % datetime.datetime.now().strftime("%H:%M:%S")
self.info_box.mark_set('insert', END)
self.info_box.insert('insert', str(var) + '\n')
self.info_box.see(END)
rounds = 0
total_time = 0
beginning_timg = time.clock()
while True:
if self._running == 1:
fight_start_time = time.clock()
self.fight.form_team_phase(self.listbox_mode.get(), self.var_member.get(), self.queue)
self.fight.wait_fight_finish_phase(self.clear_time, self.queue)
self.jump_window()
self.fight.settle_phase(self.queue)
if self._running == 1:
fight_end_time = time.clock()
fight_time = fight_end_time - fight_start_time
# time.sleep(0.5)
rounds = rounds + 1
total_time = total_time + fight_time
elapsed_time = fight_end_time - beginning_timg
var = '第 %s 场 耗时:%s 共计:%s' % \
(rounds, self.time_format(fight_time), self.time_format(elapsed_time))
self.info_box.mark_set('insert', END)
self.info_box.insert('insert', str(var) + '\n')
self.info_box.see(END)
# 检查是否到达预定结束场数或时间
if (self.listbox_timing_mode.get() == '场数' and rounds >= self.timing_value) or \
(self.listbox_timing_mode.get() == '定时[分钟]' and elapsed_time / 60 >= self.timing_value):
win32gui.PostMessage(self.hwnd, win32con.WM_CLOSE, 0, 0)
self.fight_stop()
var = '已到达预定目标,游戏窗口已关闭。下线15分钟后buff自动关闭'
self.info_box.mark_set('insert', END)
self.info_box.insert('insert', str(var) + '\n')
self.info_box.see(END)
time.sleep(random.uniform(1, 2))
elif self._running == 0:
return
def fight_stop(self):
# 将线程状态、队列内容置为0
self._running = 0
self.queue.put(0)
self.start_ctn.configure(state='active')
self.stop_ctn.configure(state='disabled')
var = '[%s]挂机结束。记得关御魂buff' % datetime.datetime.now().strftime("%H:%M:%S")
self.info_box.mark_set('insert', END)
self.info_box.insert('insert', str(var) + '\n')
self.info_box.see(END)
def offer_thread(self):
while True:
if self._running == 1:
self.fight.check_offer(self.listbox_offer_mode.get(), self.queue)
elif self._running == 0:
return
@staticmethod
def resource_path(relative_path):
""" Get absolute path to resource, works for dev and for PyInstaller """
base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))
return os.path.join(base_path, relative_path)
def what_is_scaling_window(self):
what_is_scaling = Toplevel(self)
what_is_scaling.title('缩放倍率 - 不能自动获取,技术就是这么菜,不服憋着_(:3」∠)_')
frame1 = Frame(what_is_scaling)
frame1.pack()
frame2 = Frame(what_is_scaling)
frame2.pack()
title = Label(frame1)
title['text'] = '\n【 缩放倍率 】'
title.pack()
desc1 = Message(frame1)
desc1['width'] = 600
desc1['text'] = '\n缩放倍率是指Windows系统在不改变分辨率的情况下,将窗口和图标放大以达到更加舒适的显示效果的功能\n' + \
'\n在某些分辨率下,Windows会自动设置一个超过100%的倍率。请确定自己系统当前的缩放倍率设置,并填入缩放倍率一栏中\n' + \
'\n不正确的缩放倍率设置,会导致坐标计算不准\n' + \
'\n若设置的缩放倍率是100%,则填入1,若是125%,则填1.25,依次类推\n'
desc1.pack()
label_win10 = Label(frame2)
label_win10['text'] = 'Windows 10'
label_win10.grid(row=0, column=0)
label_win7 = Label(frame2)
label_win7['text'] = 'Windows 7'
label_win7.grid(row=0, column=1)
ipath = self.resource_path('image/win10.png')
load = PLI_Image.open(ipath)
load = load.resize(tuple(map(lambda x: int(x * 0.5), load.size)))
render = ImageTk.PhotoImage(load)
img_win10 = Label(frame2, image=render)
img_win10.image = render
img_win10.grid(row=1, column=0)
ipath = self.resource_path('image/win7.png')
load = PLI_Image.open(ipath)
load = load.resize(tuple(map(lambda x: int(x * 0.5), load.size)))
render = ImageTk.PhotoImage(load)
img_win7 = Label(frame2, image=render)
img_win7.image = render
img_win7.grid(row=1, column=1)
self.init_window_place(what_is_scaling, 1.3, 3)
    def when_click_start_window(self):
        """Open a help pop-up describing the currently selected game mode.

        Reads the mode combobox ('单刷' solo / '乘客' passenger / '司机'
        driver) and shows the matching instructions plus screenshot(s) of
        the in-game screen the user should be on before pressing START.
        """
        when_click_start = Toplevel(self)
        when_click_start.title('模式说明')
        var = self.listbox_mode.get()
        if var == '单刷':
            # Solo mode: one explanatory screenshot at 70% size.
            title = Label(when_click_start)
            title['text'] = '\n【 单刷模式 】'
            title.pack()
            desc = Message(when_click_start)
            desc['text'] = '\n请把游戏调整至如图所示界面,再点START\n'
            desc['width'] = 300
            desc.pack()
            ipath = self.resource_path('image/single.png')
            load = PLI_Image.open(ipath)
            load = load.resize(tuple(map(lambda x: int(x * 0.7), load.size)))
            render = ImageTk.PhotoImage(load)
            img = Label(when_click_start, image=render)
            # Keep a reference so the PhotoImage survives garbage collection.
            img.image = render
            img.pack()
        elif var == '乘客':
            # Passenger mode: accept the driver's invite first.
            title = Label(when_click_start)
            title['text'] = '\n【 乘客模式 】'
            title.pack()
            desc = Message(when_click_start)
            desc['text'] = '\n建议接受了司机的默认邀请,再点START\n' + \
                           '因为我不会在战斗里帮你点开始...不服憋着\n_(:3」∠)_\n'
            desc['width'] = 300
            desc.pack()
            ipath = self.resource_path('image/passenger_accept.png')
            load = PLI_Image.open(ipath)
            load = load.resize(tuple(map(lambda x: int(x * 0.7), load.size)))
            render = ImageTk.PhotoImage(load)
            img = Label(when_click_start, image=render)
            img.image = render
            img.pack()
        elif var == '司机':
            # Driver mode: two screenshots (invite screen, team screen)
            # at 50% size.
            title = Label(when_click_start)
            title['text'] = '\n【 司机模式 】'
            title.pack()
            desc = Message(when_click_start)
            desc['text'] = '\n建议对乘客发出默认邀请,回到组队界面再点START\n' + \
                           '因为自动发出邀请这个功能没写...不服憋着\n_(:3」∠)_\n'
            desc['width'] = 300
            desc.pack()
            ipath = self.resource_path('image/driver_invite.png')
            load = PLI_Image.open(ipath)
            load = load.resize(tuple(map(lambda x: int(x * 0.5), load.size)))
            render = ImageTk.PhotoImage(load)
            img1 = Label(when_click_start, image=render)
            img1.image = render
            img1.pack()
            ipath = self.resource_path('image/driver_form.png')
            load = PLI_Image.open(ipath)
            load = load.resize(tuple(map(lambda x: int(x * 0.5), load.size)))
            render = ImageTk.PhotoImage(load)
            img2 = Label(when_click_start, image=render)
            img2.image = render
            img2.pack()
        self.init_window_place(when_click_start, 1.3, 3)
    def what_is_clear_time(self):
        """Open a help pop-up explaining the "average clear time" setting.

        The value tells the bot how many seconds to wait after starting a
        battle before it begins polling for the reward screen.
        """
        what_is_clear = Toplevel(self)
        what_is_clear.title('平均通关时间说明')
        title = Label(what_is_clear)
        title['text'] = '\n【 平均通关时间 】'
        title.pack()
        desc = Message(what_is_clear)
        desc['text'] = '\n平均通关时间是指在游戏中,从按下开始战斗到进入结算奖励界面所经过的时间(秒)\n' + \
                       '\n程序会在经过指定的时间后,再开始检测游戏画面是否进入了结算界面\n' + \
                       '\n如果设置一个较短的时间也可以,不过设置一个合理的时间,能节省你CPU资源\n(其实也没占多少_(:3」∠)_\n'
        desc['width'] = 300
        desc.pack()
        self.init_window_place(what_is_clear, 1.3, 3)
    def create_main(self):
        """Build and lay out the main window's widgets.

        Configures the settings grid (scaling ratio, mode, team size,
        clear time, offer handling, scheduled stop), the START/STOP
        buttons and the scrolling log box.  All widgets are assumed to
        have been created beforehand; this method only configures and
        grids them.
        """
        # Row 0: display-scaling ratio entry + "?" help button.
        self.label_scaling['text'] = '缩放倍率'
        self.var_scaling.set(self.scaling)
        self.entry_scaling['textvariable'] = self.var_scaling
        self.label_scaling.grid(row=0, column=0, sticky='E')
        self.entry_scaling.grid(row=0, column=1, sticky='W', columnspan=2)
        self.button_scaling_explain['text'] = '?'
        self.button_scaling_explain['command'] = self.what_is_scaling_window
        self.button_scaling_explain['relief'] = 'flat'
        self.button_scaling_explain.grid(row=0, column=2, sticky='E')
        # Row 1: mode selector (solo / passenger / driver) + help button.
        self.label_mode['text'] = '模式'
        self.var_mode.set('单刷')
        self.listbox_mode['textvariable'] = self.var_mode
        self.listbox_mode['width'] = 10
        self.listbox_mode['values'] = ["单刷", "乘客", "司机"]
        self.listbox_mode.bind("<<ComboboxSelected>>", self.turn_radio_on)
        self.label_mode.grid(row=1, column=0, sticky='E')
        self.listbox_mode.grid(row=1, column=1, sticky='W')
        self.button_mode_explain['text'] = '?'
        self.button_mode_explain['command'] = self.when_click_start_window
        self.button_mode_explain['relief'] = 'flat'
        self.button_mode_explain.grid(row=1, column=2, sticky='W')
        # Row 2: team-size radio buttons; disabled until a team mode is
        # chosen (see turn_radio_on).
        self.var_member.set(2)
        self.label_member['text'] = '车队人数'
        self.label_member.grid(row=2, column=0, sticky='E')
        self.radio1['text'] = '2人'
        self.radio1['variable'] = self.var_member
        self.radio1['value'] = 2
        # self.radio1['command'] = self.test_val3
        self.radio1.grid(row=2, column=1, sticky='W')
        self.radio1.configure(state='disabled')
        self.radio2['text'] = '3人'
        self.radio2['variable'] = self.var_member
        self.radio2['value'] = 3
        # self.radio2['command'] = self.test_val3
        self.radio2.grid(row=2, column=2, sticky='W')
        self.radio2.configure(state='disabled')
        # Row 3: average clear time entry + help button.
        self.label_clear_time['text'] = '平均通关时间'
        self.var_clear_time.set(self.clear_time)
        self.entry_clear_time['textvariable'] = self.var_clear_time
        self.label_clear_time.grid(row=3, column=0, sticky='E')
        self.entry_clear_time.grid(row=3, column=1, sticky='W', columnspan=2)
        self.button_clear_time_explain['text'] = '?'
        self.button_clear_time_explain['command'] = self.what_is_clear_time
        self.button_clear_time_explain['relief'] = 'flat'
        self.button_clear_time_explain.grid(row=3, column=2, sticky='E')
        # Row 4: how to respond to friends' reward-share offers.
        self.label_offer['text'] = '好友发来悬赏'
        self.var_offer_mode.set('接受')
        self.listbox_offer_mode['textvariable'] = self.var_offer_mode
        self.listbox_offer_mode['width'] = 10
        self.listbox_offer_mode['values'] = ["接受", "拒绝"]
        self.listbox_offer_mode.bind("<<ComboboxSelected>>", self.turn_radio_on)
        self.label_offer.grid(row=4, column=0, sticky='E')
        self.listbox_offer_mode.grid(row=4, column=1, sticky='W')
        # Row 5: scheduled-stop mode (none / minutes / battle count) plus
        # its value entry, disabled until a mode is picked (turn_entry_on).
        self.label_timing_mode['text'] = '预定结束'
        self.var_timing_mode.set('无')
        self.listbox_timing_mode['textvariable'] = self.var_timing_mode
        self.listbox_timing_mode['width'] = 10
        self.listbox_timing_mode['values'] = ["无", "定时[分钟]", "场数"]
        self.listbox_timing_mode.bind("<<ComboboxSelected>>", self.turn_entry_on)
        self.label_timing_mode.grid(row=5, column=0, sticky='E')
        self.listbox_timing_mode.grid(row=5, column=1, sticky='W')
        self.var_timing_value.set('')
        self.entry_timing_value['textvariable'] = self.var_timing_value
        self.entry_timing_value['width'] = 5
        self.entry_timing_value.configure(state='disabled')
        self.entry_timing_value.grid(row=5, column=2, sticky='W')
        # START/STOP buttons; presumably gridded into a different parent
        # frame than the rows above (rows 0/1 reused) -- TODO confirm.
        self.start_ctn['text'] = 'START'
        self.start_ctn['width'] = 10
        self.start_ctn['height'] = 2
        self.start_ctn['command'] = self.fight_start
        self.start_ctn['relief'] = 'groove'
        self.start_ctn.grid(row=0, column=0, sticky='E')
        self.stop_ctn['text'] = 'STOP'
        self.stop_ctn['width'] = 10
        self.stop_ctn['height'] = 2
        self.stop_ctn['command'] = self.fight_stop
        self.stop_ctn['relief'] = 'groove'
        self.stop_ctn.grid(row=0, column=1, sticky='W')
        self.stop_ctn.configure(state='disabled')
        # Log box with an initial reminder to run as administrator.
        self.info_box['width'] = 40
        self.info_box['height'] = 20
        self.info_box.grid(row=1, column=0, columnspan=2)
        self.info_box.see(END)
        var = '请授予此程序管理员权限运行,否则在游戏窗口内鼠标无法被控制'
        self.info_box.mark_set('insert', END)
        self.info_box.insert('insert', str(var) + '\n')
        self.info_box.see(END)
app = Application()

# Hide the console window (Windows only) unless the script was launched
# with a literal "test" first argument, which keeps the console visible
# for debugging.
test = sys.argv[1] if len(sys.argv) > 1 else False
if test != 'test':
    whnd = windll.kernel32.GetConsoleWindow()
    if whnd:
        windll.user32.ShowWindow(whnd, 0)
        windll.kernel32.CloseHandle(whnd)

# Window title, placement, then hand control to the Tk event loop.
app.master.title('就你破势多')
app.init_window_place(app.master, 1.1, 4)
app.mainloop()
| 33,530 | 1,282 | 46 |
0e4125b877912541eb05e96722580ac8afe2e1ce | 748 | py | Python | tests/unit_tests/test_models.py | mcrors/SimPyInvoice | cdd66476900d695c1f02939c572ee51defcf72ac | [
"MIT"
] | null | null | null | tests/unit_tests/test_models.py | mcrors/SimPyInvoice | cdd66476900d695c1f02939c572ee51defcf72ac | [
"MIT"
] | null | null | null | tests/unit_tests/test_models.py | mcrors/SimPyInvoice | cdd66476900d695c1f02939c572ee51defcf72ac | [
"MIT"
] | null | null | null | import pytest
from app.models import User
| 25.793103 | 57 | 0.661765 | import pytest
from app.models import User
class TestUserShould:
    """Unit tests for password handling on the ``User`` model."""

    @staticmethod
    def test_password_setter():
        """Assigning a password must populate the stored hash."""
        account = User(password='cat')
        assert account.password_hash is not None

    @staticmethod
    def test_no_password_getter():
        """Reading the plaintext password back must be impossible."""
        account = User(password='cat')
        with pytest.raises(AttributeError):
            account.password

    @staticmethod
    def test_password_verification():
        """The stored hash must match the correct password only."""
        account = User(password='cat')
        assert account.verify_password('cat') is True
        assert account.verify_password('dog') is False

    @staticmethod
    def test_password_salts_are_random():
        """Two users with the same password must get different hashes."""
        first = User(password='cat')
        second = User(password='cat')
        assert first.password_hash != second.password_hash
| 502 | 180 | 23 |
81c2ea7ee9f616adb5fbf19a5bb4eb1e0adee74e | 1,250 | py | Python | synapse/replication/tcp/__init__.py | zauguin/synapse | ea00f18135ce30e8415526ce68585ea90da5b856 | [
"Apache-2.0"
] | 2 | 2020-04-30T18:38:02.000Z | 2020-07-08T21:38:28.000Z | synapse/replication/tcp/__init__.py | zauguin/synapse | ea00f18135ce30e8415526ce68585ea90da5b856 | [
"Apache-2.0"
] | 4 | 2020-03-04T23:47:05.000Z | 2021-12-09T21:41:44.000Z | synapse/replication/tcp/__init__.py | zauguin/synapse | ea00f18135ce30e8415526ce68585ea90da5b856 | [
"Apache-2.0"
] | 2 | 2020-03-03T18:34:52.000Z | 2022-03-31T11:06:18.000Z | # -*- coding: utf-8 -*-
# Copyright 2017 Vector Creations Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module implements the TCP replication protocol used by synapse to
communicate between the master process and its workers (when they're enabled).
Further details can be found in docs/tcp_replication.rst
Structure of the module:
* client.py - the client classes used for workers to connect to master
* command.py - the definitions of all the valid commands
 * protocol.py - contains both the client and server protocol implementations,
these should not be used directly
* resource.py - the server classes that accepts and handle client connections
 * streams.py - the definitions of all the valid streams
"""
| 40.322581 | 78 | 0.7544 | # -*- coding: utf-8 -*-
# Copyright 2017 Vector Creations Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module implements the TCP replication protocol used by synapse to
communicate between the master process and its workers (when they're enabled).
Further details can be found in docs/tcp_replication.rst
Structure of the module:
* client.py - the client classes used for workers to connect to master
* command.py - the definitions of all the valid commands
 * protocol.py - contains both the client and server protocol implementations,
these should not be used directly
* resource.py - the server classes that accepts and handle client connections
 * streams.py - the definitions of all the valid streams
"""
| 0 | 0 | 0 |
b3e360298a3be7d671d634b3467134349a10212e | 110 | py | Python | Uche Clare/Phase 1/Python Basic 1/Day 12/Task 103.py | CodedLadiesInnovateTech/-python-challenge-solutions | 430cd3eb84a2905a286819eef384ee484d8eb9e7 | [
"MIT"
] | 6 | 2020-05-23T19:53:25.000Z | 2021-05-08T20:21:30.000Z | Uche Clare/Phase 1/Python Basic 1/Day 12/Task 103.py | CodedLadiesInnovateTech/-python-challenge-solutions | 430cd3eb84a2905a286819eef384ee484d8eb9e7 | [
"MIT"
] | 8 | 2020-05-14T18:53:12.000Z | 2020-07-03T00:06:20.000Z | Uche Clare/Phase 1/Python Basic 1/Day 12/Task 103.py | CodedLadiesInnovateTech/-python-challenge-solutions | 430cd3eb84a2905a286819eef384ee484d8eb9e7 | [
"MIT"
] | 39 | 2020-05-10T20:55:02.000Z | 2020-09-12T17:40:59.000Z | #Write a Python program to extract the filename from a given path.
import os
print(os.path.basename(__file__)) | 36.666667 | 66 | 0.8 | #Write a Python program to extract the filename from a given path.
import os
print(os.path.basename(__file__)) | 0 | 0 | 0 |
8b20e6eebb2aa104b7721de7a6e2365e1af62208 | 3,186 | py | Python | tests/test_assembly_report/test_assembly_report.py | Edinburgh-Genome-Foundry/plateo | c9a608658325f3c507788d9b966a3f3c8e516bc5 | [
"MIT"
] | 22 | 2018-01-29T21:34:25.000Z | 2021-12-14T15:31:49.000Z | tests/test_assembly_report/test_assembly_report.py | Edinburgh-Genome-Foundry/plateo | c9a608658325f3c507788d9b966a3f3c8e516bc5 | [
"MIT"
] | 3 | 2017-09-20T16:08:45.000Z | 2021-05-28T17:45:14.000Z | tests/test_assembly_report/test_assembly_report.py | Edinburgh-Genome-Foundry/plateo | c9a608658325f3c507788d9b966a3f3c8e516bc5 | [
"MIT"
] | 5 | 2018-09-18T08:53:37.000Z | 2021-04-28T08:44:38.000Z | import os
import matplotlib
matplotlib.use("Agg")
from plateo import AssemblyPlan
from plateo.parsers import plate_from_content_spreadsheet
from plateo.containers.plates import Plate4ti0960
from plateo.exporters import (picklist_to_labcyte_echo_picklist_file,
PlateTextPlotter,
AssemblyPicklistGenerator,
picklist_to_assembly_mix_report)
from plateo.tools import human_volume
import flametree
from pandas import pandas
from collections import OrderedDict
import matplotlib.pyplot as plt
from Bio import SeqIO
| 34.630435 | 75 | 0.670747 | import os
import matplotlib
matplotlib.use("Agg")
from plateo import AssemblyPlan
from plateo.parsers import plate_from_content_spreadsheet
from plateo.containers.plates import Plate4ti0960
from plateo.exporters import (picklist_to_labcyte_echo_picklist_file,
PlateTextPlotter,
AssemblyPicklistGenerator,
picklist_to_assembly_mix_report)
from plateo.tools import human_volume
import flametree
from pandas import pandas
from collections import OrderedDict
import matplotlib.pyplot as plt
from Bio import SeqIO
def test_assembly_report(tmpdir):
    """End-to-end smoke test: build an assembly picklist from the example
    spreadsheets/parts and write every report and figure into a zip
    archive under pytest's ``tmpdir``.
    """
    data_path = os.path.join("tests", "test_assembly_report", "data")
    root = flametree.file_tree(data_path)
    # Picklist spreadsheet rows: first column is the construct name, the
    # remaining columns are its part names ('-'/'nan' cells are padding).
    df = pandas.read_excel(root.example_picklist_xls.open('rb'), index=0)
    assembly_plan = AssemblyPlan(OrderedDict([
        (row[0], [e for e in row[1:] if str(e) not in ['-', 'nan']])
        for i, row in df.iterrows()
        if row[0] not in ['nan', 'Construct name']
    ]))
    parts_zip = flametree.file_tree(root.emma_parts_zip._path)
    def read(f):
        # Parse one Genbank file; use the file name (minus extension) as
        # the record id so it matches the part names in the plan.
        record = SeqIO.read(f.open('r'), 'genbank')
        record.id = f._name_no_extension
        return record
    parts_data = {
        f._name_no_extension: {'record': read(f)}
        for f in parts_zip._all_files
        if f._extension == 'gb'
    }
    assembly_plan.parts_data = parts_data
    source_plate = plate_from_content_spreadsheet(
        root.example_echo_plate_xlsx._path)
    source_plate.name = "Source"
    for well in source_plate.iter_wells():
        if not well.is_empty:
            # NOTE(review): ``content`` is never used -- this loop only
            # exercises components_as_string() on each filled well.
            content = well.content.components_as_string()
    destination_plate = Plate4ti0960("Mixplate")
    # Echo-dispenser constraints: per-part molar amount, fill-up target,
    # buffer volume and the machine's volume granularity/minimum.
    picklist_generator = AssemblyPicklistGenerator(
        part_mol=1.3e-15,
        complement_to=1e-6,
        buffer_volume=300e-9,
        volume_rounding=2.5e-9,
        minimal_dispense_volume=5e-9)
    picklist, data = picklist_generator.make_picklist(
        assembly_plan,
        source_wells=source_plate.iter_wells(),
        destination_wells=destination_plate.iter_wells(direction='column'),
        complement_well=source_plate.wells.O24,
        buffer_well=source_plate.wells.P24
    )
    # Simulate the transfers to get the resulting destination plate.
    future_plates = picklist.execute(inplace=False)
    def text(w):
        # Plot label per well: dispensed volume, preceded by the
        # construct name when the well has one.
        txt = human_volume(w.content.volume)
        if 'construct' in w.data:
            txt = "\n".join([w.data.construct, txt])
        return txt
    plotter = PlateTextPlotter(text)
    ax, _ = plotter.plot_plate(
        future_plates[destination_plate], figsize=(20, 8))
    # All outputs are collected in a single zip archive.
    ziproot = flametree.file_tree(os.path.join(str(tmpdir), 'a.zip'))
    ax.figure.savefig(
        ziproot._file("final_mixplate.pdf").open('wb'),
        format="pdf",
        bbox_inches="tight"
    )
    plt.close(ax.figure)
    picklist_to_assembly_mix_report(
        picklist,
        ziproot._file(
            "assembly_mix_picklist_report.pdf").open('wb'), data=data)
    assembly_plan.write_report(
        ziproot._file("assembly_plan_summary.pdf").open('wb'))
    picklist_to_labcyte_echo_picklist_file(
        picklist, ziproot._file("ECHO_picklist.csv").open('w'))
    # Flush and close the zip archive.
    ziproot._close()
| 2,561 | 0 | 23 |
eab3ae228294c53fded04204fca3c7d6c2ecc943 | 1,029 | py | Python | pkmkt2_code/task3.py | toomastahves/ml-examples | ed7d3f9e64970fe0aacd13db9afb707f428ed2ac | [
"MIT"
] | 1 | 2022-03-08T10:53:02.000Z | 2022-03-08T10:53:02.000Z | pkmkt2_code/task3.py | toomastahves/ml-examples | ed7d3f9e64970fe0aacd13db9afb707f428ed2ac | [
"MIT"
] | null | null | null | pkmkt2_code/task3.py | toomastahves/ml-examples | ed7d3f9e64970fe0aacd13db9afb707f428ed2ac | [
"MIT"
] | null | null | null | from sympy import symbols, diff, solve, Eq
from task1 import get_Euler_descr
x1, x2, x3, t, v1, v2, v3 = symbols('x1 x2 x3 t v1 v2 v3')
#from testdata import eq1, eq2, eq3
#print(get_velocity_Euler(eq1, eq2, eq3))
#print(get_acceleration_Euler(eq1, eq2, eq3))
| 42.875 | 75 | 0.605442 | from sympy import symbols, diff, solve, Eq
from task1 import get_Euler_descr
x1, x2, x3, t, v1, v2, v3 = symbols('x1 x2 x3 t v1 v2 v3')
def get_velocity_Euler(eq1, eq2, eq3):
    """Derive the Eulerian velocity field of a motion description.

    The displacement components u_i(x, t) come from ``get_Euler_descr``.
    Each velocity component must satisfy the material-derivative
    relation  v_i = du_i/dt + (du_i/dx_j) * v_j,  which is solved
    symbolically for (v1, v2, v3).  Returns the three velocity
    expressions as a list.
    """
    displacements = get_Euler_descr(eq1, eq2, eq3)
    unknowns = (v1, v2, v3)
    coords = (x1, x2, x3)
    relations = []
    for u in displacements:
        convected = sum(diff(u, x) * v for x, v in zip(coords, unknowns))
        relations.append(diff(u, t) + convected)
    solution = solve([Eq(expr, v) for expr, v in zip(relations, unknowns)],
                     list(unknowns))
    return [solution[v] for v in unknowns]
def get_acceleration_Euler(eq1, eq2, eq3):
    """Material (total) acceleration of the Eulerian velocity field.

    Computes  a_i = dv_i/dt + (dv_i/dx_j) * v_j  where the v_j are the
    symbolic velocity components returned by ``get_velocity_Euler``.
    Returns the three acceleration expressions as a list.
    """
    velocity = get_velocity_Euler(eq1, eq2, eq3)
    coords = (x1, x2, x3)
    accelerations = []
    for component in velocity:
        convective = sum(diff(component, x) * w
                         for x, w in zip(coords, velocity))
        accelerations.append(diff(component, t) + convective)
    return accelerations
#from testdata import eq1, eq2, eq3
#print(get_velocity_Euler(eq1, eq2, eq3))
#print(get_acceleration_Euler(eq1, eq2, eq3))
| 721 | 0 | 46 |
31a5ee7a8c15c18aa103ed2598c34cffc3a69548 | 3,669 | py | Python | Games/hex/HexLogic.py | morozig/muzero | 9798cf9ac2ab46cf6da81827607b8fa8dafbeae4 | [
"MIT"
] | null | null | null | Games/hex/HexLogic.py | morozig/muzero | 9798cf9ac2ab46cf6da81827607b8fa8dafbeae4 | [
"MIT"
] | null | null | null | Games/hex/HexLogic.py | morozig/muzero | 9798cf9ac2ab46cf6da81827607b8fa8dafbeae4 | [
"MIT"
] | null | null | null | """
Class for the hex game logic. Unaltered skeleton code.
EDIT: Changed board representation from dict to numpy.ndarray.
EDIT: Changed numbers for the player-color indicators.
:version: FINAL
:date:
:author: Aske Plaat
:edited by: Joery de Vries
"""
import numpy as np
| 33.66055 | 114 | 0.531207 | """
Class for the hex game logic. Unaltered skeleton code.
EDIT: Changed board representation from dict to numpy.ndarray.
EDIT: Changed numbers for the player-color indicators.
:version: FINAL
:date:
:author: Aske Plaat
:edited by: Joery de Vries
"""
import numpy as np
class HexBoard:
    """State and rules for a game of Hex on a square grid.

    Cells are addressed by ``(x, y)`` tuples and hold one of the class
    constants BLUE, RED or EMPTY.  BLUE wins by connecting the ``x == 0``
    edge to the ``x == size - 1`` edge; RED connects ``y == 0`` to
    ``y == size - 1``.
    """
    BLUE = -1   # player colour / cell marker
    RED = 1     # player colour / cell marker
    EMPTY = 0   # unoccupied cell

    def __init__(self, board_size):
        """Create an empty ``board_size`` x ``board_size`` board."""
        self.board = np.full((board_size, board_size), HexBoard.EMPTY)
        self.size = board_size
        self.game_over = False

    def is_game_over(self):
        """Return True once either colour has completed a winning chain."""
        return self.game_over

    def is_empty(self, coordinates):
        """Return True if the cell at ``coordinates`` is unoccupied."""
        return self.board[coordinates] == HexBoard.EMPTY

    def is_color(self, coordinates, color):
        """Return True if the cell at ``coordinates`` holds ``color``."""
        return self.board[coordinates] == color

    def get_color(self, coordinates):
        """Return the marker at ``coordinates``.

        The sentinel coordinate ``(-1, -1)`` is treated as EMPTY.
        """
        if coordinates == (-1, -1):
            return HexBoard.EMPTY
        return self.board[coordinates]

    def place(self, coordinates, color):
        """Put ``color`` on an empty cell and update the game-over flag.

        Moves are silently ignored after the game has ended or when the
        target cell is already occupied.
        """
        if not self.game_over and self.board[coordinates] == HexBoard.EMPTY:
            self.board[coordinates] = color
            if self.check_win(HexBoard.RED) or self.check_win(HexBoard.BLUE):
                self.game_over = True

    def get_opposite_color(self, current_color):
        """Return the opponent's colour for ``current_color``."""
        if current_color == HexBoard.BLUE:
            return HexBoard.RED
        return HexBoard.BLUE

    def get_neighbors(self, coordinates):
        """Return the in-bounds hex neighbours of ``coordinates``.

        Fix over the original: both components are bounds-checked, so no
        out-of-range tuple can be produced (the original never checked
        the unchanged coordinate).
        """
        cx, cy = coordinates
        # Same neighbour order as the original hand-written conditions.
        offsets = ((-1, 0), (1, 0), (-1, 1), (1, -1), (0, 1), (0, -1))
        return [(cx + dx, cy + dy)
                for dx, dy in offsets
                if 0 <= cx + dx < self.size and 0 <= cy + dy < self.size]

    def get_empty_coordinates(self):
        """Return every ``(x, y)`` cell that is still EMPTY."""
        return [(i, j) for i in range(self.size) for j in range(self.size) if self.is_empty((i, j))]

    def border(self, color, move):
        """Return True if ``move`` lies on the target border for ``color``."""
        (nx, ny) = move
        return (color == HexBoard.BLUE and nx == self.size - 1) or (color == HexBoard.RED and ny == self.size - 1)

    def traverse(self, color, move, visited):
        """Depth-first search for a ``color`` chain reaching its far border.

        ``visited`` maps already-explored cells to True and is mutated in
        place.  NOTE: recursion depth is bounded by the cell count, so
        very large boards could hit Python's recursion limit.
        """
        # Entries in ``visited`` are always True, so membership alone is
        # enough (the original also re-read the stored value).
        if not self.is_color(move, color) or move in visited:
            return False
        if self.border(color, move):
            return True
        visited[move] = True
        # any() short-circuits exactly like the original early-return loop.
        return any(self.traverse(color, n, visited) for n in self.get_neighbors(move))

    def check_win(self, color):
        """Return True if ``color`` has connected its two borders."""
        for i in range(self.size):
            # BLUE chains start on the left edge, RED chains on the top.
            start = (0, i) if color == HexBoard.BLUE else (i, 0)
            if self.traverse(color, start, {}):
                return True
        return False

    def print(self):
        """Pretty-print the board as an ASCII hex grid (b = BLUE, r = RED)."""
        print(" ", end="")
        for y in range(self.size):
            print(chr(y + ord('a')), "", end="")
        print("")
        print(" -----------------------")
        for y in range(self.size):
            # Offset each row to suggest the hexagonal layout.
            print(y, "|", end="")
            for z in range(y):
                print(" ", end="")
            for x in range(self.size):
                piece = self.board[x, y]
                if piece == HexBoard.BLUE:
                    print("b ", end="")
                elif piece == HexBoard.RED:
                    print("r ", end="")
                elif x == self.size - 1:
                    # Last column: no trailing space before the border.
                    # (The original compared against self.size, which an
                    # in-range x can never equal, making the branch dead.)
                    print("-", end="")
                else:
                    print("- ", end="")
            print("|")
        print(" -----------------------")
| 2,971 | 402 | 23 |
faf6ad233e20c9561b79ad3d76c83b660890b201 | 1,729 | py | Python | corehq/util/tests/test_teeout.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 471 | 2015-01-10T02:55:01.000Z | 2022-03-29T18:07:18.000Z | corehq/util/tests/test_teeout.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 14,354 | 2015-01-01T07:38:23.000Z | 2022-03-31T20:55:14.000Z | corehq/util/tests/test_teeout.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 175 | 2015-01-06T07:16:47.000Z | 2022-03-29T13:27:01.000Z | import re
import sys
from io import StringIO
from corehq.util.teeout import tee_output
from testil import assert_raises, eq
| 24.352113 | 73 | 0.606709 | import re
import sys
from io import StringIO
from corehq.util.teeout import tee_output
from testil import assert_raises, eq
def test_tee_output():
    """Output written inside tee_output() must reach both the fake sys
    streams and the tee file, and the traceback in the tee file must be
    collapsible by sanitize_tb().
    """
    fileobj = StringIO()
    fake = fakesys()
    with assert_raises(Error), tee_output(fileobj, sys=fake):
        print("testing...", file=fake.stdout)
        fake.stderr.write("fail.\n")
        raise Error("stop")
    # Both fake streams still received their own writes ...
    eq(fake.stdout.getvalue(), "testing...\n")
    eq(fake.stderr.getvalue(), "fail.\n")
    # ... and the tee file got stdout + stderr + the traceback.
    eq(sanitize_tb(fileobj.getvalue()),
       "testing...\n"
       "fail.\n"
       "Traceback (most recent call last):\n"
       "  ...\n"
       "corehq.util.tests.test_teeout.Error: stop\n")
def test_tee_output_with_KeyboardInterrupt():
    """KeyboardInterrupt is still recorded in the tee file, but nothing
    is written to the fake stdout/stderr.
    """
    fileobj = StringIO()
    fake = fakesys()
    with assert_raises(KeyboardInterrupt), tee_output(fileobj, sys=fake):
        raise KeyboardInterrupt("errrt")
    eq(fake.stdout.getvalue(), "")
    eq(fake.stderr.getvalue(), "")
    eq(sanitize_tb(fileobj.getvalue()),
       "Traceback (most recent call last):\n"
       "  ...\n"
       "KeyboardInterrupt: errrt\n")
def test_tee_output_with_SystemExit():
    """SystemExit is a normal-exit signal: nothing at all is teed."""
    fileobj = StringIO()
    fake = fakesys()
    with assert_raises(SystemExit), tee_output(fileobj, sys=fake):
        raise SystemExit(1)
    eq(fake.stdout.getvalue(), "")
    eq(fake.stderr.getvalue(), "")
    eq(fileobj.getvalue(), "")
def fakesys():
    """Return a minimal stand-in for the ``sys`` module.

    The returned object is a *class* (never instantiated) exposing fresh
    ``stdout``/``stderr`` StringIO buffers plus an ``exc_info`` callable
    that defers to the real ``sys.exc_info``.
    """
    class _FakeSys(object):
        stdout = StringIO()
        stderr = StringIO()

        def exc_info():
            # Deliberately takes no ``self``: callers invoke this
            # directly on the class object, mimicking ``sys.exc_info()``.
            return sys.exc_info()

    return _FakeSys
def sanitize_tb(value):
    """Collapse the body of any traceback in ``value`` to an ellipsis.

    Every run of indented lines following a "Traceback ...:" header is
    replaced by a single placeholder line, so tests can compare
    tracebacks without depending on file paths or line numbers.
    """
    pattern = r"(Traceback .*:\n)(?:  .*\n)+"
    return re.sub(pattern, r"\1  ...\n", value, flags=re.MULTILINE)
class Error(Exception):
    """Test-local exception type raised inside tee_output() blocks."""
    pass
| 1,449 | 11 | 138 |
16c38f5853f84fef743019e993c84890e0fb5c88 | 5,043 | py | Python | sdk/python/pulumi_aws/outposts/get_outposts.py | alexbowers/pulumi-aws | 7dbdb03b1e4f7c0d51d5b5d17233ff4465c3eff5 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/outposts/get_outposts.py | alexbowers/pulumi-aws | 7dbdb03b1e4f7c0d51d5b5d17233ff4465c3eff5 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/outposts/get_outposts.py | alexbowers/pulumi-aws | 7dbdb03b1e4f7c0d51d5b5d17233ff4465c3eff5 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetOutpostsResult',
'AwaitableGetOutpostsResult',
'get_outposts',
]
@pulumi.output_type
class GetOutpostsResult:
"""
A collection of values returned by getOutposts.
"""
@property
@pulumi.getter
def arns(self) -> Sequence[str]:
"""
Set of Amazon Resource Names (ARNs).
"""
return pulumi.get(self, "arns")
@property
@pulumi.getter(name="availabilityZone")
@property
@pulumi.getter(name="availabilityZoneId")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def ids(self) -> Sequence[str]:
"""
Set of identifiers.
"""
return pulumi.get(self, "ids")
@property
@pulumi.getter(name="ownerId")
@property
@pulumi.getter(name="siteId")
# pylint: disable=using-constant-test
def get_outposts(availability_zone: Optional[str] = None,
availability_zone_id: Optional[str] = None,
owner_id: Optional[str] = None,
site_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetOutpostsResult:
"""
Provides details about multiple Outposts.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.outposts.get_outposts(site_id=data["aws_outposts_site"]["id"])
```
:param str availability_zone: Availability Zone name.
:param str availability_zone_id: Availability Zone identifier.
:param str owner_id: AWS Account identifier of the Outpost owner.
:param str site_id: Site identifier.
"""
__args__ = dict()
__args__['availabilityZone'] = availability_zone
__args__['availabilityZoneId'] = availability_zone_id
__args__['ownerId'] = owner_id
__args__['siteId'] = site_id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws:outposts/getOutposts:getOutposts', __args__, opts=opts, typ=GetOutpostsResult).value
return AwaitableGetOutpostsResult(
arns=__ret__.arns,
availability_zone=__ret__.availability_zone,
availability_zone_id=__ret__.availability_zone_id,
id=__ret__.id,
ids=__ret__.ids,
owner_id=__ret__.owner_id,
site_id=__ret__.site_id)
| 34.306122 | 137 | 0.655364 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetOutpostsResult',
'AwaitableGetOutpostsResult',
'get_outposts',
]
@pulumi.output_type
class GetOutpostsResult:
    """
    A collection of values returned by getOutposts.
    """
    # NOTE: Pulumi-generated code. The @pulumi.output_type decorator wires
    # the @pulumi.getter properties to the values stored via pulumi.set().
    def __init__(__self__, arns=None, availability_zone=None, availability_zone_id=None, id=None, ids=None, owner_id=None, site_id=None):
        # Each isinstance guard only fires for non-None values; the engine
        # injects the actual values at invoke time.
        if arns and not isinstance(arns, list):
            raise TypeError("Expected argument 'arns' to be a list")
        pulumi.set(__self__, "arns", arns)
        if availability_zone and not isinstance(availability_zone, str):
            raise TypeError("Expected argument 'availability_zone' to be a str")
        pulumi.set(__self__, "availability_zone", availability_zone)
        if availability_zone_id and not isinstance(availability_zone_id, str):
            raise TypeError("Expected argument 'availability_zone_id' to be a str")
        pulumi.set(__self__, "availability_zone_id", availability_zone_id)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if ids and not isinstance(ids, list):
            raise TypeError("Expected argument 'ids' to be a list")
        pulumi.set(__self__, "ids", ids)
        if owner_id and not isinstance(owner_id, str):
            raise TypeError("Expected argument 'owner_id' to be a str")
        pulumi.set(__self__, "owner_id", owner_id)
        if site_id and not isinstance(site_id, str):
            raise TypeError("Expected argument 'site_id' to be a str")
        pulumi.set(__self__, "site_id", site_id)
    @property
    @pulumi.getter
    def arns(self) -> Sequence[str]:
        """
        Set of Amazon Resource Names (ARNs).
        """
        return pulumi.get(self, "arns")
    @property
    @pulumi.getter(name="availabilityZone")
    def availability_zone(self) -> str:
        """
        Availability Zone name the lookup was filtered by.
        """
        return pulumi.get(self, "availability_zone")
    @property
    @pulumi.getter(name="availabilityZoneId")
    def availability_zone_id(self) -> str:
        """
        Availability Zone identifier the lookup was filtered by.
        """
        return pulumi.get(self, "availability_zone_id")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The provider-assigned unique ID for this managed resource.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def ids(self) -> Sequence[str]:
        """
        Set of identifiers.
        """
        return pulumi.get(self, "ids")
    @property
    @pulumi.getter(name="ownerId")
    def owner_id(self) -> str:
        """
        AWS Account identifier of the Outpost owner.
        """
        return pulumi.get(self, "owner_id")
    @property
    @pulumi.getter(name="siteId")
    def site_id(self) -> str:
        """
        Site identifier the lookup was filtered by.
        """
        return pulumi.get(self, "site_id")
class AwaitableGetOutpostsResult(GetOutpostsResult):
    """Result wrapper that can also be used with ``await``.

    The dead ``if False: yield`` makes ``__await__`` a generator function
    (as the await protocol requires) that immediately returns a plain
    GetOutpostsResult, so the object works in both sync and async code.
    """
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetOutpostsResult(
            arns=self.arns,
            availability_zone=self.availability_zone,
            availability_zone_id=self.availability_zone_id,
            id=self.id,
            ids=self.ids,
            owner_id=self.owner_id,
            site_id=self.site_id)
def get_outposts(availability_zone: Optional[str] = None,
                 availability_zone_id: Optional[str] = None,
                 owner_id: Optional[str] = None,
                 site_id: Optional[str] = None,
                 opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetOutpostsResult:
    """
    Provides details about multiple Outposts.

    ## Example Usage

    ```python
    import pulumi
    import pulumi_aws as aws

    example = aws.outposts.get_outposts(site_id=data["aws_outposts_site"]["id"])
    ```

    :param str availability_zone: Availability Zone name.
    :param str availability_zone_id: Availability Zone identifier.
    :param str owner_id: AWS Account identifier of the Outpost owner.
    :param str site_id: Site identifier.
    """
    # Invoke arguments use the provider's camelCase key names; None values
    # are passed through and mean "no filter".
    __args__ = dict()
    __args__['availabilityZone'] = availability_zone
    __args__['availabilityZoneId'] = availability_zone_id
    __args__['ownerId'] = owner_id
    __args__['siteId'] = site_id
    # Default the invoke options and pin the SDK version if unset.
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('aws:outposts/getOutposts:getOutposts', __args__, opts=opts, typ=GetOutpostsResult).value
    # Re-wrap the typed result in its awaitable variant.
    return AwaitableGetOutpostsResult(
        arns=__ret__.arns,
        availability_zone=__ret__.availability_zone,
        availability_zone_id=__ret__.availability_zone_id,
        id=__ret__.id,
        ids=__ret__.ids,
        owner_id=__ret__.owner_id,
        site_id=__ret__.site_id)
| 1,959 | 31 | 179 |
39213ae05145aca03d49aa5f9978e595e8bcfaf8 | 2,298 | py | Python | cost-analysis-result.py | efficient/catbench | 4f66541efd8318109c4ac150898d60f023e7aba5 | [
"Apache-2.0"
] | 10 | 2017-12-12T17:20:41.000Z | 2021-05-03T14:40:35.000Z | cost-analysis-result.py | efficient/catbench | 4f66541efd8318109c4ac150898d60f023e7aba5 | [
"Apache-2.0"
] | null | null | null | cost-analysis-result.py | efficient/catbench | 4f66541efd8318109c4ac150898d60f023e7aba5 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
import argparse;
import os;
import sys;
import matplotlib as mpl;
mpl.use('Agg');
import matplotlib.pyplot as plt;
import json;
from matplotlib import ticker;
import numpy as np
filename="cost-analysis-result.pdf";
main();
# Col 0 are the x points
# Col 1 is the series 50/100 marker
# Col 2 is the series cat data
# Col 3 is the series no cat data
| 31.054054 | 124 | 0.644909 | #!/usr/bin/python
import argparse;
import os;
import sys;
import matplotlib as mpl;
mpl.use('Agg');
import matplotlib.pyplot as plt;
import json;
from matplotlib import ticker;
import numpy as np
filename="cost-analysis-result.pdf";
def graph():
	"""Plot "extra machines needed" versus machine-learning throughput.

	Draws three series on a single axis:
	  * an ideal 1:1 "NoContention" line,
	  * a piecewise-linear cost curve with cache contention and no CAT,
	  * the same curve when CAT cache allocation is enabled,
	then saves the figure to the module-level ``filename``.

	WARNING: ends with ``exit()``, so calling this terminates the process.
	"""
	fig = plt.figure(figsize=(8, 4));
	ax = fig.add_subplot(1,1,1);
	temp = list();
	range_top = 2000;
	t_total = np.arange(0, range_top, 1);
	# Ideal case: one extra machine per unit of throughput, no interference.
	temp.append(ax.plot(t_total, t_total, 'm:', linewidth=4.0, label="NoContention"));
	# Measured MITE throughput of the uncontended baseline configuration.
	baseline_mite = 5.15;
	#series_tuples = get_tuples(filename, slabels, xlabel, ylabels);
	# Contention
	contention_mite = 4.85
	#contention_mite = 4.33
	# em: machine efficiency; ek: throughput ratio relative to baseline.
	em = 0.951;
	ek = contention_mite / baseline_mite;
	n = 1000;
	# Knee of the piecewise curve: below it the slope differs from above it.
	range_bottom = n / ek * em;
	t_bottom = np.arange(0, range_bottom, 1);
	t_top = np.arange(range_bottom, range_top, 1);
	temp.append(ax.plot(t_bottom, t_bottom / em - (t_bottom / em) * ek, 'r--', linewidth=2.0, label="Contention-NoCAT"));
	temp.append(ax.plot(t_top, (n / ek - n) + (t_top - (n / ek) * em), 'r--', linewidth=2.0));#, label="Contention-NoCAT"));
	# Same construction for the CAT-enabled configuration.
	allocation_mite = 5.09;
	#allocation_mite = 4.81;
	em = 0.951;
	ek = allocation_mite / baseline_mite;
	range_bottom = n / ek * em;
	t_bottom = np.arange(0, range_bottom, 1);
	t_top = np.arange(range_bottom, range_top, 1);
	temp.append(ax.plot(t_bottom, t_bottom / em - (t_bottom / em) * ek, 'b-', linewidth=2.0, label="Contention-CAT"));
	temp.append(ax.plot(t_top, (n / ek - n) + (t_top - (n / ek) * em), 'b-', linewidth=2.0));#, label="Contention-CAT"));
	# Build the legend from the labelled series, in insertion order
	# (the sorted() variant was deliberately left disabled).
	handles, labels = ax.get_legend_handles_labels()
	import operator
	handles2 = None;
	labels2 = None;
	hl = zip(handles,labels);#sorted(zip(handles, labels), key=operator.itemgetter(1))
	handles2, labels2 = zip(*hl)
	lgd = ax.legend(handles2, labels2, loc="upper center");
	ax.set_xlabel("Machine learning throughput");
	ax.set_ylabel("Number of extra machines");
	plt.ylim(ymin=0, ymax=200);
	plt.xlim(xmin=0, xmax=1200);
	plt.savefig(filename, bbox_extra_artists=(lgd,), bbox_inches='tight');
	exit();
def main():
	"""Entry point: render the cost-analysis figure and save it to disk."""
	graph()
main();
# Col 0 are the x points
# Col 1 is the series 50/100 marker
# Col 2 is the series cat data
# Col 3 is the series no cat data
| 1,882 | 0 | 46 |
3f9f62e3bd3edff609fc705e0328d2bab6bb7690 | 1,483 | py | Python | setup.py | nilesh-kr-dubey/django-inbound-rules | 5ca122bf915d17c04a63b1464048bba91006e854 | [
"MIT"
] | 1 | 2020-07-31T06:34:27.000Z | 2020-07-31T06:34:27.000Z | setup.py | nilesh-kr-dubey/django-inbound-rules | 5ca122bf915d17c04a63b1464048bba91006e854 | [
"MIT"
] | null | null | null | setup.py | nilesh-kr-dubey/django-inbound-rules | 5ca122bf915d17c04a63b1464048bba91006e854 | [
"MIT"
] | null | null | null |
import pathlib
from setuptools import setup
# The directory containing this file
HERE = pathlib.Path(__file__).parent
# The text of the README file
README = (HERE / "README.md").read_text()
# This call to setup() does all the work
setup(
name="django-inbound-rules",
version="1.1.0",
description="Django Inbound Rules is an app to allow or restrict group of users on specified url(s) based on CIDR blocks(now IPv4 only) excluding user with superuser permissions.",
long_description=README,
long_description_content_type="text/markdown",
url="https://github.com/nilesh-kr-dubey/django-inbound-rules",
author="Nilesh Kumar Dubey",
author_email="nileshdubeyindia@gmail.com",
license="MIT",
classifiers=[
"Environment :: Web Environment",
"Framework :: Django",
"Framework :: Django :: 2.0",
"Framework :: Django :: 3.0",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
],
packages=['inbound'],
install_requires=[
"Django >= 2.0",
],
include_package_data=True,
project_urls={
'Documentation': 'https://github.com/nilesh-kr-dubey/django-inbound-rules/tree/master/docs',
},
)
| 33.704545 | 184 | 0.645314 |
import pathlib
from setuptools import setup
# The directory containing this file
HERE = pathlib.Path(__file__).parent
# The text of the README file, used as the PyPI long description below
README = (HERE / "README.md").read_text()
# This call to setup() does all the work
setup(
    name="django-inbound-rules",
    version="1.1.0",
    description="Django Inbound Rules is an app to allow or restrict group of users on specified url(s) based on CIDR blocks(now IPv4 only) excluding user with superuser permissions.",
    long_description=README,
    long_description_content_type="text/markdown",
    url="https://github.com/nilesh-kr-dubey/django-inbound-rules",
    author="Nilesh Kumar Dubey",
    author_email="nileshdubeyindia@gmail.com",
    license="MIT",
    # Trove classifiers advertised on PyPI (supported Django/Python versions)
    classifiers=[
        "Environment :: Web Environment",
        "Framework :: Django",
        "Framework :: Django :: 2.0",
        "Framework :: Django :: 3.0",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Topic :: Internet :: WWW/HTTP",
        "Topic :: Internet :: WWW/HTTP :: Dynamic Content",
    ],
    # Only the 'inbound' Django app is shipped
    packages=['inbound'],
    install_requires=[
        "Django >= 2.0",
    ],
    include_package_data=True,
    project_urls={
        'Documentation': 'https://github.com/nilesh-kr-dubey/django-inbound-rules/tree/master/docs',
    },
)
| 0 | 0 | 0 |
de32a72207938ae327217698ba55d764ad8e76f8 | 10,175 | py | Python | analysis/usability/visualization.py | severinsimmler/usability-survey | a00254f6112965b6809153a0b7525c433a51befe | [
"MIT"
] | null | null | null | analysis/usability/visualization.py | severinsimmler/usability-survey | a00254f6112965b6809153a0b7525c433a51befe | [
"MIT"
] | 4 | 2018-12-05T09:20:52.000Z | 2018-12-23T16:00:02.000Z | analysis/usability/visualization.py | severinsimmler/usability-survey | a00254f6112965b6809153a0b7525c433a51befe | [
"MIT"
] | null | null | null | import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
| 42.219917 | 209 | 0.591057 | import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
def plot_severity(data):
    """Plot problem-severity counts per website, twice.

    First figure: grouped horizontal bars with absolute counts.
    Second figure: stacked horizontal bars with row-normalised
    (relative) frequencies.  ``data`` is expected to have one row per
    website and one column per severity grade (colours map grade order
    grey/green/yellow/orange/red).
    """
    # absolute counts, one bar group per website
    plt.figure()
    a = data.plot.barh(figsize=(7.7, 2.4),
                       width=.95,
                       color=("#BABDB6", "#8AE234", "#FCE94F", "#F57900", "#EF2929"))
    a.set_xlabel("Absolute Häufigkeit")
    a.set_ylabel("Website")
    plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., title="Schweregrad")
    plt.tight_layout()
    plt.show()
    # relative frequencies: each row divided by its own total, stacked
    plt.figure()
    r = data.div(data.sum(axis=1), axis=0).plot.barh(stacked=True,
                                                     figsize=(7.7, 1.9),
                                                     width=.4,
                                                     color=("#BABDB6", "#8AE234", "#FCE94F", "#F57900", "#EF2929"))
    plt.legend(bbox_to_anchor=(1.05, 1), loc=None, borderaxespad=0., title="Schweregrad")
    r.set_xlabel("Relative Häufigkeit")
    r.set_ylabel("Website")
    plt.tight_layout()
    plt.show()
def plot_problems(data):
    """Horizontal box plot of the number of problems found per website."""
    box_colors = {"whiskers": "black", "boxes": "black", "medians": "#D62728"}
    axes = data.plot.box(vert=False,
                         figsize=(6, 2.5),
                         widths=[.45, .45],
                         color=box_colors,
                         medianprops={"linewidth": 2.8})
    axes.set_xlabel("Absolute Häufigkeit")
    axes.set_ylabel("Website")
    plt.tight_layout()
    plt.show()
def plot_concordance(data):
    """Plot evaluator concordance per severity grade, absolute and relative.

    Rows are sorted by their total count, then the frame is rebuilt with
    grade columns interleaved with their ``_S`` (symmetric-difference)
    counterparts so each pair shares a colour family in the stacked bars.

    NOTE(review): mutates the caller's frame — a ``"Sum"`` column is
    added to ``data`` in place before the local rebinding.
    """
    data["Sum"] = data.sum(axis=1)
    data = data.sort_values("Sum")
    # Rebuild with interleaved grade / grade_S columns; "Sum" is dropped here.
    data = pd.DataFrame([data["Irrelevant"],
                         data["Irrelevant_S"],
                         data["Kosmetisch"],
                         data["Kosmetisch_S"],
                         data["Gering"],
                         data["Gering_S"],
                         data["Bedeutend"],
                         data["Bedeutend_S"],
                         data["Katastrophe"],
                         data["Katastrophe_S"]]).T
    # Colour pairs: muted tone for the grade, bright tone for its _S column.
    color = ("#3465A4","#3465A4",
             "#BABDB6","#8AE234",
             "#888A85","#FCE94F",
             "#4E4E4E","#F57900",
             "#000000", "#EF2929")
    # absolute counts, stacked
    a = data.plot.barh(stacked=True, color=color, figsize=(7.7, 3.5))
    plt.legend(bbox_to_anchor=(1.05, 1),
               loc=None,
               borderaxespad=0.,
               title="Sym. Differenz:\nSchweregrad")
    a.set_xlabel("Absolute Häufigkeit")
    plt.tight_layout()
    plt.show()
    # relative frequencies (row-normalised), stacked
    r = data.div(data.sum(axis=1), axis=0).plot.barh(stacked=True, color=color, figsize=(7.7, 3.5))
    plt.legend(bbox_to_anchor=(1.05, 1),
               loc=None,
               borderaxespad=0.,
               title="Schweregrad")
    r.set_xlabel("Relative Häufigkeit")
    plt.tight_layout()
    plt.show()
def plot_experience(sample):
    """Bar chart of prior experience answers, normalised to canonical labels."""
    answers = sample.pre["vorkenntnisse"]
    answers = answers.replace("n. a.", "Keine Angabe")
    answers = answers.replace("spreadshirt.de", "Spreadshirt")
    answers = answers.replace("nein", "Keine")
    counts = answers.value_counts().sort_values()
    axis = counts.plot.barh(color="#555753", figsize=(6, 2.3))
    axis.set_xlabel("Absolute Häufigkeit")
    axis.set_ylabel("Vorkenntnisse")
    axis.xaxis.set_major_locator(MaxNLocator(integer=True))
    plt.tight_layout()
    plt.show()
def plot_gender(sample):
    """Bar chart of participants' gender, with title-cased labels."""
    gender = sample.pre["geschlecht"].apply(lambda value: value.title())
    counts = gender.value_counts().sort_values()
    axis = counts.plot.barh(color="#555753", figsize=(6, 2))
    axis.set_xlabel("Absolute Häufigkeit")
    axis.set_ylabel("Geschlecht")
    axis.xaxis.set_major_locator(MaxNLocator(integer=True))
    plt.tight_layout()
    plt.show()
def plot_education(sample):
    """Bar chart of the highest completed level of education."""
    counts = sample.pre["bildungsabschluss"].value_counts().sort_values()
    axis = counts.plot.barh(color="#555753", figsize=(7.7, 2.3))
    axis.set_xlabel("Absolute Häufigkeit")
    axis.set_ylabel("Höchster\nBildungsabschluss")
    axis.xaxis.set_major_locator(MaxNLocator(integer=True))
    plt.tight_layout()
    plt.show()
def plot_occupation(sample):
    """Bar chart of participants' occupations, normalised to canonical labels.

    Free-text answers in ``sample.pre["beschäftigung"]`` are mapped onto a
    small set of canonical "Student (...)" labels before counting.
    """
    # The original applied these rules one .replace() at a time and even
    # repeated the "digital humanities" rule twice (a no-op the second
    # time).  A single dict-based replace is equivalent because every rule
    # matches a complete cell value and no replacement result is itself a
    # key of the mapping.
    canonical = {
        "MCS": "Student (Mensch-Computer-Systeme)",
        "Mensch-Computer-Systeme": "Student (Mensch-Computer-Systeme)",
        "Mensch-Computer-Systeme (Student)": "Student (Mensch-Computer-Systeme)",
        "Mensch-Computer-Systeme Student": "Student (Mensch-Computer-Systeme)",
        "Student MCS": "Student (Mensch-Computer-Systeme)",
        "Chemie Bachelor": "Student (Chemie)",
        "digital humanities": "Student (Digital Humanities)",
        "Digital Humanities": "Student (Digital Humanities)",
        "Physik": "Student (Physik)",
    }
    occupation = sample.pre["beschäftigung"].replace(canonical)
    ax = occupation.value_counts().sort_values().plot.barh(color="#555753", figsize=(7.7, 3.5))
    ax.set_xlabel("Absolute Häufigkeit")
    ax.set_ylabel("Beschäftigung")
    plt.tight_layout()
    plt.show()
def plot_age(sample):
    """Horizontal box plot of participants' age in years."""
    age = sample.pre["alter"].apply(int)
    age.name = "Alter"
    box_colors = {"whiskers": "black", "boxes": "black", "medians": "#D62728"}
    axis = age.plot.box(vert=False,
                        figsize=(6, 2),
                        widths=.45,
                        color=box_colors,
                        medianprops={"linewidth": 2.8})
    axis.set_xlabel("Alter, in Jahren")
    axis.set_yticklabels("")
    axis.xaxis.set_major_locator(MaxNLocator(integer=True))
    plt.tight_layout()
    plt.show()
def plot_nasa_tlx(sample):
    """Horizontal box plot comparing NASA-TLX scores per website.

    Reads ``sample.nasa`` (rows with a ``website`` and a ``score``
    column) and draws one box per website.
    """
    # NOTE(review): assigning raw .values into an empty frame relies on
    # both websites contributing the same number of rows — confirm upstream.
    df = pd.DataFrame(columns=["Shirtinator", "Spreadshirt"])
    df.loc[:, "Spreadshirt"] = sample.nasa[sample.nasa["website"] == "spreadshirt"]["score"].values
    df.loc[:, "Shirtinator"] = sample.nasa[sample.nasa["website"] == "shirtinator"]["score"].values
    ax = df.plot.box(vert=False,
                     figsize=(6, 2.5),
                     widths=[.45, .45],
                     color={"whiskers": "black", "boxes": "black", 'medians': '#D62728'},
                     medianprops={'linewidth': 2.8})
    ax.set_xlabel("NASA-TLX Score")
    plt.tight_layout()
    plt.show()
def plot_quesi(sample):
    """Horizontal box plot comparing QUESI scores per website.

    Reads ``sample.quesi`` (rows with a ``website`` and a ``score``
    column) and draws one box per website.
    """
    df = pd.DataFrame(columns=["Shirtinator", "Spreadshirt"])
    df.loc[:, "Spreadshirt"] = sample.quesi[sample.quesi["website"] == "spreadshirt"]["score"].values
    df.loc[:, "Shirtinator"] = sample.quesi[sample.quesi["website"] == "shirtinator"]["score"].values
    ax = df.plot.box(vert=False,
                     figsize=(6, 2.5),
                     widths=[.45, .45],
                     color={"whiskers": "black", "boxes": "black", 'medians': '#D62728'},
                     medianprops={'linewidth': 2.8})
    ax.set_xlabel("QUESI Score")
    plt.tight_layout()
    plt.show()
def plot_feedback(sample):
    """Horizontal box plot labelled "positiv : negativ" per website.

    NOTE(review): this body reads ``sample.quesi`` and is identical to
    ``plot_quesi`` except for the x-axis label — it looks like a
    copy-paste that was meant to read feedback data instead; confirm
    against the analysis that calls it.
    """
    df = pd.DataFrame(columns=["Shirtinator", "Spreadshirt"])
    df.loc[:, "Spreadshirt"] = sample.quesi[sample.quesi["website"] == "spreadshirt"]["score"].values
    df.loc[:, "Shirtinator"] = sample.quesi[sample.quesi["website"] == "shirtinator"]["score"].values
    ax = df.plot.box(vert=False,
                     figsize=(6, 2.5),
                     widths=[.45, .45],
                     color={"whiskers": "black", "boxes": "black", 'medians': '#D62728'},
                     medianprops={'linewidth': 2.8})
    ax.set_xlabel("positiv : negativ")
    plt.tight_layout()
    plt.show()
def plot_clicks(sample):
    """Horizontal box plot comparing mouse-click counts per website.

    Reads ``sample.mouse`` (rows with a ``website`` and a ``clicks``
    column) and draws one box per website.
    """
    df = pd.DataFrame(columns=["Shirtinator", "Spreadshirt"])
    df.loc[:, "Spreadshirt"] = sample.mouse[sample.mouse["website"] == "spreadshirt"]["clicks"].values
    df.loc[:, "Shirtinator"] = sample.mouse[sample.mouse["website"] == "shirtinator"]["clicks"].values
    ax = df.plot.box(vert=False,
                     figsize=(6, 2.5),
                     widths=[.45, .45],
                     color={"whiskers": "black", "boxes": "black", 'medians': '#D62728'},
                     medianprops={'linewidth': 2.8})
    # Fix: the label was misspelled "Absolute Häufigkiet" in the original.
    ax.set_xlabel("Absolute Häufigkeit")
    plt.tight_layout()
    plt.show()
def plot_choice(sample):
    """Bar chart of which website participants judged better overall.

    NOTE(review): the hard-coded index labels assume ``value_counts()``
    returns exactly these three answers in this frequency order — a
    change in the data would silently mislabel the bars; confirm.
    """
    choice = sample.post.besser.value_counts()
    choice.index = ["Shirtinator besser", "Spreadshirt besser", "Beide gleich gut"]
    ax = choice.sort_values().plot.barh(color="#555753", figsize=(7.7, 2.3))
    ax.set_xlabel("Absolute Häufigkeit")
    ax.set_ylabel("Bewertung")
    ax.xaxis.set_major_locator(MaxNLocator(integer=True))
    plt.tight_layout()
    plt.show()
def plot_assistance(sample):
    """Grouped bar chart of how often participants needed assistance.

    Reads ``sample.assistance`` (rows with a ``website`` and an ``n``
    column), counts how many participants needed 0, 1, 2, ... hints per
    website, and plots the two websites side by side.
    """
    df = pd.DataFrame(columns=["Shirtinator", "Spreadshirt"])
    df.loc[:, "Spreadshirt"] = sample.assistance[sample.assistance["website"] == "spreadshirt"]["n"].values
    df.loc[:, "Shirtinator"] = sample.assistance[sample.assistance["website"] == "shirtinator"]["n"].values
    # Frequency of each assistance count, per website.
    assistance = pd.DataFrame({"Shirtinator": df["Shirtinator"].value_counts(),
                               "Spreadshirt": df["Spreadshirt"].value_counts()})
    ax = assistance.plot.barh(figsize=(7.7, 2.4), color=("#D3D7CF", "grey"))
    ax.set_xlabel("Absolute Häufigkeit")
    ax.set_ylabel("Anzahl Hilfestellungen")
    plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., title="Website")
    ax.xaxis.set_major_locator(MaxNLocator(integer=True))
    plt.tight_layout()
    plt.show()
def plot_mistakes(sample):
    """Grouped bar chart of how many mistakes participants made.

    Reads ``sample.mistakes`` (rows with a ``website`` and an ``n``
    column), counts how many participants made 0, 1, 2, ... mistakes per
    website, and plots the two websites side by side.
    """
    df = pd.DataFrame(columns=["Shirtinator", "Spreadshirt"])
    df.loc[:, "Spreadshirt"] = sample.mistakes[sample.mistakes["website"] == "spreadshirt"]["n"].values
    df.loc[:, "Shirtinator"] = sample.mistakes[sample.mistakes["website"] == "shirtinator"]["n"].values
    # Frequency of each mistake count, per website.
    mistakes = pd.DataFrame({"Shirtinator": df["Shirtinator"].value_counts(),
                             "Spreadshirt": df["Spreadshirt"].value_counts()})
    ax = mistakes.plot.barh(figsize=(7.7, 2.4), color=("#D3D7CF", "grey"))
    ax.set_xlabel("Absolute Häufigkeit")
    ax.set_ylabel("Anzahl Fehler")
    plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., title="Website")
    ax.xaxis.set_major_locator(MaxNLocator(integer=True))
    plt.tight_layout()
    plt.show()
1956a073071c209bbc51cae342eaafe640bb4376 | 508 | py | Python | v1/chapter13/5-takeScreenshot.py | QTYResources/python-scraping | d7afe25a012fb5d079ee42372c7fce94b9494b9f | [
"MIT"
] | null | null | null | v1/chapter13/5-takeScreenshot.py | QTYResources/python-scraping | d7afe25a012fb5d079ee42372c7fce94b9494b9f | [
"MIT"
] | null | null | null | v1/chapter13/5-takeScreenshot.py | QTYResources/python-scraping | d7afe25a012fb5d079ee42372c7fce94b9494b9f | [
"MIT"
] | null | null | null | from selenium import webdriver
from selenium.webdriver.remote.webelement import WebElement
from selenium.webdriver import ActionChains
#REPLACE WITH YOUR DRIVER PATH. EXAMPLES FOR CHROME AND PHANTOMJS
driver = webdriver.PhantomJS(executable_path='../phantomjs-2.1.1-macosx/bin/phantomjs')
#driver = webdriver.Chrome(executable_path='../chromedriver/chromedriver')
driver.implicitly_wait(5)
driver.get('http://www.pythonscraping.com/')
driver.get_screenshot_as_file('tmp/pythonscraping.png')
driver.close() | 39.076923 | 87 | 0.818898 | from selenium import webdriver
from selenium.webdriver.remote.webelement import WebElement
from selenium.webdriver import ActionChains
#REPLACE WITH YOUR DRIVER PATH. EXAMPLES FOR CHROME AND PHANTOMJS
# NOTE(review): PhantomJS support was removed from recent Selenium
# releases — a headless Chrome/Firefox driver may be needed instead.
driver = webdriver.PhantomJS(executable_path='../phantomjs-2.1.1-macosx/bin/phantomjs')
#driver = webdriver.Chrome(executable_path='../chromedriver/chromedriver')
# Wait up to 5 seconds for elements to appear before raising.
driver.implicitly_wait(5)
driver.get('http://www.pythonscraping.com/')
# Save a full-page screenshot; the tmp/ directory must already exist.
driver.get_screenshot_as_file('tmp/pythonscraping.png')
driver.close()
a214736461c3b22a8ec607bd797d4d9b7f88a194 | 1,736 | py | Python | oilandrope/urls.py | oil-rope/oil-and-rope | 6d59c87d4809f120417a90c1624952085486bb06 | [
"MIT"
] | 8 | 2019-08-27T20:08:22.000Z | 2021-07-23T22:49:47.000Z | oilandrope/urls.py | oil-rope/oil-and-rope | 6d59c87d4809f120417a90c1624952085486bb06 | [
"MIT"
] | 73 | 2020-03-11T18:07:29.000Z | 2022-03-28T18:07:47.000Z | oilandrope/urls.py | oil-rope/oil-and-rope | 6d59c87d4809f120417a90c1624952085486bb06 | [
"MIT"
] | 4 | 2020-02-22T19:44:17.000Z | 2022-03-08T09:42:45.000Z | """oilandrope URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls.i18n import i18n_patterns
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import include, path
from django.views.i18n import JavaScriptCatalog
urlpatterns = [
# JavaScript translations
path('jsi18n/', JavaScriptCatalog.as_view(), name='javascript-catalog'),
]
urlpatterns += i18n_patterns(
# Main site
path('', include('core.urls')),
# Admin site
path('admin/', admin.site.urls),
# API
path('api/', include('api.urls')),
# Common
path('common/', include('common.urls')),
# Auth system
path('accounts/', include('registration.urls')),
# Bot
path('bot/', include('bot.urls')),
# Dynamic Menu
path('dynamic_menu/', include('dynamic_menu.urls')),
# React FrontEnd
path('frontend/', include('frontend.urls')),
# Roleplay
path('roleplay/', include('roleplay.urls')),
prefix_default_language=False,
)
if settings.DEBUG: # pragma: no cover
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 32.754717 | 80 | 0.694124 | """oilandrope URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls.i18n import i18n_patterns
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import include, path
from django.views.i18n import JavaScriptCatalog
# Routes that must not carry a language prefix.
urlpatterns = [
    # JavaScript translations
    path('jsi18n/', JavaScriptCatalog.as_view(), name='javascript-catalog'),
]
# All remaining routes are language-aware; the default language gets no
# URL prefix (prefix_default_language=False).
urlpatterns += i18n_patterns(
    # Main site
    path('', include('core.urls')),
    # Admin site
    path('admin/', admin.site.urls),
    # API
    path('api/', include('api.urls')),
    # Common
    path('common/', include('common.urls')),
    # Auth system
    path('accounts/', include('registration.urls')),
    # Bot
    path('bot/', include('bot.urls')),
    # Dynamic Menu
    path('dynamic_menu/', include('dynamic_menu.urls')),
    # React FrontEnd
    path('frontend/', include('frontend.urls')),
    # Roleplay
    path('roleplay/', include('roleplay.urls')),
    prefix_default_language=False,
)
# Serve uploaded media directly only in development.
if settings.DEBUG:  # pragma: no cover
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
fcdef3a1ffd60dc99df10f30217799615727b51f | 4,467 | py | Python | nbs/matricula.py | ronaldokun/cpm-joao-XXIII | 0de3dc0a269d3c5bf17c5c0b111b93b01551af8d | [
"MIT"
] | null | null | null | nbs/matricula.py | ronaldokun/cpm-joao-XXIII | 0de3dc0a269d3c5bf17c5c0b111b93b01551af8d | [
"MIT"
] | null | null | null | nbs/matricula.py | ronaldokun/cpm-joao-XXIII | 0de3dc0a269d3c5bf17c5c0b111b93b01551af8d | [
"MIT"
] | 1 | 2019-11-02T22:47:13.000Z | 2019-11-02T22:47:13.000Z | # -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.3'
# jupytext_version: 0.8.6
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import re
# Access and edit Google Sheets by gspread
import gspread
# Module to transform gsheets to data frame
import gspread_dataframe as gs_to_df
from oauth2client.service_account import ServiceAccountCredentials
import datetime as dt
from pathlib import *
import sys
path = PurePath('__file__')
sys.path.insert(0, str(Path(path.parent).resolve().parent))
from cpm import functions as f
TEMPLATE = "Feedback_Template"
MATRICULA = "3. Planilha Matrículas 2019 - 1o sem"
MATR_ABA = "João XXIII"
MATR_CLEANED = "J23_Matrícula_2019-1S"
main()
planilhas
| 22.560606 | 95 | 0.614954 | # -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.3'
# jupytext_version: 0.8.6
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import re
# Access and edit Google Sheets by gspread
import gspread
# Module to transform gsheets to data frame
import gspread_dataframe as gs_to_df
from oauth2client.service_account import ServiceAccountCredentials
import datetime as dt
from pathlib import *
import sys
# NOTE(review): PurePath('__file__') uses the literal string, not the
# __file__ variable — presumably because this notebook-derived script
# runs where __file__ is undefined; confirm the resulting sys.path entry.
path = PurePath('__file__')
sys.path.insert(0, str(Path(path.parent).resolve().parent))
from cpm import functions as f
# Google Sheets document / worksheet names used by main().
TEMPLATE = "Feedback_Template"
MATRICULA = "3. Planilha Matrículas 2019 - 1o sem"
MATR_ABA = "João XXIII"
MATR_CLEANED = "J23_Matrícula_2019-1S"
def check_feedback(gc, name):
    """Load the feedback spreadsheet *name* via the gspread client *gc*.

    Opens the sheet, converts it to a string-typed DataFrame, parses the
    'Data' column as ``%d/%m/%Y`` dates, and blanks out 'nan' strings.

    NOTE(review): the cleaned frame is never returned — ``aloc`` is
    discarded when the function exits; a ``return aloc`` was likely
    intended.  Also note the local ``f`` shadows the module alias
    ``from cpm import functions as f`` inside this function.
    """
    aloc = gc.open(name)
    # Convert gsheet to df
    aloc = gs_to_df.get_as_dataframe(aloc, dtype=str)
    # Transform String Dates to datetime
    f = lambda x : dt.datetime.strptime(x, "%d/%m/%Y")
    aloc['Data'] = aloc['Data'].map(f)
    # correct 'nan' strings to ''
    aloc.replace('nan', '', inplace=True)
def split_date_hour(col):
    """Split a "date hour" string on spaces into a pandas Series.

    e.g. ``"01/02/2019 10:30"`` -> ``Series(["01/02/2019", "10:30"])``.
    """
    parts = col.split(" ")
    return pd.Series(parts)
def concat_names(x, y):
    """Join two name parts with a single space.

    Works on plain strings and element-wise on pandas Series alike,
    because it relies on the operands' own ``+`` semantics.
    """
    separator = " "
    return x + separator + y
def split_celfone(col):
    """Normalise a Brazilian mobile number to ``"(DD) NNNNN-NNNN"``.

    Expects an area code in parentheses followed by nine digits (a 5+4
    mobile number), e.g. ``"(011) 912345678"`` -> ``"(11) 91234-5678"``.
    Only the last two digits of the area code are kept (the trunk "0" is
    dropped).  Anything that does not match — including non-string
    values such as NaN — is returned unchanged.
    """
    if not isinstance(col, str):  # was type(col) == str; isinstance is idiomatic
        return col
    # Raw string avoids invalid-escape warnings for \( and \d.
    pattern = r".*\(.*(\d{2})\).*(\d{5})(\d{4}).*"
    # On a full match re.split yields ['', area, prefix, suffix, ''].
    split = re.split(pattern, col)
    if len(split) >= 4:
        return "(" + split[1] + ")" + " " + split[2] + "-" + split[3]
    return col
def split_fone(col):
    """Normalise a Brazilian landline number to ``"(DD) NNNN-NNNN"``.

    Expects an area code in parentheses followed by eight digits (a 4+4
    landline), e.g. ``"(011) 12345678"`` -> ``"(11) 1234-5678"``.  Only
    the last two digits of the area code are kept.  Non-matching values
    and non-strings (e.g. NaN) are returned unchanged.
    """
    if not isinstance(col, str):  # was type(col) == str; isinstance is idiomatic
        return col
    # Raw string avoids invalid-escape warnings; the original pattern had
    # a redundant alternation (\d{4}|\d{4}) with identical branches,
    # simplified here to (\d{4}) with no behaviour change.
    pattern = r".*\(.*(\d{2})\).*(\d{4})(\d{4}).*"
    # On a full match re.split yields ['', area, prefix, suffix, ''].
    split = re.split(pattern, col)
    if len(split) >= 4:
        return "(" + split[1] + ")" + " " + split[2] + "-" + split[3]
    return col
def preprocess_df(df):
    """Clean the raw enrolment sheet in place and return it.

    Splits the mixed "Data/Hora preenchimento" column into separate date
    and time columns (rows marked "Presencial" / "Lista de Espera" keep
    that marker in both), upper-cases and merges name columns, and
    normalises the phone-number columns via split_celfone / split_fone.

    NOTE(review): mutates *df* in place (renames, dels) and also returns
    it; the statement order below is load-bearing.
    """
    # Rows whose timestamp cell holds a marker instead of "date time".
    presencial = df["Data/Hora preenchimento"] == "Presencial"
    espera = df["Data/Hora preenchimento"] == "Lista de Espera"
    # Real timestamps, expanded into a two-column frame (date, time).
    pre = df[~ presencial & ~ espera]["Data/Hora preenchimento"]
    data_hora = pre.apply(split_date_hour)
    # Marker rows keep their marker string as both "date" and "time".
    data = pd.Series.append(df[presencial]["Data/Hora preenchimento"],
                            df[espera]["Data/Hora preenchimento"])
    data = data.append(data_hora.iloc[:, 0]).sort_index()
    hora = pd.Series.append(df[presencial]["Data/Hora preenchimento"],
                            df[espera]["Data/Hora preenchimento"])
    hora = hora.append(data_hora.iloc[:, 1]).sort_index()
    df.rename(columns={"Data/Hora preenchimento": "Data_Pré_Matrícula"},
              inplace=True)
    df["Data_Pré_Matrícula"] = data
    df["Hora_Pré_Matrícula"] = hora
    # Upper-case / trim names, then merge first + last name columns.
    df["Nome"] = df["Nome"].apply(str.upper).apply(str.strip)
    df["Sobrenome"] = df["Sobrenome"].apply(str.upper).apply(str.strip)
    df["Nome Responsável"] = df["Nome Responsável"].apply(str.upper).apply(str.strip)
    df["Sobrenome Responsável"] = df["Sobrenome Responsável"].apply(str.upper).apply(str.strip)
    df["Nome Responsável"] = concat_names(df["Nome Responsável"],
                                          df["Sobrenome Responsável"])
    del df["Sobrenome Responsável"]
    df["Nome"] = concat_names(df["Nome"], df["Sobrenome"])
    del df["Sobrenome"]
    # Normalise phone columns and give them short names.
    df.rename(columns={"Telefone Celular ex: (011) 00000-0000": "Tel_Celular"},
              inplace=True)
    df["Tel_Celular"] = df["Tel_Celular"].apply(split_celfone)
    df.rename(columns={"Telefone Fixo ex: (011) 000-0000": "Tel_Fixo"},
              inplace=True)
    df["Tel_Fixo"] = df["Tel_Fixo"].apply(split_fone)
    df.rename(columns={"Celular do Responsável": "Celular_Responsável"},
              inplace=True)
    df["Celular_Responsável"] = df["Celular_Responsável"].apply(split_celfone)
    df.rename(columns={"RG \n(apenas números)" : "RG"}, inplace=True)
    return df
def main():
    """Load the raw enrolment sheet, clean it, and publish the result.

    Pipeline: authenticate with Google, pull the MATRICULA workbook's
    MATR_ABA worksheet, clean it with preprocess_df, round-trip through
    matricula.csv (which coerces everything to str), then overwrite the
    "JoãoXXIII" worksheet of the MATR_CLEANED spreadsheet.

    NOTE(review): COLUNAS is not defined in this file's visible scope —
    confirm it is provided elsewhere (e.g. by cpm.functions).
    """
    gc = f.authenticate()
    wb = f.load_workbooks_from_drive()[MATRICULA]
    df = f.load_sheet_from_workbook(wb, MATR_ABA, skiprows=[1,2])[1]
    df = df.fillna('')
    df = preprocess_df(df)
    df.to_csv("matricula.csv", sep=",", index=False, columns=COLUNAS, na_rep='')
    df = pd.read_csv("matricula.csv", dtype=str, na_values='')
    matricula = gc.open(MATR_CLEANED)
    wks = matricula.worksheet("JoãoXXIII")
    # Clear the target sheet before writing the cleaned frame.
    wks.clear()
    gs_to_df.set_with_dataframe(worksheet=wks, dataframe=df)
main()
planilhas
| 3,444 | 0 | 161 |
4f37a655a86382d93f846b1a755b235f3d4d154b | 114 | py | Python | euler/2.py | sara-02/dsa_sg | 7c34b17772db728419070d35664ad75c67645b1e | [
"MIT"
] | null | null | null | euler/2.py | sara-02/dsa_sg | 7c34b17772db728419070d35664ad75c67645b1e | [
"MIT"
] | null | null | null | euler/2.py | sara-02/dsa_sg | 7c34b17772db728419070d35664ad75c67645b1e | [
"MIT"
] | null | null | null | a = 1
b = 2
c = a + b
sum = 2
while c <= 4000000:
if c%2 == 0:
sum =sum + c
a = b
b = c
c = a + b
print sum | 9.5 | 19 | 0.447368 | a = 1
# Project Euler #2: sum the even-valued Fibonacci terms not exceeding
# 4,000,000 (Python 2 script; `a` is initialised to 1 just above).
b = 2
c = a + b
# Start at 2 because the term b == 2 is even and precedes the loop.
# NOTE: `sum` shadows the builtin of the same name.
sum = 2
while c <= 4000000:
    if c%2 == 0:
        sum =sum + c
    # Advance the Fibonacci sequence: (a, b, c) -> (b, c, b + c).
    a = b
    b = c
    c = a + b
print sum | 0 | 0 | 0 |
1b04f65985d461f2954f5b017d2799bccd841cfe | 1,828 | py | Python | sciencebeam_gym/structured_document/lxml.py | elifesciences/sciencebeam-gym | 3ad654e08775e0c0cdd256753e14093bb5a42d44 | [
"MIT"
] | 25 | 2017-07-25T12:44:55.000Z | 2020-09-30T22:16:50.000Z | sciencebeam_gym/structured_document/lxml.py | elifesciences/sciencebeam-gym | 3ad654e08775e0c0cdd256753e14093bb5a42d44 | [
"MIT"
] | 192 | 2017-11-29T08:57:03.000Z | 2022-03-29T18:44:41.000Z | sciencebeam_gym/structured_document/lxml.py | elifesciences/sciencebeam-gym | 3ad654e08775e0c0cdd256753e14093bb5a42d44 | [
"MIT"
] | 6 | 2019-02-01T18:49:33.000Z | 2020-07-26T08:18:46.000Z | from sciencebeam_utils.utils.xml import (
set_or_remove_attrib
)
from sciencebeam_gym.utils.bounding_box import (
BoundingBox
)
from sciencebeam_gym.structured_document import (
AbstractStructuredDocument,
get_scoped_attrib_name,
get_attrib_by_scope
)
TAG_ATTRIB_NAME = 'tag'
| 27.283582 | 84 | 0.688731 | from sciencebeam_utils.utils.xml import (
set_or_remove_attrib
)
from sciencebeam_gym.utils.bounding_box import (
BoundingBox
)
from sciencebeam_gym.structured_document import (
AbstractStructuredDocument,
get_scoped_attrib_name,
get_attrib_by_scope
)
TAG_ATTRIB_NAME = 'tag'
def get_node_bounding_box(t):
return BoundingBox(
float(t.attrib.get('x', 0)),
float(t.attrib.get('y', 0)),
float(t.attrib['width']),
float(t.attrib['height'])
)
def _get_tag_attrib_name(scope, level):
return get_scoped_attrib_name(TAG_ATTRIB_NAME, scope=scope, level=level)
class LxmlStructuredDocument(AbstractStructuredDocument):
def __init__(self, root):
self.root = root
def get_pages(self):
return self.root.findall('.//PAGE')
def get_lines_of_page(self, page):
return page.findall('.//TEXT')
def get_tokens_of_line(self, line):
return line.findall('./TOKEN')
def get_x(self, parent):
return parent.attrib.get('x')
def get_text(self, parent):
return parent.text
def get_tag(self, parent, scope=None, level=None):
return parent.attrib.get(_get_tag_attrib_name(scope, level))
def set_tag(self, parent, tag, scope=None, level=None):
set_or_remove_attrib(parent.attrib, _get_tag_attrib_name(scope, level), tag)
def get_tag_by_scope(self, parent):
return get_attrib_by_scope(parent.attrib, TAG_ATTRIB_NAME)
def get_bounding_box(self, parent):
return get_node_bounding_box(parent)
def set_bounding_box(self, parent, bounding_box):
parent.attrib['x'] = str(bounding_box.x)
parent.attrib['y'] = str(bounding_box.y)
parent.attrib['width'] = str(bounding_box.width)
parent.attrib['height'] = str(bounding_box.height)
| 1,125 | 36 | 365 |
c1362fe8deec65f051b72fe3170e2c65b18eeedd | 237 | py | Python | numpy/numpyIteratingOverArray.py | slowy07/pythonApps | 22f9766291dbccd8185035745950c5ee4ebd6a3e | [
"MIT"
] | 10 | 2020-10-09T11:05:18.000Z | 2022-02-13T03:22:10.000Z | numpy/numpyIteratingOverArray.py | khairanabila/pythonApps | f90b8823f939b98f7bf1dea7ed35fe6e22e2f730 | [
"MIT"
] | null | null | null | numpy/numpyIteratingOverArray.py | khairanabila/pythonApps | f90b8823f939b98f7bf1dea7ed35fe6e22e2f730 | [
"MIT"
] | 6 | 2020-11-26T12:49:43.000Z | 2022-03-06T06:46:43.000Z | import numpy as np
a = np.arange(12)
# shape array with 3 rows and
# 4 columns
a = a.reshape(3,4)
print('Original array is:')
print(a)
print()
print('Modified array is:')
# iterating an array
for x in np.nditer(a):
print(x) | 14.8125 | 30 | 0.654008 | import numpy as np
a = np.arange(12)
# shape array with 3 rows and
# 4 columns
a = a.reshape(3,4)
print('Original array is:')
print(a)
print()
print('Modified array is:')
# iterating an array
for x in np.nditer(a):
print(x) | 0 | 0 | 0 |
f72b21cb7cd90c4cedf514ee804f2b47f748ee67 | 4,395 | py | Python | runner.py | Robinson04/mdscript | 7a89a4453f0266a5ed318eceebc12b401e419ff4 | [
"MIT"
] | null | null | null | runner.py | Robinson04/mdscript | 7a89a4453f0266a5ed318eceebc12b401e419ff4 | [
"MIT"
] | 1 | 2021-07-27T21:03:40.000Z | 2021-07-27T21:03:40.000Z | runner.py | Robinson04/mdscript | 7a89a4453f0266a5ed318eceebc12b401e419ff4 | [
"MIT"
] | null | null | null | import logging
import os
import re
from pathlib import Path
from typing import Any
from mdscript.files_dependencies_manager import FilesDependenciesManager
from mdscript.watcher import Watcher
| 49.943182 | 145 | 0.669852 | import logging
import os
import re
from pathlib import Path
from typing import Any
from mdscript.files_dependencies_manager import FilesDependenciesManager
from mdscript.watcher import Watcher
class Runner:
def __init__(self, config: Any, base_dirpath: str):
self.config = config
self.base_dirpath = base_dirpath
self.watcher = Watcher(runner=self)
self.files_dependencies = FilesDependenciesManager(watcher=self.watcher)
def _run_in_file(self, source_filepath: str, output_filepath: str, run_test: bool):
try:
with open(source_filepath, 'r') as source_markdown_file:
source_file_content = source_markdown_file.read()
rendered_file_content = ""
remaining_unprocessed_file_content = source_file_content
transformers_names_selectors: str = '|'.join(self.config.transformers.keys())
transformers_regex = '({{)' + f'({transformers_names_selectors})' + '(::)((.|\n)*)(::}})'
# Instead of looking for each transformer one by one, we create a simple regex tasked with finding any transformer
for match in re.finditer(pattern=transformers_regex, string=source_file_content):
match_start = match.start()
match_end = match.end()
index_relative_to_remaining_unprocessed = len(source_file_content) - len(remaining_unprocessed_file_content)
unprocessed_text_pre_match = remaining_unprocessed_file_content[0:match_start - index_relative_to_remaining_unprocessed]
remaining_unprocessed_file_content = remaining_unprocessed_file_content[match_end - index_relative_to_remaining_unprocessed:]
transformer_name = match[2]
transformer_attribute = match[4]
transformer_class_type = self.config.transformers.get(transformer_name, None)
if transformer_class_type is None:
raise Exception(f"No transformer found for {transformer_name}")
transformer_instance = transformer_class_type(
runner=self, source_filepath=source_filepath, attribute=transformer_attribute
)
if run_test is True:
transformer_instance.test()
transformed_content = transformer_instance.transform()
rendered_file_content += f"{unprocessed_text_pre_match}{transformed_content}"
rendered_file_content += remaining_unprocessed_file_content
with open(output_filepath, 'w+') as output_file:
output_file.write(rendered_file_content)
except Exception as e:
logging.warning(e)
def _run_with_filepath(self, source_filepath: str, run_test: bool):
source_filepath_object = Path(source_filepath)
formatted_output_filename = source_filepath_object.name[2:]
output_filepath = os.path.join(source_filepath_object.parent, formatted_output_filename)
self._run_in_file(source_filepath=source_filepath, output_filepath=output_filepath, run_test=run_test)
def _run_in_folder(self, dirpath: str, run_tests: bool):
for root_dirpath, dirs, filenames in os.walk(dirpath):
for filename in filenames:
if filename[0:2] == '__':
source_filepath = os.path.join(root_dirpath, filename)
output_filename = filename[2:]
output_filepath = os.path.join(root_dirpath, output_filename)
self._run_in_file(source_filepath=source_filepath, output_filepath=output_filepath, run_test=run_tests)
def _start(self, run_tests: bool):
    """Run one full build of ``base_dirpath``, then watch it for changes."""
    # When starting the runner, we first run the base_dirpath folder once, which
    # will build all of our mdscript files, and index all the dependency files.
    self._run_in_folder(dirpath=self.base_dirpath, run_tests=run_tests)
    # Then, we simply start the watcher, which will always watch the entire base_dirpath
    # folder, and all of the dependencies files will have already been added to its watch.
    self.watcher.start()
def start(self):
    """Build everything and start watching, without running transformer tests."""
    self._start(run_tests=False)
def start_with_tests(self):
    """Build everything and start watching, running each transformer's tests first."""
    self._start(run_tests=True)
| 3,996 | -8 | 211 |
788c9bc3871acd7be7a51f5f8566f799e83e6d86 | 1,504 | py | Python | nlptools/old/normalizeEmoji.py | hayj/NLPTools | 46562fc03195c8fafb0654aa3f887b9b5c45a869 | [
"MIT"
] | 4 | 2019-01-18T14:20:06.000Z | 2021-02-04T07:55:42.000Z | nlptools/old/normalizeEmoji.py | hayj/NLPTools | 46562fc03195c8fafb0654aa3f887b9b5c45a869 | [
"MIT"
] | null | null | null | nlptools/old/normalizeEmoji.py | hayj/NLPTools | 46562fc03195c8fafb0654aa3f887b9b5c45a869 | [
"MIT"
] | null | null | null | emojisAsciiToUtf8Strict = None
emojisAsciiToUtf8 = None | 37.6 | 88 | 0.701463 | emojisAsciiToUtf8Strict = None
emojisAsciiToUtf8 = None
def normalizeEmojis(text, logger=None, verbose=True):
    """Replace ASCII emoticons in *text* by their UTF-8 emoji equivalents.

    The mapping is lazily loaded once, on first call, from
    ``data/emojis/emojis-ascii-to-utf82.txt`` into two module-level caches:

      * ``emojisAsciiToUtf8Strict`` -- emoticons only replaced when they
        stand alone, i.e. surrounded by whitespace (lines whose third
        tab-separated field is ``"|"``);
      * ``emojisAsciiToUtf8`` -- emoticons replaced anywhere in the text.

    :param text: text to normalize; returned unchanged when ``None`` or
        at most one character long.
    :param logger: optional logger forwarded to ``logError`` for
        malformed mapping lines.
    :param verbose: forwarded to ``logError``.
    :return: the normalized text.
    """
    global emojisAsciiToUtf8Strict
    global emojisAsciiToUtf8
    if text is None or len(text) <= 1:
        return text
    # Lazily build both caches on first use.
    if emojisAsciiToUtf8Strict is None or emojisAsciiToUtf8 is None:
        emojisAsciiToUtf8Strict = dict()
        emojisAsciiToUtf8 = dict()
        lines = fileToStrList(getExecDir(__file__) + "/data/emojis/emojis-ascii-to-utf82.txt")
        for line in lines:
            line = line.split("\t")
            if len(line) == 3 and line[2] == "|":
                # Trailing "|" marks a strict (whitespace-delimited) emoticon.
                emojisAsciiToUtf8Strict[line[0]] = line[1]
            elif len(line) == 2:
                emojisAsciiToUtf8[line[0]] = line[1]
            else:
                logError("this line is not well formed:\n" + str(line), logger, verbose=verbose)
    # Pad with spaces so emoticons at the very start/end of the text also
    # match the whitespace-delimited pattern below.
    text = " " + text + " "
    for asciiEmoji, utf8Emoji in emojisAsciiToUtf8Strict.items():
        # Escape regex metacharacters frequent in emoticons, e.g. ):(|.
        asciiEmoji = re.escape(asciiEmoji)
        # BUG FIX: use raw strings for the regex pieces -- "\s" and "\g<1>"
        # are regex escapes, not Python string escapes (non-raw forms emit
        # DeprecationWarning and are slated to become syntax errors).
        currentRegex = re.compile(r"(\s)(" + asciiEmoji + r")(\s)")
        text = currentRegex.sub(r"\g<1>" + utf8Emoji + r"\g<3>", text)
    for asciiEmoji, utf8Emoji in emojisAsciiToUtf8.items():
        text = text.replace(asciiEmoji, utf8Emoji)
    # Drop the padding spaces added above.
    text = text[1:-1]
    return text
# printLTS(emojisAsciiToUtf8Strict)
# printLTS(emojisAsciiToUtf8) | 1,427 | 0 | 22 |
89b1295f6392c0397206c422a8068e8536fd8479 | 1,852 | py | Python | mail_templated/tests.py | aptivate/django-mail-templated | 82dff067baf8aa3dfff8d0fd9d4d447b15968356 | [
"MIT"
] | null | null | null | mail_templated/tests.py | aptivate/django-mail-templated | 82dff067baf8aa3dfff8d0fd9d4d447b15968356 | [
"MIT"
] | null | null | null | mail_templated/tests.py | aptivate/django-mail-templated | 82dff067baf8aa3dfff8d0fd9d4d447b15968356 | [
"MIT"
] | null | null | null | from django.conf import settings
from django.test import TestCase
from mail_templated import EmailMessage
| 34.943396 | 75 | 0.606371 | from django.conf import settings
from django.test import TestCase
from mail_templated import EmailMessage
class MailTemplatedTest(TestCase):
    """Tests for mail_templated.EmailMessage template-driven rendering.

    NOTE(review): assertItemsEqual exists only in Python 2's unittest;
    Python 3 renamed it assertCountEqual -- confirm the targeted runtime.
    """

    def test_simple(self):
        """A complete template yields subject, sender, extra headers and body."""
        email = EmailMessage('simple.email',
            {'foo': 'bar'}, to=['test@example.com'])
        self.assertEqual('kevin spacey', email.subject)
        self.assertEqual('The Admin <admin@example.com>', email.from_email)
        self.assertItemsEqual(['test@example.com'], email.to)
        self.assertDictEqual({
            'X-Wrapped': 'wrap\twrap',
            'X-Other': 'whee',
        }, email.extra_headers)
        self.assertEqual('Hello bar.', email.body)

    def test_body_only(self):
        """A body-only template falls back to empty subject and default sender."""
        email = EmailMessage('body_only.email',
            {'foo': 'bar'}, to=['test@example.com'])
        self.assertEqual('', email.subject)
        self.assertEqual(settings.DEFAULT_FROM_EMAIL, email.from_email)
        self.assertItemsEqual(['test@example.com'], email.to)
        self.assertDictEqual({}, email.extra_headers)
        self.assertEqual('Only bar.', email.body)

    def test_change_template_then_context(self):
        """Switching template and then replacing the context re-renders the body."""
        email = EmailMessage('body_only.email',
            {'foo': 'bar'}, to=['test@example.com'])
        email.template_name = 'simple.email'
        # writing to the context doesn't change anything
        # email.context['foo'] = 'baz'
        # but replacing it does:
        email.context = {'foo': 'baz'}
        self.assertEqual('Hello baz.', email.body)

    def test_change_context_then_template(self):
        """Mutating the context and then switching template re-renders with the new value."""
        email = EmailMessage('simple.email',
            {'foo': 'bar'}, to=['test@example.com'])
        email.context['foo'] = 'baz'
        email.template_name = 'body_only.email'
        self.assertEqual('Only baz.', email.body)
| 1,603 | 13 | 130 |
c07cfce99e7e93bd87e94c0f6b38cbaef5489f53 | 758 | py | Python | python/python-exec/src/server.py | IMULMUL/websitesVulnerableToSSTI | 0ddcae132014c4cb7324f7cfc671e733c1e775c0 | [
"Apache-2.0"
] | null | null | null | python/python-exec/src/server.py | IMULMUL/websitesVulnerableToSSTI | 0ddcae132014c4cb7324f7cfc671e733c1e775c0 | [
"Apache-2.0"
] | null | null | null | python/python-exec/src/server.py | IMULMUL/websitesVulnerableToSSTI | 0ddcae132014c4cb7324f7cfc671e733c1e775c0 | [
"Apache-2.0"
] | null | null | null | from flask import *
app = Flask(__name__)
template = '<!DOCTYPE html><html><body>\
<h1>Online Calculator</h1>\
<form action="/" method="post">\
expression:<br>\
<input type="text" name="expression" value="">\
<input type="submit" value="Submit">\
</form><h2>%s </h2></body></html>'
@app.route('/',methods=['GET'])
@app.route('/',methods=['POST'])
if __name__=="__main__":
app.run("0.0.0.0",port = 5005,debug=False)
| 22.294118 | 53 | 0.585752 | from flask import *
app = Flask(__name__)
template = '<!DOCTYPE html><html><body>\
<h1>Online Calculator</h1>\
<form action="/" method="post">\
expression:<br>\
<input type="text" name="expression" value="">\
<input type="submit" value="Submit">\
</form><h2>%s </h2></body></html>'
@app.route('/',methods=['GET'])
def base():
    """Serve the calculator form with a placeholder in the result slot."""
    return template % "SEND INPUT"
@app.route('/',methods=['POST'])
def base2():
    """Evaluate the POSTed arithmetic expression and render the result page.

    SECURITY: this exec()s raw user input, i.e. arbitrary code execution.
    That is intentional here (the repository demonstrates injection
    vulnerabilities); never use this pattern on real untrusted input.
    """
    expression = ""
    if request.form['expression']:
        expression = request.form['expression']
    result = ""
    try:
        # BUG FIX: in Python 3, exec() cannot rebind a function-local name,
        # so the original exec("result =(...)") always left result == ""
        # on success. Execute in an explicit namespace and read the value
        # back out instead.
        namespace = {}
        exec("result =(" + expression + ")", namespace)
        result = namespace["result"]
    except Exception:
        # Any evaluation failure is reported as a generic error string.
        result = "error"
    html = template % result
    return html
# Start the demo server on all interfaces, port 5005, with debug disabled.
if __name__=="__main__":
    app.run("0.0.0.0",port = 5005,debug=False)
| 262 | 0 | 44 |
da35b739dbf7dca1b1293875eb6ec7cd8e6f2cc9 | 7,391 | py | Python | src/psiz/keras/models/rank.py | greenfieldvision/psiz | 37068530a78e08792e827ee55cf55e627add115e | [
"Apache-2.0"
] | 21 | 2020-04-03T21:10:05.000Z | 2021-12-02T01:31:11.000Z | src/psiz/keras/models/rank.py | greenfieldvision/psiz | 37068530a78e08792e827ee55cf55e627add115e | [
"Apache-2.0"
] | 14 | 2020-04-10T00:48:02.000Z | 2021-05-25T18:06:55.000Z | src/psiz/keras/models/rank.py | greenfieldvision/psiz | 37068530a78e08792e827ee55cf55e627add115e | [
"Apache-2.0"
] | 4 | 2020-10-13T16:46:14.000Z | 2021-11-10T00:08:47.000Z | # -*- coding: utf-8 -*-
# Copyright 2020 The PsiZ Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Module for Rank psychological embedding model.
Classes:
Rank: Class that uses ordinal observations that are anchored by a
designated query stimulus.
"""
import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K
from psiz.keras.models.psych_embedding import PsychologicalEmbedding
import psiz.keras.layers
@tf.keras.utils.register_keras_serializable(
package='psiz.keras.models', name='Rank'
)
class Rank(PsychologicalEmbedding):
"""Psychological embedding inferred from ranked similarity judgments.
Attributes:
See PsychologicalEmbedding.
"""
def __init__(self, behavior=None, **kwargs):
"""Initialize.
Arguments:
See PschologicalEmbedding.
Raises:
ValueError: If arguments are invalid.
"""
# Initialize behavioral component.
if behavior is None:
behavior = psiz.keras.layers.RankBehavior()
kwargs.update({'behavior': behavior})
super().__init__(**kwargs)
def call(self, inputs):
"""Call.
Arguments:
inputs: A dictionary of inputs:
stimulus_set: dtype=tf.int32, consisting of the
integers on the interval [0, n_stimuli[
shape=(batch_size, n_max_reference + 1, n_outcome)
is_select: dtype=tf.bool, the shape implies the
maximum number of selected stimuli in the data
shape=(batch_size, n_max_select, n_outcome)
groups: dtype=tf.int32, Integers indicating the
group membership of a trial.
shape=(batch_size, k)
"""
# Grab inputs.
stimulus_set = inputs['stimulus_set']
is_select = inputs['is_select'][:, 1:, :]
groups = inputs['groups']
# Define some useful variables before manipulating inputs.
max_n_reference = tf.shape(stimulus_set)[-2] - 1
# Repeat `stimulus_set` `n_sample` times in a newly inserted
# axis (axis=1).
# TensorShape([batch_size, n_sample, n_ref + 1, n_outcome])
stimulus_set = psiz.utils.expand_dim_repeat(
stimulus_set, self.n_sample, axis=1
)
# Enbed stimuli indices in n-dimensional space:
# TensorShape([batch_size, n_sample, n_ref + 1, n_outcome, n_dim])
if self._use_group['stimuli']:
z = self.stimuli([stimulus_set, groups])
else:
z = self.stimuli(stimulus_set)
# Split query and reference embeddings:
# z_q: TensorShape([batch_size, sample_size, 1, n_outcome, n_dim]
# z_r: TensorShape([batch_size, sample_size, n_ref, n_outcome, n_dim]
z_q, z_r = tf.split(z, [1, max_n_reference], -3)
# The tf.split op does not infer split dimension shape. We know that
# z_q will always have shape=1, but we don't know `max_n_reference`
# ahead of time.
z_q.set_shape([None, None, 1, None, None])
# Pass through similarity kernel.
# TensorShape([batch_size, sample_size, n_ref, n_outcome])
if self._use_group['kernel']:
sim_qr = self.kernel([z_q, z_r, groups])
else:
sim_qr = self.kernel([z_q, z_r])
# Zero out similarities involving placeholder IDs by creating
# a mask based on reference indices. We drop the query indices
# because they have effectively been "consumed" by the similarity
# operation.
is_present = tf.cast(
tf.math.not_equal(stimulus_set[:, :, 1:], 0), K.floatx()
)
sim_qr = sim_qr * is_present
# Prepare for efficient probability computation by adding
# singleton dimension for `n_sample`.
is_select = tf.expand_dims(
tf.cast(is_select, K.floatx()), axis=1
)
# Determine if outcome is legitamate by checking if at least one
# reference is present. This is important because not all trials have
# the same number of possible outcomes and we need to infer the
# "zero-padding" of the outcome axis.
is_outcome = is_present[:, :, 0, :]
# Compute probability of different behavioral outcomes.
if self._use_group['behavior']:
probs = self.behavior([sim_qr, is_select, is_outcome, groups])
else:
probs = self.behavior([sim_qr, is_select, is_outcome])
return probs
def _ranked_sequence_probability(sim_qr, n_select):
"""Return probability of a ranked selection sequence.
Arguments:
sim_qr: A 3D tensor containing pairwise similarity values.
Each row (dimension 0) contains the similarity between
a trial's query stimulus and reference stimuli. The
tensor is arranged such that the first column
corresponds to the first selection in a sequence, and
the last column corresponds to the last selection
(dimension 1). The third dimension indicates
different samples.
shape = (n_trial, n_reference, n_sample)
n_select: Scalar indicating the number of selections made
by an agent.
Returns:
A 2D tensor of probabilities.
shape = (n_trial, n_sample)
Notes:
For example, given query Q and references A, B, and C, the
probability of selecting reference A then B (in that order)
would be:
P(A)P(B|A) = s_QA/(s_QA + s_QB + s_QC) * s_QB/(s_QB + s_QC)
where s_QA denotes the similarity between the query and
reference A.
The probability is computed by starting with the last
selection for efficiency and numerical stability. In the
provided example, this corresponds to first computing the
probability of selecting B second, given that A was
selected first.
"""
n_trial = sim_qr.shape[0]
n_sample = sim_qr.shape[2]
# Initialize.
seq_prob = np.ones((n_trial, n_sample), dtype=np.float64)
selected_idx = n_select - 1
denom = np.sum(sim_qr[:, selected_idx:, :], axis=1)
for i_selected in range(selected_idx, -1, -1):
# Compute selection probability.
prob = np.divide(sim_qr[:, i_selected], denom)
# Update sequence probability.
# seq_prob = np.multiply(seq_prob, prob)
seq_prob *= prob
# Update denominator in preparation for computing the probability
# of the previous selection in the sequence.
if i_selected > 0:
# denom = denom + sim_qr[:, i_selected-1, :]
denom += sim_qr[:, i_selected - 1, :]
return seq_prob
| 37.140704 | 78 | 0.630361 | # -*- coding: utf-8 -*-
# Copyright 2020 The PsiZ Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Module for Rank psychological embedding model.
Classes:
Rank: Class that uses ordinal observations that are anchored by a
designated query stimulus.
"""
import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K
from psiz.keras.models.psych_embedding import PsychologicalEmbedding
import psiz.keras.layers
@tf.keras.utils.register_keras_serializable(
    package='psiz.keras.models', name='Rank'
)
class Rank(PsychologicalEmbedding):
    """Psychological embedding inferred from ranked similarity judgments.

    Attributes:
        See PsychologicalEmbedding.
    """

    def __init__(self, behavior=None, **kwargs):
        """Initialize.

        Arguments:
            behavior (optional): A behavior layer; defaults to a freshly
                constructed `psiz.keras.layers.RankBehavior`.
            See PsychologicalEmbedding.

        Raises:
            ValueError: If arguments are invalid.
        """
        # Initialize behavioral component.
        if behavior is None:
            behavior = psiz.keras.layers.RankBehavior()
        kwargs.update({'behavior': behavior})
        super().__init__(**kwargs)

    def call(self, inputs):
        """Call.

        Arguments:
            inputs: A dictionary of inputs:
                stimulus_set: dtype=tf.int32, consisting of the
                    integers on the interval [0, n_stimuli[
                    shape=(batch_size, n_max_reference + 1, n_outcome)
                is_select: dtype=tf.bool, the shape implies the
                    maximum number of selected stimuli in the data
                    shape=(batch_size, n_max_select, n_outcome)
                groups: dtype=tf.int32, Integers indicating the
                    group membership of a trial.
                    shape=(batch_size, k)

        Returns:
            probs: Behavioral outcome probabilities as produced by the
                behavior layer.
        """
        # Grab inputs. The first row of `is_select` along axis 1 is
        # dropped (presumably the query position, mirroring the
        # stimulus_set layout below -- TODO confirm).
        stimulus_set = inputs['stimulus_set']
        is_select = inputs['is_select'][:, 1:, :]
        groups = inputs['groups']

        # Define some useful variables before manipulating inputs.
        max_n_reference = tf.shape(stimulus_set)[-2] - 1

        # Repeat `stimulus_set` `n_sample` times in a newly inserted
        # axis (axis=1).
        # TensorShape([batch_size, n_sample, n_ref + 1, n_outcome])
        stimulus_set = psiz.utils.expand_dim_repeat(
            stimulus_set, self.n_sample, axis=1
        )

        # Embed stimuli indices in n-dimensional space:
        # TensorShape([batch_size, n_sample, n_ref + 1, n_outcome, n_dim])
        if self._use_group['stimuli']:
            z = self.stimuli([stimulus_set, groups])
        else:
            z = self.stimuli(stimulus_set)

        # Split query and reference embeddings:
        # z_q: TensorShape([batch_size, sample_size, 1, n_outcome, n_dim]
        # z_r: TensorShape([batch_size, sample_size, n_ref, n_outcome, n_dim]
        z_q, z_r = tf.split(z, [1, max_n_reference], -3)
        # The tf.split op does not infer split dimension shape. We know that
        # z_q will always have shape=1, but we don't know `max_n_reference`
        # ahead of time.
        z_q.set_shape([None, None, 1, None, None])

        # Pass through similarity kernel.
        # TensorShape([batch_size, sample_size, n_ref, n_outcome])
        if self._use_group['kernel']:
            sim_qr = self.kernel([z_q, z_r, groups])
        else:
            sim_qr = self.kernel([z_q, z_r])

        # Zero out similarities involving placeholder IDs (index 0) by
        # creating a mask based on reference indices. We drop the query
        # indices because they have effectively been "consumed" by the
        # similarity operation.
        is_present = tf.cast(
            tf.math.not_equal(stimulus_set[:, :, 1:], 0), K.floatx()
        )
        sim_qr = sim_qr * is_present

        # Prepare for efficient probability computation by adding
        # singleton dimension for `n_sample`.
        is_select = tf.expand_dims(
            tf.cast(is_select, K.floatx()), axis=1
        )

        # Determine if outcome is legitimate by checking if at least one
        # reference is present. This is important because not all trials have
        # the same number of possible outcomes and we need to infer the
        # "zero-padding" of the outcome axis.
        is_outcome = is_present[:, :, 0, :]

        # Compute probability of different behavioral outcomes.
        if self._use_group['behavior']:
            probs = self.behavior([sim_qr, is_select, is_outcome, groups])
        else:
            probs = self.behavior([sim_qr, is_select, is_outcome])
        return probs
def _ranked_sequence_probability(sim_qr, n_select):
"""Return probability of a ranked selection sequence.
Arguments:
sim_qr: A 3D tensor containing pairwise similarity values.
Each row (dimension 0) contains the similarity between
a trial's query stimulus and reference stimuli. The
tensor is arranged such that the first column
corresponds to the first selection in a sequence, and
the last column corresponds to the last selection
(dimension 1). The third dimension indicates
different samples.
shape = (n_trial, n_reference, n_sample)
n_select: Scalar indicating the number of selections made
by an agent.
Returns:
A 2D tensor of probabilities.
shape = (n_trial, n_sample)
Notes:
For example, given query Q and references A, B, and C, the
probability of selecting reference A then B (in that order)
would be:
P(A)P(B|A) = s_QA/(s_QA + s_QB + s_QC) * s_QB/(s_QB + s_QC)
where s_QA denotes the similarity between the query and
reference A.
The probability is computed by starting with the last
selection for efficiency and numerical stability. In the
provided example, this corresponds to first computing the
probability of selecting B second, given that A was
selected first.
"""
n_trial = sim_qr.shape[0]
n_sample = sim_qr.shape[2]
# Initialize.
seq_prob = np.ones((n_trial, n_sample), dtype=np.float64)
selected_idx = n_select - 1
denom = np.sum(sim_qr[:, selected_idx:, :], axis=1)
for i_selected in range(selected_idx, -1, -1):
# Compute selection probability.
prob = np.divide(sim_qr[:, i_selected], denom)
# Update sequence probability.
# seq_prob = np.multiply(seq_prob, prob)
seq_prob *= prob
# Update denominator in preparation for computing the probability
# of the previous selection in the sequence.
if i_selected > 0:
# denom = denom + sim_qr[:, i_selected-1, :]
denom += sim_qr[:, i_selected - 1, :]
return seq_prob
| 0 | 0 | 0 |
68aa52e6e2687c10a112a827fe657534a910caed | 1,512 | py | Python | dalme_api/api/comments.py | DALME/dalme | 46f9a0011fdb75c5098b552104fc73b1062e16e9 | [
"BSD-3-Clause"
] | null | null | null | dalme_api/api/comments.py | DALME/dalme | 46f9a0011fdb75c5098b552104fc73b1062e16e9 | [
"BSD-3-Clause"
] | null | null | null | dalme_api/api/comments.py | DALME/dalme | 46f9a0011fdb75c5098b552104fc73b1062e16e9 | [
"BSD-3-Clause"
] | null | null | null | from rest_framework import viewsets
from rest_framework.response import Response
from dalme_api.serializers import CommentSerializer
from dalme_app.models import Comment
from dalme_api.access_policies import CommentAccessPolicy
from dalme_app.models import *
class Comments(viewsets.ModelViewSet):
""" API endpoint for managing comments """
permission_classes = (CommentAccessPolicy,)
queryset = Comment.objects.all()
serializer_class = CommentSerializer
| 37.8 | 100 | 0.638228 | from rest_framework import viewsets
from rest_framework.response import Response
from dalme_api.serializers import CommentSerializer
from dalme_app.models import Comment
from dalme_api.access_policies import CommentAccessPolicy
from dalme_app.models import *
class Comments(viewsets.ModelViewSet):
    """ API endpoint for managing comments """
    permission_classes = (CommentAccessPolicy,)
    queryset = Comment.objects.all()
    serializer_class = CommentSerializer

    def get_queryset(self, *args, **kwargs):
        """Return the comments of the object named by ?model=&object=, or all comments."""
        if self.request.GET.get('model') is not None and self.request.GET.get('object') is not None:
            model = self.request.GET['model']
            # NOTE(review): `object` shadows the builtin of the same name.
            object = self.request.GET['object']
            if type(object) is not str:
                object = str(object)
            # SECURITY(review): eval() of request-supplied strings allows
            # arbitrary code execution. Resolve the model safely via
            # django.apps.apps.get_model() + a queryset .get() instead.
            obj_instance = eval(model+'.objects.get(pk="'+object+'")')
            queryset = obj_instance.comments.all()
        else:
            queryset = self.queryset
        return queryset

    def create(self, request, *args, **kwargs):
        """Create a comment attached to the object named in the request body.

        Expects `model`, `object` and `body` keys in request.data; responds
        with the serialized comment (201) or the error message (400).
        """
        result = {}
        data = request.data
        try:
            # SECURITY(review): same eval()-injection concern as get_queryset.
            content_object = eval(data['model']+'.objects.get(pk="'+str(data['object'])+'")')
            new_comment = content_object.comments.create(body=data['body'])
            serializer = self.get_serializer(new_comment)
            result = serializer.data
            status = 201
        except Exception as e:
            result = str(e)
            status = 400
        return Response(result, status)
| 985 | 0 | 54 |
56290136e3145217e8b5388646432b717f5350d7 | 7,850 | py | Python | post/plots.py | SeanChen0220/learnable-triangulation-pytorch | 780774442c78b8e43d36bbc054346188f95af835 | [
"MIT"
] | 1 | 2021-08-12T04:58:19.000Z | 2021-08-12T04:58:19.000Z | post/plots.py | SeanChen0220/learnable-triangulation-pytorch | 780774442c78b8e43d36bbc054346188f95af835 | [
"MIT"
] | 1 | 2021-08-13T19:25:41.000Z | 2021-08-16T14:56:00.000Z | post/plots.py | SeanChen0220/learnable-triangulation-pytorch | 780774442c78b8e43d36bbc054346188f95af835 | [
"MIT"
] | 1 | 2021-08-13T11:15:37.000Z | 2021-08-13T11:15:37.000Z | import numpy as np
from matplotlib import pyplot as plt
from mvn.utils.misc import find_min, drop_na
| 28.136201 | 163 | 0.52242 | import numpy as np
from matplotlib import pyplot as plt
from mvn.utils.misc import find_min, drop_na
def plot_SOTA(axis, _xrange):
    """Draw the published state-of-the-art baseline on *axis*.

    The algebraic-triangulation SOTA (21.3) is drawn as a dotted line over
    the right half of the x-range only.
    """
    span = _xrange[-1] - _xrange[0]
    axis.hlines(
        21.3, xmin=_xrange[0] + span / 2, xmax=_xrange[-1],
        color='blue', linestyle=':', label='algebraic SOTA = 21.3'
    )
    # The volumetric (softmax) SOTA of 13.7 is deliberately not drawn:
    # these runs use the algebraic model, so showing it would be misleading.
def plot_stuff(axis, stuff, label, xrange=None, ylim=None, color='black', alpha=1.0, legend_loc=None, show_min=False, marker=',', linestyle='solid', verbose=True):
    """Plot one metric series on *axis* and return the x-range actually used.

    When *xrange* is omitted it defaults to 0..len(stuff)-1; a longer
    *xrange* is truncated to the series length. Optionally marks the series
    minimum, sets y-limits, places the legend, and prints a summary line.
    """
    if xrange is None:
        xrange = list(range(len(stuff)))
    if len(xrange) > len(stuff):
        xrange = xrange[:len(stuff)]

    axis.plot(
        xrange, stuff, label=label, color=color,
        marker=marker, markersize=5, linestyle=linestyle,
        alpha=alpha
    )

    if show_min:
        minimum, at = find_min(stuff)
        axis.plot(
            xrange[at], minimum,
            marker='x', color='r', markersize=20,
            label='min = {:.1f}'.format(minimum)
        )

    if ylim:
        axis.set_ylim(ylim)
    if legend_loc:
        axis.legend(loc=legend_loc)

    if verbose:
        print('- plotted "{}" metrics [{:.1f}, {:.1f}] in epochs [{:.0f}, {:.0f}]'.format(
            label,
            np.min(drop_na(stuff)), np.max(drop_na(stuff)),
            xrange[0], xrange[-1]
        ))
    return xrange
def plot_loss(axis, loss_history, label, xrange, color):
    """Plot one loss curve (dashed, circle markers, legend lower-left).

    Curves whose label contains 'total' are drawn more opaque so the
    aggregate stands out from the individual loss terms.
    """
    emphasis = 0.9 if 'total' in label else 0.4
    plot_stuff(
        axis,
        loss_history,
        label=label,
        xrange=xrange,
        color=color,
        alpha=emphasis,
        legend_loc='lower left',
        show_min=False,
        marker='o',
        linestyle='dashed',
        verbose=False
    )
def plot_losses(axis, epochs, xrange, normalize_loss=None, title=None, xlabel='# epoch'):
    """Plot the per-epoch mean of each enabled loss term on *axis*.

    Each known loss key has a fixed color, a magnitude scaler (so curves
    of very different scale share one axis) and a `show` toggle in
    `loss_plotters`; only enabled, non-trivial (mean > 1e-4) losses are
    drawn. `normalize_loss`, when given, only affects the y-axis label.
    """
    # All keys of the form '... loss / batch' present in the first epoch.
    loss_keys = list(filter(
        lambda x: 'loss / batch' in x,
        epochs[0].keys()
    ))

    n_max_losses = 10
    colors = plt.get_cmap('jet')(np.linspace(0, 1, n_max_losses))
    # Per-loss plotting configuration: color, magnitude scaler, visibility.
    loss_plotters = {
        'total loss / batch': {
            'color': colors[0],
            'scaler': 1e-2,
            'show': False,
        },
        'R loss / batch': {
            'color': colors[1],
            'scaler': 3e-1,
            'show': False,
        },
        't loss / batch': {
            'color': colors[2],
            'scaler': 1e0,
            'show': True,
        },
        'proj loss / batch': {
            'color': colors[3],
            'scaler': 5e-2,
            'show': False,
        },
        'world loss / batch': {
            'color': colors[4],
            'scaler': 1e-2,
            'show': False,
        },
        'self cam loss / batch': {
            'color': colors[5],
            'scaler': 5e-1,
            'show': False,
        },
        'self proj loss / batch': {
            'color': colors[7],  # colors[6] is yellow ...
            'scaler': 5e0,
            'show': True,
        },
        'self world loss / batch': {
            'color': colors[8],
            'scaler': 1e-1,
            'show': True,
        },
        'world struct loss / batch': {
            'color': colors[9],
            'scaler': 1e-3,
            'show': True,
        }
    }

    for key in loss_keys:
        if key in epochs[0]:  # be sure to plot something that exists, we are not in QM
            if key in loss_plotters and loss_plotters[key]['show']:
                # Mean loss of each epoch's batches.
                loss_history = np.float64([
                    np.mean(epoch[key])
                    for epoch in epochs
                ])
                # Replace NaN epochs with the mean of the valid ones so
                # the plotted curve stays connected.
                nan = np.mean(drop_na(loss_history))
                loss_history = np.nan_to_num(loss_history, nan=nan)

                if np.mean(loss_history) > 1e-4:  # non-trivial losses
                    _min, _max = np.min(drop_na(loss_history)), np.max(drop_na(loss_history))
                    _last = loss_history[-1]
                    # Label shows last value and [min, max] range.
                    label = '{} = {:.1f} [{:.1f}, {:.1f}]'.format(
                        key.replace('loss / batch', '').strip(),
                        _last, _min, _max
                    )
                    scaler = loss_plotters[key]['scaler']
                    plot_loss(
                        axis,
                        loss_history * scaler,
                        label,
                        xrange,
                        loss_plotters[key]['color'],
                    )

    axis.set_xlim([xrange[0], xrange[-1]])
    axis.set_xlabel(xlabel)

    if title:
        axis.set_title(title)

    label = '{}loss'.format(
        '[{:.1f}, {:.1f}]-normalized '.format(
            normalize_loss[0], normalize_loss[1]
        ) if normalize_loss else ''
    )
    axis.set_ylabel(label)
def plot_metrics(axis, epochs, xrange, train_metric_ylim=[0, 1], eval_metric_ylim=[0, 1], metric_ylabel=None, with_SOTA=False):
    """Plot train/eval relative-MPJPE curves (with min markers) plus the SOTA line.

    NOTE(review): `with_SOTA` is accepted but unused -- the SOTA line is
    always drawn; gating it would change existing callers' output.
    NOTE(review): the mutable list defaults are kept for interface
    compatibility; they are never mutated here.
    """
    legend_loc = 'upper right'
    marker = ','

    # The duplicated train/eval logic is factored into a single helper.
    _plot_metric_curve(
        axis, epochs, xrange,
        key_prefix='training', label_prefix='train',
        color='aquamarine', ylim=train_metric_ylim,
        legend_loc=legend_loc, marker=marker
    )
    _plot_metric_curve(
        axis, epochs, xrange,
        key_prefix='eval', label_prefix='eval',
        color='blue', ylim=eval_metric_ylim,
        legend_loc=legend_loc, marker=marker
    )

    plot_SOTA(axis, [xrange[0], xrange[-1]])
    axis.legend(loc=legend_loc)
    axis.set_xlim([xrange[0], xrange[-1]])
    axis.set_ylabel(metric_ylabel)


def _plot_metric_curve(axis, epochs, xrange, key_prefix, label_prefix, color, ylim, legend_loc, marker):
    """Plot one split's relative-MPJPE history, min-marked.

    The absolute MPJPE of the last epoch is appended to the label only
    when it differs noticeably from the relative one (with the pelvis at
    the origin the two coincide).
    """
    metrics = np.float64(list(map(lambda x: x[key_prefix + ' metrics (rel)'], epochs)))
    label = '{} rel MPJPE = {:.0f}'.format(label_prefix, metrics[-1])

    abs_key = key_prefix + ' metrics (abs)'
    if abs_key in epochs[-1]:
        last_abs_metrics = np.float64(epochs[-1][abs_key])
    else:
        last_abs_metrics = None

    # maybe pelvis is in origin ...
    if last_abs_metrics and abs(last_abs_metrics - metrics[-1]) > 1:
        label += ', abs MPJPE = {:.0f}'.format(
            last_abs_metrics
        )

    plot_stuff(
        axis,
        metrics,
        label,
        xrange=xrange,
        ylim=ylim,
        color=color,
        alpha=1.0,
        legend_loc=legend_loc,
        show_min=True,
        marker=marker,
        verbose=False
    )
def plot_lr(axis, lr_reductions, batch_amount_per_epoch=8):
    """Mark every learning-rate reduction as a dotted vertical line on *axis*.

    Each entry of *lr_reductions* is a dict with 'epoch' (when the
    reduction happened) and 'lr' (the new rate); epochs are converted to
    batch iterations via *batch_amount_per_epoch*.
    """
    for reduction in lr_reductions:
        axis.vlines(
            x=reduction['epoch'] * batch_amount_per_epoch,
            ymin=0, ymax=1e2,
            label='new lr: {:.3E}'.format(reduction['lr']),
            color='magenta',
            linestyle=':',
            alpha=0.5
        )
def make_axis_great_again(ax, xlim=None, ylim=None, hide_y=False):
    """Apply common axis cosmetics.

    Sets the y-limits when *ylim* is truthy, sets the x-limits to the
    first/last entries of *xlim* when given, and optionally hides the
    y-axis ticks.
    """
    if ylim:
        ax.set_ylim(ylim)
    if xlim is not None:
        ax.set_xlim([xlim[0], xlim[-1]])
    if hide_y:
        ax.yaxis.set_ticks([])
def get_figsize(n_rows, n_cols, row_size=8, column_size=24):
    """Return a (width, height) figure size in inches for an n_rows x n_cols grid."""
    width = n_cols * column_size
    height = n_rows * row_size
    return (width, height)
def get_figa(n_rows, n_cols, heigth=8, width=24):
    """Create a matplotlib (figure, axes) grid sized via get_figsize.

    The misspelled 'heigth' parameter name is kept for caller compatibility.
    """
    dims = get_figsize(n_rows, n_cols, row_size=heigth, column_size=width)
    fig, ax = plt.subplots(n_rows, n_cols, figsize=dims)
    return fig, ax
| 7,532 | 0 | 207 |
78f699b89fb6cd8f93abd36f06dc3a19458651d3 | 1,562 | py | Python | src/genie/libs/parser/nxos/tests/ShowIpMrouteSummary/cli/equal/golden_output2_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | [
"Apache-2.0"
] | 204 | 2018-06-27T00:55:27.000Z | 2022-03-06T21:12:18.000Z | src/genie/libs/parser/nxos/tests/ShowIpMrouteSummary/cli/equal/golden_output2_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | [
"Apache-2.0"
] | 468 | 2018-06-19T00:33:18.000Z | 2022-03-31T23:23:35.000Z | src/genie/libs/parser/nxos/tests/ShowIpMrouteSummary/cli/equal/golden_output2_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | [
"Apache-2.0"
] | 309 | 2019-01-16T20:21:07.000Z | 2022-03-30T12:56:41.000Z | expected_output = {
'vrf': {
'vxlan-1009': {
'address_family': {
'ipv4': {
'count_multicast_starg': 11,
'count_multicast_sg': 18,
'count_multicast_total': 29,
'count_multicast_starg_prefix': 0,
'group_count': 18,
'avg_source_per_group': 1.0,
'groups': {
'225.1.1.17/32': {
'source_count': 1,
'source': {
'(*,G)': {
'packets': 6,
'bytes': 636,
'aps': 106,
'pps': 0,
'bitrate': 0.000,
'bitrate_unit': 'bps',
'oifs': 1,
},
'1.1.91.67': {
'packets': 145,
'bytes': 7505,
'aps': 51,
'pps': 0,
'bitrate': 27.200,
'bitrate_unit': 'bps',
'oifs': 1,
}
}
}
}
}
}
}
}
}
| 37.190476 | 58 | 0.213188 | expected_output = {
'vrf': {
'vxlan-1009': {
'address_family': {
'ipv4': {
'count_multicast_starg': 11,
'count_multicast_sg': 18,
'count_multicast_total': 29,
'count_multicast_starg_prefix': 0,
'group_count': 18,
'avg_source_per_group': 1.0,
'groups': {
'225.1.1.17/32': {
'source_count': 1,
'source': {
'(*,G)': {
'packets': 6,
'bytes': 636,
'aps': 106,
'pps': 0,
'bitrate': 0.000,
'bitrate_unit': 'bps',
'oifs': 1,
},
'1.1.91.67': {
'packets': 145,
'bytes': 7505,
'aps': 51,
'pps': 0,
'bitrate': 27.200,
'bitrate_unit': 'bps',
'oifs': 1,
}
}
}
}
}
}
}
}
}
| 0 | 0 | 0 |
3844b3c1b40266802a6219987e07592e0f5ba9df | 3,908 | py | Python | src/sort_envs.py | linus87/drl_shape_optimization | 39e6b66bd5b70dfce07e145aafe815071bc1b6fe | [
"MIT"
] | 17 | 2020-12-28T16:25:47.000Z | 2022-03-27T18:28:44.000Z | src/sort_envs.py | linus87/drl_shape_optimization | 39e6b66bd5b70dfce07e145aafe815071bc1b6fe | [
"MIT"
] | 2 | 2021-04-18T03:40:02.000Z | 2022-01-24T08:40:10.000Z | src/sort_envs.py | linus87/drl_shape_optimization | 39e6b66bd5b70dfce07e145aafe815071bc1b6fe | [
"MIT"
] | 8 | 2020-12-23T05:59:52.000Z | 2022-03-28T12:06:35.000Z | # Generic imports
import os, sys, glob, shutil
import numpy as np
# Find the number of envs
main_dir = '.'
envs = [f.path for f in os.scandir(main_dir) if f.is_dir()]
# Process names
tmp = []
for env in envs:
env = env[2:]
if (env[0:3] == 'env'):
tmp.append(env)
# Printing
envs = tmp
print('I found ',str(len(envs)),' environments')
# Create final dirs if necessary
path = 'sorted_envs'
png_path = path+'/png'
csv_path = path+'/csv'
sol_path = path+'/sol'
best_path = path+'/best'
if (not os.path.isdir(path)):
os.mkdir(path)
if (not os.path.isdir(png_path)):
os.mkdir(png_path)
if (not os.path.isdir(csv_path)):
os.mkdir(csv_path)
if (not os.path.isdir(sol_path)):
os.mkdir(sol_path)
if (not os.path.isdir(best_path)):
os.mkdir(best_path)
# Read env contents
n_outputs = 10
looping = True
glb_index = 1
loc_index = 0
ring_size = 250
ring_buffer = np.zeros([ring_size])
ring_index = 0
avg_rew = 0.0
avg_reward = []
reward = []
# Loop until no more shapes can be found
while looping:
# Copy loc index to check if loop must be stopped
loc_index_cp = loc_index
# Loop over envs
for env in envs:
img = env+'/save/png/shape_'+str(glb_index)+'.png'
csv = env+'/save/csv/shape_'+str(glb_index)+'.csv'
sol = env+'/save/sol/'+str(glb_index)+'.png'
sol_u = env+'/save/sol/'+str(glb_index)+'_u.png'
sol_v = env+'/save/sol/'+str(glb_index)+'_v.png'
sol_p = env+'/save/sol/'+str(glb_index)+'_p.png'
# If files exists, copy
if os.path.isfile(img):
shutil.copy(img, png_path+'/'+str(loc_index)+'.png')
if os.path.isfile(csv):
shutil.copy(csv, csv_path+'/'+str(loc_index)+'.csv')
if os.path.isfile(sol_u):
shutil.copy(sol_u, sol_path+'/'+str(loc_index)+'_u.png')
if os.path.isfile(sol_v):
shutil.copy(sol_v, sol_path+'/'+str(loc_index)+'_v.png')
if os.path.isfile(sol_p):
shutil.copy(sol_p, sol_path+'/'+str(loc_index)+'_p.png')
if os.path.isfile(sol):
shutil.copy(sol, sol_path+'/'+str(loc_index)+'.png')
# All the following is done only if computation ended well
# Store reward and check max reward
filename = env+'/save/reward_penalization'
line = None
with open(filename) as f:
line = f.read().split('\n')[glb_index-1]
line = line.split(' ')
# Handle reward
if (len(line)>1):
# Retrieve and store reward
rew = float(line[1])
ring_buffer[ring_index] = rew
# Compute new average
avg_rew = np.sum(ring_buffer)/ring_size
avg_reward.append(avg_rew)
reward.append(rew)
# Update ring buffer index
ring_index += 1
if (ring_index == ring_size): ring_index = 0
# Update index
loc_index += 1
# Stop looping if index has not changed
if (loc_index == loc_index_cp):
looping = False
# Update global index
glb_index += 1
# Sort reward
sort_rew = np.argsort(-1.0*np.asarray(reward))
# Write reward to file
filename = path+'/reward'
with open(filename, 'w') as f:
for i in range(len(reward)):
f.write(str(i)+' ')
f.write(str(reward[i])+' ')
f.write(str(avg_reward[i]))
f.write('\n')
# Copy best solutions
for i in range(n_outputs):
img = png_path+'/'+str(sort_rew[i])+'.png'
if os.path.isfile(img):
shutil.copy(img, best_path+'/.')
# Printing
print('I found '+str(loc_index)+' shapes in total')
print('Best rewards are:')
for i in range(n_outputs):
print(' '+str(reward[sort_rew[i]])+' for shape '+str(sort_rew[i]))
| 29.164179 | 87 | 0.572416 | # Generic imports
# Post-processing script: collects the per-environment outputs (shape images,
# csv files, flow-solution images and rewards) produced by parallel 'env*'
# run directories in the current folder, renumbers everything into a single
# 'sorted_envs' folder, writes a combined reward file and copies the
# n_outputs best-rewarded shapes into 'sorted_envs/best'.
# NOTE(review): sys and glob are imported but never used below.
import os, sys, glob, shutil
import numpy as np
# Find the number of envs
main_dir = '.'
envs = [f.path for f in os.scandir(main_dir) if f.is_dir()]
# Process names
tmp = []
for env in envs:
    # os.scandir('.') returns paths prefixed with './'; strip that prefix
    # before matching the directory name against 'env'.
    env = env[2:]
    if (env[0:3] == 'env'):
        tmp.append(env)
# Printing
envs = tmp
print('I found ',str(len(envs)),' environments')
# Create final dirs if necessary
path = 'sorted_envs'
png_path = path+'/png'
csv_path = path+'/csv'
sol_path = path+'/sol'
best_path = path+'/best'
if (not os.path.isdir(path)):
    os.mkdir(path)
if (not os.path.isdir(png_path)):
    os.mkdir(png_path)
if (not os.path.isdir(csv_path)):
    os.mkdir(csv_path)
if (not os.path.isdir(sol_path)):
    os.mkdir(sol_path)
if (not os.path.isdir(best_path)):
    os.mkdir(best_path)
# Read env contents
# glb_index numbers the shape inside each env; loc_index is the global
# running counter used to renumber the copied files contiguously.
n_outputs = 10
looping = True
glb_index = 1
loc_index = 0
ring_size = 250
ring_buffer = np.zeros([ring_size])
ring_index = 0
avg_rew = 0.0
avg_reward = []
reward = []
# Loop until no more shapes can be found
while looping:
    # Copy loc index to check if loop must be stopped
    loc_index_cp = loc_index
    # Loop over envs
    for env in envs:
        img = env+'/save/png/shape_'+str(glb_index)+'.png'
        csv = env+'/save/csv/shape_'+str(glb_index)+'.csv'
        sol = env+'/save/sol/'+str(glb_index)+'.png'
        sol_u = env+'/save/sol/'+str(glb_index)+'_u.png'
        sol_v = env+'/save/sol/'+str(glb_index)+'_v.png'
        sol_p = env+'/save/sol/'+str(glb_index)+'_p.png'
        # If files exists, copy
        if os.path.isfile(img):
            shutil.copy(img, png_path+'/'+str(loc_index)+'.png')
        if os.path.isfile(csv):
            shutil.copy(csv, csv_path+'/'+str(loc_index)+'.csv')
        if os.path.isfile(sol_u):
            shutil.copy(sol_u, sol_path+'/'+str(loc_index)+'_u.png')
        if os.path.isfile(sol_v):
            shutil.copy(sol_v, sol_path+'/'+str(loc_index)+'_v.png')
        if os.path.isfile(sol_p):
            shutil.copy(sol_p, sol_path+'/'+str(loc_index)+'_p.png')
        if os.path.isfile(sol):
            shutil.copy(sol, sol_path+'/'+str(loc_index)+'.png')
        # All the following is done only if computation ended well
        # Store reward and check max reward
        # Line (glb_index - 1) of the reward file is space-separated; the
        # reward value is read from its second field below.
        filename = env+'/save/reward_penalization'
        line = None
        with open(filename) as f:
            line = f.read().split('\n')[glb_index-1]
            line = line.split(' ')
        # Handle reward
        if (len(line)>1):
            # Retrieve and store reward
            rew = float(line[1])
            ring_buffer[ring_index] = rew
            # Compute new average
            # NOTE(review): dividing by ring_size even before the ring has
            # been filled once biases the first 250 averages towards zero.
            avg_rew = np.sum(ring_buffer)/ring_size
            avg_reward.append(avg_rew)
            reward.append(rew)
            # Update ring buffer index
            ring_index += 1
            if (ring_index == ring_size): ring_index = 0
            # Update index
            loc_index += 1
    # Stop looping if index has not changed
    if (loc_index == loc_index_cp):
        looping = False
    # Update global index
    glb_index += 1
# Sort reward
# argsort of the negated rewards yields indices ordered best-to-worst.
sort_rew = np.argsort(-1.0*np.asarray(reward))
# Write reward to file
filename = path+'/reward'
with open(filename, 'w') as f:
    for i in range(len(reward)):
        f.write(str(i)+' ')
        f.write(str(reward[i])+' ')
        f.write(str(avg_reward[i]))
        f.write('\n')
# Copy best solutions
for i in range(n_outputs):
    img = png_path+'/'+str(sort_rew[i])+'.png'
    if os.path.isfile(img):
        shutil.copy(img, best_path+'/.')
# Printing
print('I found '+str(loc_index)+' shapes in total')
print('Best rewards are:')
for i in range(n_outputs):
    print(' '+str(reward[sort_rew[i]])+' for shape '+str(sort_rew[i]))
| 0 | 0 | 0 |
bd275604aedcbd3ea8eb61bb8e604d60e13461fb | 1,612 | py | Python | formats.py | alspitz/esc_test | 4078609e3a84923b9cdbfdea8daf9814364fa9c8 | [
"MIT"
] | 1 | 2021-12-28T08:38:04.000Z | 2021-12-28T08:38:04.000Z | formats.py | alspitz/esc_test | 4078609e3a84923b9cdbfdea8daf9814364fa9c8 | [
"MIT"
] | null | null | null | formats.py | alspitz/esc_test | 4078609e3a84923b9cdbfdea8daf9814364fa9c8 | [
"MIT"
] | null | null | null | import datetime
import os
import numpy as np
from quantities import *
gf2N = 9.80665 / 1000
dirpath = os.path.dirname(os.path.realpath(__file__))
| 22.704225 | 125 | 0.636476 | import datetime
import os
import numpy as np
from quantities import *
gf2N = 9.80665 / 1000
dirpath = os.path.dirname(os.path.realpath(__file__))
class Source:
def __init__(self, filename):
self.filename = filename
def __str__(self):
return self.__class__.__name__
class CSVSource(Source):
def read(self):
mat = np.genfromtxt(os.path.join(self.base_dir, self.filename), delimiter=',', skip_header=1, converters=self.converters)
self.data = { quant : mat[:, index] for (quant, index) in self.quantity_map.items() }
# Convert thrust from grams to Newtons
if THRUST in self.data:
self.data[THRUST] *= gf2N
# Ensure torque is always positive.
if TORQUE in self.data:
self.data[TORQUE] = np.abs(self.data[TORQUE])
class AutoQuadECU(CSVSource):
base_dir = os.path.join(dirpath, "data", "autoquad")
converters = {0 : lambda x: datetime.datetime.strptime(x.decode('utf-8'), '%Y-%m-%d %H:%M:%S:%f').timestamp()}
display_name = "ESC"
plot_style = '-'
quantity_map = {
TIME : 0,
CURRENT : 1,
VOLTAGE : 2,
MOTOR_VOLTAGE : 3,
RPM : 4,
DUTY : 5,
COMM_PERIOD : 6
}
class RCBenchmark(CSVSource):
base_dir = os.path.join(dirpath, "data", "rcbench")
converters = {}
display_name = "RCBench"
plot_style = 'o-'
quantity_map = {
TIME : 0,
ACC_X : 5,
ACC_Y : 6,
ACC_Z : 7,
TORQUE : 8,
THRUST : 9,
VOLTAGE : 10,
CURRENT : 11,
RPM : 12,
ELECTRICAL_POWER : 14,
MECHANICAL_POWER : 15,
MOTOR_EFFICIENCY : 16,
PROP_MECH_EFF : 17,
OVERALL_EFF : 18,
VIBRATION : 19
}
| 504 | 793 | 165 |
b9f1d76a72d34b1a60e1eecfdda0647412221cd0 | 1,337 | py | Python | path_thin.py | ChainBreak/line_drawing | 2c999083f0b530a839b2588e75e7f5fd17ab3a5d | [
"MIT"
] | 1 | 2020-02-16T11:33:47.000Z | 2020-02-16T11:33:47.000Z | path_thin.py | ChainBreak/line_drawing | 2c999083f0b530a839b2588e75e7f5fd17ab3a5d | [
"MIT"
] | null | null | null | path_thin.py | ChainBreak/line_drawing | 2c999083f0b530a839b2588e75e7f5fd17ab3a5d | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import math
if __name__ == "__main__":
print("Hello There!")
| 20.257576 | 55 | 0.481675 | #!/usr/bin/env python3
import math
def thin(path_list):
for path in path_list:
l = len(path)
if l > 3:
old_path = path.copy()
path.clear()
p0 = old_path[0]
p2 = old_path[1]
path.append(p0)
for i in range(2,l):
p1 = p2
p2 = old_path[i]
#vector betweem points p0 and p1
v0 = [ p1[0]-p0[0], p1[1]-p0[1] ]
#vector betweem points p1 and p2
v1 = [ p2[0]-p1[0], p2[1]-p1[1] ]
angle = cosine_angle(v0,v1)
angle_deg = math.degrees(angle)
if angle_deg > 10.0:
path.append(p1)
p0 = p1
path.append(p2)
return path_list
def cosine_angle(v0, v1):
    """Return the angle in radians between two 2D vectors.

    Fixes over the previous implementation: the input lists are no longer
    normalised in place (the caller's vectors are left untouched), and the
    normalised dot product is clamped to [-1.0, 1.0] so floating-point
    rounding can no longer push it outside the domain of ``acos`` and raise
    a ValueError for (anti)parallel vectors.

    :param v0: first vector as [x, y]; must have non-zero length
    :param v1: second vector as [x, y]; must have non-zero length
    :return: angle between the vectors, in radians, in [0, pi]
    :raises ZeroDivisionError: if either vector has zero length
    """
    # Vector lengths; math.hypot is numerically robust for sqrt(x*x + y*y).
    l0 = math.hypot(v0[0], v0[1])
    l1 = math.hypot(v1[0], v1[1])
    # Normalised dot product computed without mutating the caller's lists.
    dot = (v0[0] * v1[0] + v0[1] * v1[1]) / (l0 * l1)
    # Guard against rounding artefacts such as dot = 1.0000000000000002.
    dot = max(-1.0, min(1.0, dot))
    return math.acos(dot)
if __name__ == "__main__":
print("Hello There!")
| 1,197 | 0 | 46 |
def8b64ed9f1e43acb27abd8c1cc1537f9aa27ff | 545 | py | Python | dashcollege/src/pool/urls.py | Akash-Sharma-1/dashcollege | 02832c2104637e32f5133da9d16b73bf7ba16c0a | [
"MIT"
] | null | null | null | dashcollege/src/pool/urls.py | Akash-Sharma-1/dashcollege | 02832c2104637e32f5133da9d16b73bf7ba16c0a | [
"MIT"
] | null | null | null | dashcollege/src/pool/urls.py | Akash-Sharma-1/dashcollege | 02832c2104637e32f5133da9d16b73bf7ba16c0a | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
from django.conf.urls import url
app_name = 'pool'
urlpatterns = [
path('pool/', views.Pool.as_view(), name='pool'),
path('pool/cab/', views.PoolCab.as_view(), name='cab'),
path('pool/food/', views.PoolFood.as_view(), name='food'),
path('pool/others/', views.PoolMisc.as_view(), name='others'),
path('store/', views.Store.as_view(), name='store'),
path('find/', views.Find.as_view(), name='find'),
# path('resource', views.PoolResource.as_view(), name='edit_self'),
] | 38.928571 | 71 | 0.656881 | from django.urls import path
from . import views
from django.conf.urls import url
app_name = 'pool'
# Route table for the pool app.  Each route maps a URL prefix to a
# class-based view; the 'name' values are used for reverse URL lookups.
urlpatterns = [
    path('pool/', views.Pool.as_view(), name='pool'),
    path('pool/cab/', views.PoolCab.as_view(), name='cab'),
    path('pool/food/', views.PoolFood.as_view(), name='food'),
    path('pool/others/', views.PoolMisc.as_view(), name='others'),
    path('store/', views.Store.as_view(), name='store'),
    path('find/', views.Find.as_view(), name='find'),
    # Disabled route, kept for reference:
    # path('resource', views.PoolResource.as_view(), name='edit_self'),
]
65a040bacaf5d53253989daaf3c527fbf1393b4b | 2,382 | py | Python | simulator/pybullet/manipulator_main.py | junhyeokahn/ASE389 | a57d668f968da1db56f0dfe8dadad548ad631f33 | [
"MIT"
] | null | null | null | simulator/pybullet/manipulator_main.py | junhyeokahn/ASE389 | a57d668f968da1db56f0dfe8dadad548ad631f33 | [
"MIT"
] | null | null | null | simulator/pybullet/manipulator_main.py | junhyeokahn/ASE389 | a57d668f968da1db56f0dfe8dadad548ad631f33 | [
"MIT"
] | 3 | 2021-02-05T06:59:43.000Z | 2021-05-11T20:14:02.000Z | import os
import sys
cwd = os.getcwd()
sys.path.append(cwd)
import time, math
from collections import OrderedDict
import pybullet as p
import numpy as np
from util import pybullet_util
from config.manipulator_config import ManipulatorConfig
from pnc.manipulator_pnc.manipulator_interface import ManipulatorInterface
if __name__ == "__main__":
# Environment Setup
p.connect(p.GUI)
p.resetDebugVisualizerCamera(cameraDistance=4.0,
cameraYaw=0,
cameraPitch=-45,
cameraTargetPosition=[1.5, 0., 0.])
p.setGravity(0, 0, -9.8)
p.setPhysicsEngineParameter(fixedTimeStep=ManipulatorConfig.DT,
numSubSteps=ManipulatorConfig.N_SUBSTEP)
if ManipulatorConfig.VIDEO_RECORD:
if not os.path.exists('video'):
os.makedirs('video')
for f in os.listdir('video'):
os.remove('video/' + f)
p.startStateLogging(p.STATE_LOGGING_VIDEO_MP4, "video/atlas.mp4")
# Create Robot, Ground
p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 0)
robot = p.loadURDF(cwd +
"/robot_model/manipulator/three_link_manipulator.urdf",
useFixedBase=True)
p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 1)
nq, nv, na, joint_id, link_id, pos_basejoint_to_basecom, rot_basejoint_to_basecom = pybullet_util.get_robot_config(
robot)
# Set Initial Config
p.resetJointState(robot, 0, -np.pi / 6., 0.)
p.resetJointState(robot, 1, np.pi / 6., 0.)
p.resetJointState(robot, 2, np.pi / 3., 0.)
# Joint Friction
pybullet_util.set_joint_friction(robot, joint_id, 0.1)
# Construct Interface
interface = ManipulatorInterface()
# Run Sim
t = 0
dt = ManipulatorConfig.DT
count = 0
while (1):
# Get SensorData
sensor_data = pybullet_util.get_sensor_data(robot, joint_id, link_id,
pos_basejoint_to_basecom,
rot_basejoint_to_basecom)
# Compute Command
command = interface.get_command(sensor_data)
# Apply Trq
pybullet_util.set_motor_trq(robot, joint_id, command)
p.stepSimulation()
time.sleep(dt)
t += dt
count += 1
| 31.342105 | 119 | 0.619228 | import os
import sys
cwd = os.getcwd()
sys.path.append(cwd)
import time, math
from collections import OrderedDict
import pybullet as p
import numpy as np
from util import pybullet_util
from config.manipulator_config import ManipulatorConfig
from pnc.manipulator_pnc.manipulator_interface import ManipulatorInterface
if __name__ == "__main__":
# Environment Setup
p.connect(p.GUI)
p.resetDebugVisualizerCamera(cameraDistance=4.0,
cameraYaw=0,
cameraPitch=-45,
cameraTargetPosition=[1.5, 0., 0.])
p.setGravity(0, 0, -9.8)
p.setPhysicsEngineParameter(fixedTimeStep=ManipulatorConfig.DT,
numSubSteps=ManipulatorConfig.N_SUBSTEP)
if ManipulatorConfig.VIDEO_RECORD:
if not os.path.exists('video'):
os.makedirs('video')
for f in os.listdir('video'):
os.remove('video/' + f)
p.startStateLogging(p.STATE_LOGGING_VIDEO_MP4, "video/atlas.mp4")
# Create Robot, Ground
p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 0)
robot = p.loadURDF(cwd +
"/robot_model/manipulator/three_link_manipulator.urdf",
useFixedBase=True)
p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 1)
nq, nv, na, joint_id, link_id, pos_basejoint_to_basecom, rot_basejoint_to_basecom = pybullet_util.get_robot_config(
robot)
# Set Initial Config
p.resetJointState(robot, 0, -np.pi / 6., 0.)
p.resetJointState(robot, 1, np.pi / 6., 0.)
p.resetJointState(robot, 2, np.pi / 3., 0.)
# Joint Friction
pybullet_util.set_joint_friction(robot, joint_id, 0.1)
# Construct Interface
interface = ManipulatorInterface()
# Run Sim
t = 0
dt = ManipulatorConfig.DT
count = 0
while (1):
# Get SensorData
sensor_data = pybullet_util.get_sensor_data(robot, joint_id, link_id,
pos_basejoint_to_basecom,
rot_basejoint_to_basecom)
# Compute Command
command = interface.get_command(sensor_data)
# Apply Trq
pybullet_util.set_motor_trq(robot, joint_id, command)
p.stepSimulation()
time.sleep(dt)
t += dt
count += 1
| 0 | 0 | 0 |
e6fe1fccd65a5ef935d782807aa32e17cc47c4f3 | 4,972 | py | Python | src/DioGUI.py | daign/daign-image-organizer | 4ce69681d01260594b7156b3ed944dc386244ae2 | [
"MIT"
] | null | null | null | src/DioGUI.py | daign/daign-image-organizer | 4ce69681d01260594b7156b3ed944dc386244ae2 | [
"MIT"
] | null | null | null | src/DioGUI.py | daign/daign-image-organizer | 4ce69681d01260594b7156b3ed944dc386244ae2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
from PyQt4 import QtGui
from PyQt4 import QtCore
import Database
from DioView import DioView
from DioDetails import DioDetails
from DioScanDialog import DioScanDialog
| 26.588235 | 95 | 0.734714 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
from PyQt4 import QtGui
from PyQt4 import QtCore
import Database
from DioView import DioView
from DioDetails import DioDetails
from DioScanDialog import DioScanDialog
class DioGUI( QtGui.QSplitter ):
    """Main window of the Daign Image Organizer.

    A horizontal splitter with the image view (DioView) on the left and a
    controls column on the right (scan button, tag/name/star search filters,
    and a DioDetails panel).  Search results are held in
    ``self.selected_range`` (list of image hashes) and the currently shown
    image in ``self.selected_image``.
    """
    def __init__( self, parent = None ):
        """Build all widgets, wire their signals and initialise the database."""
        QtGui.QSplitter.__init__( self, parent )
        self.setWindowTitle( 'Daign Image Organizer' )
        self.selected_range = []
        self.selected_image = None
        Database.create_database()
        # View
        self.view = DioView( self )
        # Controls
        self.scan_dialog = DioScanDialog( None )
        scan_button = QtGui.QPushButton( 'Scan Folders', self )
        scan_button.clicked.connect( self.scan_dialog.show )
        search_tags_button = QtGui.QPushButton( 'Load Tags', self )
        search_tags_button.clicked.connect( self.load_tags )
        self.search_tags_list = QtGui.QListWidget( self )
        search_names_button = QtGui.QPushButton( 'Load Names', self )
        search_names_button.clicked.connect( self.load_names )
        self.search_names_list = QtGui.QListWidget( self )
        # Star-rating range filter: two spin boxes, both spanning 0..7.
        search_stars_label = QtGui.QLabel( 'Stars' )
        self.search_stars_from_input = QtGui.QSpinBox( self )
        self.search_stars_from_input.setRange( 0, 7 )
        self.search_stars_from_input.setValue( 0 )
        search_stars_to_label = QtGui.QLabel( 'to' )
        self.search_stars_to_input = QtGui.QSpinBox( self )
        self.search_stars_to_input.setRange( 0, 7 )
        self.search_stars_to_input.setValue( 7 )
        search_stars_layout = QtGui.QHBoxLayout()
        search_stars_layout.addWidget( search_stars_label )
        search_stars_layout.addWidget( self.search_stars_from_input )
        search_stars_layout.addWidget( search_stars_to_label )
        search_stars_layout.addWidget( self.search_stars_to_input )
        search_stars_layout.addStretch( 1 )
        search_stars_widget = QtGui.QWidget( self )
        search_stars_widget.setLayout( search_stars_layout )
        show_random_button = QtGui.QPushButton( 'Random Image', self )
        show_random_button.clicked.connect( self.show_random_image )
        show_all_button = QtGui.QPushButton( 'All Images', self )
        show_all_button.clicked.connect( self.show_all_images )
        # Group the search controls into a 4x2 grid inside a group box.
        search_box = QtGui.QGroupBox( 'Image Search', self )
        search_grid = QtGui.QGridLayout()
        search_grid.addWidget( search_tags_button, 0, 0, 1, 1 )
        search_grid.addWidget( search_names_button, 0, 1, 1, 1 )
        search_grid.addWidget( self.search_tags_list, 1, 0, 1, 1 )
        search_grid.addWidget( self.search_names_list, 1, 1, 1, 1 )
        search_grid.addWidget( search_stars_widget, 2, 0, 1, 2 )
        search_grid.addWidget( show_random_button, 3, 0, 1, 1 )
        search_grid.addWidget( show_all_button, 3, 1, 1, 1 )
        search_box.setLayout( search_grid )
        self.details = DioDetails( self )
        controls_layout = QtGui.QVBoxLayout()
        controls_layout.addWidget( scan_button, 1 )
        controls_layout.addWidget( search_box, 2 )
        controls_layout.addWidget( self.details, 1 )
        controls_widget = QtGui.QWidget( self )
        controls_widget.setLayout( controls_layout )
        self.addWidget( self.view )
        self.addWidget( controls_widget )
        self.setSizes( [ 600, 200 ] )
    def load_tags( self ):
        """Repopulate the tag filter list from the database.

        A leading empty item is added so the filter can be cleared.
        """
        tags = Database.get_all_tags()
        self.search_tags_list.clear()
        if tags is not None:
            self.search_tags_list.addItem( '' )
            for t in tags:
                self.search_tags_list.addItem( t )
    def load_names( self ):
        """Repopulate the name filter list from the database.

        A leading empty item is added so the filter can be cleared.
        """
        names = Database.get_all_names()
        self.search_names_list.clear()
        if names is not None:
            self.search_names_list.addItem( '' )
            for n in names:
                self.search_names_list.addItem( n )
    def get_filtered_selection( self ):
        """Query the database with the current tag/name/star filters.

        An unselected or empty list item is passed as None, i.e. "no filter".
        The result is stored via set_selected_range().
        """
        tag = self.search_tags_list.currentItem()
        if tag is not None:
            tag = str( tag.text() )
            if len( tag ) == 0:
                tag = None
        name = self.search_names_list.currentItem()
        if name is not None:
            name = str( name.text() )
            if len( name ) == 0:
                name = None
        stars_from = self.search_stars_from_input.value()
        stars_to = self.search_stars_to_input.value()
        self.set_selected_range( Database.get_filtered_selection( tag, name, stars_from, stars_to ) )
    def set_selected_range( self, new_range ):
        """Store a new search result and flag the view's list for refresh."""
        self.selected_range = new_range
        self.view.list_needs_update = True
    def show_all_images( self ):
        """Run the current search and display all matches as a list."""
        self.get_filtered_selection()
        self.selected_image = None
        self.show_list()
    def show_random_image( self ):
        """Run the current search and display one randomly chosen match."""
        self.get_filtered_selection()
        if len( self.selected_range ) > 0:
            hash_md5 = random.choice( self.selected_range )
            self.show_image( hash_md5 )
        else:
            self.selected_image = None
            self.details.show_text( 'Found Nothing' )
            self.view.show_text( 'Found Nothing' )
    def show_list( self ):
        """Show the current selection in the view and details panel."""
        if len( self.selected_range ) > 0:
            self.details.show_list_details( self.selected_range )
            self.view.show_list( self.selected_range )
        else:
            self.details.show_text( 'Found Nothing' )
            self.view.show_text( 'Found Nothing' )
    def show_image( self, hash_md5 ):
        """Show the single image identified by *hash_md5* everywhere."""
        self.selected_image = hash_md5
        self.details.show_image_details( hash_md5 )
        self.view.show_image( hash_md5 )
| 4,482 | 11 | 239 |
c964fcb822ead37433ac5cf5759b2dcade6d3081 | 1,919 | py | Python | Tool/music_img_getter.py | bosssu/LocalMusicPlayerVR | 0147c1300bb3e2a35c0ba46e628fd25963801b5c | [
"MIT"
] | 2 | 2019-11-01T11:42:51.000Z | 2020-03-10T07:24:51.000Z | Tool/music_img_getter.py | bosssu/LocalMusicPlayerVR | 0147c1300bb3e2a35c0ba46e628fd25963801b5c | [
"MIT"
] | null | null | null | Tool/music_img_getter.py | bosssu/LocalMusicPlayerVR | 0147c1300bb3e2a35c0ba46e628fd25963801b5c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import mutagen
import os
import json
current_dir = os.getcwd()
fileNameList_all = os.listdir(current_dir)
fileNameList_shouldUse = []
failelist = []
success_amount = 0 #https://www.jianshu.com/p/53cf61220828 感谢这位老哥的帖子
#提取同目录下所有mp3的封面图和一些其他信息
if(len(fileNameList_all) > 0):
for filename in fileNameList_all:
if(filename.endswith('.json' or filename.endswith('.jpg'))):
os.remove(filename)
if(len(fileNameList_all)>0):
for temp_name in fileNameList_all:
if str(temp_name).endswith(".mp3"):
fileNameList_shouldUse.append(temp_name)
# print('should process music file: ' + fileNameList_shouldUse)
if(len(fileNameList_shouldUse) > 0):
for temp_name in fileNameList_shouldUse:
AudioFileAssetsExport(temp_name)
print('------------------------------------------------------------------------------------------------------------------------------')
print('---------extract img success: ' + str(success_amount) + 'extract img fail: ' + str(len(fileNameList_shouldUse) - success_amount) + '----------------')
print(failelist)
os.system('pause')
| 30.460317 | 157 | 0.596144 | # -*- coding: utf-8 -*-
import mutagen
import os
import json
current_dir = os.getcwd()
fileNameList_all = os.listdir(current_dir)
fileNameList_shouldUse = []
failelist = []
success_amount = 0 #https://www.jianshu.com/p/53cf61220828 感谢这位老哥的帖子
#提取同目录下所有mp3的封面图和一些其他信息
def AudioFileAssetsExport(name):
    """Extract cover art and basic tag info from the mp3 file *name*.

    Writes ``<name without .mp3>.json`` containing title/author/album and
    ``<name without .mp3>.jpg`` containing the embedded APIC cover image
    into the current directory.  On success the module-level
    ``success_amount`` counter is incremented; on any failure (missing tag
    frames, unreadable file, ...) the file name is appended to the
    module-level ``failelist`` instead.
    """
    global success_amount
    json_savefile_name = str(name).replace('.mp3', '')
    try:
        inf = mutagen.File(name)
        title = inf.tags['TIT2'].text[0]   # title
        author = inf.tags["TPE1"].text[0]  # artist
        album = inf.tags["TALB"].text[0]   # album
        # Dump the extracted tag info as pretty-printed JSON.
        mp3info = {'title': title, 'author': author, 'album': album}
        info_str = json.dumps(mp3info, sort_keys=True, indent=4, separators=(',', ': '))
        with open(json_savefile_name + '.json', 'w+') as mp3inf:
            mp3inf.write(info_str)
        # Extract the embedded cover image (first/default APIC frame).
        imgdata = inf.tags['APIC:'].data
        with open(json_savefile_name + '.jpg', 'wb') as img:
            img.write(imgdata)
        success_amount += 1
    except Exception:
        # Was a bare ``except:`` before, which also swallowed
        # KeyboardInterrupt and SystemExit; now only real errors are
        # recorded as failures.
        failelist.append(name)
if(len(fileNameList_all) > 0):
for filename in fileNameList_all:
if(filename.endswith('.json' or filename.endswith('.jpg'))):
os.remove(filename)
if(len(fileNameList_all)>0):
for temp_name in fileNameList_all:
if str(temp_name).endswith(".mp3"):
fileNameList_shouldUse.append(temp_name)
# print('should process music file: ' + fileNameList_shouldUse)
if(len(fileNameList_shouldUse) > 0):
for temp_name in fileNameList_shouldUse:
AudioFileAssetsExport(temp_name)
print('------------------------------------------------------------------------------------------------------------------------------')
print('---------extract img success: ' + str(success_amount) + 'extract img fail: ' + str(len(fileNameList_shouldUse) - success_amount) + '----------------')
print(failelist)
os.system('pause')
| 795 | 0 | 22 |
7904b86d8abe58e7e4529517770077d8ca8f90b5 | 140 | py | Python | metrics/outputs/__init__.py | sebMathieu/code_metrics | f188041c8f2c0950c5f63a1f719cdb05aaeb42c9 | [
"MIT"
] | null | null | null | metrics/outputs/__init__.py | sebMathieu/code_metrics | f188041c8f2c0950c5f63a1f719cdb05aaeb42c9 | [
"MIT"
] | null | null | null | metrics/outputs/__init__.py | sebMathieu/code_metrics | f188041c8f2c0950c5f63a1f719cdb05aaeb42c9 | [
"MIT"
] | null | null | null | """
Output formats.
"""
from .rst import RST
from .console import Console
from .json import JSON
from .svg import SVG
from .png import PNG
| 14 | 28 | 0.735714 | """
Output formats.
"""
from .rst import RST
from .console import Console
from .json import JSON
from .svg import SVG
from .png import PNG
| 0 | 0 | 0 |
5a27514d3ca08d3797946bc9e86882294446f79d | 13,040 | py | Python | views/web/dustWeb/WebPage.py | twatteynelinear/dustlink_sierra | 9dac02d4fdfdee240a8a9da2e6abc2d7fda3443b | [
"BSD-3-Clause"
] | 4 | 2016-09-07T05:46:20.000Z | 2020-05-31T21:34:27.000Z | views/web/dustWeb/WebPage.py | twatteynelinear/dustlink_sierra | 9dac02d4fdfdee240a8a9da2e6abc2d7fda3443b | [
"BSD-3-Clause"
] | null | null | null | views/web/dustWeb/WebPage.py | twatteynelinear/dustlink_sierra | 9dac02d4fdfdee240a8a9da2e6abc2d7fda3443b | [
"BSD-3-Clause"
] | 6 | 2015-01-22T10:14:24.000Z | 2020-05-31T21:34:30.000Z | import logging
log = logging.getLogger('WebPage')
log.setLevel(logging.ERROR)
log.addHandler(NullHandler())
import os
import urllib
import web
from viz import Viz, \
VizBanner
TEMPLATE_PATH = os.path.join('templates')
LOOK_AND_FEEL = 'dust'
| 38.017493 | 108 | 0.486887 | import logging
class NullHandler(logging.Handler):
    """A logging handler that silently discards every record.

    Attached to this module's logger so that importing applications without
    logging configured do not get "No handlers could be found" warnings.
    NOTE(review): ``logging.NullHandler`` provides the same behaviour since
    Python 2.7 / 3.1; this local copy presumably targets older interpreters.
    """
    def emit(self, record):
        # Intentionally do nothing.
        pass
log = logging.getLogger('WebPage')
log.setLevel(logging.ERROR)
log.addHandler(NullHandler())
import os
import urllib
import web
from viz import Viz, \
VizBanner
TEMPLATE_PATH = os.path.join('templates')
LOOK_AND_FEEL = 'dust'
class WebPage(object):
DESIGN_ONE_COLUMN = 'one_column'
DESIGN_TWO_COLUMNS = 'two_columns'
DESIGN_ALL = [DESIGN_ONE_COLUMN,DESIGN_TWO_COLUMNS]
LAYOUT_HORIZONTAL = 'horizontal'
LAYOUT_VERTICAL = 'vertical'
LAYOUT_ALL = [LAYOUT_HORIZONTAL,LAYOUT_VERTICAL]
def __init__(self,webServer,url,title,webHandler,hidden=False):
# store params
self.webServer = webServer
self.url = url
self.title = title
self.webHandler = webHandler
self.hidden = hidden
# local variables
self.children = []
#======================== public ==========================================
def createPage(self,username=None,
currentPath=[],
design=DESIGN_TWO_COLUMNS,
layout=LAYOUT_VERTICAL,
visualizations=[]):
'''
\brief Create a full HTML page, ready to be sent back to the client.
\param[in] username The username associated with this client's session.
This can be used to display the username in the page.
\param[in] currentPath Path of the resulting page.
\param[in] design The design of the page, i.e. "look-and-feel" to expect.
This can translate in different templates.
Must be an element of DESIGN_ALL.
\param[in] layout The layout of the page, i.e. how the visualizations
are arranged inside the page.
Must be an element of LAYOUT_ALL.
\param[in] visualizations List of visualizations this page must contain.
Each visualization must be of type Viz.
'''
# filter errors
assert (not username) or isinstance(username,str)
assert isinstance(currentPath,list)
for p in currentPath:
assert isinstance(p,str)
assert design in self.DESIGN_ALL
assert layout in self.LAYOUT_ALL
assert isinstance(visualizations,list)
for v in visualizations:
assert isinstance(v,Viz.Viz)
# add a banner
visualizations += [
VizBanner.VizBanner(
webServer = self.webServer,
username = username,
resourcePath = ['banner'],
),
]
# get the pageTitle from the current path
pageTitle = self.webServer.getPageTitle(currentPath)
# get the template corresponding to the design
webtemplate = web.template.frender(
os.path.join(
TEMPLATE_PATH,
LOOK_AND_FEEL,
'{0}.html'.format(design)
)
)
# create the logFrameCode from the username
logFrameCode = self._buildLoginFrame(username)
# get the libraries from the visualizations
libraries = []
for v in visualizations:
libraries += v.getLibraries()
libraries = list(set(libraries)) # remove duplicates
# re-arrange library order to deal with dependencies
for lib in [Viz.Viz.LIBRARY_JQUERY]:
if lib in libraries:
# remove
libraries.remove(lib)
# put at front
libraries.insert(0,lib)
for lib in [Viz.Viz.LIBRARY_RAPHAEL,Viz.Viz.LIBRARY_MORRIS]:
if lib in libraries:
# remove
libraries.remove(lib)
# put at end
libraries.append(lib)
# create unique ID for each visualization
uniqueId = {}
for v in visualizations:
uniqueId[v] = 'id'+str(self.webServer.getUniqueNumber())
# create the headerCode from the visualizations
headerElems = []
for l in libraries:
headerElems += ['<script type="text/javascript" src="{0}"></script>'.format(l)]
for v in visualizations:
headerElems += [v.getHeaderCode(uniqueId[v])]
headerCode = '\n'.join(headerElems)
# get page level documentation
pathCopy = list(currentPath)
pathCopyLast = len(pathCopy) - 1
if pathCopyLast >= 0 and pathCopy[pathCopyLast].startswith("_"):
pathCopy[pathCopyLast] = '*'
pathTuple = tuple(pathCopy)
documentation = self.webServer.getDocumentation().getDocHTML(pathTuple, "page")
# create the bodyCode from the visualizations
bodyElems = []
for v in visualizations:
bodyElems += [v.getBodyCode(uniqueId[v])]
bodyCode = self._layoutElems(bodyElems,layout)
renderedPage = webtemplate (
pageTitle = pageTitle,
hierarchy = self.webServer.getUrlHierarchy(),
currentPath = currentPath,
logFrameCode = logFrameCode,
headerCode = headerCode,
bodyCode = bodyCode,
documentation = documentation,
)
return renderedPage
def registerPage(self,newChild):
# filter error
assert isinstance(newChild,WebPage)
# add to children
self.children.append(newChild)
def getUrlHierarchy(self,parentPath=[]):
assert not self.url.count('/')
newParentPath = parentPath+[self.url]
classUrl = newParentPath
if len(classUrl) and not classUrl[0]:
classUrl = classUrl[1:]
returnVal = {}
returnVal['url'] = self.urlListToString(newParentPath)
returnVal['title'] = self.title
returnVal['class'] = self.webServer.getDocumentation().getClass(classUrl)
returnVal['children'] = [c.getUrlHierarchy(newParentPath) for c in self.children if not c.hidden]
return returnVal
def getPageTitle(self,path):
# filter errors
assert isinstance(path,list)
for p in path:
assert isinstance(p,(str,unicode))
if len(path)>0:
if path[0].startswith('_'):
return urllib.unquote(urllib.unquote(path[0][1:]))
else:
for c in self.children:
urlElems = self.urlStringTolist(c.url)
if path[0]==urlElems[0]:
return c.getPageTitle(path[1:])
return 'unknown 1'
elif len(path)==0:
return self.title
else:
return 'unknown 2'
def getHandlerNameToHandlerClass(self,parentUrl=''):
assert not parentUrl.count('/')
assert not self.url.count('/')
returnVal = {}
# add my webHandler
returnVal[self.webHandler.__name__] = self.webHandler
# add my children's mapping
for child in self.children:
returnVal = dict(returnVal.items() + child.getHandlerNameToHandlerClass().items())
return returnVal
def getMappingUrlToHandlerName(self,parentUrl=''):
'''
\brief Return the mapping between URL's and webHandler's
This method returns a tuple, where URL's are in the odd positions and
webHandler in the even positions, e.g.:
(
'', 'rootHandler',
'level1', 'level1Handler',
'level1/level2','level2Handler',
)
This structure can be used directly by a web.py server.
'''
assert not parentUrl.count('/')
assert not self.url.count('/')
returnVal = []
# add me
returnVal += [self.urlListToString([parentUrl,self.url], trailingSlashOption=True),
self.webHandler.__name__]
returnVal += [self.urlListToString([parentUrl,self.url,'json','(.*)'],trailingSlashOption=True),
self.webHandler.__name__]
# add my children's mapping
for child in self.children:
returnVal += child.getMappingUrlToHandlerName(parentUrl=self.url)
# return a tuple
return tuple(returnVal)
#======================== private =========================================
def _buildLoginFrame(self,username):
if username in [self.webServer.defaultUsername]:
output = []
output += ["<form action=\"/login\" method=\"POST\">"]
output += [" <table id=\"login\">"]
output += [" <tr>"]
output += [" <td>Username:</td>"]
output += [" <td><input type=\"text\" name=\"username\"/></td>"]
output += [" <td>Password:</td>"]
output += [" <td><input type=\"password\" name=\"password\"/></td>"]
output += [" <td><input type=\"hidden\" name=\"action\" value=\"login\"/></td>"]
output += [" <td><input type=\"submit\" value=\"LOGIN\"/></td>"]
output += [" </tr>"]
output += [" </table>"]
output += ["</form>"]
return '\n'.join(output)
else:
output = []
output += ["<form action=\"/login\" method=\"POST\">"]
output += [" <table>"]
output += [" <tr>"]
output += [" <td>You are logged in as <b>{0}</b>.</td>".format(username)]
output += [" <td><input type=\"hidden\" name=\"action\" value=\"logout\"></td>"]
output += [" <td><input type=\"submit\" value=\"LOGOUT\"></td>"]
output += [" </tr>"]
output += [" </table>"]
output += ["</form>"]
return '\n'.join(output)
def _layoutElems(self,elems,layout):
# filter errors
assert isinstance(elems,list)
for e in elems:
assert isinstance(e,str)
assert layout in self.LAYOUT_ALL
returnVal = []
# returnVal += ['<table>']
if layout in [self.LAYOUT_HORIZONTAL]:
# returnVal += ['<tr>']
for e in elems:
# returnVal += ['<td>']
returnVal += [e]
# returnVal += ['</td>']
# returnVal += ['</tr>']
elif layout in [self.LAYOUT_VERTICAL]:
for e in elems:
# returnVal += ['<tr>']
# returnVal += ['<td>']
returnVal += [e]
# returnVal += ['</td>']
# returnVal += ['</tr>']
else:
raise SystemError('unexpected layout {0}'.format(layout))
# returnVal += ['</table>']
return '\n'.join(returnVal)
@classmethod
def urlListToString(self,urlList,trailingSlashOption=False):
# remove empty elements from urlList
urlList = [u for u in urlList if u]
returnVal = []
if urlList:
returnVal += ['/']
returnVal += ['/'.join(urlList)]
if trailingSlashOption:
returnVal += ['/?']
return ''.join(returnVal)
    @classmethod
    def urlStringTolist(self,urlString):
        """Split a URL string into its non-empty path elements, as str.

        Accepts str or unicode (Python 2 module); '/a//b/' -> ['a', 'b'].
        """
        # filter errors
        assert isinstance(urlString,(str,unicode))
        # split into elements
        urlList = urlString.split('/')
        # remove empty elements (can happen with e.g. trailing slash)
        urlList = [u for u in urlList if u]
        # convert elements to string (can be unicode)
        urlList = [str(u) for u in urlList]
        return urlList
07cae2206f471e746a05cb99a1ed9977cf590a4a | 43,501 | py | Python | CrimsonApi/monitorpackage/MonitorApi.py | denisdoci/PythonCrimsonHexagonAPI | 3f5cc3167586e236846d1843c80ff5e941cb497b | [
"MIT"
] | null | null | null | CrimsonApi/monitorpackage/MonitorApi.py | denisdoci/PythonCrimsonHexagonAPI | 3f5cc3167586e236846d1843c80ff5e941cb497b | [
"MIT"
] | null | null | null | CrimsonApi/monitorpackage/MonitorApi.py | denisdoci/PythonCrimsonHexagonAPI | 3f5cc3167586e236846d1843c80ff5e941cb497b | [
"MIT"
] | null | null | null | #
# Created by Denis Doci
#
# Copyright Mars Inc.
#
# For internal use only
#
import requests
import json
import pandas as pd
import numpy as np
import datetime
import itertools
import math
from datetime import date, timedelta
########################################
########################################
############################################################
############################################################
############################################################
############################################################
############################################################
############################################################
############################################################
############################################################
############################################################
############################################################
############################################################
############################################################
| 44.754115 | 119 | 0.517229 | #
# Created by Denis Doci
#
# Copyright Mars Inc.
#
# For internal use only
#
import datetime
import itertools
import json
import logging
import math
from datetime import date, timedelta

import numpy as np
import pandas as pd
import requests
class monitor_api:
monitorId = ''
authenticationToken = ''
def __init__(self, monitorId, authenticationToken):
self.monitorId = monitorId
self.authenticationToken = authenticationToken
def create_empty_df(self, columns, dtypes, index=None):
assert len(columns) == len(dtypes)
df = pd.DataFrame(index=index)
for c, d in zip(columns, dtypes):
df[c] = pd.Series(dtype=d)
return df
# ***************************************************** ###
# ***************************************************** ###
# *** *** ###
# *** Results: Volume, Sentiment & Categories *** ###
# *** The monitor results endpoint returns *** ###
# *** aggregate volume, sentiment, emotion and *** ###
# *** opinion category analysis for a given monitor *** ###
# *** *** ###
# ***************************************************** ###
# ***************************************************** ###
# ***************************************************** ###
    def get_monitor_results_request(self, start_date, end_date, **kwargs):
        """GET /api/monitor/results for this monitor between start_date and
        end_date ('YYYY-MM-DD' strings) and return the parsed 'results' list.

        Optional kwarg: hideExcluded -- forwarded as a query parameter.
        Raises on a non-JSON response or a payload without 'results'.
        """
        url = "https://api.crimsonhexagon.com/api/monitor/results"
        querystring = {"auth": self.authenticationToken, "id": self.monitorId,
                       "start": start_date, "end": end_date}
        if "hideExcluded" in kwargs:
            querystring["hideExcluded"] = kwargs["hideExcluded"]
        response = requests.request("GET", url, params=querystring)
        json_data = json.loads(response.text)['results']
        return json_data
def create_empty_result_df(self):
df = self.create_empty_df(['startDate', 'endDate', 'creationDate',
'numberOfDocuments', 'numberOfRelevantDocuments',
'Basic_Positive_Proportion', 'Basic_Positive_Volume',
'Basic_Negative_Proportion',
'Basic_Negative_Volume', 'Basic_Neutral_Proportion', 'Basic_Neutral_Volume',
'Emotion_Joy_Proportion', 'Emotion_Joy_Volume', 'Emotion_Sadness_Proportion',
'Emotion_Sadness_Volume', 'Emotion_Anger_Proportion', 'Emotion_Anger_Volume',
'Emotion_Disgust_Proportion', 'Emotion_Disgust_Volume',
'Emotion_Surprise_Proportion',
'Emotion_Surprise_Volume', 'Emotion_Fear_Proportion', 'Emotion_Fear_Volume',
'Emotion_Neutral_Proportion', 'Emotion_Neutral_Volume'],
dtypes=[np.str, np.str, np.str,
np.float, np.float,
np.float, np.float, np.float,
np.float, np.float, np.float,
np.float, np.float, np.float,
np.float, np.float, np.float,
np.float, np.float, np.float,
np.float, np.float, np.float,
np.float, np.float]
)
return df
def results_row_2_df_row(self, resultsRow, data):
row = {}
if 'startDate' in resultsRow.keys():
row['startDate'] = resultsRow['startDate']
else:
row['startDate'] = ''
if 'endDate' in resultsRow.keys():
row['endDate'] = resultsRow['endDate']
else:
row['endDate'] = ''
if 'numberOfDocuments' in resultsRow.keys():
row['numberOfDocuments'] = resultsRow['numberOfDocuments']
else:
row['numberOfDocuments'] = ''
if 'numberOfRelevantDocuments' in resultsRow.keys():
row['numberOfRelevantDocuments'] = resultsRow['numberOfRelevantDocuments']
else:
row['numberOfRelevantDocuments'] = ''
try:
row['Basic_Positive_Proportion'] = resultsRow['categories'][2]['proportion']
row['Basic_Positive_Volume'] = resultsRow['categories'][2]['volume']
row['Basic_Negative_Proportion'] = resultsRow['categories'][0]['proportion']
row['Basic_Negative_Volume'] = resultsRow['categories'][0]['volume']
row['Basic_Neutral_Proportion'] = resultsRow['categories'][1]['proportion']
row['Basic_Neutral_Volume'] = resultsRow['categories'][1]['volume']
except:
row['Basic_Positive_Proportion'] = ''
row['Basic_Positive_Volume'] = ''
row['Basic_Negative_Proportion'] = ''
row['Basic_Negative_Volume'] = ''
row['Basic_Neutral_Proportion'] = ''
row['Basic_Neutral_Volume'] = ''
try:
row['Emotion_Joy_Proportion'] = resultsRow['emotions'][5]['proportion']
row['Emotion_Joy_Volume'] = resultsRow['emotions'][5]['volume']
row['Emotion_Sadness_Proportion'] = resultsRow['emotions'][2]['proportion']
row['Emotion_Sadness_Volume'] = resultsRow['emotions'][2]['volume']
row['Emotion_Anger_Proportion'] = resultsRow['emotions'][3]['proportion']
row['Emotion_Anger_Volume'] = resultsRow['emotions'][3]['volume']
row['Emotion_Disgust_Proportion'] = resultsRow['emotions'][4]['proportion']
row['Emotion_Disgust_Volume'] = resultsRow['emotions'][4]['volume']
row['Emotion_Surprise_Proportion'] = resultsRow['emotions'][1]['proportion']
row['Emotion_Surprise_Volume'] = resultsRow['emotions'][1]['volume']
row['Emotion_Fear_Proportion'] = resultsRow['emotions'][0]['proportion']
row['Emotion_Fear_Volume'] = resultsRow['emotions'][0]['volume']
row['Emotion_Neutral_Proportion'] = resultsRow['emotions'][6]['proportion']
row['Emotion_Neutral_Volume'] = resultsRow['emotions'][6]['volume']
except:
row['Emotion_Joy_Proportion'] = ''
row['Emotion_Joy_Volume'] = ''
row['Emotion_Sadness_Proportion'] = ''
row['Emotion_Sadness_Volume'] = ''
row['Emotion_Anger_Proportion'] = ''
row['Emotion_Anger_Volume'] = ''
row['Emotion_Disgust_Proportion'] = ''
row['Emotion_Disgust_Volume'] = ''
row['Emotion_Surprise_Proportion'] = ''
row['Emotion_Surprise_Volume'] = ''
row['Emotion_Fear_Proportion'] = ''
row['Emotion_Fear_Volume'] = ''
row['Emotion_Neutral_Proportion'] = ''
row['Emotion_Neutral_Volume'] = ''
data.insert(0, row)
def json_2_pandas_results(self, json_data):
returnDf = self.create_empty_result_df()
data = []
for row in json_data:
self.results_row_2_df_row(row, data)
returnDf = pd.concat([pd.DataFrame(data), returnDf], ignore_index=True, sort=False)
return returnDf
# ****************************************************** ###
# ****************************************************** ###
# *** Posts *** ###
# *** The posts endpoint returns post-level *** ###
# *** information (where available) and associated *** ###
# *** analysis (sentiment, emotion) for a given *** ###
# *** monitor. *** ###
# ****************************************************** ###
# ****************************************************** ###
def day_list(self, dates):
return pd.date_range(dates[0], dates[1], freq='D').strftime("%Y-%m-%d").tolist()
# recursive get posts INCLUSIVE: start_date EXCLUSIVE: end_date
def recursive_pull(self, start_date, end_date, df_numPosts, **kwargs):
# print(start_date)
# print(end_date)
numberPosts = df_numPosts.where(
df_numPosts["startDate"] >= pd.to_datetime(datetime.datetime.strptime(start_date, '%Y-%m-%d')))
numberPosts = numberPosts.where(
df_numPosts["endDate"] <= pd.to_datetime(datetime.datetime.strptime(end_date, '%Y-%m-%d')))
numberPosts = numberPosts['numberOfDocuments'].sum()
# print(numberPosts)
days = self.day_list([start_date, end_date])
# basecase
if numberPosts < 10000:
url = "https://api.crimsonhexagon.com/api/monitor/posts"
querystring = {"auth": self.authenticationToken, "id": self.monitorId,
"start": start_date, "end": end_date}
if "filter" in kwargs:
querystring["filter"] = kwargs["filter"]
if "geotagged" in kwargs:
querystring["geotagged"] = kwargs["geotagged"]
querystring["extendLimit"] = 'True'
querystring["fullContents"] = 'True'
response = requests.request("GET", url, params=querystring)
if response.text:
json_data = json.loads(response.text)
if 'posts' in json_data.keys():
posts = json_data['posts']
return posts
elif len(days) == 2:
url = "https://api.crimsonhexagon.com/api/monitor/posts"
querystring = {"auth": self.authenticationToken, "id": self.monitorId,
"start": start_date, "end": end_date}
if "filter" in kwargs:
querystring["filter"] = kwargs["filter"]
if "geotagged" in kwargs:
querystring["geotagged"] = kwargs["geotagged"]
querystring["extendLimit"] = 'True'
querystring["fullContents"] = 'True'
response = requests.request("GET", url, params=querystring)
if response.text:
json_data = json.loads(response.text)
if 'posts' in json_data.keys():
posts = json_data['posts']
logging.warning('10,000 posts per day limit has been enforced for {0}'.format(start_date))
return posts
elif len(days) == 3:
posts1 = self.recursive_pull(start_date, days[1], df_numPosts)
posts2 = self.recursive_pull(days[1], end_date, df_numPosts)
return posts1 + posts2
else:
half = math.ceil(len(days) / 2)
posts1 = self.recursive_pull(start_date, days[half], df_numPosts)
posts2 = self.recursive_pull(days[half], end_date, df_numPosts)
return posts1 + posts2
return []
    def get_monitor_posts_request(self, start_date, end_date, **kwargs):
        """Fetch all posts for the monitor between start_date and end_date.

        First pulls daily volumes via /monitor/results so recursive_pull can
        decide where to split the range, then recursively collects posts.
        NOTE(review): **kwargs is accepted but not forwarded to
        recursive_pull -- filter/geotagged appear to be dropped; confirm.
        """
        url = "https://api.crimsonhexagon.com/api/monitor/results"
        querystring = {"auth": self.authenticationToken, "id": self.monitorId,
                       "start": start_date, "end": end_date}
        response = requests.request("GET", url, params=querystring)
        json_data = json.loads(response.text)['results']
        volumeData = self.json_2_pandas_results(json_data)
        # parse the date columns so recursive_pull can compare them
        volumeData["startDate"] = pd.to_datetime(volumeData["startDate"])
        volumeData["endDate"] = pd.to_datetime(volumeData["endDate"])
        # volumeData["startDate"] = volumeData["startDate"].dt.date
        # volumeData["endDate"] = volumeData["endDate"].dt.date
        url = "https://api.crimsonhexagon.com/api/monitor/posts"
        totalposts = []
        totalposts.append(self.recursive_pull(start_date, end_date, volumeData))
        # recursive_pull returns a list per call; flatten into one list
        totalposts = list(itertools.chain.from_iterable(totalposts))
        return totalposts
def create_empty_post_df(self):
df = self.create_empty_df(
['url', 'date', 'author', 'title', 'contents', 'type', 'location',
'geolocation_id', 'geolocation_name', 'geolocation_country', 'geolocation_state',
'language', 'authorGender', 'Basic_Positive', 'Basic_Negative', 'Basic_Neutral',
'Emotion_Joy', 'Emotion_Sadness', 'Emotion_Anger', 'Emotion_Disgust',
'Emotion_Surprise', 'Emotion_Fear', 'Emotion_Neutral'],
dtypes=[np.str, np.str, np.str, np.str, np.str, np.str, np.str,
np.str, np.str, np.str, np.str, np.str,
np.str, np.float, np.float, np.float,
np.float, np.float, np.float, np.float,
np.float, np.float, np.float]
)
return df
def post_row_2_df_row(self, postRow, data):
row = {}
if 'url' in postRow.keys():
row['url'] = postRow['url']
else:
row['url'] = ''
if 'date' in postRow.keys():
row['date'] = postRow['date']
else:
row['date'] = ''
try:
row['author'] = postRow['author'].encode('utf-8')
except:
row['author'] = ''
try:
row['title'] = postRow['title'].encode('utf-8')
except:
row['title'] = ''
if 'type' in postRow.keys():
row['type'] = postRow['type']
else:
row['type'] = ''
if 'location' in postRow.keys():
row['location'] = postRow['location']
else:
row['location'] = ''
try:
if postRow['type'] == 'Reddit':
print(postRow['contents'])
row['contents'] = postRow['contents'].encode('utf-8')
except:
row['contents'] = ''
if 'geolocation' in postRow.keys():
if 'id' in postRow['geolocation'].keys():
row['geolocation_id'] = postRow['geolocation']['id']
else:
row['geolocation_id'] = ''
if 'geolocation' in postRow.keys():
if 'name' in postRow['geolocation'].keys():
row['geolocation_name'] = postRow['geolocation']['name']
else:
row['geolocation_name'] = ''
if 'geolocation' in postRow.keys():
if 'country' in postRow['geolocation'].keys():
row['geolocation_name'] = postRow['geolocation']['country']
else:
row['geolocation_country'] = ''
if 'geolocation' in postRow.keys():
if 'state' in postRow['geolocation'].keys():
row['geolocation_state'] = postRow['geolocation']['state']
else:
row['geolocation_state'] = ''
if 'language' in postRow.keys():
row['language'] = postRow['language']
else:
row['language'] = ''
if 'authorGender' in postRow.keys():
row['authorGender'] = postRow['authorGender']
else:
row['authorGender'] = ''
try:
row['Basic_Positive'] = postRow['categoryScores'][0]['score']
row['Basic_Negative'] = postRow['categoryScores'][1]['score']
row['Basic_Neutral'] = postRow['categoryScores'][2]['score']
except:
row['Basic_Positive'] = ''
row['Basic_Negative'] = ''
row['Basic_Neutral'] = ''
try:
row['Emotion_Anger'] = postRow['emotionScores'][0]['score']
row['Emotion_Joy'] = postRow['emotionScores'][1]['score']
row['Emotion_Sadness'] = postRow['emotionScores'][2]['score']
row['Emotion_Disgust'] = postRow['emotionScores'][3]['score']
row['Emotion_Surprise'] = postRow['emotionScores'][4]['score']
row['Emotion_Fear'] = postRow['emotionScores'][5]['score']
row['Emotion_Neutral'] = postRow['emotionScores'][6]['score']
except:
row['Emotion_Joy'] = ''
row['Emotion_Sadness'] = ''
row['Emotion_Anger'] = ''
row['Emotion_Disgust'] = ''
row['Emotion_Surprise'] = ''
row['Emotion_Fear'] = ''
row['Emotion_Neutral'] = ''
data.insert(0, row)
def json_2_pandas_posts(self, json_data):
# returnDf = self.create_empty_post_df()
data = []
for post in json_data:
self.post_row_2_df_row(post, data)
returnDf = pd.DataFrame(data)
return returnDf
# ********************************************************** ###
# ********************************************************** ###
# *** Volume *** ###
# *** *** ###
# *** Returns volume metrics for a given monitor *** ###
# *** split by hour, day, week or month. Week and month *** ###
# *** aggregations requires a date range of at least 1 *** ###
# *** full unit; e.g., WEEKLY requires a date range of *** ###
# *** at least 1 week;. Additionally, these *** ###
# *** aggregations only returns full units so the range *** ###
# *** may be truncated. e.g., 2017-01-15 to 2017-03-15 *** ###
# *** with MONTHLY grouping will return a date range *** ###
# *** of 2017-02-01 to 2017-03-01. A monitor must have *** ###
# *** complete results for the specified date range. *** ###
# *** If any day in the range is missing results an *** ###
# *** error will be returned. *** ###
# *** *** ###
# ********************************************************** ###
# ********************************************************** ###
    def get_monitor_volume_request(self, start_date, end_date, **kwargs):
        """GET /api/monitor/volume and return the parsed JSON dict.

        Optional kwarg: groupBy (HOURLY/DAILY/WEEKLY/MONTHLY), forwarded
        as a query parameter.
        """
        url = "https://api.crimsonhexagon.com/api/monitor/volume"
        querystring = {"auth": self.authenticationToken, "id": self.monitorId,
                       "start": start_date, "end": end_date}
        if "groupBy" in kwargs:
            querystring["groupBy"] = kwargs["groupBy"]
        response = requests.request("GET", url, params=querystring)
        json_data = json.loads(response.text)
        return json_data
def get_sub_char_groupby_volume(self, data):
if data['groupBy'] == 'HOURLY':
return 'h'
elif data['groupBy'] == 'DAILY':
return 'd'
elif data['groupBy'] == 'WEEKLY':
return 'w'
elif data['groupBy'] == 'MONTHLY':
return 'm'
else:
return ''
def volumes_2_dict_list(self, json_data, data):
tempData = {}
for timeframe in json_data['volume']:
tempData['totalStartDate'] = json_data['startDate']
tempData['totalEndDate'] = json_data['endDate']
tempData['timezone'] = json_data['timezone']
tempData['groupBy'] = json_data['groupBy']
tempData['totalNumberOfDocuments'] = json_data['numberOfDocuments']
tempData['startDate'] = timeframe['startDate']
tempData['endDate'] = timeframe['endDate']
tempData['numberOfDocuments'] = timeframe['numberOfDocuments']
data.append(tempData)
tempData = {}
def json_2_pandas_volume(self, json_data):
data = []
self.volumes_2_dict_list(json_data, data)
returnDf = pd.DataFrame(data)
return returnDf
# ********************************************************** ###
# ********************************************************** ###
# *** Volume by Day and Time *** ###
# *** Buzz, Opinion & Social Account Monitors *** ###
# *** *** ###
# *** Returns volume information for a given monitor *** ###
# *** aggregated by time of day or day of week. A monitor*** ###
# *** must have complete results for the specified date *** ###
# *** range. If any day in the range is missing results *** ###
# *** an error will be returned. *** ###
# *** *** ###
# ********************************************************** ###
# ********************************************************** ###
    def get_monitor_volume_by_dt_request(self, start_date, end_date, **kwargs):
        """GET /api/monitor/dayandtime and return the parsed JSON dict.

        Optional kwarg: aggregatedbyday, forwarded as a query parameter.
        """
        url = "https://api.crimsonhexagon.com/api/monitor/dayandtime"
        querystring = {"auth": self.authenticationToken, "id": self.monitorId,
                       "start": start_date, "end": end_date}
        if "aggregatedbyday" in kwargs:
            querystring["aggregatedbyday"] = kwargs["aggregatedbyday"]
        response = requests.request("GET", url, params=querystring)
        json_data = json.loads(response.text)
        return json_data
def volumes_by_dt_2_dict_list(self, json_data, data):
tempData = {}
for timeframe in json_data['volumes']:
tempData['startDate'] = timeframe['startDate']
tempData['endDate'] = timeframe['endDate']
tempData['numberOfDocuments'] = timeframe['numberOfDocuments']
for key, value in timeframe['volume'].items():
tempData['volume_' + str(key)] = value
data.append(tempData)
tempData = {}
def json_2_pandas_volume_by_dt(self, json_data):
data = []
self.volumes_by_dt_2_dict_list(json_data, data)
returnDf = pd.DataFrame(data)
return returnDf
# ********************************************************** ###
# ********************************************************** ###
# *** Word Cloud *** ###
# *** Buzz, Opinion & Social Account Monitors *** ###
# *** *** ###
# *** The Word Cloud endpoint returns an alphabetized *** ###
# *** list of the top 300 words in a monitor. This data *** ###
# *** is generated using documents randomly selected *** ###
# *** from the pool defined by the submitted parameters. *** ###
# *** *** ###
# ********************************************************** ###
# ********************************************************** ###
    def get_monitor_wordcloud_request(self, start_date, end_date, **kwargs):
        """GET /api/monitor/wordcloud (top ~300 words) and return the parsed
        JSON dict. kwargs is accepted but unused."""
        url = "https://api.crimsonhexagon.com/api/monitor/wordcloud"
        querystring = {"auth": self.authenticationToken, "id": self.monitorId,
                       "start": start_date, "end": end_date}
        response = requests.request("GET", url, params=querystring)
        json_data = json.loads(response.text)
        return json_data
def wordcloud_row_2_df_row(self, postRow, data):
tempData = {}
for word, value in postRow['data'].items():
tempData['word'] = word
tempData['weight'] = value
data.append(tempData)
tempData = {}
row = {}
def json_2_pandas_wordcloud(self, json_data):
data = []
self.wordcloud_row_2_df_row(json_data, data)
returnDf = pd.DataFrame(data)
return returnDf
# ********************************************************** ###
# ********************************************************** ###
# *** Age *** ###
# *** Buzz, Opinion & Social Account Monitors *** ###
# *** *** ###
# *** Returns volume metrics for a given monitor split *** ###
# *** monitor split by age bracket. *** ###
# *** *** ###
# ********************************************************** ###
# ********************************************************** ###
    def get_monitor_age_request(self, start_date, end_date, **kwargs):
        """GET /api/monitor/demographics/age and return the parsed JSON
        dict. kwargs is accepted but unused."""
        url = "https://api.crimsonhexagon.com/api/monitor/demographics/age"
        querystring = {"auth": self.authenticationToken, "id": self.monitorId,
                       "start": start_date, "end": end_date}
        response = requests.request("GET", url, params=querystring)
        json_data = json.loads(response.text)
        return json_data
def create_empty_age_df(self):
df = self.create_empty_df(
['startDate', 'endDate', 'numberOfDocuments', 'ZERO_TO_SEVENTEEN', 'EIGHTEEN_TO_TWENTYFOUR',
'TWENTYFIVE_TO_THIRTYFOUR', 'THIRTYFIVE_AND_OVER'
, 'totalAgeCount'],
dtypes=[np.str, np.str, np.float, np.float, np.float, np.float, np.float, np.float]
)
return df
def age_row_2_df_row(self, resultsRow, data):
row = {}
if 'startDate' in resultsRow.keys():
row['startDate'] = resultsRow['startDate']
else:
row['startDate'] = ''
if 'endDate' in resultsRow.keys():
row['endDate'] = resultsRow['endDate']
else:
row['endDate'] = ''
if 'numberOfDocuments' in resultsRow.keys():
row['numberOfDocuments'] = resultsRow['numberOfDocuments']
else:
row['numberOfDocuments'] = ''
if 'ageCount' in resultsRow.keys() and 'sortedAgeCounts' in resultsRow['ageCount']:
if 'ZERO_TO_SEVENTEEN' in resultsRow['ageCount']['sortedAgeCounts']:
row['ZERO_TO_SEVENTEEN'] = resultsRow['ageCount']['sortedAgeCounts']['ZERO_TO_SEVENTEEN']
else:
row['ZERO_TO_SEVENTEEN'] = ''
if 'EIGHTEEN_TO_TWENTYFOUR' in resultsRow['ageCount']['sortedAgeCounts']:
row['EIGHTEEN_TO_TWENTYFOUR'] = resultsRow['ageCount']['sortedAgeCounts']['EIGHTEEN_TO_TWENTYFOUR']
else:
row['EIGHTEEN_TO_TWENTYFOUR'] = ''
if 'TWENTYFIVE_TO_THIRTYFOUR' in resultsRow['ageCount']['sortedAgeCounts']:
row['TWENTYFIVE_TO_THIRTYFOUR'] = resultsRow['ageCount']['sortedAgeCounts']['TWENTYFIVE_TO_THIRTYFOUR']
else:
row['TWENTYFIVE_TO_THIRTYFOUR'] = ''
if 'THIRTYFIVE_AND_OVER' in resultsRow['ageCount']['sortedAgeCounts']:
row['THIRTYFIVE_AND_OVER'] = resultsRow['ageCount']['sortedAgeCounts']['THIRTYFIVE_AND_OVER']
else:
row['THIRTYFIVE_AND_OVER'] = ''
if 'totalAgeCount' in resultsRow.keys():
row['totalAgeCount'] = resultsRow['ageCount']['totalAgeCount']
else:
row['totalAgeCount'] = ''
data.insert(0, row)
def json_2_pandas_age(self, json_data):
data = []
for day in json_data['ageCounts']:
self.age_row_2_df_row(day, data)
retdf = pd.DataFrame.from_records(data)
return retdf
# ********************************************************** ###
# ********************************************************** ###
# *** Gender *** ###
# *** Buzz, Opinion & Social Account Monitors *** ###
# *** *** ###
# *** Returns volume metrics for a given monitor split *** ###
# *** monitor split by gender. *** ###
# *** *** ###
# ********************************************************** ###
# ********************************************************** ###
    def get_monitor_gender_request(self, start_date, end_date, **kwargs):
        """GET /api/monitor/demographics/gender and return the parsed JSON
        dict. kwargs is accepted but unused."""
        url = "https://api.crimsonhexagon.com/api/monitor/demographics/gender"
        querystring = {"auth": self.authenticationToken, "id": self.monitorId,
                       "start": start_date, "end": end_date}
        response = requests.request("GET", url, params=querystring)
        json_data = json.loads(response.text)
        return json_data
def create_empty_gender_df(self):
df = self.create_empty_df(
['startDate', 'endDate', 'numberOfDocuments', "maleCount",
"femaleCount",
"totalGenderedCount",
"percentMale",
"percentFemale"],
dtypes=[np.str, np.str, np.float, np.float, np.float, np.float, np.float, np.float]
)
return df
def gender_row_2_df_row(self, resultsRow, data):
row = {}
if 'startDate' in resultsRow.keys():
row['startDate'] = resultsRow['startDate']
else:
row['startDate'] = ''
if 'endDate' in resultsRow.keys():
row['endDate'] = resultsRow['endDate']
else:
row['endDate'] = ''
if 'numberOfDocuments' in resultsRow.keys():
row['numberOfDocuments'] = resultsRow['numberOfDocuments']
else:
row['numberOfDocuments'] = ''
if 'maleCount' in resultsRow.keys():
row['maleCount'] = resultsRow['genderCounts']['maleCount']
else:
row['maleCount'] = ''
if 'femaleCount' in resultsRow.keys():
row['femaleCount'] = resultsRow['genderCounts']['femaleCount']
else:
row['femaleCount'] = ''
if 'totalGenderedCount' in resultsRow.keys():
row['totalGenderedCount'] = resultsRow['genderCounts']['totalGenderedCount']
else:
row['totalGenderedCount'] = ''
if 'percentMale' in resultsRow.keys():
row['percentMale'] = resultsRow['genderCounts']['percentMale']
else:
row['percentMale'] = ''
if 'percentFemale' in resultsRow.keys():
row['percentFemale'] = resultsRow['genderCounts']['percentFemale']
else:
row['percentFemale'] = ''
data.insert(0, row)
def json_2_pandas_gender(self, json_data):
returnDf = self.create_empty_gender_df()
data = []
for day in json_data['genderCounts']:
self.gender_row_2_df_row(day, data)
returnDf = pd.concat([pd.DataFrame(data), returnDf], ignore_index=True, sort=False)
if 'returnDf' in locals():
return returnDf
########################################
    def get_twitter_metrics(self, start_date, end_date):
        """GET /api/monitor/twittermetrics and return the parsed JSON dict.

        On a non-JSON response, prints the raw body and the error and
        returns None (best-effort behavior kept as-is).
        """
        url = "https://api.crimsonhexagon.com/api/monitor/twittermetrics"
        querystring = {"auth": self.authenticationToken, "id": self.monitorId,
                       "start": start_date, "end": end_date}
        response = requests.request("GET", url, params=querystring)
        try:
            json_data = json.loads(response.text)
            return json_data
        except Exception as E:
            print(response.text)
            print(E)
            return
def twitter_metrics_row_2_df_row(self, resultsRow, data):
row = {}
date = resultsRow['startDate']
if 'topHashtags' in resultsRow.keys():
for hashtag in resultsRow['topHashtags']:
row['date'] = date
row['content'] = hashtag
row['type'] = 'hashtag'
row['count'] = resultsRow['topHashtags'][hashtag]
data.append(row)
row = {}
if 'topMentions' in resultsRow.keys():
for mention in resultsRow['topMentions']:
row['date'] = date
row['content'] = mention
row['type'] = 'mention'
row['count'] = resultsRow['topMentions'][mention]
data.append(row)
row = {}
if 'topRetweets' in resultsRow.keys():
for retweet in resultsRow['topRetweets']:
row['date'] = date
row['content'] = retweet['url']
row['type'] = 'retweet'
row['count'] = retweet['retweetCount']
data.append(row)
def json_2_pandas_twitter_metrics(self, json_data):
data = []
for day in json_data['dailyResults']:
self.twitter_metrics_row_2_df_row(day, data)
returnDf = pd.DataFrame(data)
if 'returnDf' in locals():
return returnDf
########################################
    def get_twitter_sentposts(self, start_date, end_date):
        """GET /api/monitor/twittersocial/sentposts and return the parsed
        JSON dict; prints and returns None on a non-JSON response."""
        url = "https://api.crimsonhexagon.com/api/monitor/twittersocial/sentposts"
        querystring = {"auth": self.authenticationToken, "id": self.monitorId,
                       "start": start_date, "end": end_date}
        response = requests.request("GET", url, params=querystring)
        try:
            json_data = json.loads(response.text)
            return json_data
        except Exception as E:
            print(response.text)
            print(E)
            return
def twitter_sentposts_row_2_df_row(self, resultsRow, data):
row = {}
if 'sentPostMetrics' in resultsRow.keys() and len(resultsRow['sentPostMetrics']) > 0:
for metric in resultsRow['sentPostMetrics']:
row['url'] = metric['url']
row['date'] = metric['date']
row['retweets'] = metric['retweets']
row['replies'] = metric['replies']
row['impressions'] = metric['impressions']
row['content'] = metric['content']
data.append(row)
row = {}
def json_2_pandas_sentposts(self, json_data):
data = []
for day in json_data['dailyResults']:
self.twitter_sentposts_row_2_df_row(day, data)
returnDf = pd.DataFrame(data)
if 'returnDf' in locals():
return returnDf
############################################################
    def get_twitter_followers(self, start_date, end_date):
        """GET /api/monitor/twittersocial/followers and return the parsed
        JSON dict; prints and returns None on a non-JSON response."""
        url = "https://api.crimsonhexagon.com/api/monitor/twittersocial/followers"
        querystring = {"auth": self.authenticationToken, "id": self.monitorId,
                       "start": start_date, "end": end_date}
        response = requests.request("GET", url, params=querystring)
        try:
            json_data = json.loads(response.text)
            return json_data
        except Exception as E:
            print(response.text)
            print(E)
            return
def twitter_followers_row_2_df_row(self, resultsRow, data):
row = {}
row['date'] = resultsRow['date']
row['followers'] = resultsRow['followers']
data.append(row)
row = {}
def json_2_pandas_twitter_followers(self, json_data):
data = []
for day in json_data['dailyResults']:
self.twitter_followers_row_2_df_row(day, data)
returnDf = pd.DataFrame(data)
if 'returnDf' in locals():
return returnDf
############################################################
    def get_facebook_admin_posts(self, start_date, end_date):
        """GET /api/monitor/facebook/adminposts and return the parsed JSON
        dict; prints and returns None on a non-JSON response."""
        url = "https://api.crimsonhexagon.com/api/monitor/facebook/adminposts"
        querystring = {"auth": self.authenticationToken, "id": self.monitorId,
                       "start": start_date, "end": end_date}
        response = requests.request("GET", url, params=querystring)
        try:
            json_data = json.loads(response.text)
            return json_data
        except Exception as E:
            print(response.text)
            print(E)
            return
def facebook_admin_posts_row_2_df_row(self, resultsRow, data):
row = {}
if 'adminPostMetrics' in resultsRow.keys() and len(resultsRow['adminPostMetrics']) > 0:
for metric in resultsRow['adminPostMetrics']:
row['content'] = metric['content']
row['url'] = metric['url']
row['date'] = metric['date']
row['postLikes'] = metric['postLikes']
row['postShares'] = metric['postShares']
row['postComments'] = metric['postComments']
row['isLocked'] = metric['isLocked']
data.append(row)
row = {}
def json_2_pandas_facebook_admin_posts(self, json_data):
data = []
for day in json_data['dailyResults']:
self.facebook_admin_posts_row_2_df_row(day, data)
returnDf = pd.DataFrame(data)
if 'returnDf' in locals():
return returnDf
############################################################
############################################################
def get_facebook_page_likes(self, start_date, end_date):
url = "https://api.crimsonhexagon.com/api/monitor/facebook/pagelikes"
querystring = {"auth": self.authenticationToken, "id": self.monitorId,
"start": start_date, "end": end_date}
response = requests.request("GET", url, params=querystring)
try:
json_data = json.loads(response.text)
return json_data
except Exception as E:
print(response.text)
print(E)
return
def facebook_page_likes_row_2_df_row(self, resultsRow, data):
row = {}
row['date'] = resultsRow['date']
row['likes'] = resultsRow['likes']
data.append(row)
def json_2_pandas_facebook_page_likes(self, json_data):
data = []
for day in json_data['dailyResults']:
self.facebook_page_likes_row_2_df_row(day, data)
returnDf = pd.DataFrame(data)
if 'returnDf' in locals():
return returnDf
############################################################
############################################################
def get_facebook_total_activity(self, start_date, end_date):
url = "https://api.crimsonhexagon.com/api/monitor/facebook/totalactivity"
querystring = {"auth": self.authenticationToken, "id": self.monitorId,
"start": start_date, "end": end_date}
response = requests.request("GET", url, params=querystring)
try:
json_data = json.loads(response.text)
return json_data
except Exception as E:
print(response.text)
print(E)
return
def facebook_total_activity_row_2_df_row(self, resultsRow, data):
row = {}
row['date'] = resultsRow['startDate']
admin = resultsRow['admin']
user = resultsRow['user']
if len(admin)>0:
row['adminPosts'] = admin['adminPosts']
row['likesOnAdmin'] = admin['likesOnAdmin']
row['commentsOnAdmin'] = admin['commentsOnAdmin']
row['sharesOnAdmin'] = admin['sharesOnAdmin']
if len(user)>0:
row['userPosts'] = user['userPosts']
row['likesOnUser'] = user['likesOnUser']
row['commentsOnUser'] = user['commentsOnUser']
row['sharesOnUser'] = user['sharesOnUser']
data.append(row)
def json_2_pandas_facebook_total_activity(self, json_data):
data = []
for day in json_data['dailyResults']:
self.facebook_total_activity_row_2_df_row(day, data)
returnDf = pd.DataFrame(data)
if 'returnDf' in locals():
return returnDf
############################################################
############################################################
def get_instagram_followers(self, start_date, end_date):
url = "https://api.crimsonhexagon.com/api/monitor/instagram/followers"
querystring = {"auth": self.authenticationToken, "id": self.monitorId,
"start": start_date, "end": end_date}
response = requests.request("GET", url, params=querystring)
try:
json_data = json.loads(response.text)
return json_data
except Exception as E:
print(response.text)
print(E)
return
def instagram_followers_row_2_df_row(self, resultsRow, data):
row = {}
row['date'] = resultsRow['date']
row['followerCount'] = resultsRow['followerCount']
data.append(row)
def json_2_pandas_instagram_followers(self, json_data):
data = []
for day in json_data['dailyResults']:
self.instagram_followers_row_2_df_row(day, data)
returnDf = pd.DataFrame(data)
if 'returnDf' in locals():
return returnDf
############################################################
############################################################
def get_instagram_sent_media(self, start_date, end_date):
url = "https://api.crimsonhexagon.com/api/monitor/instagram/sentmedia"
querystring = {"auth": self.authenticationToken, "id": self.monitorId,
"start": start_date, "end": end_date}
response = requests.request("GET", url, params=querystring)
try:
json_data = json.loads(response.text)
return json_data
except Exception as E:
print(response.text)
print(E)
return
def instagram_sent_media_row_2_df_row(self, resultsRow, data):
row = {}
if 'adminPostMetrics' in resultsRow.keys() and len(resultsRow['adminPostMetrics']) > 0:
for metric in resultsRow['adminPostMetrics']:
row['content'] = metric['content']
row['url'] = metric['url']
row['date'] = metric['date']
row['postLikes'] = metric['postLikes']
row['postComments'] = metric['postComments']
row['isLocked'] = metric['isLocked']
data.append(row)
row = {}
def json_2_pandas_instagram_sent_media(self, json_data):
data = []
for day in json_data['dailyResults']:
self.instagram_sent_media_row_2_df_row(day, data)
returnDf = pd.DataFrame(data)
if 'returnDf' in locals():
return returnDf
############################################################
############################################################
def get_instagram_total_activity(self, start_date, end_date):
url = "https://api.crimsonhexagon.com/api/monitor/instagram/totalactivity"
querystring = {"auth": self.authenticationToken, "id": self.monitorId,
"start": start_date, "end": end_date}
response = requests.request("GET", url, params=querystring)
try:
json_data = json.loads(response.text)
return json_data
except Exception as E:
print(response.text)
print(E)
return
def instagram_total_activity_row_2_df_row(self, resultsRow, data):
row = {}
row['date'] = resultsRow['startDate']
if 'admin' in resultsRow.keys() and len(resultsRow['admin']) > 0:
admin = resultsRow['admin']
row['adminPosts'] = admin['adminPosts']
row['likesOnAdmin'] = admin['likesOnAdmin']
row['commentsOnAdmin'] = admin['commentsOnAdmin']
data.append(row)
row = {}
def json_2_pandas_instagram_total_activity(self, json_data):
data = []
for day in json_data['dailyResults']:
self.instagram_total_activity_row_2_df_row(day, data)
returnDf = pd.DataFrame(data)
if 'returnDf' in locals():
return returnDf
| 35,056 | 6,638 | 756 |
2f44edd03c922dc384d4313154a00786eacb401a | 5,756 | py | Python | tests/tests_py/test_interpreter.py | absheik/forthic | 1d481f8a4c0c1cc7250eb5886bed43dfb4f201c0 | [
"BSD-2-Clause"
] | 6 | 2021-08-18T19:14:09.000Z | 2022-02-20T05:43:46.000Z | tests/tests_py/test_interpreter.py | absheik/forthic | 1d481f8a4c0c1cc7250eb5886bed43dfb4f201c0 | [
"BSD-2-Clause"
] | 1 | 2021-11-25T05:08:28.000Z | 2021-12-01T15:41:21.000Z | tests/tests_py/test_interpreter.py | absheik/forthic | 1d481f8a4c0c1cc7250eb5886bed43dfb4f201c0 | [
"BSD-2-Clause"
] | 1 | 2021-11-25T05:03:53.000Z | 2021-11-25T05:03:53.000Z | import unittest
import datetime
from forthic.interpreter import Interpreter
from forthic.module import Module, ModuleWord
from tests.tests_py.sample_date_module import SampleDateModule
if __name__ == '__main__':
unittest.main()
| 32.156425 | 93 | 0.602849 | import unittest
import datetime
from forthic.interpreter import Interpreter
from forthic.module import Module, ModuleWord
from tests.tests_py.sample_date_module import SampleDateModule
class TestInterpreter(unittest.TestCase):
    """Tests for the Forthic interpreter: literals, modules, word lookup.

    Fixes over the previous revision: bare ``return True``/``return False``
    statements (which unittest silently ignores, so those checks could
    never fail) have been replaced with real assertions.
    """

    def test_initial_state(self):
        """A fresh interpreter has an empty stack and an unnamed app module."""
        interp = Interpreter()
        self.assertEqual(0, len(interp.stack))
        self.assertEqual("", interp.module_stack[0].name)

    def test_push_string(self):
        """A string literal is pushed onto the stack."""
        interp = Interpreter()
        interp.run("'Howdy'")
        self.assertEqual("Howdy", interp.stack[0])

    def test_comment(self):
        """Comments (with or without a space after '#') push nothing."""
        interp = Interpreter()
        interp.run("# A comment")
        interp.run("#A comment")
        self.assertEqual(0, len(interp.stack))

    def test_empty_array(self):
        """'[]' pushes an empty list."""
        interp = Interpreter()
        interp.run("[]")
        self.assertEqual([], interp.stack[0])

    def test_start_module(self):
        """'{' pushes modules onto the module stack; '}' pops them."""
        # A bare '{' re-pushes the application module.
        # (The old ``if ... return False`` after the assertion was dead
        # code and has been removed.)
        interp = Interpreter()
        interp.run("{")
        self.assertEqual(2, len(interp.module_stack))
        self.assertEqual(interp.module_stack[0], interp.module_stack[1])

        # Push module-A onto module stack
        interp = Interpreter()
        interp.run("{module-A")
        self.assertEqual(2, len(interp.module_stack))
        self.assertEqual("module-A", interp.module_stack[1].name)
        self.assertIsNotNone(interp.app_module.modules.get("module-A"))

        # Push module-A and then module-B onto module stack
        interp = Interpreter()
        interp.run("{module-A {module-B")
        self.assertEqual(3, len(interp.module_stack))
        self.assertEqual("module-A", interp.module_stack[1].name)
        self.assertEqual("module-B", interp.module_stack[2].name)
        module_A = interp.app_module.modules["module-A"]
        self.assertIsNotNone(module_A.modules.get("module-B"))
        interp.run("}}")
        self.assertEqual(1, len(interp.module_stack))
        self.assertEqual(interp.module_stack[0], interp.app_module)

    def test_definition(self):
        """':' defines words in the module current at definition time."""
        # Can define and find a word in the app module
        interp = Interpreter()
        interp.run(": NOTHING ;")
        word = interp.app_module.find_word("NOTHING")
        self.assertIsNotNone(word)

        # Words defined in other modules aren't automatically available in the app module
        interp = Interpreter()
        interp.run("{module-A : NOTHING ;}")
        word = interp.app_module.find_word("NOTHING")
        self.assertIsNone(word)
        module_A = interp.app_module.modules["module-A"]
        word = module_A.find_word("NOTHING")
        self.assertIsNotNone(word)

    def test_word_scope(self):
        """App-module words remain visible inside nested modules."""
        interp = Interpreter()
        interp.run("""
        : APP-MESSAGE "Hello (from app)";
        {module1
            APP-MESSAGE
        }
        """)
        self.assertEqual("Hello (from app)", interp.stack[0])

    def test_open_module(self):
        """'{module ...}' gives access to a module's words and memos."""
        # Test word
        interp = Interpreter()
        interp.run("""
        {mymodule
            : MESSAGE "Hello (from mymodule)";
        }
        : MESSAGE {mymodule MESSAGE };
        MESSAGE
        """)
        self.assertEqual("Hello (from mymodule)", interp.stack[0])

        # Test memo
        interp = Interpreter()
        interp.run("""
        {mymodule
            'MESSAGE-MEMO' '"Hello (from mymodule memo)"' MEMO
        }
        : MESSAGE {mymodule MESSAGE-MEMO };
        MESSAGE
        """)
        self.assertEqual("Hello (from mymodule memo)", interp.stack[0])

    def test_word(self):
        """Defined words execute and push their results."""
        interp = Interpreter()
        interp.run(": MESSAGE 'Howdy' ;")
        interp.run("MESSAGE")
        self.assertEqual("Howdy", interp.stack[0])

        interp = Interpreter()
        interp.run("{module-A {module-B : MESSAGE 'In module-B' ;}}")
        interp.run("{module-A {module-B MESSAGE}}")
        self.assertEqual("In module-B", interp.stack[0])

    def test_search_global_module(self):
        """Global-module words (e.g. POP) resolve without an explicit import.

        The old version used ``return False``/``return True`` instead of
        assertions, so it could never actually fail; fixed to assert.
        """
        interp = Interpreter()
        interp.run("'Hi'")
        self.assertEqual(1, len(interp.stack))
        interp.run("POP")
        self.assertEqual(0, len(interp.stack))

    def test_use_module(self):
        """USE-MODULES imports a registered module, optionally prefixed."""
        interp = Interpreter()
        interp.register_module(SampleDateModule)
        interp.run("['date'] USE-MODULES")
        interp.run("date.TODAY")
        today = datetime.date.today()
        self.assertEqual(today, interp.stack[0])
        interp.run("{date TODAY}")
        self.assertEqual(today, interp.stack[1])
        # An empty prefix imports the module's words directly
        interp.run("[['date' '']] USE-MODULES")
        interp.run("TODAY")
        self.assertEqual(today, interp.stack[2])

    def test_builtin_import_builtin(self):
        """A Python-defined module can import another module and delegate."""
        class ModuleA(Module):
            def __init__(self, interp):
                super().__init__("module-a", interp)
                self.add_exportable_word(ModuleWord("MY-TODAY", self.word_MY_TODAY))
                self.import_module("date1", SampleDateModule(interp), interp)

            # ( -- today )
            def word_MY_TODAY(self, interp):
                interp.run("date1.TODAY")

        interp = Interpreter()
        interp.register_module(ModuleA)
        interp.run("['module-a'] USE-MODULES")
        interp.run("module-a.MY-TODAY")
        today = datetime.date.today()
        self.assertEqual(today, interp.stack[0])
        self.assertEqual(1, len(interp.app_module.words))
        self.assertEqual("module-a.MY-TODAY", interp.app_module.words[0].name)
        self.assertEqual("date1.TODAY", interp.app_module.modules['module-a'].words[-1].name)
# Allow running this test module directly (python test_interpreter.py)
# in addition to pytest/unittest discovery.
if __name__ == '__main__':
    unittest.main()
| 5,149 | 20 | 347 |
68a3bd6446909308226cbeb22dd1ba7ed851a48d | 4,379 | py | Python | learntools/computer_vision/ex5.py | roannav/learntools | 355a5df6a66562de62254b723da1a9389b9acc49 | [
"Apache-2.0"
] | 359 | 2018-03-23T15:57:52.000Z | 2022-03-25T21:56:28.000Z | learntools/computer_vision/ex5.py | roannav/learntools | 355a5df6a66562de62254b723da1a9389b9acc49 | [
"Apache-2.0"
] | 84 | 2018-06-14T00:06:52.000Z | 2022-02-08T17:25:54.000Z | learntools/computer_vision/ex5.py | roannav/learntools | 355a5df6a66562de62254b723da1a9389b9acc49 | [
"Apache-2.0"
] | 213 | 2018-05-02T19:06:31.000Z | 2022-03-20T15:40:34.000Z | import tensorflow as tf
from learntools.core import *
qvars = bind_exercises(globals(), [
Q1, Q2, Q3,
],
var_format='q_{n}',
)
__all__ = list(qvars)
| 36.491667 | 410 | 0.622288 | import tensorflow as tf
from learntools.core import *
class Q1(CodingProblem):
    """Check the learner's CNN: Block Three (two Conv2D + MaxPool2D) added.

    Fix over the previous revision: the activation check used
    ``activations[0] is 'relu'`` — identity comparison against a string
    literal, which only works by accident of CPython interning — and now
    uses ``==``.
    """
    _hint = """You should add two `Conv2D` layers and then a `MaxPool2D` layer. They will be just the same as the other layers in the model, except for some of the parameter values."""
    _solution = CS("""
from tensorflow import keras
from tensorflow.keras import layers
model = keras.Sequential([
    # Block One
    layers.Conv2D(filters=32, kernel_size=3, activation='relu', padding='same',
                  input_shape=[128, 128, 3]),
    layers.MaxPool2D(),
    # Block Two
    layers.Conv2D(filters=64, kernel_size=3, activation='relu', padding='same'),
    layers.MaxPool2D(),
    # Block Three
    layers.Conv2D(filters=128, kernel_size=3, activation='relu', padding='same'),
    layers.Conv2D(filters=128, kernel_size=3, activation='relu', padding='same'),
    layers.MaxPool2D(),
    # Head
    layers.Flatten(),
    layers.Dense(6, activation='relu'),
    layers.Dropout(0.2),
    layers.Dense(1, activation='sigmoid'),
])
""")
    _var = "model"
    def check(self, model):
        """Assert layer count, types, kernel size, filters, and activations
        for the layers the learner added (indices 4-6 = Block Three)."""
        # Check for correct number of layers
        num_layers = len(model.layers)
        assert num_layers == 11, \
            ("""You've added an incorrect number of layers. For `# Block Three`, try something like:
```python
layers.Conv2D(____),
layers.Conv2D(____),
layers.MaxPool2D(),
```
""")
        # Check for correct layer types
        # NOTE(review): the message says "second block" but indices 4-6 are
        # Block Three of the solution — confirm the intended wording.
        layer_classes = [layer.__class__.__name__ for layer in model.layers]
        assert all([
            layer_classes[4] == 'Conv2D',
            layer_classes[5] == 'Conv2D',
            layer_classes[6] == 'MaxPooling2D',
        ]), \
            ("Your model doesn't have the right kind of layers. " +
             "For the second block, you should have two convolutional layers " +
             "and then a maximum pooling layer.")
        # Check kernel size
        kernel_sizes = [model.layers[4].kernel_size,
                        model.layers[5].kernel_size]
        assert (kernel_sizes[0] == (3, 3) and kernel_sizes[1] == (3, 3)), \
            (("Your convolutional layers don't have the right kernel size. " +
              "You should have `kernel_size=3` or `kernel_size=(3, 3) for both." +
              "Your model has {} for the first and {} for the second.")
             .format(kernel_sizes[0], kernel_sizes[1]))
        # Check filters
        filters = [model.layers[4].filters,
                   model.layers[5].filters]
        assert (filters[0] == 128 and filters[1] == 128), \
            (("Your convolutional layers don't have the right number of filters." +
              "You should have 128 for both. Your model has {} for the first " +
              "and {} for the second.")
             .format(filters[0], filters[1]))
        # Check activations (== instead of the buggy ``is`` comparison)
        activations = [model.layers[4].activation.__name__,
                       model.layers[5].activation.__name__]
        assert (activations[0] == 'relu' and activations[1] == 'relu'), \
            ("Your convolutional layers should both have `'relu'` activation.")
class Q2(CodingProblem):
    """Check that the model was compiled for binary classification."""
    _hint = "This is a *binary* classification problem."
    _solution = CS("""
model.compile(
    optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001),
    loss='binary_crossentropy',
    metrics=['binary_accuracy'],
)
""")
    _var = "model"
    def check(self, model):
        # NOTE(review): these read private Keras attributes
        # (compiled_loss._losses / compiled_metrics._metrics) — they may
        # change across TF versions; confirm against the pinned release.
        configured_loss = model.compiled_loss._losses
        assert configured_loss == 'binary_crossentropy', (
            f"The loss should be `'binary_crossentropy'`. You gave {configured_loss}"
        )
        configured_metrics = model.compiled_metrics._metrics
        assert configured_metrics == ['binary_accuracy'], (
            f"The metrics should be `['binary_accuracy']`. You gave {configured_metrics}"
        )
class Q3(ThoughtExperiment):
    """Discussion question: why adding a block plus Dropout helped.

    ThoughtExperiment questions have no check(); only the solution text
    below is shown to the learner.
    """
    _solution = """
The learning curves for the model from the tutorial diverged fairly rapidly. This would indicate that it was prone to overfitting and in need of some regularization. The additional layer in our new model would make it even more prone to overfitting. However, adding some regularization with the `Dropout` layer helped prevent this. These changes improved the validation accuracy of the model by several points.
"""
# Register the exercises with the learntools checker as q_1..q_3 and
# export only those bound names from this module.
qvars = bind_exercises(globals(), [
    Q1, Q2, Q3,
    ],
    var_format='q_{n}',
)
__all__ = list(qvars)
| 2,413 | 1,718 | 77 |
91328e065e28cec093506bd0b92c8120a36efe5e | 76 | py | Python | code/ans2/ans2.py | hedgerow512/coding-question-answers | 72ffa6aefac660f226470029b02db2ea87999b4f | [
"CC0-1.0"
] | null | null | null | code/ans2/ans2.py | hedgerow512/coding-question-answers | 72ffa6aefac660f226470029b02db2ea87999b4f | [
"CC0-1.0"
] | null | null | null | code/ans2/ans2.py | hedgerow512/coding-question-answers | 72ffa6aefac660f226470029b02db2ea87999b4f | [
"CC0-1.0"
] | null | null | null | i = 0
for i in range(0, 100):
if i % 2 == 0:
print(i)
i+= i
| 12.666667 | 23 | 0.394737 | i = 0
for i in range(0, 100):
if i % 2 == 0:
print(i)
i+= i
| 0 | 0 | 0 |
793f3e5587c2648c5cce05606c8889bfe6e2c709 | 1,835 | py | Python | argumentosSimple.py | sanxofon/basicnlp3 | 289415ec07fae69af04a8354bb9a9801cad564b8 | [
"MIT"
] | 1 | 2017-12-31T18:09:06.000Z | 2017-12-31T18:09:06.000Z | argumentosSimple.py | sanxofon/basicnlp3 | 289415ec07fae69af04a8354bb9a9801cad564b8 | [
"MIT"
] | null | null | null | argumentosSimple.py | sanxofon/basicnlp3 | 289415ec07fae69af04a8354bb9a9801cad564b8 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Hay diversas maneras de recibir información del usuario en la terminal
Para pedir información al usuario DURANTE la ejecución de un script
podemos usar la función "raw_input" (python2) o "input" (python3) y guardar la respuesta en una variable
como se puede ver en el script: "helloWorldUTF8.py"
A veces queremos recibir información del usuario desde que EJECUTAMOS el
script, es decir, desde un principio.
Ejemplos de ejecución:
>> python argumentosSimple.py Santiago Chávez
>> python argumentosSimple.py "Santiago Chávez"
>> python argumentosSimple.py "Santiago Chávez" utf8 > test.txt
"""
# Importamos una librería para poder usar sus funcionalidades
# La librería "sys" no permite acceder a información del sistema
import sys
# La librería "sys" nos permite acceder a los "argumentos" que fueron invocados al ejecutar este script
nombreScript = sys.argv[0] # El índice "0" siempre contiene el nombre del script actual: "argumentosSimple.py"
argumentos = [] # Definimos la variable "argumentos" como una "lista vacía"
# Recorremos los argumentos del 1 al total de argumentos
for i in range(1,len(sys.argv)):
argumentos.append(sys.argv[i]) # El índice "i" trae el argumento actual (si es que existe)
# Buscamos la cadena "utf8" en los argumentos recibidos
# Si existe creamos una variable "utf8" para acordarnos
utf8 = False
if "utf8" in argumentos:
utf8 = True
argumentos.remove("utf8") # Elimina el argumento "utf8" de la lista
# Por último imprimimos los argumentos invocados por el usuario
print(u"Argumentos recibidos:")
for i in range(len(argumentos)):
if utf8:
# Si se recibió "utf8" en los argumentos codificamos la salida
print("\t",i+1,".",argumentos[i].encode('utf-8'))
else:
# De lo contrario imprimimos tal cual
print("\t",i+1,".",argumentos[i]) | 42.674419 | 110 | 0.751499 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
There are several ways to receive information from the user in the terminal.
To ask the user for input DURING execution of a script we can use
"raw_input" (python2) or "input" (python3) and store the answer in a
variable, as shown in the script "helloWorldUTF8.py".
Sometimes we want to receive information from the user at the moment we
RUN the script, i.e. from the very start.
Execution examples:
    >> python argumentosSimple.py Santiago Chávez
    >> python argumentosSimple.py "Santiago Chávez"
    >> python argumentosSimple.py "Santiago Chávez" utf8 > test.txt
"""
# "sys" gives us access to system information, including the command-line
# arguments the script was invoked with.
import sys

# sys.argv[0] is always the name of the current script; the remaining
# entries are the user-supplied arguments.
nombreScript = sys.argv[0]
argumentos = list(sys.argv[1:])

# If the literal argument "utf8" was passed, remember that and remove it
# from the argument list.
utf8 = "utf8" in argumentos
if utf8:
    argumentos.remove("utf8")

# Finally, print the arguments the user passed in.
print(u"Argumentos recibidos:")
for numero, argumento in enumerate(argumentos, start=1):
    if utf8:
        # "utf8" was requested: encode the output
        print("\t", numero, ".", argumento.encode('utf-8'))
    else:
        # Otherwise print the argument as-is
        print("\t", numero, ".", argumento)