| max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
| analysis_scripts/plot_gender.py | dezeraecox/Behind-the-scenes---Investigator-Grants-2019 | 0 | 12778451 |
from wordcloud import WordCloud, STOPWORDS
import os
import re
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.patches import Patch
from loguru import logger
from GEN_Utils import FileHandling
from GEN_Utils.HDF5_Utils import hdf_to_dict
logger.info('Import OK')
input_path = 'analysis_results/summary_stats/summary_stats.xlsx'
output_folder = 'images/'
if not os.path.exists(output_folder):
os.mkdir(output_folder)
# Print all bare top-level expressions during execution (interactive/notebook behaviour)
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = 'all'
# Set plotting backgrounds to white
# matplotlib.rcParams.update(_VSCode_defaultMatplotlib_Params)  # only defined inside the VS Code interactive window
matplotlib.rcParams.update({'figure.facecolor': (1,1,1,1)})
# Retrieve cleaned data from HDF5
raw_data = pd.read_excel(input_path, sheet_name=None)
raw_data.keys()
gender_summary = raw_data['per_gender']
gender_summary = gender_summary.drop(
[col for col in gender_summary.columns.tolist() if 'Unnamed' in col], axis=1)
# Leadership levels were kept separate in this table, so they need to be mapped to level 3 for 2019
# Generate data for plotting
for_plotting = gender_summary.copy().reset_index(drop=True)
males = for_plotting[['Year', 'type_cat'] +
[col for col in for_plotting if 'm_' in col]]
males.columns = ['Year', 'type_cat',
'Applications', 'Funded', 'Rate', 'Amount']
males['gender'] = 'M'
females = for_plotting[['Year', 'type_cat'] +
[col for col in for_plotting if 'f_' in col]]
females.columns = ['Year', 'type_cat',
'Applications', 'Funded', 'Rate', 'Amount']
females['gender'] = 'F'
for_plotting = pd.concat([males, females]).reset_index(drop=True)
for_plotting = for_plotting.groupby(['Year', 'gender', 'type_cat']).sum().drop('Rate', axis=1).reset_index()
numeric_cols = ['Year', 'type_cat', 'Applications', 'Funded', 'Amount']
for_plotting[numeric_cols] = for_plotting[numeric_cols].astype(float)
year_dict = {2015: 0, 2016: 1, 2017: 2, 2018: 3, 2019: 4}
for_plotting['Year_num'] = for_plotting['Year'].map(year_dict)
for_plotting['Amount'] = for_plotting['Amount'] / 1000000
for_plotting['proportion_Funded'] = for_plotting['Funded'] / for_plotting['Applications'] *100
total_funded = for_plotting.groupby(['Year', 'type_cat']).sum()['Funded'].to_dict()
total_amounts = for_plotting.groupby(['Year', 'type_cat']).sum()[
'Amount'].to_dict()
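# Pair (Year, type_cat) for each row so the per-group totals computed above can be mapped back onto every row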
for_plotting['mapper'] = tuple(zip(for_plotting['Year'], for_plotting['type_cat']))
for_plotting['total_amount'] = for_plotting['mapper'].map(total_amounts)
for_plotting['total_funded'] = for_plotting['mapper'].map(total_funded)
for_plotting['proportion_amount'] = for_plotting['Amount'] / for_plotting['total_amount'] * 100
for_plotting['proportion_total_funded'] = for_plotting['Funded'] / \
for_plotting['total_funded'] * 100
# Generate plot 1
# sns.palplot(sns.color_palette("Purples"))
# fem_colour = sns.color_palette("Purples")[4]
fem_colour = '#511751'
male_colour = sns.color_palette("Oranges")[4]
col_pal = [fem_colour, male_colour]
labels = ['Female', 'Male']
df = for_plotting.groupby(['Year_num', 'gender']).sum().reset_index()
fig, ax = plt.subplots(figsize=(12, 5))
sns.barplot(x='Year_num', y='Amount', data=df, hue='gender', ax=ax, palette=col_pal)
legend_elements = [Patch(facecolor=col_pal[x], label=labels[x]) for x in range(0, len(labels))]
ax.legend(handles=legend_elements, loc='upper left', title='Funding Amount', ncol=3)
ax2 = ax.twinx()
sns.lineplot(x='Year_num', y='Funded', data=df,
hue='gender', marker='o', markersize=10, palette=col_pal, ax=ax2)
ax2.set_ylim(0, 200)
# Fix all the adjusted elements
plt.legend(labels, loc='upper left', title='Number funded', ncol=3, bbox_to_anchor=(0.67, 1.0))
ax.set_xlabel('Year of funding')
ax.set_ylabel('Total funding amount ($M AUD)')
ax2.set_ylabel('Number of successful applications', rotation=-90, labelpad=15)
plt.xticks(np.arange(0, 5, 1), labels=list(year_dict.keys()))
plt.title('Total funding awarded according to gender.', loc='left',
fontdict={'fontsize': 15, 'fontweight': 'bold'}, pad=20)
plt.tight_layout()
plt.savefig(f'{output_folder}gender_total.png', dpi=300)
plt.show()
# Generate plot 2
for level, df in for_plotting.groupby('type_cat'):
plotting = df[df['gender'] == 'F']
fig, ax = plt.subplots(figsize=(10, 4))
m = sns.barplot(orient='h', y=list(plotting['Year_num']), x=[100 for x in plotting['Year_num']], color=male_colour)
f = sns.barplot(x=plotting['proportion_total_funded'], y=plotting['Year_num'], color=fem_colour, orient='h')
# Fix all the adjusted elements
ax.set_ylabel('Year of funding')
ax.set_xlabel('Proportion of funded applications (%)')
# ax2.set_ylabel('Success rate (%)', rotation=-90, labelpad=15)  # leftover from plot 1; this figure has no twin axis
plt.yticks(np.arange(0, 5, 1), labels=list(year_dict.keys()))
plt.title(f'Proportion of Fellowships awarded by gender at level {int(level)}.', loc='left',
fontdict={'fontsize': 15, 'fontweight': 'bold'}, pad=20)
ax.axvline(50, c='#636363', linestyle='--', linewidth=3)
plt.tight_layout()
plt.savefig(f'{output_folder}gender_proportion_level{level}.png', dpi=300)
plt.show()
| 2.53125 | 3 |
| natlas-server/app/auth/email.py | pryorda/natlas | 0 | 12778452 |
from flask import render_template, current_app, flash
from app.email import send_email
from app.models import User
def validate_email(addr):
validemail = User.validate_email(addr)
if not validemail:
flash(
f"{addr} does not appear to be a valid, deliverable email address.",
"danger",
)
return None
return validemail
def send_auth_email(email, token, token_type):
token_types = {
"reset": {
"subject": "[Natlas] Reset Your Password",
"template": "email/reset_password.txt",
},
"invite": {
"subject": "[Natlas] You've been invited to Natlas!",
"template": "email/user_invite.txt",
},
}
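# Pick the subject and template for the requested token type and send the rendered message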
send_email(
token_types[token_type]["subject"],
sender=current_app.config["MAIL_FROM"],
recipients=[email],
text_body=render_template(token_types[token_type]["template"], token=token),
)
| 2.078125 | 2 |
| backend2/src/api/api_v1/endpoints/user_subscription.py | alexxxnf/VirusMutationsAI | 1 | 12778453 |
from typing import Any
from fastapi import APIRouter, Depends, status, Query
from sqlalchemy.orm import Session
from sqlalchemy import exc, asc
from src.api import deps
from src.db import models
from src.schemas import PaginatedList
router = APIRouter()
@router.put("/subscriptions/{mutation}", response_model=str, status_code=status.HTTP_201_CREATED)
def subscribe_user_me(
*,
user: models.User = Depends(deps.get_current_active_user),
db: Session = Depends(deps.get_db),
mutation: str
) -> Any:
"""
Add a subscription to the subscriptions
"""
subscr = models.Subscription(user_id=user.id, mutation=mutation)
db.add(subscr)
try:
db.commit()
except exc.IntegrityError:
# The user is already subscribed to this mutation (expected case)
db.rollback()
return mutation
@router.get("/subscriptions", response_model=PaginatedList[str])
def read_subscriptions_user_me(
skip: int = Query(0, ge=0, description="Items offset"),
limit: int = Query(100, gt=0, le=100, description="Page size limit"),
user: models.User = Depends(deps.get_current_active_user),
db: Session = Depends(deps.get_db),
) -> Any:
"""
Get subscription list
"""
query = db.query(models.Subscription.mutation).filter(models.Subscription.user_id == user.id)
total = query.count()
items = query.order_by(asc(models.Subscription.mutation)).limit(limit).offset(skip).all()
res = PaginatedList(items=[x[0] for x in items], total=total)
return res
@router.delete("/subscriptions/{mutation}", response_model=str)
def unsubscribe_user_me(
*,
user: models.User = Depends(deps.get_current_active_user),
db: Session = Depends(deps.get_db),
mutation: str
) -> Any:
"""
Delete a subscription from the subscriptions
"""
db.query(models.Subscription.mutation).filter(models.Subscription.user_id == user.id,
models.Subscription.mutation == mutation).delete()
db.commit()
return mutation
| 2.1875 | 2 |
| src/jk_simplexml/HToolkit_Write_HTML.py | jkpubsrc/python-module-jk-simplexml | 0 | 12778454 |
from jk_hwriter import HWriter
from jk_rawhtml.htmlgeneral import *
from jk_rawhtml._HTMLElementProto import _HTMLElementProto, HTMLElement
from jk_rawhtml._HTMLCommentProto import _HTMLCommentProto, HTMLComment
from jk_rawhtml._HTMLRawTextProto import _HTMLRawTextProto, HTMLRawText
from jk_rawhtml._HTMLRawCSSProto import _HTMLRawCSSProto, HTMLRawCSS
from jk_rawhtml.HTML5RootElement import HTML5RootElement
from jk_rawhtml.HTML5HeadElement import HTML5HeadElement
from jk_rawhtml.HTML5Scope import HTML5Scope
from .HElement_HAbstractElementList import *
class HTMLScopeDefault(object):
spanNameBegin = _HTMLElementProto("span", tagType=HTML_TAG_TYPE_INLINE_ALL, extraAttributes={"class": "eNameB"})
spanElementName = _HTMLElementProto("span", tagType=HTML_TAG_TYPE_INLINE_ALL, extraAttributes={"class": "eElementName"})
spanNameEnd = _HTMLElementProto("span", tagType=HTML_TAG_TYPE_INLINE_ALL, extraAttributes={"class": "eNameE"})
spanAttributes = _HTMLElementProto("span", tagType=HTML_TAG_TYPE_INLINE_ALL, extraAttributes={"class": "eAttributes"})
spanAttrName = _HTMLElementProto("span", tagType=HTML_TAG_TYPE_INLINE_ALL, extraAttributes={"class": "eAttrName"})
spanAttrValue = _HTMLElementProto("span", tagType=HTML_TAG_TYPE_INLINE_ALL, extraAttributes={"class": "eAttrValue"})
divText = _HTMLElementProto("span", tagType=HTML_TAG_TYPE_INLINE_ALL, extraAttributes={"class": "eText"})
divTextInline = _HTMLElementProto("span", tagType=HTML_TAG_TYPE_INLINE_ALL, extraAttributes={"class": "eTextInline"})
divMain = _HTMLElementProto("div", tagType=HTML_TAG_TYPE_STRUCTURE, extraAttributes={"class": "eElement"})
divMainInline = _HTMLElementProto("span", tagType=HTML_TAG_TYPE_INLINE_ALL, extraAttributes={"class": "eElementInline"})
divElement = _HTMLElementProto("div", tagType=HTML_TAG_TYPE_STRUCTURE, extraAttributes={"class": "eElementWrapper"})
divElementInline = _HTMLElementProto("span", tagType=HTML_TAG_TYPE_INLINE_ALL, extraAttributes={"class": "eElementWrapper"})
divChildren = _HTMLElementProto("div", tagType=HTML_TAG_TYPE_STRUCTURE, extraAttributes={"class": "eElementChildren"})
divChildrenInline = _HTMLElementProto("span", tagType=HTML_TAG_TYPE_STRUCTURE, extraAttributes={"class": "eElementChildrenInline"})
raw_html = _HTMLRawTextProto()
def __enter__(self):
return self
#
def __exit__(self, exc_type, exc_val, exc_tb):
pass
#
#
class HToolkit_Write_HTML(object):
@staticmethod
def writeHTMLDoc(root:HElement, w:HWriter):
assert isinstance(root, HElement)
assert isinstance(w, HWriter)
H = HTML5Scope()
scope = HTMLScopeDefault()
htmlRoot = H.html()[
H.head()[
H.raw_style_css("""
body {
font-family: 'Courier New', Courier, monospace;
font-size: 12px;
background-color: #f0f0f0;
color: #404040;
font-weight: normal;
}
.eElement {
margin-left: 20px;
}
.eElementInline {
}
.eElementChildren {
}
.eElementChildrenInline {
}
.eTextInline {
color: #006000;
background-color: #f0f8f0;
}
.eText {
margin-left: 20px;
color: #006000;
background-color: #f0f8f0;
display: block;
}
.eNameB {
color: #000060;
}
.eElementName {
background-color: #e8e8f8;
font-weight: bold;
}
.eAttrName {
font-weight: bold;
color: #008080;
}
.eAttrValue {
font-weight: bold;
color: #808000;
}
""")
],
H.body()[
HToolkit_Write_HTML.__convertElementToHTML(scope, root, False)
]
]
htmlRoot._serialize(w)
#
@staticmethod
def writeHTML(root:HElement, w:HWriter):
assert isinstance(root, HElement)
assert isinstance(w, HWriter)
scope = HTMLScopeDefault()
htmlElement = HToolkit_Write_HTML.__convertElementToHTML(scope, root, False)
htmlElement._serialize(w)
#
@staticmethod
def __convertElementToHTML(scope:HTMLScopeDefault, e:HElement, bInline:bool) -> HTMLElement:
divMain = scope.divMainInline if bInline else scope.divMain
divChildren = scope.divChildrenInline if bInline else scope.divChildren
# build attribute list
eAttrList = scope.spanAttributes()
for a in e.attributes:
if a.value:
eAttrList.children.extend([
scope.raw_html(" "),
scope.spanAttrName()[
a.name,
],
"=\"",
scope.spanAttrValue()[
a.value,
],
"\""
])
else:
eAttrList.children.extend([
scope.raw_html(" "),
scope.spanAttrName()[
a.name,
],
])
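# Heuristic: children of common inline tags are rendered on one line; everything else gets block layout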
bChildsInline = e.name in [ "h1", "h2", "h3", "a", "b", "i", "img", "span", "label", "strong" ]
eChildrenList = []
for c in e.children:
if isinstance(c, HText):
if bChildsInline:
eChildrenList.append(scope.divTextInline()[
c.text
])
else:
eChildrenList.append(scope.divText()[
c.text
])
else:
eChildrenList.append(HToolkit_Write_HTML.__convertElementToHTML(scope, c, bInline or bChildsInline))
if eChildrenList:
if bChildsInline:
return divMain()[
scope.divElementInline()[
scope.spanNameBegin()[
"<",
scope.spanElementName()[
e.name
],
],
eAttrList,
scope.spanNameEnd()[
">",
],
eChildrenList,
scope.spanNameBegin()[
"</",
scope.spanElementName()[
e.name
],
],
scope.spanNameEnd()[
">",
]
]
]
else:
return divMain()[
scope.divElement()[
scope.spanNameBegin()[
"<",
scope.spanElementName()[
e.name
],
],
eAttrList,
scope.spanNameEnd()[
">",
]
],
divChildren()[
eChildrenList
],
scope.divElement()[
scope.spanNameBegin()[
"</",
scope.spanElementName()[
e.name
],
],
scope.spanNameEnd()[
">",
]
]
]
else:
return divMain()[
scope.divElement()[
scope.spanNameBegin()[
"<",
scope.spanElementName()[
e.name
],
],
eAttrList,
scope.spanNameEnd()[
" />",
]
]
]
#
#
| 2.578125 | 3 |
| tests/seahub/views/sysadmin/test_sysadmin.py | samuelduann/seahub | 420 | 12778455 |
import os
import openpyxl
from io import BytesIO
from mock import patch
from django.urls import reverse
from seahub.base.accounts import User
from seahub.options.models import (UserOptions, KEY_FORCE_PASSWD_CHANGE)
from seahub.test_utils import BaseTestCase
from seahub.utils.ms_excel import write_xls as real_write_xls
import pytest
pytestmark = pytest.mark.django_db
from seaserv import ccnet_threaded_rpc
class BatchUserMakeAdminTest(BaseTestCase):
def setUp(self):
self.login_as(self.admin)
def test_can_make_admins(self):
resp = self.client.post(
reverse('batch_user_make_admin'), {
'set_admin_emails': self.user.username
}, HTTP_X_REQUESTED_WITH='XMLHttpRequest'
)
old_passwd = <PASSWORD>
self.assertContains(resp, '"success": true')
u = User.objects.get(email=self.user.username)
assert u.is_staff is True
assert u.enc_password == <PASSWORD>
# class UserMakeAdminTest(TestCase, Fixtures):
# def test_can_make_admin(self):
# self.client.post(
# reverse('auth_login'), {'username': self.admin.username,
# 'password': '<PASSWORD>'}
# )
# resp = self.client.get(
# reverse('user_make_admin', args=[self.user.id])
# )
# old_passwd = <PASSWORD>
# self.assertEqual(302, resp.status_code)
# u = User.objects.get(email=self.user.username)
# assert u.is_staff is True
# assert u.enc_password == <PASSWORD>
class UserRemoveTest(BaseTestCase):
def setUp(self):
self.login_as(self.admin)
def test_can_remove(self):
# create one user
username = self.user.username
resp = self.client.post(
reverse('user_remove', args=[username])
)
self.assertEqual(302, resp.status_code)
assert len(ccnet_threaded_rpc.search_emailusers('DB', username, -1, -1)) == 0
class SudoModeTest(BaseTestCase):
def test_normal_user_raise_404(self):
self.login_as(self.user)
resp = self.client.get(reverse('sys_sudo_mode'))
self.assertEqual(404, resp.status_code)
def test_admin_get(self):
self.login_as(self.admin)
resp = self.client.get(reverse('sys_sudo_mode'))
self.assertEqual(200, resp.status_code)
self.assertTemplateUsed('sysadmin/sudo_mode.html')
def test_admin_post(self):
self.login_as(self.admin)
resp = self.client.post(reverse('sys_sudo_mode'), {
'username': self.admin.username,
'password': <PASSWORD>,
})
self.assertEqual(302, resp.status_code)
self.assertRedirects(resp, reverse('sys_info'))
class SysGroupAdminExportExcelTest(BaseTestCase):
def setUp(self):
self.login_as(self.admin)
def test_can_export_excel(self):
resp = self.client.get(reverse('sys_group_admin_export_excel'))
self.assertEqual(200, resp.status_code)
assert 'application/ms-excel' in resp.headers['content-type']
class SysUserAdminExportExcelTest(BaseTestCase):
def setUp(self):
self.login_as(self.admin)
def test_can_export_excel(self):
resp = self.client.get(reverse('sys_useradmin_export_excel'))
self.assertEqual(200, resp.status_code)
assert 'application/ms-excel' in resp.headers['content-type']
def write_xls(self, sheet_name, head, data_list):
assert 'Role' in head
return real_write_xls(sheet_name, head, data_list)
@patch('seahub.views.sysadmin.write_xls')
@patch('seahub.views.sysadmin.is_pro_version')
def test_can_export_excel_in_pro(self, mock_is_pro_version, mock_write_xls):
mock_is_pro_version.return_value = True
mock_write_xls.side_effect = self.write_xls
# mock_write_xls.assert_called_once()
resp = self.client.get(reverse('sys_useradmin_export_excel'))
self.assertEqual(200, resp.status_code)
assert 'application/ms-excel' in resp.headers['content-type']
class BatchAddUserHelpTest(BaseTestCase):
def setUp(self):
self.login_as(self.admin)
def test_can_get_excel(self):
resp = self.client.get(reverse('batch_add_user_example')+"?type=xlsx")
assert resp.status_code == 200
def test_validate_excel(self):
resp = self.client.get(reverse('batch_add_user_example')+"?type=xlsx")
wb = openpyxl.load_workbook(filename=BytesIO(resp.content), read_only=True)
assert wb.sheetnames[0] == 'sample'
rows = wb.worksheets[0].rows
i = 0
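# skip the first row (presumably the header) before checking the sample data rows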
next(rows)
for r in rows:
assert r[0].value == 'test' + str(i) + '@example.com'
assert r[1].value == '123456'
assert r[2].value == 'test' + str(i)
assert r[3].value == 'default'
assert r[4].value == '1000'
i += 1
| 2.078125 | 2 |
| main/contact.py | gae-init/phonebook | 10 | 12778456 |
# coding: utf-8
from flask.ext import wtf
import flask
import wtforms
import auth
import model
import util
from main import app
class ContactUpdateForm(wtf.Form):
name = wtforms.StringField('Name', [wtforms.validators.required()])
email = wtforms.StringField('Email', [wtforms.validators.optional(), wtforms.validators.email()])
phone = wtforms.StringField('Phone', [wtforms.validators.optional()])
address = wtforms.TextAreaField('Address', [wtforms.validators.optional()])
@app.route('/contact/create/', methods=['GET', 'POST'])
@auth.login_required
def contact_create():
form = ContactUpdateForm()
if form.validate_on_submit():
contact_db = model.Contact(
user_key=auth.current_user_key(),
name=form.name.data,
email=form.email.data,
phone=form.phone.data,
address=form.address.data,
)
contact_db.put()
flask.flash('New contact was successfully created!', category='success')
return flask.redirect(flask.url_for('contact_list', order='-created'))
return flask.render_template(
'contact_update.html',
html_class='contact-create',
title='Create Contact',
form=form,
)
@app.route('/contact/')
@auth.login_required
def contact_list():
contact_dbs, contact_cursor = model.Contact.get_dbs(
user_key=auth.current_user_key(),
)
return flask.render_template(
'contact_list.html',
html_class='contact-list',
title='Contact List',
contact_dbs=contact_dbs,
next_url=util.generate_next_url(contact_cursor),
)
@app.route('/contact/<int:contact_id>/')
@auth.login_required
def contact_view(contact_id):
contact_db = model.Contact.get_by_id(contact_id)
if not contact_db or contact_db.user_key != auth.current_user_key():
flask.abort(404)
return flask.render_template(
'contact_view.html',
html_class='contact-view',
title=contact_db.name,
contact_db=contact_db,
)
@app.route('/contact/<int:contact_id>/update/', methods=['GET', 'POST'])
@auth.login_required
def contact_update(contact_id):
contact_db = model.Contact.get_by_id(contact_id)
if not contact_db or contact_db.user_key != auth.current_user_key():
flask.abort(404)
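# Pre-populate the form with the existing contact's values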
form = ContactUpdateForm(obj=contact_db)
if form.validate_on_submit():
form.populate_obj(contact_db)
contact_db.put()
return flask.redirect(flask.url_for('contact_list', order='-modified'))
return flask.render_template(
'contact_update.html',
html_class='contact-update',
title=contact_db.name,
form=form,
contact_db=contact_db,
)
| 2.71875 | 3 |
| New.py | UJJWAL97/MAJORPYTHON | 0 | 12778457 |
import sys
try:
from Tkinter import *
except ImportError:
from tkinter import *
try:
import ttk
py3 = False
except ImportError:
import tkinter.ttk as ttk
py3 = True
import tkintergui
from featureextractor import featureextractor
from scipy.special import expit
import numpy as np
try:
import tkFont
import tkMessageBox
except ImportError:
import tkinter.font as tkFont
from tkinter import messagebox as tkMessageBox
def vp_start_gui():
'''Starting point when module is the main routine.'''
global val, w, root
root = Tk()
top = PHISHING_DETECTION_SYSTEM(root)
tkintergui.init(root, top)
root.mainloop()
w = None
def create_PHISHING_DETECTION_SYSTEM(root, *args, **kwargs):
'''Starting point when module is imported by another program.'''
global w, w_win, rt
rt = root
w = Toplevel(root)
top = PHISHING_DETECTION_SYSTEM(w)
tkintergui.init(w, top, *args, **kwargs)
return (w, top)
def destroy_PHISHING_DETECTION_SYSTEM():
global w
w.destroy()
w = None
def predict(str):
temp = featureextractor(str)
feature = []
feature.append(1)
for i in range(0, len(temp)):
feature.append(temp[i])
feature = np.array(feature)
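# Hard-coded model weights (bias term first); dotted with the feature vector and passed through a sigmoid below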
theta = ([
-0.905877,
1.474401,
0.508719,
- 0.633691,
0.470666,
- 0.312943,
2.966641,
2.875487,
8.158240,
- 2.047143,
0.304662,
0.488862,
- 0.325627,
2.664044,
6.269800,
2.363358,
2.253615,
0.461469,
- 0.370708,
- 0.356877,
0.325309,
- 0.095074,
1.224013,
1.076416,
3.245338,
0.724740])
theta = np.array(theta)
temp = np.dot(feature, theta)
temp = expit(temp)
if temp >= 0.5:
return 1
else:
return 0
class PHISHING_DETECTION_SYSTEM:
def checkurl(self):
url = self.urlInput.get()
tkMessageBox.showinfo("Checking URL", url)
if predict(url):
self.Message2.configure(text='''Phishing''')
else:
self.Message2.configure(text='''Url is safe''')
def __init__(self, top=None):
'''This class configures and populates the toplevel window.
top is the toplevel containing window.'''
_bgcolor = '#d9d9d9' # X11 color: 'gray85'
_fgcolor = '#000000' # X11 color: 'black'
_compcolor = '#d9d9d9' # X11 color: 'gray85'
_ana1color = '#d9d9d9' # X11 color: 'gray85'
_ana2color = '#d9d9d9' # X11 color: 'gray85'
top.geometry("1032x682+581+127")
top.title("PHISHING DETECTION SYSTEM")
top.configure(borderwidth="3")
top.configure(background="#204424")
top.configure(highlightbackground="#d9d9d9")
top.configure(highlightcolor="black")
self.inputFrame = Frame(top)
self.inputFrame.place(relx=0.01, rely=0.04, relheight=0.29
, relwidth=0.97)
self.inputFrame.configure(relief=GROOVE)
self.inputFrame.configure(borderwidth="2")
self.inputFrame.configure(relief=GROOVE)
self.inputFrame.configure(background="#d9d9d9")
self.inputFrame.configure(highlightbackground="#d9d9d9")
self.inputFrame.configure(highlightcolor="black")
self.inputFrame.configure(width=1005)
self.urlInput = Entry(self.inputFrame)
self.urlInput.place(relx=0.02, rely=0.46, height=46, relwidth=0.96)
self.urlInput.configure(background="white")
self.urlInput.configure(disabledforeground="#a3a3a3")
self.urlInput.configure(font="TkFixedFont")
self.urlInput.configure(foreground="#000000")
self.urlInput.configure(highlightbackground="#d9d9d9")
self.urlInput.configure(highlightcolor="black")
self.urlInput.configure(insertbackground="black")
self.urlInput.configure(selectbackground="#c4c4c4")
self.urlInput.configure(selectforeground="black")
self.inputMessage = Message(self.inputFrame)
self.inputMessage.place(relx=0.02, rely=0.1, relheight=0.32
, relwidth=0.13)
self.inputMessage.configure(background="#d9d9d9")
self.inputMessage.configure(foreground="#000000")
self.inputMessage.configure(highlightbackground="#d9d9d9")
self.inputMessage.configure(highlightcolor="black")
self.inputMessage.configure(relief=RIDGE)
self.inputMessage.configure(text='''INPUT URL :''')
self.inputMessage.configure(width=127)
self.checkButton = Button(top)
self.checkButton.place(relx=0.22, rely=0.35, height=92, width=558)
self.checkButton.configure(activebackground="#d9d9d9")
self.checkButton.configure(activeforeground="#000000")
self.checkButton.configure(background="#888888")
self.checkButton.configure(disabledforeground="#a3a3a3")
self.checkButton.configure(foreground="#000000")
self.checkButton.configure(highlightbackground="#d9d9d9")
self.checkButton.configure(highlightcolor="black")
self.checkButton.configure(pady="0")
self.checkButton.configure(text='''CHECK URL''')
self.checkButton.configure(command=self.checkurl)
self.messageFrame = Frame(top)
self.messageFrame.place(relx=0.02, rely=0.51, relheight=0.45
, relwidth=0.96)
self.messageFrame.configure(relief=GROOVE)
self.messageFrame.configure(borderwidth="2")
self.messageFrame.configure(relief=GROOVE)
self.messageFrame.configure(background="#d9d9d9")
self.messageFrame.configure(highlightbackground="#d9d9d9")
self.messageFrame.configure(highlightcolor="black")
self.messageFrame.configure(width=995)
self.Message2 = Message(self.messageFrame)
self.Message2.place(relx=0.04, rely=0.13, relheight=0.09, relwidth=0.49)
self.Message2.configure(background="#84d9d9")
self.Message2.configure(foreground="#000000")
self.Message2.configure(highlightbackground="#d9d9d9")
self.Message2.configure(highlightcolor="black")
self.Message2.configure(text='''please Enter Url''')
self.Message2.configure(width=891)
if __name__ == '__main__':
vp_start_gui()
| 2.328125 | 2 |
| src/waldur_core/structure/migrations/0007_customer_blocked.py | geant-multicloud/MCMS-mastermind | 26 | 12778458 |
# Generated by Django 1.11.18 on 2019-03-04 10:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('structure', '0006_customer_backend_id'),
]
operations = [
migrations.AddField(
model_name='customer',
name='blocked',
field=models.BooleanField(default=False),
),
]
| 1.671875 | 2 |
| src/app/c/frontend/__main__.py | lfvilella/dunder-interpreter | 0 | 12778459 |
import logging
import antlr4
from antlr4.error.ErrorListener import ErrorListener
import click
from .antlr.CLexer import CLexer
from .antlr.CParser import CParser
logger = logging.getLogger(__name__)
class MyErrorListener(ErrorListener):
def syntaxError(self, recognizer, offendingSymbol, line, column, msg, e):
if offendingSymbol.text not in ['int']:  # hack: errors whose offending symbol is 'int' are ignored
raise SyntaxError(f"line {line}:{column} {msg}")
logger.info(f"Syntax Error skip: '{offendingSymbol.text}'. {e}")
def run(filepath: str):
input_stream = antlr4.FileStream(filepath)
lexer = CLexer(input_stream)
stream = antlr4.CommonTokenStream(lexer)
parser = CParser(stream)
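# Replace ANTLR's default console error listener so syntax errors raise SyntaxError instead of just printing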
parser.removeErrorListeners()
parser.addErrorListener(listener=MyErrorListener())
parser.primaryExpression() # tree
@click.command()
@click.option('--filepath', type=str, required=True)
def main(filepath):
run(filepath=filepath)
if __name__ == '__main__':
main()
| 2.421875 | 2 |
| p3/management/commands/users_with_unassigned_tickets.py | judy2k/epcon | 0 | 12778460 |
# -*- coding: utf-8 -*-
""" Print information of the users who got unassigned tickets."""
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from p3 import models as p3_models
from assopy import models as assopy_models
from optparse import make_option
### Globals
### Helpers
def conference_year(conference=settings.CONFERENCE_CONFERENCE):
return conference[-2:]
def get_all_order_tickets(conference=settings.CONFERENCE_CONFERENCE):
year = conference_year(conference)
orders = assopy_models.Order.objects.filter(_complete=True)
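# Conference order codes start with 'O/<yy>.', so filter on the two-digit year prefix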
conf_orders = [order for order in orders if order.code.startswith('O/{}.'.format(year))]
order_tkts = [ordi.ticket
for order in conf_orders
for ordi in order.orderitem_set.all()
if ordi.ticket is not None]
conf_order_tkts = [ot for ot in order_tkts if ot.fare.code.startswith('T')]
return conf_order_tkts
def get_assigned_ticket(ticket_id):
return p3_models.TicketConference.objects.filter(ticket=ticket_id)
def has_assigned_ticket(ticket_id):
return bool(get_assigned_ticket(ticket_id))
# def is_ticket_assigned_to_someone_else(ticket, user):
# tickets = p3_models.TicketConference.objects.filter(ticket_id=ticket.id)
#
# if not tickets:
# return False
# #from IPython.core.debugger import Tracer
# #Tracer()()
# #raise RuntimeError('Could not find any ticket with ticket_id {}.'.format(ticket))
#
# if len(tickets) > 1:
# raise RuntimeError('You got more than one ticket from a ticket_id.'
# 'Tickets obtained: {}.'.format(tickets))
#
# tkt = tickets[0]
# if tkt.ticket.user_id != user.id:
# return True
#
# if not tkt.assigned_to:
# return False
#
# if tkt.assigned_to == user.email:
# return False
# else:
# return True
###
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--emails',
action='store_true',
dest='emails',
default=False,
help='Will print user emails.',
),
# make_option('--option',
# action='store',
# dest='option_attr',
# default=0,
# type='int',
# help='Help text',
# ),
)
def handle(self, *args, **options):
print('This script does not work anymore, do not use it.')
try:
conference = args[0]
except IndexError:
raise CommandError('conference not specified')
tkts = get_all_order_tickets(conference)
if not tkts:
raise IndexError('Could not find any tickets for conference {}.'.format(conference))
# unassigned tickets
un_tkts = [t for t in tkts if not t.p3_conference.assigned_to]
# users with unassigned tickets
users = set()
for ut in un_tkts:
users.add(ut.user)
if options['emails']:
output = sorted([usr.email.encode('utf-8') for usr in users])
else:
output = sorted([usr.get_full_name().encode('utf-8') for usr in users])
if output:
print(', '.join(output))
| 2.296875 | 2 |
| manila_tempest_tests/tests/api/test_share_types_negative.py | openstack/manila-tempest-plugin | 9 | 12778461 |
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
import ddt
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
from testtools import testcase as tc
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
CONF = config.CONF
LATEST_MICROVERSION = CONF.share.max_api_microversion
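# Helper used below to build an over-long description that should trigger a BadRequest on share type update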
def generate_long_description(des_length=256):
random_str = ''
base_str = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
length = len(base_str) - 1
for i in range(des_length):
random_str += base_str[random.randint(0, length)]
return random_str
@ddt.ddt
class ShareTypesNegativeTest(base.BaseSharesMixedTest):
@classmethod
def resource_setup(cls):
super(ShareTypesNegativeTest, cls).resource_setup()
cls.st = cls.create_share_type()
cls.st2 = cls.create_share_type()
@decorators.idempotent_id('d6a6ac4d-6582-408d-ba55-6f5128eb940e')
@tc.attr(base.TAG_NEGATIVE, base.TAG_API)
def test_try_create_share_type_with_user(self):
self.assertRaises(lib_exc.Forbidden,
self.create_share_type,
data_utils.rand_name("used_user_creds"),
client=self.shares_client)
@decorators.idempotent_id('857c664f-e634-4865-ba05-bdcd4336725d')
@tc.attr(base.TAG_NEGATIVE, base.TAG_API)
def test_try_delete_share_type_with_user(self):
self.assertRaises(lib_exc.Forbidden,
self.shares_client.delete_share_type,
self.st["id"])
@decorators.idempotent_id('06203276-f6a3-4a07-a014-8749763395d6')
@tc.attr(base.TAG_NEGATIVE, base.TAG_API)
def test_try_add_access_to_share_type_with_user(self):
self.assertRaises(lib_exc.Forbidden,
self.shares_client.add_access_to_share_type,
self.st['id'],
self.shares_client.tenant_id)
@decorators.idempotent_id('08b2d093-2ad8-46aa-8112-81d50547f36d')
@tc.attr(base.TAG_NEGATIVE, base.TAG_API)
def test_try_remove_access_from_share_type_with_user(self):
self.assertRaises(lib_exc.Forbidden,
self.shares_client.remove_access_from_share_type,
self.st['id'],
self.shares_client.tenant_id)
@utils.skip_if_microversion_not_supported("2.50")
@decorators.idempotent_id('4a22945c-8988-43a1-88c9-eb86e6abcd8e')
@tc.attr(base.TAG_NEGATIVE, base.TAG_API)
@ddt.data(
('2.50', '', None, None),
(LATEST_MICROVERSION, '', None, None),
('2.50', None, None, 'not_bool'),
(LATEST_MICROVERSION, None, None, 'not_bool'),
('2.50', None, generate_long_description(256), None),
(LATEST_MICROVERSION, None, generate_long_description(256), None),
)
@ddt.unpack
def test_share_type_update_bad_request(
self, version, st_name, st_description, st_is_public):
st_id = self.st['id']
# Update share type
self.assertRaises(lib_exc.BadRequest,
self.admin_shares_v2_client.update_share_type,
st_id, st_name, st_is_public, st_description,
version)
@utils.skip_if_microversion_not_supported("2.50")
@decorators.idempotent_id('7193465a-ed8e-44d5-9ca9-4e8a3c5958e0')
@tc.attr(base.TAG_NEGATIVE, base.TAG_API)
@ddt.data('2.50', LATEST_MICROVERSION)
def test_share_type_update_conflict(self, version):
name_1 = self.st['name']
st_id_2 = self.st2['id']
# Update share type
self.assertRaises(lib_exc.Conflict,
self.admin_shares_v2_client.update_share_type,
st_id_2, name_1, None, None, version)
| 1.78125 | 2 |
| Training/multipleInheritance.py | srikanteswartalluri/pyutils | 0 | 12778462 |
class A:
def Am(self):
print self.__class__, '=>I am in A'
class B(A):
def Am(self):
print self.__class__, '=>I am in Am'
def Bm(self):
print self.__class__, '=>I am in Bm'
class C(A, B):
def Cm(self):
print self.__class__, '=>I am in Cm'
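# With Python 2 old-style classes, lookup is depth-first left-to-right, so c.Am() below resolves to A.Am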
a = A()
b = B()
c = C()
c.Am()
c.Bm()
c.Cm()
| 2.890625 | 3 |
| test/test_delete_group.py | mihushynmaksym/Python_Training | 0 | 12778463 |
from random import randrange
__author__ = 'Max'
def test_delete_some_group(app):
app.group.find_group_button() # find button "group" on the page
app.group.if_not_group_create_group() # if not have any groups = create group
old_groups = app.group.get_group_list() # Check lists - 1 (delete group)
index = randrange(len(old_groups)) # add random for delete groups
app.group.delete_group_by_index(index) # add random for delete groups
new_group = app.group.get_group_list() # Check lists - 1 (delete group)
assert len(old_groups) - 1 == len(new_group) # Check lists - 1 (delete group)
old_groups[index:index + 1] = [] # Check lists - 1 (delete group), add random for delete groups
assert old_groups == new_group # Check lists - 1 (delete group)
# def test_delete_all_group(app):
# app.group.find_group_button() # find button "group" on the page
# app.group.if_not_group_create_group() # if not have any groups = create group
# old_groups = app.group.get_group_list() # Check lists - 1 (delete group)
# index =(len(old_groups)) # add random for delete groups
# app.group.delete_all_groups_by_index(index) # add random for delete groups
# new_group = app.group.get_group_list() # Check lists - 1 (delete group)
# assert len(old_groups)== len(new_group) # Check lists - 1 (delete group)
# old_groups[index:index + 1] = [] # Check lists - 1 (delete group), add random for delete groups
# assert old_groups == new_group # Check lists - 1 (delete group)
#need rework
| 2.765625 | 3 |
| refinery218/jobs/andes-thermal-perjob-time.py | at-aaims/sc21_summit_power_analysis_artifacts | 1 | 12778464 |
#!../.venv.andes/bin/python3
#SBATCH -M andes
#SBATCH -N 32
#SBATCH -J andes-thermal-perjob-time
#SBATCH -t 8:00:00
#SBATCH -A stf218
#SBATCH -o ../logs/andes-thermal-perjob-time-%J.out
import os
import sys
import time
import glob
from itertools import product
from datetime import datetime, timedelta
from loguru import logger
import numpy as np
import pandas as pd
import dask.dataframe as dd
from dask.distributed import as_completed
from refinery218.olcf import andes_dask_batch, watchdog_heartbeat
from refinery218.filestream import DayCursor
COMPUTE_SCRIPT = "andes-thermal-perjob-time.py"
DATA_DEV_DIR = '/gpfs/alpine/stf218/proj-shared/data/lake.dev/'
JOBS = os.path.join(DATA_DEV_DIR, 'summit_jobs/jobs.csv')
PERNODE_JOBS = os.path.join(DATA_DEV_DIR, 'summit_perhost_jobs/pernode_jobs.csv')
CEP = os.path.join(DATA_DEV_DIR, 'summit_cooling_plant/interp_cep_data.parquet')
PERNODE_JOBS_TS = os.path.join(DATA_DEV_DIR, 'summit_perhost_jobs_timeseries/data')
DATA_DIR = '/gpfs/alpine/stf218/proj-shared/data/lake/'
SRC_DIR = os.path.join(DATA_DIR, 'summit_power_temp_openbmc/10s_agg')
DST_DIR = os.path.join(DATA_DIR, 'summit_thermal_perjob_time_cep')
AGG_FREQ = '10s'
JOB_ID = 'allocation_id'
BEGIN = 'begin_time'
END = 'end_time'
TIME = 'timestamp'
NODE_OLD = 'node_name'
NODE = 'hostname'
TEMP_SENSORS = ['core', 'mem']
CORE_TEMPS, MEM_TEMPS = [[f'gpu{gpu}_{sensor}_temp.mean' for gpu in range(6)] for sensor in TEMP_SENSORS]
ALL_TEMPS = CORE_TEMPS + MEM_TEMPS
BANDS = [-np.inf, 80, 83, 87, 90, np.inf]
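# Five left-closed temperature bands: (<80), [80, 83), [83, 87), [87, 90), and (>=90)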
N_BANDS = len(BANDS) - 1
N_CORES_IN_BANDS, N_MEMS_IN_BANDS = [[f'n_{sensor}s_band{band}' for band in range(N_BANDS)] for sensor in TEMP_SENSORS]
HOT_GPUS = 'hot_gpus'
ANY_NAN = 'any_nan'
EMPTY_MAPPED_PARTITION = pd.DataFrame(columns=[NODE, ANY_NAN, HOT_GPUS] + N_CORES_IN_BANDS + N_MEMS_IN_BANDS)
# If not None, cursor stops at the offset from the beginning
# Useful for initial development & debugging
CURSOR_STOP_OFFSET = None
# andes_dask_batch has a watchdog that restarts the dask cluster
# when no heartbeat (calling watchdog_heartbeat()) is seen within
# WATCHDOG_INTERVAL_SEC
WATCHDOG_INTERVAL_SEC = 450
# Pre-commit tasks to overlap scheduling delay & computation
OVERLAP_DAY_COUNT = 1
def skip_if(cursor, offset, date_key):
"""Return True if we want to skip a particular date"""
# Useful to skip work that already has been done
return False
def compute_partition(df):
if not isinstance(df, pd.DataFrame) and not isinstance(df, dd.DataFrame):
return EMPTY_MAPPED_PARTITION
pernode_jobs_partition_path = os.path.join(PERNODE_JOBS_TS, f'part={df.index.floor("min").max()}')
if os.path.exists(pernode_jobs_partition_path):
pernode_jobs_partition = pd.read_parquet(pernode_jobs_partition_path, columns=[TIME, NODE, JOB_ID])
else:
return EMPTY_MAPPED_PARTITION
if pernode_jobs_partition.empty:
return EMPTY_MAPPED_PARTITION
# Join with per-node job time-series data.
df = pernode_jobs_partition.merge(df.reset_index(), on=[TIME, NODE])
# Detect NaNs in each row.
df[ANY_NAN] = df[ALL_TEMPS].isna().any(axis=1)
# Replace temperature with bands.
df[ALL_TEMPS] = df[ALL_TEMPS].apply(pd.cut, bins=BANDS, right=False, labels=False)
# Count bands for each row.
for n_sensors_in_bands, temps in zip([N_CORES_IN_BANDS, N_MEMS_IN_BANDS], [CORE_TEMPS, MEM_TEMPS]):
for band, n_sensors_in_band in enumerate(n_sensors_in_bands):
df[n_sensors_in_band] = (df[temps] == band).sum(axis=1)
# Encode hot GPUs for each node.
are_hot_gpus = df[ALL_TEMPS] > 1
df[HOT_GPUS] = df[NODE] + ':' + are_hot_gpus.fillna('_').astype(int).astype(str).agg(''.join, axis=1)
df[HOT_GPUS] = df[HOT_GPUS].mask(~are_hot_gpus.any(axis=1))
agg = df.groupby([TIME, JOB_ID]).agg({NODE: 'size', ANY_NAN: 'sum', HOT_GPUS: lambda x: list(x.dropna()),
**{n: 'sum' for n in N_CORES_IN_BANDS + N_MEMS_IN_BANDS}})
return agg
def compute_day(offset, date_key, ddf):
"""Computation of a day worth of data"""
res = ddf.map_partitions(compute_partition).compute().reset_index(level=JOB_ID)
return offset, date_key, res
def handle_result(context, res):
"""Handle results (sink)"""
_, date_key, df = res
try:
df = df.join(context['cep'])
except TypeError:
context['cep'].index = context['cep'].index.tz_localize(df.index.tz)
df = df.join(context['cep'])
df.to_csv(os.path.join(DST_DIR, f'{date_key}.csv'))
logger.info(f'processed {date_key}')
@logger.catch
def compute(client):
"""Main computation loop
Computation is done using futures instead of delays to reduce the impact of
the scheduler delays
"""
cursor = DayCursor(
client, basedir=SRC_DIR, cursor_stop_offset=CURSOR_STOP_OFFSET,
# Data we read (if column is not None and list, it will read that column
index=TIME,
columns=[NODE] + ALL_TEMPS,
# Days to attach prior or after the current date
prev_days=0, next_days=1,
# Skip condition
skip_fn=skip_if,
# Whether we load the per day into memory when iterating
persist=True,
)
# Read job data.
jobs = pd.read_csv(JOBS, usecols=[JOB_ID, BEGIN, END], parse_dates=[BEGIN, END]).set_index(JOB_ID)
jobs[BEGIN] = jobs[BEGIN].dt.round(AGG_FREQ)
jobs[END] = jobs[END].dt.round(AGG_FREQ)
pernode_jobs = pd.read_csv(PERNODE_JOBS, usecols=[JOB_ID, NODE_OLD]).set_index(JOB_ID)
pernode_jobs = pernode_jobs.rename(columns={NODE_OLD: NODE})
# Read interpolated CEP data.
context = {'cep': pd.read_parquet(CEP, engine='pyarrow')}
logger.info("Beginning iteration")
futures = as_completed()
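# An empty as_completed() collector; futures are added to it as they are submitted below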
for offset, date_key, ddf in cursor.iter():
# Create per-node level job data for the jobs that started on this day.
jobs_day = jobs[jobs[BEGIN].dt.strftime('%Y%m%d') == date_key]
pernode_jobs_day = pernode_jobs.merge(jobs_day, left_index=True, right_index=True).reset_index()
futures.add(client.submit(compute_day, offset, date_key, ddf))
# Also, skip a beat so that we have at least two or more futures submitted
# Then, dequeue and block on exactly one future at a time
if offset < OVERLAP_DAY_COUNT:
continue
for future in futures:
handle_result(context, future.result())
watchdog_heartbeat()
break
# We wait for the rest
for future in futures:
handle_result(context, future.result())
watchdog_heartbeat()
logger.info("Iteration finished")
return 0
#
# Submission block that takes care of the entrypoints
#
if __name__ == "__main__":
andes_dask_batch(compute, script=COMPUTE_SCRIPT, watchdog=WATCHDOG_INTERVAL_SEC)
| 1.65625 | 2 |
| src/server/models/caso.py | AssisDev/CT-I | 0 | 12778465 |
from databases.config import db
# from datetime import datetime
class Caso(db.Model):
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
caso = db.Column(db.String(150), nullable=False)
n_sipia = db.Column(db.String(30))
conselheiro = db.Column(db.String(150))
local_guarda = db.Column(db.String(150))
situacao = db.Column(db.String(150))
data_transferencia = db.Column(db.DateTime)
denuncia_id = db.Column(db.Integer, db.ForeignKey('denuncia.id'))
denuncia = db.relationship('Denuncia', backref=db.backref('casos', lazy=True))
# criado_em = db.Column(db.Datetime, default=datetime.utcnow)
# atualizado_em = db.Column(db.Datetime)
def __repr__(self):
return f'Caso {self.caso}'
db.create_all()
| 2.375 | 2 |
| askapp/models.py | alexpirine/askapp | 0 | 12778466 |
import os
import sys
import logging
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save, pre_delete
from django.dispatch import receiver
from django_countries.fields import CountryField
from django.core.files.storage import FileSystemStorage
from io import BytesIO
from django.core.files.uploadedfile import SimpleUploadedFile, InMemoryUploadedFile
from django.db.models.fields.files import ImageFieldFile
from PIL import Image, ImageOps
from mptt.models import MPTTModel, TreeForeignKey
from django.template.defaultfilters import slugify
from urllib.parse import urlparse
from datetime import datetime
import re
import requests
from django.core.exceptions import ObjectDoesNotExist
from django.db import IntegrityError
from django.forms import model_to_dict
from django.utils.translation import ugettext_lazy as _
from django.utils.functional import cached_property
import rules_light
from markdownx.models import MarkdownxField
from askapp import settings
class OverwriteStorage(FileSystemStorage):
"""
When saving an image, this storage class deletes existing file, thus implementing the overwriting feature
"""
def get_available_name(self, name, *args, **kwargs):
if self.exists(name):
os.remove(os.path.join(settings.MEDIA_ROOT, name))
return name
def avatar_name_path(instance, filename):
"""
convert arbitrary file name to the string consisting of username and user ID
"""
extension = filename[filename.rfind('.'):]
new_path = 'user_profile/%s%s%s' % (instance.user.username, instance.user.pk, extension)
return new_path
def favorite_threads(user):
"""
get list of the threads that user has "upvoted"
"""
favorites = ThreadLike.objects.filter(user=user, points__gt=0).order_by('-created')
threads = [f.thread for f in favorites]
return threads
class AskappImageFieldFile(ImageFieldFile):
"""
Return default avatar if there is no image
"""
@property
def url(self):
try:
result = super(AskappImageFieldFile, self).url
if not os.path.isfile(self.path):
raise ValueError
except ValueError:
result = settings.DEFAULT_AVATAR_URL if hasattr(settings, 'DEFAULT_AVATAR_URL') else ''
return result
class AskappImageField(models.ImageField):
attr_class = AskappImageFieldFile
class UserLevel(models.Model):
"""
User "level" object. Shows how many articles a user with this level can upvote/downvote
"""
name = models.CharField(blank=False, max_length=100) # level name
upvotes = models.IntegerField(default=3, verbose_name='Upvotes per day') # how many articles a user can upvote per day
downvotes = models.IntegerField(default=0)
upvote_same = models.IntegerField(default=1, verbose_name='Upvotes of the same article') # can user upvote the same article multiple times?
downvote_same = models.IntegerField(default=1, verbose_name='Downvote same article')
def __str__(self):
return str(self.name)
class Profile(models.Model):
"""
Additional user properties. Attached to the Django user object by 1-to-1 relation
"""
user = models.OneToOneField(User, on_delete=models.CASCADE) # 1-to-1 relation to the builtin Django User object
avatar = AskappImageField(storage=OverwriteStorage(), upload_to=avatar_name_path, blank=True)
country = CountryField(blank=True) # country of residence. 3rd party component, saved as ISO code, displayed as full name. Input control is a dropdown list, supporting localisation
city = models.CharField(max_length=50, blank=True) # city of residence
about = models.TextField(max_length=500, blank=True) # "about me", biography text field
level = models.ForeignKey(UserLevel, on_delete=models.DO_NOTHING, null=True, default=1) # user level, kinda "permissions" in the system
def __init__(self, *args, **kwargs):
super(Profile, self).__init__(*args, **kwargs)
# __original_avatar is used to detect avatar change, to run avatar resize procedure only when needed
self.__original_avatar = self.avatar
def __str__(self):
return self.user.username
@cached_property
def email(self):
"""
a helper that allows get email even from the Profile object
"""
return self.user.email
def resize_avatar(self):
# adapted, with some changes, from http://www.yilmazhuseyin.com/blog/dev/create-thumbnails-imagefield-django/
if not self.avatar:
return
try:
AVATAR_SIZE = settings.AVATAR_SIZE
except:
AVATAR_SIZE = (200, 200)
image = Image.open(BytesIO(self.avatar.read()))
if self.avatar.name.lower().endswith('png'):
bg = Image.new("RGB", image.size, (255, 255, 255))
bg.paste(image, image)
else:
bg = image
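# Crop to a centred square of the smaller dimension, then scale down to the configured avatar size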
min_dimension = min(image.size[0], image.size[1])
image = ImageOps.fit(bg, (min_dimension, min_dimension))
image = image.resize(AVATAR_SIZE, Image.LANCZOS)  # ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter
temp_handle = BytesIO()
image.save(temp_handle, 'jpeg')
temp_handle.seek(0)
suf = SimpleUploadedFile(os.path.split(self.avatar.name)[-1], temp_handle.read(), content_type='image/jpeg')
self.avatar.save('%s.%s' % (os.path.splitext(suf.name)[0], 'jpg'), suf, save=False)
def save(self, *args, **kwargs):
if self.avatar != self.__original_avatar:
self.resize_avatar()
super(Profile, self).save(*args, **kwargs)
@cached_property
def favorite_threads(self):
return favorite_threads(self.user)
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
if created or not hasattr(instance, 'profile'):
Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
instance.profile.save()
@receiver(post_save, sender=User)
def delete_user_content(sender, instance, created, **kwargs):
"""
Signal handler running after Django.user object is saved.
For disabled user it marks as "deleted" all user's threads and posts
"""
if not instance.is_active:
if kwargs['update_fields'] and 'is_active' in kwargs['update_fields']:
# Perform posts/threads deletion only when is_active field was explicitly mentioned for update
# delete user threads and posts
Post.objects.filter(user_id=instance.id, deleted=False).update(deleted=True)
Thread.objects.filter(user_id=instance.id, deleted=False).update(deleted=True)
class Tag(models.Model):
"""
Thread tags
"""
name = models.CharField(max_length=60, null=False) # full tag name
slug = models.SlugField(max_length=60, null=False) # slugified tag name to use in URLs
def __unicode__(self):
return self.name
def save(self, *args, **kwargs):
self.slug = slugify(self.name)
super(Tag, self).save(*args, **kwargs)
def __str__(self):
return "{0}".format(self.name)
class Thread(models.Model):
"""
Thread is the main content object of the application
"""
# codes for types of posts
QUESTION = "QQ"
DISCUSSION = "DD"
LINK = "LL"
YOUTUBE = "YT"
DUPLICATE = "DU"
VIDEOSTREAM = "VS"
# iterable collection for types of posts
# must consist of iterables of exactly two items
TYPES_OF_THREAD = (
#(QUESTION, _('Question')),
(DISCUSSION, _('Discussion')),
(LINK, _('Link')),
(YOUTUBE, _('Youtube video')),
(DUPLICATE, _('Duplicate thread')),
(VIDEOSTREAM, _('Video stream')),
)
TYPES_WITH_LINK = [LINK, YOUTUBE, DUPLICATE, VIDEOSTREAM]
#many to many relationship with tags. When a post is created, it needs to be saved and then tags can be added
tags = models.ManyToManyField(Tag, blank=True, verbose_name=_('tags'))
# these fields are taken into account only if the post is thread starter
hidden = models.BooleanField(default=False) # the thread is visible only to the staff and the author
closed = models.BooleanField(default=False) # no one can post comments / answers in this thread
sticky = models.DateField(null=True, blank=True) # this thread will be sticky until the given date
featured = models.BooleanField(default=False) # hopefully one day there will be sponsored threads...
deleted = models.BooleanField(default=False) # the thread is marked as deleted, usually on user blocking
# reference to the user who created the post
user = models.ForeignKey(settings.AUTH_USER_MODEL, models.CASCADE, default=1)
# automatically added timestamp field when the record is created
created = models.DateTimeField(auto_now_add=True)
# automatically added timestamp field when the record is modified
modified = models.DateTimeField(auto_now=True)
# Thread must have one of the types defined in TYPES_OF_THREAD
thread_type = models.CharField(
max_length=2,
choices=TYPES_OF_THREAD,
default=LINK,
null=True
)
# thread body with HTML markup
text = MarkdownxField(null=True)
# link field for the Threads of the type Link
link = models.URLField(null=True, blank=True, unique=True)
# link's domain. Used for /domains page by the task #66
domain = models.CharField(max_length=255, null=True, blank=True)
# thread title can be null if the post is not a thread starter
title = models.CharField(max_length=255, null=True)
#image that illustrates the thread
image = models.ImageField(upload_to='uploads/images/%Y/%m/%d', null=True, blank=True)
#smaller version of the image
thumbnail = models.ImageField(upload_to='uploads/images/%Y/%m/%d', null=True, blank=True)
# the current score of the post. It is only calculated for thread posts (no parents)
# that are not older than one week old
score = models.IntegerField(default=0)
# when thread type is "duplicate" this is a link to the original, "main" thread
original = models.ForeignKey("self", blank=True, null=True, on_delete=models.SET_NULL)
def __init__(self, *args, **kwargs):
super(Thread, self).__init__(*args, **kwargs)
self._old = model_to_dict(self, fields=['id', 'hidden', 'closed', 'sticky', 'featured', 'deleted', 'text', 'title'])
def __str__(self):
return self.title
def save(self, *args, **kwargs):
self.prepare_images()
self.update_link()
super(Thread, self).save()
AuditThread.audit(self) # log all changes applied to the thread
def resize_image(self, content, size, format='JPEG'):
im = Image.open(BytesIO(content)).convert('RGBA')
if im.size[0] > size[0] or im.size[1] > size[1]:
im.thumbnail(size)
new_image = Image.new('RGBA', im.size, 'WHITE')
new_image.paste(im, (0, 0), im)
new_image = new_image.convert('RGB')
result = BytesIO()
new_image.save(result, format)
return result
def _delete_old_image(self):
try:
this = Thread.objects.get(id=self.id)
if this.image != self.image:
# delete old image explicitly, as new image will have different name
this.image.delete(False)
this.thumbnail.delete(False)
except Exception as ex:
pass
@cached_property
def youtube_id(self):
# url = url.split(/(vi\/|v%3D|v=|\/v\/|youtu\.be\/|\/embed\/)/);
# return undefined !== url[2]?url[2].split(/[^0-9a-z_\-]/i)[0]:url[0];
r = r"(vi\/|v%3D|v=|\/v\/|youtu\.be\/|\/embed\/)"
video_id = re.split(r, self.link)
if len(video_id) == 3:
video_id = re.split(r"[^0-9a-z_\-]", video_id[2], flags=re.IGNORECASE)
else:
return None
return video_id[0] if video_id else None
def parse_youtube_url(self):
id = self.youtube_id
item = {}
if id and settings.GOOGLE_API_KEY:
snippet = requests.get(f'https://www.googleapis.com/youtube/v3/videos?part=snippet&id={id}&key={settings.GOOGLE_API_KEY}')
snippet = snippet.json()
if snippet.get('items'):
item = snippet['items'][0]['snippet']
item['image'] = item['thumbnails']['default']['url']
if not item and id: # failed to get video info from googleapis, trying 3rd party service
snippet = requests.get(f'https://noembed.com/embed?url=https://www.youtube.com/watch?v={id}')
snippet = snippet.json()
if snippet.get('title'):
item = snippet
item['image'] = item['thumbnail_url']
item['description'] = ''
result = {'id': id} if id else None
if item:
result.update(**{k: item[k] for k in ['title', 'description', 'image']})
return result
def _load_youtube_thumbnail(self):
yt_info = self.parse_youtube_url()
if yt_info:
filename = os.path.basename(yt_info['image'])
ext = filename.split('.', 1)[-1]
filename = '%s.%s' % (yt_info['id'], ext)
response = requests.get(yt_info['image'])
self.image = SimpleUploadedFile(filename, response.content, response.headers['content-type'])
def prepare_images(self):
if self.thread_type == self.YOUTUBE and not self.image:
self._load_youtube_thumbnail()
self._delete_old_image()
if self.image:
img = self.resize_image(self.image.read(), size=settings.MAX_IMAGE_SIZE, format='JPEG')
self.image = InMemoryUploadedFile(img, 'ImageField', "%s.jpg" % self.image.name.split('.')[0],
'image/jpeg', sys.getsizeof(img), None)
def update_link(self):
"""
extract domain name for threads of type "link"
"""
if self.thread_type not in self.TYPES_WITH_LINK:
self.link = None
if self.thread_type not in [self.LINK, self.DUPLICATE]:
self.domain = None
else:
hostname = urlparse(self.link)
self.domain = hostname.netloc
@cached_property
def comments(self):
if self.thread_type == self.QUESTION: # filter out comments marked as answers, they'll come in another property
params = {
'is_answer': False,
'deleted': False,
'parent_id__isnull': True,
}
result = self.post_set.filter(**params)
else:
result = self.post_set.all()
return result
@cached_property
def answers(self):
x = self.post_set.filter(is_answer=True, deleted=False)
return x
@cached_property
def num_comments(self):
return self.post_set.filter(deleted=False).count()
@cached_property
def points(self):
result = self.threadlike_set.all().aggregate(sum=models.Sum('points'))['sum']
return result or 0
@cached_property
def author(self):
return self.user.username
@cached_property
def answered(self):
"""
check whether the thread of type "question" has an answer, to prevent marking another comment as the answer
"""
return self.answers.filter(accepted__isnull=False).count() > 0
@cached_property
def duplicates(self):
if self.original:
q = models.Q(original__in=[self.original.id, self.id]) | models.Q(id=self.original.id)
q = q & ~models.Q(id=self.id)
else:
q = models.Q(original=self.id)
return Thread.objects.filter(q, deleted=False)
class Post(MPTTModel):
"""
Post is a part of the discussion on the levels below Thread.
It can be comments or answers organized in several levels.
"""
class Meta:
verbose_name = 'Comment'
verbose_name_plural = 'Comments'
# defines the parent post. If the value is null, the post is a thread starter
parent = TreeForeignKey('self', models.CASCADE, null=True, blank=True, related_name='children', db_index=True)
# the thread that the Post belongs to
thread = models.ForeignKey(Thread, models.CASCADE)
# reference to the user who created the post
user = models.ForeignKey(settings.AUTH_USER_MODEL, models.CASCADE, default=1)
# automatically added timestamp field when the record is created
created = models.DateTimeField(auto_now_add=True)
# post body with HTML markup
text = MarkdownxField(null=True)
# in a question thread this distinguishes answers from comments
is_answer = models.BooleanField(default=False)
# the topic starter or an admin selected this post as "the accepted answer"
accepted = models.DateTimeField(null=True)
# A post should be marked as deleted instead of being physically deleted because it can have live descendant posts
deleted = models.BooleanField(default=False)
def __str__(self):
return str(self.id)
@cached_property
def points(self):
result = self.postlike_set.all().aggregate(sum=models.Sum('points'))['sum']
return result or 0
@cached_property
def author(self):
return self.user.username
@cached_property
def comments(self):
return self.get_children().filter(deleted=False)
def accept(self):
"""
accept this comment as the answer for the thread type "question"
"""
self.accepted = datetime.utcnow()
self.save()
class ThreadLike(models.Model):
"""
Users can give up- and down-votes to threads. Upvote = +1, downvote = -1.
Regular users cannot "like" their own thread.
Regular users cannot "like" others' threads more than once.
Threads with positive likes are displayed in the user's "favorites" page.
"""
user = models.ForeignKey(settings.AUTH_USER_MODEL, models.CASCADE, default=1)
thread = models.ForeignKey(Thread, on_delete=models.CASCADE)
# automatically added timestamp field when the record is created
created = models.DateTimeField(auto_now_add=True)
points = models.IntegerField(default=0)
@classmethod
def vote(cls, thread, user, verb):
points = 1 if verb == 'up' else -1
kwargs = {'thread': thread, 'user': user}
try:
obj = cls.objects.get(**kwargs)
except ObjectDoesNotExist:
obj = cls(points=0, **kwargs)
if (not rules_light.registry['askapp.threadlike.%s' % verb](user, None, thread)
or not rules_light.registry['askapp.user.%svote_threads' % verb](user, None)
):
return obj
obj.points += points
obj.created = datetime.utcnow()
obj.save()
return obj
class PostLike(models.Model):
"""
Users can give up- and down-votes to comments. Upvote = +1, downvote = -1.
Regular users cannot "like" their own comments.
Regular users cannot "like" others' comments more than once.
"""
user = models.ForeignKey(settings.AUTH_USER_MODEL, models.CASCADE, default=1)
post = models.ForeignKey(Post, on_delete=models.CASCADE)
# automatically added timestamp field when the record is created
created = models.DateTimeField(auto_now_add=True)
points = models.IntegerField(default=0)
@classmethod
def vote(cls, post, user, verb):
points = 1 if verb == 'up' else -1
kwargs = {'post': post, 'user': user}
try:
obj = cls.objects.get(**kwargs)
if not rules_light.registry['askapp.postlike.%s' % verb](user, None, post):
return obj
except ObjectDoesNotExist:
obj = cls(points=0, **kwargs)
obj.points += points
obj.save()  # persist the vote, mirroring ThreadLike.vote above
return obj
class AuditThread(models.Model):
"""
Audit user actions
Actions are taken by users and can describe:
- update post (saving the old text and title)
- close post
- setting post sticky, etc
"""
TYPES_OF_ACTION = (
("update", 'Update'),
("close", 'Close'),
("sticky", 'Sticky'),
("hide", 'Hide'),
("delete", 'Delete'),
)
user = models.ForeignKey(settings.AUTH_USER_MODEL, models.CASCADE, default=1)
thread = models.ForeignKey(Thread, models.CASCADE)
action = models.TextField(null=False, choices=TYPES_OF_ACTION, default="update")
created = models.DateTimeField(auto_now_add=True)
content = models.TextField(null=True) # old title or text of the edited post
@classmethod
def audit(cls, instance):
if not instance._old['id'] or not hasattr(instance, 'modified_by'):
return
content = None
if instance._old['deleted'] != instance.deleted:
action = 'delete'
content = instance.delete_reason
elif instance._old['hidden'] != instance.hidden:
action = 'hide'
elif instance._old['sticky'] != instance.sticky:
action = 'sticky'
elif instance._old['closed'] != instance.closed:
action = 'close'
elif instance._old['title'] != instance.title:
action = 'update'
content = instance._old['title']
elif instance._old['text'] != instance.text:
action = 'update'
content = instance._old['text']
else:
return
audit = cls(user=instance.modified_by, thread=instance, action=action, content=content)
audit.save()
| 1.992188
| 2
|
src/aspire/utils/matrix.py
|
janden/ASPIRE-Python
| 0
|
12778467
|
<reponame>janden/ASPIRE-Python
"""
Utilities for arrays/n-dimensional matrices.
"""
import numpy as np
from scipy.linalg import eigh
from aspire.utils import ensure
from aspire.utils.matlab_compat import m_reshape
SQRT2 = np.sqrt(2)
SQRT2_R = 1/SQRT2
def unroll_dim(X, dim):
# TODO: dim is still 1-indexed like in MATLAB to reduce headaches for now
# TODO: unroll/roll are great candidates for a context manager since they're always used in conjunction.
dim = dim - 1
old_shape = X.shape
new_shape = old_shape[:dim]
if dim < len(old_shape):
new_shape += (-1, )
if old_shape != new_shape:
Y = m_reshape(X, new_shape)
else:
Y = X
removed_dims = old_shape[dim:]
return Y, removed_dims
def roll_dim(X, dim):
# TODO: dim is still 1-indexed like in MATLAB to reduce headaches for now
if len(dim) > 0:
old_shape = X.shape
new_shape = old_shape[:-1] + dim
Y = m_reshape(X, new_shape)
return Y
else:
return X
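# Illustrative sketch (not part of the original module): unroll_dim collapses
# every dimension from `dim` (1-indexed, MATLAB style) onward into one, and
# roll_dim restores them from the returned removed_dims tuple, e.g.
#   >>> X = np.zeros((3, 4, 5))
#   >>> Y, removed = unroll_dim(X, 2)   # Y.shape == (3, 20), removed == (4, 5)
#   >>> roll_dim(Y, removed).shape
#   (3, 4, 5)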
def im_to_vec(im):
"""
Roll up images into vectors
:param im: An N-by-N-by-... array.
:return: An N^2-by-... array.
"""
shape = im.shape
ensure(im.ndim >= 2, "Array should have at least 2 dimensions")
ensure(shape[0] == shape[1], "Array should have first 2 dimensions identical")
return m_reshape(im, (shape[0]**2,) + (shape[2:]))
def vol_to_vec(X):
"""
Roll up volumes into vectors
:param X: N-by-N-by-N-by-... array.
:return: An N^3-by-... array.
"""
shape = X.shape
ensure(X.ndim >= 3, "Array should have at least 3 dimensions")
ensure(shape[0] == shape[1] == shape[2], "Array should have first 3 dimensions identical")
return m_reshape(X, (shape[0]**3,) + (shape[3:]))
def vec_to_im(X):
"""
Unroll vectors to images
:param X: N^2-by-... array.
:return: An N-by-N-by-... array.
"""
shape = X.shape
N = round(shape[0]**(1/2))
ensure(N**2 == shape[0], "First dimension of X must be square")
return m_reshape(X, (N, N) + (shape[1:]))
def vec_to_vol(X):
"""
Unroll vectors to volumes
:param X: N^3-by-... array.
:return: An N-by-N-by-N-by-... array.
"""
shape = X.shape
N = round(shape[0]**(1/3))
ensure(N**3 == shape[0], "First dimension of X must be cubic")
return m_reshape(X, (N, N, N) + (shape[1:]))
def vecmat_to_volmat(X):
"""
Roll up vector matrices into volume matrices
:param X: A vector matrix of size L1^3-by-L2^3-by-...
:return: A volume "matrix" of size L1-by-L1-by-L1-by-L2-by-L2-by-L2-by-...
"""
# TODO: Use context manager?
shape = X.shape
ensure(X.ndim >= 2, "Array should have at least 2 dimensions")
L1 = round(shape[0]**(1/3))
L2 = round(shape[1]**(1/3))
ensure(L1**3 == shape[0], "First dimension of X must be cubic")
ensure(L2**3 == shape[1], "Second dimension of X must be cubic")
return m_reshape(X, (L1, L1, L1, L2, L2, L2) + (shape[2:]))
def volmat_to_vecmat(X):
"""
Unroll volume matrices to vector matrices
:param X: A volume "matrix" of size L1-by-L1-by-L1-by-L2-by-L2-by-L2-by-...
:return: A vector matrix of size L1^3-by-L2^3-by-...
"""
# TODO: Use context manager?
shape = X.shape
ensure(X.ndim >= 6, "Array should have at least 6 dimensions")
ensure(shape[0] == shape[1] == shape[2], "Dimensions 1-3 should be identical")
ensure(shape[3] == shape[4] == shape[5], "Dimensions 4-6 should be identical")
l1 = shape[0]
l2 = shape[3]
return m_reshape(X, (l1**3, l2**3) + (shape[6:]))
def mdim_mat_fun_conj(X, d1, d2, f):
"""
Conjugate a multidimensional matrix using a linear mapping
:param X: An N_1-by-...-by-N_d1-by-N_1...-by-N_d1-by-... array, with the first 2*d1 dimensions corresponding to
matrices with columns and rows of dimension d1.
:param d1: The dimension of the input matrix X
:param d2: The dimension of the output matrix Y
:param f: A function handle of a linear map that takes an array of size N_1-by-...-by-N_d1-by-... and returns an
array of size M_1-by-...-by-M_d2-by-... .
:return: An array of size M_1-by-...-by-M_d2-by-M_1-by-...-by-M_d2-by-... resulting from applying fun to the rows
and columns of the multidimensional matrix X.
TODO: Very complicated to wrap head around this one!
"""
X, sz_roll = unroll_dim(X, 2*d1 + 1)
X = f(X)
# Swap the first d2 axes block of X with the next d1 axes block
X = np.moveaxis(X, list(range(d1+d2)), list(range(d1, d1+d2)) + list(range(d1)))
X = np.conj(X)
X = f(X)
# Swap the first d2 axes block of X with the next d2 axes block
X = np.moveaxis(X, list(range(2*d2)), list(range(d2, 2*d2)) + list(range(d2)))
X = np.conj(X)
X = roll_dim(X, sz_roll)
return X
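# Illustrative note (not part of the original module): in ordinary matrix
# terms, with d1 = d2 = 1 and f the linear map A -> B @ A, this routine
# returns B @ X @ B.conj().T, i.e. f applied to both the columns and the
# rows of X. A sketch with arbitrary test arrays B and X:
#   >>> B, X = np.random.rand(3, 2), np.random.rand(2, 2)
#   >>> Y = mdim_mat_fun_conj(X, 1, 1, lambda A: np.tensordot(B, A, axes=(1, 0)))
#   >>> np.allclose(Y, B @ X @ B.conj().T)
#   True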
def symmat_to_vec_iso(mat):
"""
Isometrically maps a symmetric matrix to a packed vector
:param mat: An array of size N-by-N-by-... where the first two dimensions constitute symmetric or Hermitian
matrices.
:return: A vector of size N*(N+1)/2-by-... consisting of the lower triangular part of each matrix, reweighted so
that the Frobenius inner product is mapped to the Euclidean inner product.
"""
mat, sz_roll = unroll_dim(mat, 3)
N = mat.shape[0]
mat = mat_to_vec(mat)
mat[np.arange(0, N ** 2, N + 1)] *= SQRT2_R
mat *= SQRT2
mat = vec_to_mat(mat)
mat = roll_dim(mat, sz_roll)
vec = symmat_to_vec(mat)
return vec
def vec_to_symmat_iso(vec):
"""
Isometrically map packed vector to symmetric matrix
:param vec: A vector of size N*(N+1)/2-by-... describing a symmetric (or Hermitian) matrix.
:return: An array of size N-by-N-by-... which indexes symmetric/Hermitian matrices that occupy the first two
dimensions. The lower triangular parts of these matrices consists of the corresponding vectors in vec,
reweighted so that the Euclidean inner product maps to the Frobenius inner product.
"""
mat = vec_to_symmat(vec)
mat, sz_roll = unroll_dim(mat, 3)
N = mat.shape[0]
mat = mat_to_vec(mat)
mat[np.arange(0, N ** 2, N + 1)] *= SQRT2
mat *= SQRT2_R
mat = vec_to_mat(mat)
mat = roll_dim(mat, sz_roll)
return mat
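# Illustrative check (not part of the original module): the *_iso variants
# rescale off-diagonal entries by sqrt(2) so that the Frobenius inner product
# of two symmetric matrices equals the Euclidean inner product of their
# packed vectors (A and B below are arbitrary test matrices):
#   >>> A = make_symmat(np.random.rand(4, 4))
#   >>> B = make_symmat(np.random.rand(4, 4))
#   >>> np.allclose(np.sum(A * B), symmat_to_vec_iso(A) @ symmat_to_vec_iso(B))
#   True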
def symmat_to_vec(mat):
"""
Packs a symmetric matrix into a lower triangular vector
:param mat: An array of size N-by-N-by-... where the first two dimensions constitute symmetric or
Hermitian matrices.
:return: A vector of size N*(N+1)/2-by-... consisting of the lower triangular part of each matrix.
Note that a lot of acrobatics happening here (swapaxes/triu instead of tril etc.) are so that we can get
column-major ordering of elements (to get behavior consistent with MATLAB), since masking in numpy only returns
data in row-major order.
"""
N = mat.shape[0]
ensure(mat.shape[1] == N, "Matrix must be square")
mat, sz_roll = unroll_dim(mat, 3)
triu_indices = np.triu_indices(N)
vec = mat.swapaxes(0, 1)[triu_indices]
vec = roll_dim(vec, sz_roll)
return vec
def vec_to_symmat(vec):
"""
Convert packed lower triangular vector to symmetric matrix
:param vec: A vector of size N*(N+1)/2-by-... describing a symmetric (or Hermitian) matrix.
:return: An array of size N-by-N-by-... which indexes symmetric/Hermitian matrices that occupy the first two
dimensions. The lower triangular parts of these matrices consists of the corresponding vectors in vec.
"""
# TODO: Handle complex values in vec
if np.iscomplex(vec).any():
raise NotImplementedError('Coming soon')
# M represents N(N+1)/2
M = vec.shape[0]
N = int(round(np.sqrt(2 * M + 0.25) - 0.5))
ensure((M == 0.5*N*(N+1)) and N != 0, "Vector must be of size N*(N+1)/2 for some N>0.")
vec, sz_roll = unroll_dim(vec, 2)
index_matrix = np.empty((N, N))
i_upper = np.triu_indices_from(index_matrix)
index_matrix[i_upper] = np.arange(M) # Incrementally populate upper triangle in row major order
index_matrix.T[i_upper] = index_matrix[i_upper] # Copy to lower triangle
mat = vec[index_matrix.flatten('F').astype('int')]
mat = m_reshape(mat, (N, N) + mat.shape[1:])
mat = roll_dim(mat, sz_roll)
return mat
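# Illustrative round trip (not part of the original module): packing keeps
# only the lower triangle in column-major order, and vec_to_symmat rebuilds
# the full symmetric matrix from it:
#   >>> A = make_symmat(np.random.rand(3, 3))
#   >>> v = symmat_to_vec(A)            # shape (6,) == N*(N+1)/2
#   >>> np.allclose(vec_to_symmat(v), A)
#   True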
def mat_to_vec(mat, is_symmat=False):
"""
Converts a matrix into vectorized form
:param mat: An array of size N-by-N-by-... containing the matrices to be vectorized.
:param is_symmat: Specifies whether the matrices are symmetric/Hermitian, in which case they are stored in packed
form using symmat_to_vec (default False).
:return: The vectorized form of the matrices, with dimension N^2-by-... or N*(N+1)/2-by-... depending on the value
of is_symmat.
"""
if not is_symmat:
sz = mat.shape
N = sz[0]
ensure(sz[1] == N, "Matrix must be square")
return m_reshape(mat, (N**2,) + sz[2:])
else:
return symmat_to_vec(mat)
def vec_to_mat(vec, is_symmat=False):
"""
Converts a vectorized matrix into a matrix
:param vec: The vectorized representations. If the matrix is non-symmetric, this array has the dimensions
N^2-by-..., but if the matrix is symmetric, the dimensions are N*(N+1)/2-by-... .
:param is_symmat: True if the vectors represent symmetric matrices (default False)
:return: The array of size N-by-N-by-... representing the matrices.
"""
if not is_symmat:
sz = vec.shape
N = int(round(np.sqrt(sz[0])))
ensure(sz[0] == N**2, "Vector must represent square matrix.")
return m_reshape(vec, (N, N) + sz[1:])
else:
return vec_to_symmat(vec)
def make_symmat(A):
"""
Symmetrize a matrix
:param A: A matrix.
:return: The Hermitian matrix (A+A')/2.
"""
return 0.5 * (A + A.T)
def anorm(x, axes=None):
"""
Calculate array norm along given axes
:param x: An array of arbitrary size and shape.
:param axes: The axis along which to compute the norm. If None, the norm is calculated along all axes.
:return: The Euclidean (l^2) norm of x along specified axes.
"""
if axes is None:
axes = range(x.ndim)
return np.sqrt(ainner(x, x, axes))
def acorr(x, y, axes=None):
"""
Calculate array correlation along given axes
:param x: An array of arbitrary shape
:param y: An array of same shape as x
:param axes: The axis along which to compute the correlation. If None, the correlation is calculated along all axes.
:return: The correlation of x along specified axes.
"""
ensure(x.shape == y.shape, "The shapes of the inputs have to match")
if axes is None:
axes = range(x.ndim)
return ainner(x, y, axes) / (anorm(x, axes) * anorm(y, axes))
def ainner(x, y, axes=None):
"""
Calculate array inner product along given axes
:param x: An array of arbitrary shape
:param y: An array of same shape as x
:param axes: The axis along which to compute the inner product. If None, the product is calculated along all axes.
:return:
"""
ensure(x.shape == y.shape, "The shapes of the inputs have to match")
if axes is None:
axes = range(x.ndim)
return np.tensordot(x, y, axes=(axes, axes))
def eigs(A, k):
"""
Multidimensional partial eigendecomposition
:param A: An array of size `sig_sz`-by-`sig_sz`, where `sig_sz` is a size containing d dimensions.
The array represents a matrix with d indices for its rows and columns.
:param k: The number of eigenvalues and eigenvectors to calculate.
:return: A 2-tuple of values
V: An array of eigenvectors of size `sig_sz`-by-k.
D: A matrix of size k-by-k containing the corresponding eigenvalues in the diagonals.
"""
sig_sz = A.shape[:int(A.ndim/2)]
sig_len = np.prod(sig_sz)
A = m_reshape(A, (sig_len, sig_len))
dtype = A.dtype
w, v = eigh(A.astype('float64'), eigvals=(sig_len-1-k+1, sig_len-1))
# Arrange in descending order (flip column order in eigenvector matrix) and typecast to proper type
w = w[::-1].astype(dtype)
v = np.fliplr(v)
v = m_reshape(v, sig_sz + (k,)).astype(dtype)
return v, np.diag(w)
| 2.953125
| 3
|
dcgan/utils.py
|
verysage/logo-gen
| 80
|
12778468
|
<reponame>verysage/logo-gen<filename>dcgan/utils.py<gh_stars>10-100
"""
Some codes from https://github.com/Newmu/dcgan_code
"""
from __future__ import division
import os
import math
import pprint
import scipy.misc
import scipy.stats as stats
import numpy as np
from time import gmtime, strftime
from six.moves import xrange
# from matplotlib import pyplot as plt
import tensorflow as tf
import tensorflow.contrib.slim as slim
import sys
sys.path.insert(0, '../image-tools')
import metrics
from gauss import gauss_kernel_fixed
pp = pprint.PrettyPrinter()
def show_all_variables():
model_vars = tf.trainable_variables()
slim.model_analyzer.analyze_vars(model_vars, print_info=True)
def conv_out_size_same(size, stride):
return int(math.ceil(float(size) / float(stride)))
def save_images(images, size, image_path):
return imsave(inverse_transform(images), size, image_path)
def imsave(images, size, path):
if images.shape[3] == 1:
merged = merge(images, size)
return scipy.misc.imsave(path, merged.reshape(merged.shape[:2]))
return scipy.misc.imsave(path, merge(images, size))
def merge(images, size):
h, w = images.shape[1], images.shape[2]
c = images.shape[3]
img = np.zeros((h * size[0], w * size[1], c))
for idx, image in enumerate(images):
i = idx % size[1]
j = idx // size[1]
img[j*h:j*h+h, i*w:i*w+w, :] = image
return img
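# Illustrative note (not part of the original module): merge tiles a batch of
# images into one grid image, filling rows first. For example a batch of
# shape (6, 32, 32, 3) merged with size=(2, 3) yields an array of shape
# (64, 96, 3), i.e. 2 rows by 3 columns of 32x32 RGB tiles.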
def inverse_transform(images):
return (images+1.)/2.
def slerp(val, low, high):
"""Spherical interpolation. val has a range of 0 to 1.
Taken from https://github.com/dribnet/plat"""
if val <= 0:
return low
elif val >= 1:
return high
elif np.allclose(low, high):
return low
omega = np.arccos(np.dot(low/np.linalg.norm(low), high/np.linalg.norm(high)))
so = np.sin(omega)
return np.sin((1.0-val)*omega) / so * low + np.sin(val*omega)/so * high
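# Illustrative sketch (not part of the original module): slerp walks along
# the great circle between two latent vectors, so the endpoints come back
# exactly and the midpoint sits at equal angular distance, e.g.
#   slerp(0.5, np.array([1.0, 0.0]), np.array([0.0, 1.0]))
#   # -> array([0.70710678, 0.70710678])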
def interpolate(sess, dcgan, z_start, z_stop, n_steps=62, y_start=None, y_stop=None, transform=True):
"""Interpolates between two samples in z-space
Input parameters:
sess: TF session
dcgan: DCGAN object for sampling
z_start: z-vector of the first sample
z_stop: z-vector of the second sample
n_steps: number of intermediate samples to produce
transform: if True, the pixel values will be transformed to their normal image range [True]
y_start: label for first sample (numerical)
y_stop: label for second sample (numerical)
RETURNS an array of n_steps+2 samples"""
y_dim = 0
if y_start is not None:
y_dim = dcgan.y_dim
if y_stop is None:
y_stop = y_start
if y_start != y_stop:
z_start = np.concatenate((z_start, np.eye(y_dim)[y_start]))
z_stop = np.concatenate((z_stop, np.eye(y_dim)[y_stop]))
# limit to batch size for simplicity
if n_steps > (dcgan.batch_size - 2):
n_steps = dcgan.batch_size - 2
# sample along big circle for all distributions
steps = np.linspace(0, 1, n_steps + 2)
z_samples = [slerp(step, z_start, z_stop) for step in steps]
gauss_filter = gauss_kernel_fixed(dcgan.gauss_sigma, (dcgan.kernel_size - 1) // 2)
if n_steps != (dcgan.batch_size - 2):
z_samples += [np.zeros(dcgan.z_dim + y_dim) for i in range(dcgan.batch_size - n_steps - 2)]
if y_dim > 0:
if y_start != y_stop:
samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: np.array(z_samples)[:, :dcgan.z_dim],
dcgan.y: np.array(z_samples)[:, dcgan.z_dim:],
dcgan.gauss_kernel: gauss_filter},)[:n_steps + 2]
else:
samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: np.array(z_samples),
dcgan.y: np.eye(y_dim)
[np.full(dcgan.batch_size, y_start)],
dcgan.gauss_kernel: gauss_filter})[:n_steps + 2]
else:
samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: np.array(z_samples),
dcgan.gauss_kernel: gauss_filter})[:n_steps+2]
if transform:
samples = np.array([((sample + 1) / 2 * 255).astype(np.uint8) for sample in samples])
return samples
def interactive_interp(sess, dcgan, config, sampling='uniform'):
while True:
z_samples = dcgan.z_sampler(config)
has_labels = False
try:
if dcgan.has_labels:
has_labels = True
label = int(raw_input('Class label for first sample: '))
sample_labels = np.eye(dcgan.y_dim)[np.full(dcgan.batch_size, label)]
except Exception: pass
gauss_filter = gauss_kernel_fixed(config.gauss_sigma, config.gauss_trunc)
if has_labels:
samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_samples, dcgan.y: sample_labels,
dcgan.gauss_kernel: gauss_filter})
else:
samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_samples, dcgan.gauss_kernel: gauss_filter})
samples = np.array([((sample + 1) / 2 * 255).astype(np.uint8) for sample in samples])
grid_size = int(math.ceil(math.sqrt(dcgan.batch_size)))
scipy.misc.imshow(merge(samples, (grid_size, grid_size)))
# from IPython import embed; embed()
start = int(raw_input('First sample number: '))
if has_labels:
label2 = raw_input('Class label for second sample [same]: ')
if label2 == '':
label2 = label
same = True
else:
label2 = int(label2)
same = False
sample_labels2 = np.eye(dcgan.y_dim)[np.full(dcgan.batch_size, label2)]
if same:
samples2 = samples
else:
samples2 = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_samples, dcgan.y: sample_labels2,
dcgan.gauss_kernel: gauss_filter})
scipy.misc.imshow(merge(samples2, (grid_size, grid_size)))
stop = int(raw_input('Second sample number: '))
n_steps = raw_input('Number of steps [62]: ')
if n_steps == '':
n_steps = 62
else:
n_steps = int(n_steps)
if has_labels:
series = interpolate(sess, dcgan, z_start=z_samples[start - 1], z_stop=z_samples[stop - 1],
n_steps=n_steps, y_start=label, y_stop=label2, transform=True)
else:
series = interpolate(sess, dcgan, z_start=z_samples[start-1], z_stop=z_samples[stop-1],
n_steps=n_steps, transform=True)
scipy.misc.imshow(merge(series, (int(math.ceil((n_steps + 2) / 8)), 8)))
c = raw_input('Continue? [y/n]')
if c != 'y':
break
def visualize(sess, dcgan, config, option):
image_frame_dim = int(math.ceil(config.batch_size ** .5))
# produce sample uniformly with nearest neighbour
# option 0: additionally sort according to distance
if (option == 1) or (option == 0):
n_images = 20
has_labels = False
try:
if dcgan.has_labels:
# generate one image for each cluster / category
has_labels = True
if option == 0:
n_images = dcgan.y_dim
except Exception: pass
# sample DCGAN from uniform distribution in z
print('sampling...')
z_samples = dcgan.z_sampler(config)
if has_labels:
y_samples = np.eye(dcgan.y_dim)[np.random.choice(dcgan.y_dim, [n_images, config.batch_size])]
samples = zip(z_samples, y_samples)  # pair each z batch with its label batch
samples = np.array([sess.run(dcgan.sampler, {dcgan.z: batch, dcgan.y: batch_y})
for batch, batch_y in samples])
else:
samples = np.array([sess.run(dcgan.sampler, feed_dict={dcgan.z: batch}) for batch in z_samples])
# transform back to normal image value range and reshape to one array instead of batches
print('transforming...')
samples = np.array([((sample + 1) / 2 * 255).astype(np.uint8) for sample in samples]) \
.reshape((samples.shape[0] * samples.shape[1],) + samples.shape[2:])
# load and rescale training data to same size as samples
print('loading and transforming orig data...')
orig_data, _ = fh.load_icon_data(config.data_dir)  # NOTE: `fh` is referenced here but never imported in this module
orig_data = np.array(
[scipy.misc.imresize(icon, (config.output_height, config.output_height)) for icon in orig_data])
# get nearest neighbour indices from training set
if option == 1:
print('getting nearest neighbours...')
nearest_idxs = metrics.nearest_icons(samples, orig_data)
else:
print('getting nearest neighbours...')
nearest_idxs, distances = metrics.nearest_icons(samples, orig_data, get_dist=True)
print('sorting...')
# normalize distance over whole image content to prevent predominantly white images having low distance
norms = np.sqrt(np.sum(np.power(samples, 2), axis=(1, 2, 3)))
distances = np.array([distance / n for distance, n in zip(distances, norms)])
sorting = np.argsort(distances)
# import ipdb; ipdb.set_trace()
samples = samples[sorting]
nearest_idxs = np.array(nearest_idxs)[sorting]
bs = config.batch_size
for idx in xrange(n_images):
print(" [*] %d" % idx)
combined = []
# combine samples and nearest neighbours for each batch and save as png
for sample, orig in zip(samples[idx * bs:(idx + 1) * bs], orig_data[nearest_idxs[idx * bs:(idx + 1) * bs]]):
combined += [sample, orig]
scipy.misc.imsave(os.path.join(config.sample_dir, 'test_uniform_nearest_%s.png' % (idx)),
merge(np.array(combined), [image_frame_dim, image_frame_dim * 2]))
# sample with uniform distribution
if option == 2:
n_images = 20
has_labels = False
try:
if dcgan.has_labels:
# generate one image for each cluster / category
n_images = dcgan.y_dim
has_labels = True
except Exception: pass
for idx in xrange(n_images):
print(" [*] %d" % idx)
z_sample = dcgan.z_sampler(config)
# create gaussian convolution kernel as defined in run parameters
kernel = gauss_kernel_fixed(config.gauss_sigma, config.gauss_trunc)
if has_labels:
# y = np.random.choice(dcgan.y_dim, config.batch_size)
# y_one_hot = np.zeros((config.batch_size, dcgan.y_dim))
# y_one_hot[np.arange(config.batch_size), y] = 1
y_one_hot = np.eye(dcgan.y_dim)[np.full(config.batch_size, idx)]
# print(y_one_hot)
samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample, dcgan.y: y_one_hot, dcgan.gauss_kernel: kernel})
else:
samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample, dcgan.gauss_kernel: kernel})
save_images(samples, [image_frame_dim, image_frame_dim],
os.path.join(config.sample_dir, 'test_uniform_%s.png' % (idx)))
# sample with normal distribution
if option == 3:
for idx in xrange(100):
print(" [*] %d" % idx)
z_sample = np.random.normal(size=(config.batch_size, dcgan.z_dim))
samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
save_images(samples, [image_frame_dim, image_frame_dim], './samples/test_normal_%s.png' % (idx))
# single sample with uniform distribution
if option == 4:
z_sample = np.random.uniform(-0.5, 0.5, size=(config.batch_size, dcgan.z_dim))
samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
save_images(samples, [image_frame_dim, image_frame_dim],
os.path.join(config.sample_dir, 'test_%s.png' % strftime("%Y%m%d%H%M%S", gmtime())))
# vary single z-component only
if option == 5:
values = np.arange(0, 1, 1./config.batch_size)
for idx in xrange(100):
print(" [*] %d" % idx)
z_sample = np.zeros([config.batch_size, dcgan.z_dim])
for kdx, z in enumerate(z_sample):
z[idx] = values[kdx]
samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
save_images(samples, [image_frame_dim, image_frame_dim],
os.path.join(config.sample_dir, 'test_arange_%s.png' % (idx)))
| 1.742188
| 2
|
cv/blog/models.py
|
Trianglium/artist-cv
| 2
|
12778469
|
<reponame>Trianglium/artist-cv
from django.db import models
from django.conf import settings
from django.contrib.contenttypes.fields import GenericRelation
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from base.models import BaseContent, BaseArticle, BaseComment
from ckeditor.fields import RichTextField
from datetime import datetime
import re
class Comment(BaseComment):
creator = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey("content_type", "object_id")
def __str__(self):
return str(self.creator)
def days_ago(self):
# Outputs ' _ days ago' depending on how many days ago the comment was created
days_since_pub = datetime.now() - self.created_at
return re.sub(r'(,.+)', ' ago', str(days_since_pub))
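# Illustrative note (not part of the original model): str() of a timedelta
# looks like "3 days, 4:05:06.789", so the substitution above yields
# "3 days ago"; for comments younger than a day there is no comma and the
# raw "H:MM:SS" string is returned unchanged.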
class Tag(models.Model):
value = models.TextField(max_length=100, unique=True)
def __str__(self):
return self.value
class Post(BaseArticle):
content = RichTextField(blank=True, null=True)
author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.PROTECT)
# db_index is required. do not remove.
published_at = models.DateTimeField(blank=True, null=True, db_index=True)
slug = models.SlugField(unique=True)
tags = models.ManyToManyField(Tag, related_name="posts", blank=True)
comments = GenericRelation(Comment)
def __str__(self):
return self.title
# AuthorProfile separates profile info from the user auth model,
# allowing for a separation of concerns
# and added security.
class AuthorProfile(models.Model):
user = models.OneToOneField(
settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name="profile"
)
bio = models.TextField(
null=True,
blank=True,
help_text='Optional author bio, kept separate from the professional profile resume summary and objective',
)
def __str__(self):
return f"{self.__class__.__name__} object for {self.user}"
| 2.203125
| 2
|
uploads/core/forms.py
|
lindsay777/ITRI_hospital_UI
| 0
|
12778470
|
from django import forms
from uploads.core.models import Document
#from uploads.core.models import File
# Create a form based on the model; it inherits the description and document fields
class DocumentForm(forms.ModelForm):
class Meta:
model = Document
fields = ('description', 'document', )
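# Illustrative usage sketch (not part of the original module): in a view this
# ModelForm would typically be bound as DocumentForm(request.POST, request.FILES)
# and persisted with form.save() once form.is_valid() passes.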
class nameForm(forms.Form):
rename = forms.CharField()
# class FileForm(forms.ModelForm):
# class Meta:
# model = File
# fields = ('filename',)
# file = forms.FileField()
# pid = forms.CharField(max_length=20)
# name = forms.CharField(max_length=20)
# sex = forms.CharField()
# age = forms.IntegerField()
# mp = forms.IntegerField()
# scanType = forms.CharField(max_length=10)
# fracture = forms.IntegerField()
# tscore = forms.CharField()
# zscore = forms.CharField()
# region = forms.CharField()
# lva = forms.CharField()
# apspine = forms.CharField()
# dualfemur = forms.CharField()
# combination = forms.CharField()
| 2.25
| 2
|
troposphere_mate/cloudformation.py
|
MacHu-GWU/troposphere_mate-project
| 10
|
12778471
|
# -*- coding: utf-8 -*-
"""
This code is auto generated from troposphere_mate.code_generator.__init__.py scripts.
"""
import sys
if sys.version_info.major >= 3 and sys.version_info.minor >= 5: # pragma: no cover
from typing import Union, List, Any
import troposphere.cloudformation
from troposphere.cloudformation import (
InitFileContext as _InitFileContext,
Tags as _Tags,
)
from troposphere import Template, AWSHelperFn
from troposphere_mate.core.mate import preprocess_init_kwargs, Mixin
from troposphere_mate.core.sentiel import REQUIRED, NOTHING
class Stack(troposphere.cloudformation.Stack, Mixin):
def __init__(self,
title, # type: str
template=None, # type: Template
validation=True, # type: bool
TemplateURL=REQUIRED, # type: Union[str, AWSHelperFn]
NotificationARNs=NOTHING, # type: List[Union[str, AWSHelperFn]]
Parameters=NOTHING, # type: dict
Tags=NOTHING, # type: Union[_Tags, list]
TimeoutInMinutes=NOTHING, # type: int
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
template=template,
validation=validation,
TemplateURL=TemplateURL,
NotificationARNs=NotificationARNs,
Parameters=Parameters,
Tags=Tags,
TimeoutInMinutes=TimeoutInMinutes,
**kwargs
)
super(Stack, self).__init__(**processed_kwargs)
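# A minimal usage sketch (illustrative, not part of the generated module);
# the resource title and bucket URL below are made-up placeholders:
#   >>> tpl = Template()
#   >>> Stack(
#   ...     "NestedStack",
#   ...     template=tpl,
#   ...     TemplateURL="https://s3.amazonaws.com/example-bucket/child.json",
#   ...     TimeoutInMinutes=15,
#   ... )
# REQUIRED marks mandatory CloudFormation properties (here TemplateURL);
# NOTHING marks optional ones that are meant to be dropped when not supplied.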
class WaitCondition(troposphere.cloudformation.WaitCondition, Mixin):
def __init__(self,
title, # type: str
template=None, # type: Template
validation=True, # type: bool
Count=NOTHING, # type: int
Handle=NOTHING, # type: Union[str, AWSHelperFn]
Timeout=NOTHING, # type: int
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
template=template,
validation=validation,
Count=Count,
Handle=Handle,
Timeout=Timeout,
**kwargs
)
super(WaitCondition, self).__init__(**processed_kwargs)
class WaitConditionHandle(troposphere.cloudformation.WaitConditionHandle, Mixin):
def __init__(self,
title, # type: str
template=None, # type: Template
validation=True, # type: bool
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
template=template,
validation=validation,
**kwargs
)
super(WaitConditionHandle, self).__init__(**processed_kwargs)
class InitFile(troposphere.cloudformation.InitFile, Mixin):
def __init__(self,
title=None,
content=NOTHING, # type: Union[str, AWSHelperFn]
mode=NOTHING, # type: Union[str, AWSHelperFn]
owner=NOTHING, # type: Union[str, AWSHelperFn]
encoding=NOTHING, # type: str
group=NOTHING, # type: Union[str, AWSHelperFn]
source=NOTHING, # type: Union[str, AWSHelperFn]
authentication=NOTHING, # type: Union[str, AWSHelperFn]
context=NOTHING, # type: _InitFileContext
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
content=content,
mode=mode,
owner=owner,
encoding=encoding,
group=group,
source=source,
authentication=authentication,
context=context,
**kwargs
)
super(InitFile, self).__init__(**processed_kwargs)
class InitService(troposphere.cloudformation.InitService, Mixin):
def __init__(self,
title=None,
ensureRunning=NOTHING, # type: bool
enabled=NOTHING, # type: bool
files=NOTHING, # type: list
packages=NOTHING, # type: dict
sources=NOTHING, # type: list
commands=NOTHING, # type: list
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
ensureRunning=ensureRunning,
enabled=enabled,
files=files,
packages=packages,
sources=sources,
commands=commands,
**kwargs
)
super(InitService, self).__init__(**processed_kwargs)
class InitConfig(troposphere.cloudformation.InitConfig, Mixin):
def __init__(self,
title=None,
groups=NOTHING, # type: dict
users=NOTHING, # type: dict
sources=NOTHING, # type: dict
packages=NOTHING, # type: dict
files=NOTHING, # type: dict
commands=NOTHING, # type: dict
services=NOTHING, # type: dict
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
groups=groups,
users=users,
sources=sources,
packages=packages,
files=files,
commands=commands,
services=services,
**kwargs
)
super(InitConfig, self).__init__(**processed_kwargs)
class AuthenticationBlock(troposphere.cloudformation.AuthenticationBlock, Mixin):
def __init__(self,
title=None,
accessKeyId=NOTHING, # type: Union[str, AWSHelperFn]
buckets=NOTHING, # type: List[Union[str, AWSHelperFn]]
password=NOTHING, # type: Union[str, AWSHelperFn]
secretKey=NOTHING, # type: Union[str, AWSHelperFn]
type=NOTHING, # type: Any
uris=NOTHING, # type: List[Union[str, AWSHelperFn]]
username=NOTHING, # type: Union[str, AWSHelperFn]
roleName=NOTHING, # type: Union[str, AWSHelperFn]
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
accessKeyId=accessKeyId,
buckets=buckets,
password=password,
secretKey=secretKey,
type=type,
uris=uris,
username=username,
roleName=roleName,
**kwargs
)
super(AuthenticationBlock, self).__init__(**processed_kwargs)
| 1.976563
| 2
|
Python/generate-parentheses.py
|
sm2774us/leetcode_interview_prep_2021
| 0
|
12778472
|
#
# A famous ancient question in this context is:
# "How many distinct arrangements of n pairs of left-right parentheses are there all of which close?"
# The answer to this question is called the n-th Catalan number, C(n).
# Here are the first few answers:
# * C(1)=1 ( )
# * C(2)=2 ()() and (())
# * C(3)=5 ()()(), ()(()), (())(), (()()) and ((()))
#
#
# Generating all combinations of well-formed parentheses is a typical example of Catalan numbers.
# Use the links at the bottom if you are not familiar with Catalan numbers, since they
# are at the heart of the exercise.
# Let the time complexity of generating all combinations of well-formed parentheses be f(n).
# Then
# f(n) = g(n) * h(n), where g(n) is the time complexity of calculating the nth Catalan number
# and h(n) is the time required to copy each combination into the result array.
#
# Therefore, f(n) = catalan(n) * O(n), which is O((4^n / n^1.5) * n).
# Broadly speaking, just remember that this is a typical application of Catalan numbers
# and its time complexity grows in the same way catalan(n) does.
# Further readings in to catalan numbers:
#
# https://en.wikipedia.org/wiki/Catalan_number
# https://www.youtube.com/watch?v=GlI17WaMrtw
# https://www.youtube.com/watch?v=eoofvKI_Okg
#
#
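# A small illustrative check (not part of the original solution): the closed
# form C(n) = (2n)! / ((n+1)! * n!) reproduces the counts listed above, and
# len(generateParenthesis(n)) equals C(n):
#   >>> from math import factorial
#   >>> catalan = lambda n: factorial(2 * n) // (factorial(n + 1) * factorial(n))
#   >>> [catalan(n) for n in (1, 2, 3)]
#   [1, 2, 5]
#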
# Time: O(4^n / n^(3/2)) ~= Catalan numbers
# Space: O(n)
# iterative solution
class Solution(object):
# def generateParenthesis(self, n):
# """
# :type n: int
# :rtype: List[str]
# """
# result, curr = [], []
# stk = [(1, (n, n))]
# while stk:
# step, args = stk.pop()
# if step == 1:
# left, right = args
# if left == 0 and right == 0:
# result.append("".join(curr))
# if left < right:
# stk.append((3, tuple()))
# stk.append((1, (left, right-1)))
# stk.append((2, (')')))
# if left > 0:
# stk.append((3, tuple()))
# stk.append((1, (left-1, right)))
# stk.append((2, ('(')))
# elif step == 2:
# curr.append(args[0])
# elif step == 3:
# curr.pop()
# return result
def generateParenthesis(self, n):
"""
:type n: int
:rtype: List[str]
"""
stack, result = [('', 0, 0)], []
while stack:
# current parentheses combination, number of left/right parentheses used
parentheses, numLeft, numRight = stack.pop()
if numRight == n:
# done, add to result
result.append(parentheses)
else:
if numLeft < n:
stack.append((parentheses + '(', numLeft+1, numRight))
if numRight < numLeft:
# only add right parentheses if more left parentheses have been used
stack.append((parentheses + ')', numLeft, numRight+1))
return result
# Time: O(4^n / n^(3/2)) ~= Catalan numbers
# Space: O(n)
# recursive solution
class Solution2(object):
def generateParenthesis(self, n):
"""
:type n: int
:rtype: List[str]
"""
def generateParenthesisRecu(left, right, curr, result):
if left == 0 and right == 0:
result.append("".join(curr))
if left > 0:
curr.append('(')
generateParenthesisRecu(left-1, right, curr, result)
curr.pop()
if left < right:
curr.append(')')
generateParenthesisRecu(left, right-1, curr, result)
curr.pop()
result = []
generateParenthesisRecu(n, n, [], result)
return result
if __name__ == "__main__":
sol = Solution2()
result = sol.generateParenthesis(2)
print(result)
| 3.578125
| 4
|
api/src/routes/healthcheck.py
|
rbipin/cidr-ip-calculator
| 0
|
12778473
|
from fastapi import APIRouter
from src.controllers import healthcheck
router = APIRouter(
prefix="/healthcheck",
tags=["healthcheck"]
)
@router.get("", name="Health check", response_model=str)
async def runGetHealthCheck():
return healthcheck.getHealthCheck()
| 2.296875
| 2
|
raco/myrial/query_tests.py
|
uwescience/raco
| 61
|
12778474
|
# -*- coding: UTF-8 -*-
import collections
import math
import md5
from nose.tools import nottest
import raco.algebra
import raco.fakedb
import raco.myrial.interpreter as interpreter
import raco.scheme as scheme
import raco.myrial.groupby
import raco.myrial.myrial_test as myrial_test
from raco.algebra import Apply
from raco import types
from raco.myrial.exceptions import *
from raco.expression import NestedAggregateException
from raco.fake_data import FakeData
from raco.types import LONG_TYPE
class TestQueryFunctions(myrial_test.MyrialTestCase, FakeData):
def setUp(self):
super(TestQueryFunctions, self).setUp()
self.db.add_function(TestQueryFunctions.test_function)
self.db.ingest(TestQueryFunctions.emp_key,
TestQueryFunctions.emp_table,
TestQueryFunctions.emp_schema)
self.db.ingest(TestQueryFunctions.dept_key,
TestQueryFunctions.dept_table,
TestQueryFunctions.dept_schema)
self.db.ingest(TestQueryFunctions.numbers_key,
TestQueryFunctions.numbers_table,
TestQueryFunctions.numbers_schema)
def test_scan_emp(self):
query = """
emp = SCAN(%s);
STORE(emp, OUTPUT);
""" % self.emp_key
self.check_result(query, self.emp_table)
def test_scan_dept(self):
query = """
dept = SCAN(%s);
STORE(dept, OUTPUT);
""" % self.dept_key
self.check_result(query, self.dept_table)
def test_bag_comp_emit_star(self):
query = """
emp = SCAN(%s);
bc = [FROM emp EMIT *];
STORE(bc, OUTPUT);
""" % self.emp_key
self.check_result(query, self.emp_table)
def test_bag_comp_emit_table_wildcard(self):
query = """
emp = SCAN(%s);
bc = [FROM emp EMIT emp.*];
STORE(bc, OUTPUT);
""" % self.emp_key
self.check_result(query, self.emp_table)
def test_hybrid_emit_clause(self):
query = """
emp = SCAN(%s);
dept = SCAN(%s);
x = [FROM dept, emp as X EMIT 5, X.salary * 2 AS k, X.*, *];
STORE(x, OUTPUT);
""" % (self.emp_key, self.dept_key)
expected = [(5, e[3] * 2) + e + d + e for e in self.emp_table
for d in self.dept_table]
self.check_result(query, collections.Counter(expected))
salary_filter_query = """
emp = SCAN(%s);
rich = [FROM emp WHERE %s > 25 * 10 * 10 * (5 + 5) EMIT *];
STORE(rich, OUTPUT);
"""
salary_expected_result = collections.Counter(
[x for x in FakeData.emp_table.elements() if x[3] > 25000])
def test_bag_comp_filter_large_salary_by_name(self):
query = TestQueryFunctions.salary_filter_query % (self.emp_key,
'salary')
self.check_result(query, TestQueryFunctions.salary_expected_result)
def test_bag_comp_filter_large_salary_by_position(self):
query = TestQueryFunctions.salary_filter_query % (self.emp_key, '$3')
self.check_result(query, TestQueryFunctions.salary_expected_result)
def test_bag_comp_filter_empty_result(self):
query = """
emp = SCAN(%s);
poor = [FROM emp WHERE $3 < (5 * 2) EMIT *];
STORE(poor, OUTPUT);
""" % self.emp_key
expected = collections.Counter()
self.check_result(query, expected)
def test_bag_comp_filter_column_compare_ge(self):
query = """
emp = SCAN(%s);
out = [FROM emp WHERE 2 * $1 >= $0 EMIT *];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[x for x in self.emp_table.elements() if 2 * x[1] >= x[0]])
self.check_result(query, expected)
def test_bag_comp_filter_column_compare_ge2(self):
query = u"""
emp = SCAN(%s);
out = [FROM emp WHERE 2 * $1 ≥ $0 EMIT *];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[x for x in self.emp_table.elements() if 2 * x[1] >= x[0]])
self.check_result(query, expected)
def test_bag_comp_filter_column_compare_le(self):
query = """
emp = SCAN(%s);
out = [FROM emp WHERE $1 <= 2 * $0 EMIT *];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[x for x in self.emp_table.elements() if x[1] <= 2 * x[0]])
self.check_result(query, expected)
def test_bag_comp_filter_column_compare_le2(self):
query = u"""
emp = SCAN(%s);
out = [FROM emp WHERE $1 ≤ 2 * $0 EMIT *];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[x for x in self.emp_table.elements() if x[1] <= 2 * x[0]])
self.check_result(query, expected)
def test_bag_comp_filter_column_compare_gt(self):
query = """
emp = SCAN(%s);
out = [FROM emp WHERE 2 * $1 > $0 EMIT *];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[x for x in self.emp_table.elements() if 2 * x[1] > x[0]])
self.check_result(query, expected)
def test_bag_comp_filter_column_compare_lt(self):
query = """
emp = SCAN(%s);
out = [FROM emp WHERE $1 < 2 * $0 EMIT *];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[x for x in self.emp_table.elements() if x[1] < 2 * x[0]])
self.check_result(query, expected)
def test_bag_comp_filter_column_compare_eq(self):
query = """
emp = SCAN(%s);
out = [FROM emp WHERE $0 * 2 == $1 EMIT *];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[x for x in self.emp_table.elements() if x[0] * 2 == x[1]])
self.check_result(query, expected)
def test_bag_comp_filter_column_compare_ne(self):
query = """
emp = SCAN(%s);
out = [FROM emp WHERE $0 // $1 != $1 EMIT *];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[x for x in self.emp_table.elements() if x[0] / x[1] != x[1]])
self.check_result(query, expected)
def test_bag_comp_filter_column_compare_ne2(self):
query = """
emp = SCAN(%s);
out = [FROM emp WHERE $0 // $1 <> $1 EMIT *];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[x for x in self.emp_table.elements() if x[0] / x[1] != x[1]])
self.check_result(query, expected)
def test_bag_comp_filter_column_compare_ne3(self):
query = u"""
emp = SCAN(%s);
out = [FROM emp WHERE $0 // $1 ≠ $1 EMIT *];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[x for x in self.emp_table.elements() if x[0] / x[1] != x[1]])
self.check_result(query, expected)
def test_bag_comp_filter_minus(self):
query = """
emp = SCAN(%s);
out = [FROM emp WHERE $0 + -$1 == $1 EMIT *];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[x for x in self.emp_table.elements() if x[0] - x[1] == x[1]])
self.check_result(query, expected)
def test_bag_comp_filter_and(self):
query = """
emp = SCAN(%s);
out = [FROM emp WHERE salary == 25000 AND id > dept_id EMIT *];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[x for x in self.emp_table.elements() if x[3] == 25000 and
x[0] > x[1]])
self.check_result(query, expected)
def test_bag_comp_filter_or(self):
query = """
emp = SCAN(%s);
out = [FROM emp WHERE $3 > 25 * 1000 OR id > dept_id EMIT *];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[x for x in self.emp_table.elements() if x[3] > 25000 or
x[0] > x[1]])
self.check_result(query, expected)
def test_bag_comp_filter_not(self):
query = """
emp = SCAN(%s);
out = [FROM emp WHERE not salary > 25000 EMIT *];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[x for x in self.emp_table.elements() if not x[3] > 25000])
self.check_result(query, expected)
def test_bag_comp_filter_or_and(self):
query = """
emp = SCAN(%s);
out = [FROM emp WHERE salary == 25000 OR salary == 5000 AND
dept_id == 1 EMIT *];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[x for x in self.emp_table.elements() if x[3] == 25000 or
(x[3] == 5000 and x[1] == 1)])
self.check_result(query, expected)
def test_bag_comp_filter_or_and_not(self):
query = """
emp = SCAN(%s);
out = [FROM emp WHERE salary == 25000 OR NOT salary == 5000 AND
dept_id == 1 EMIT *];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[x for x in self.emp_table.elements() if x[3] == 25000 or not
x[3] == 5000 and x[1] == 1])
self.check_result(query, expected)
def test_bag_comp_emit_columns(self):
query = """
emp = SCAN(%s);
out = [FROM emp WHERE dept_id == 1 EMIT $2, salary AS salary];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[(x[2], x[3]) for x in self.emp_table.elements() if x[1] == 1])
self.check_result(query, expected)
def test_bag_comp_emit_literal(self):
query = """
emp = SCAN(%s);
out = [FROM emp EMIT salary, "bugga bugga"];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[(x[3], "bugga bugga") for x in self.emp_table.elements()])
self.check_result(query, expected)
def test_bag_comp_emit_with_math(self):
query = """
emp = SCAN(%s);
out = [FROM emp EMIT salary + 5000, salary - 5000, salary // 5000,
salary * 5000];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[(x[3] + 5000, x[3] - 5000, x[3] / 5000, x[3] * 5000)
for x in self.emp_table.elements()])
self.check_result(query, expected)
def test_bag_comp_rename(self):
query = """
emp = SCAN(%s);
out = [FROM emp EMIT name, salary * 2 AS double_salary];
out = [FROM out WHERE double_salary > 10000 EMIT *];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[(x[2], x[3] * 2) for x in self.emp_table.elements() if
x[3] * 2 > 10000])
self.check_result(query, expected)
join_expected = collections.Counter(
[('<NAME>', 'human resources'),
('<NAME>', 'accounting'),
('<NAME>', 'accounting'),
('<NAME>', 'human resources'),
('<NAME>', 'accounting'),
('<NAME>', 'engineering'),
('<NAME>', 'accounting')])
def test_explicit_join_unicode(self):
query = u"""
emp = SCAN(%s);
dept = SCAN(%s);
out = JOIN(emp, dept_id, dept, id);
out2 = [FROM out EMIT $2 AS emp_name, $5 AS dept_name];
STORE(out2, OUTPUT);
""" % (self.emp_key, self.dept_key)
self.check_result(query, self.join_expected)
def test_explicit_join(self):
query = """
emp = SCAN(%s);
dept = SCAN(%s);
out = JOIN(emp, dept_id, dept, id);
out2 = [FROM out EMIT $2 AS emp_name, $5 AS dept_name];
STORE(out2, OUTPUT);
""" % (self.emp_key, self.dept_key)
self.check_result(query, self.join_expected)
def test_explicit_join_twocols(self):
query = """
query = [1 as dept_id, 25000 as salary];
emp = SCAN({emp});
out = JOIN(query, (dept_id, salary), emp, (dept_id, salary));
out2 = [FROM out EMIT name];
STORE(out2, OUTPUT);
""".format(emp=self.emp_key)
expected = collections.Counter([('<NAME>',),
('<NAME>',)])
self.check_result(query, expected)
def test_bagcomp_join_via_names(self):
query = """
out = [FROM SCAN(%s) E, SCAN(%s) AS D WHERE E.dept_id == D.id
EMIT E.name AS emp_name, D.name AS dept_name];
STORE(out, OUTPUT);
""" % (self.emp_key, self.dept_key)
self.check_result(query, self.join_expected)
def test_bagcomp_join_via_pos(self):
query = """
E = SCAN(%s);
D = SCAN(%s);
out = [FROM E, D WHERE E.$1 == D.$0
EMIT E.name AS emp_name, D.$1 AS dept_name];
STORE(out, OUTPUT);
""" % (self.emp_key, self.dept_key)
self.check_result(query, self.join_expected)
def test_two_column_join(self):
query = """
D = [1 as dept_id, 25000 as salary];
out = [FROM D, SCAN({emp}) E
WHERE E.dept_id == D.dept_id AND E.salary == D.salary
EMIT E.name AS emp_name];
STORE(out, OUTPUT);
""".format(emp=self.emp_key)
expected = collections.Counter([('<NAME>',),
('<NAME>',)])
self.check_result(query, expected)
def test_join_with_select(self):
query = """
out = [FROM SCAN(%s) AS D, SCAN(%s) E
WHERE E.dept_id == D.id AND E.salary < 6000
EMIT E.name AS emp_name, D.name AS dept_name];
STORE(out, OUTPUT);
""" % (self.dept_key, self.emp_key)
expected = collections.Counter([('<NAME>', 'accounting'),
('<NAME>', 'human resources')])
self.check_result(query, expected)
def test_join_with_reordering(self):
# Try both FROM orders of the query and verify they both get the
# correct answer.
query = """
out = [FROM SCAN({d}) AS D, SCAN({e}) E
WHERE E.dept_id == D.id AND E.salary < 6000
EMIT E.name, D.id];
STORE(out, OUTPUT);
""".format(d=self.dept_key, e=self.emp_key)
expected = collections.Counter([('<NAME>', 1),
('<NAME>', 2)])
self.check_result(query, expected)
# Swap E and D
query = """
out = [FROM SCAN({e}) E, SCAN({d}) AS D
WHERE E.dept_id == D.id AND E.salary < 6000
EMIT E.name, D.id];
STORE(out, OUTPUT);
""".format(d=self.dept_key, e=self.emp_key)
expected = collections.Counter([('<NAME>', 1),
('<NAME>', 2)])
self.check_result(query, expected)
def test_sql_join(self):
"""SQL-style select-from-where join"""
query = """
E = SCAN(%s);
D = SCAN(%s);
out = SELECT E.name, D.name FROM E, D WHERE E.dept_id = D.id;
STORE(out, OUTPUT);
""" % (self.emp_key, self.dept_key)
self.check_result(query, self.join_expected)
def test_bagcomp_nested_sql(self):
"""Test nesting SQL inside a bag comprehension"""
query = """
out = [FROM (SELECT name, salary
FROM SCAN(%s) AS X
WHERE salary > 5000) AS Y
WHERE salary < 80000
EMIT *];
STORE(out, OUTPUT);
""" % (self.emp_key,)
tuples = [(e[2], e[3]) for e in self.emp_table.elements()
if e[3] < 80000 and e[3] > 5000]
expected = collections.Counter(tuples)
self.check_result(query, expected)
def test_sql_nested_sql(self):
"""Test nesting SQL inside SQL"""
query = """
out = SELECT Y.name, Y.salary
FROM (SELECT name, salary
FROM SCAN(%s) AS X
WHERE salary > 5000) AS Y
WHERE Y.salary < 80000;
STORE(out, OUTPUT);
""" % (self.emp_key,)
tuples = [(e[2], e[3]) for e in self.emp_table.elements()
if e[3] < 80000 and e[3] > 5000]
expected = collections.Counter(tuples)
self.check_result(query, expected)
def test_sql_nested_bagcomp(self):
"""Test nesting a bag comprehension inside SQL"""
query = """
out = SELECT Y.name, Y.salary FROM
[FROM SCAN(%s) AS X WHERE salary > 5000 EMIT X.*] AS Y
WHERE Y.salary < 80000;
STORE(out, OUTPUT);
""" % (self.emp_key,)
tuples = [(e[2], e[3]) for e in self.emp_table.elements()
if e[3] < 80000 and e[3] > 5000]
expected = collections.Counter(tuples)
self.check_result(query, expected)
def test_bagcomp_projection(self):
"""Test that column names are preserved across projection."""
query = """
E = SCAN(%s);
F = [FROM E EMIT $2];
out = [FROM F EMIT name];
STORE(out, OUTPUT);
""" % (self.emp_key,)
tpls = [tuple([x[2]]) for x in self.emp_table]
expected = collections.Counter(tpls)
self.check_result(query, expected)
def test_bagcomp_no_column_name(self):
"""Test that the system handles an omitted output column name."""
query = """
E = SCAN(%s);
F = [FROM E EMIT salary*E.salary];
out = [FROM F EMIT $0];
STORE(out, OUTPUT);
""" % (self.emp_key,)
tpls = [tuple([x[3] * x[3]]) for x in self.emp_table]
expected = collections.Counter(tpls)
self.check_result(query, expected)
def test_explicit_cross(self):
query = """
out = CROSS(SCAN(%s), SCAN(%s));
STORE(out, OUTPUT);
""" % (self.emp_key, self.dept_key)
tuples = [e + d for e in self.emp_table.elements() for
d in self.dept_table.elements()]
expected = collections.Counter(tuples)
self.check_result(query, expected)
def test_bagcomp_cross(self):
query = """
out = [FROM SCAN(%s) E, SCAN(%s) AS D EMIT *];
STORE(out, OUTPUT);
""" % (self.emp_key, self.dept_key)
tuples = [e + d for e in self.emp_table.elements() for
d in self.dept_table.elements()]
expected = collections.Counter(tuples)
self.check_result(query, expected)
def test_distinct(self):
query = """
out = DISTINCT([FROM SCAN(%s) AS X EMIT salary]);
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter([(25000,), (5000,), (90000,)])
self.check_result(query, expected)
def test_sql_distinct(self):
query = """
out = SELECT DISTINCT salary AS salary FROM SCAN(%s) AS X;
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(set([(x[3],) for x in self.emp_table]))
self.check_result(query, expected)
def test_sql_repeated(self):
query = """
out = SELECT salary AS salary FROM SCAN(%s) AS X;
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter([(x[3],) for x in self.emp_table])
self.check_result(query, expected)
def test_limit_without_orderby_assert(self):
query = """
out = LIMIT(SCAN(%s), 3);
STORE(out, OUTPUT);
""" % self.emp_key
with self.assertRaises(Exception): # noqa
self.check_result(query, None)
def test_orderby_without_limit_assert(self):
query = """
out = SELECT * FROM SCAN(%s) as X ORDER BY $0;
STORE(out, OUTPUT);
""" % self.emp_key
with self.assertRaises(Exception): # noqa
self.check_result(query, None)
def test_limit_orderby(self):
query = """
out = [FROM SCAN(%s) as X EMIT * ORDER BY $0 ASC LIMIT 3];
STORE(out, OUTPUT);
""" % self.emp_key
result = self.execute_query(query)
expectedResult = collections.Counter(
sorted(self.emp_table.elements(), key=lambda emp: emp[0])[:3])
self.assertEquals(result, expectedResult)
def test_sql_limit_orderby(self):
query = """
out = SELECT * FROM SCAN(%s) as X ORDER BY $0 ASC LIMIT 3;
STORE(out, OUTPUT);
""" % self.emp_key
result = self.execute_query(query)
expectedResult = collections.Counter(
sorted(self.emp_table.elements(), key=lambda emp: emp[0])[:3])
self.assertEquals(result, expectedResult)
def test_limit_orderby_multikey(self):
query = """
out = [FROM SCAN(%s) as X EMIT *
ORDER BY $1 ASC, $3 DESC, $2 ASC
LIMIT 3];
STORE(out, OUTPUT);
""" % self.emp_key
result = self.execute_query(query)
firstSort = sorted(self.emp_table.elements(), key=lambda emp: emp[2])
secondSort = sorted(firstSort, key=lambda emp: emp[3], reverse=True)
thirdSortLimit = sorted(secondSort, key=lambda emp: emp[1])[:3]
expectedResult = collections.Counter(thirdSortLimit)
self.assertEquals(result, expectedResult)
def test_sql_limit_orderby_multikey(self):
query = """
out = SELECT * FROM SCAN(%s) as X
ORDER BY $1 ASC, $3 DESC, $2 ASC
LIMIT 3;
STORE(out, OUTPUT);
""" % self.emp_key
result = self.execute_query(query)
firstSort = sorted(self.emp_table.elements(), key=lambda emp: emp[2])
secondSort = sorted(firstSort, key=lambda emp: emp[3], reverse=True)
thirdSortLimit = sorted(secondSort, key=lambda emp: emp[1])[:3]
expectedResult = collections.Counter(thirdSortLimit)
self.assertEquals(result, expectedResult)
def test_table_literal_boolean(self):
query = """
X = [truE as MyTrue, FaLse as MyFalse];
Y = [FROM scan(%s) as E, X where X.MyTrue emit *];
STORE(Y, OUTPUT);
""" % self.emp_key
res = [x + (True, False) for x in self.emp_table]
self.check_result(query, collections.Counter(res))
def test_table_literal_scalar_expression(self):
query = """
X = [FROM ["Andrew", (50 * (500 + 500)) AS salary] Z EMIT salary];
STORE(X, OUTPUT);
"""
expected = collections.Counter([(50000,)])
self.check_result(query, expected)
def test_table_literal_unbox(self):
query = """
A = [1 AS one, 2 AS two, 3 AS three];
B = [1 AS one, 2 AS two, 3 AS three];
C = [*A.two * *B.three];
STORE(C, OUTPUT);
"""
expected = collections.Counter([(6,)])
self.check_result(query, expected)
def test_unbox_from_where_single(self):
query = """
TH = [25 * 1000];
emp = SCAN(%s);
out = [FROM emp WHERE $3 > *TH EMIT *];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[x for x in self.emp_table.elements() if x[3] > 25000])
self.check_result(query, expected)
def test_unbox_from_where_multi(self):
query = """
TWO = [2];
FOUR = [4];
EIGHT = [8];
emp = SCAN(%s);
out = [FROM emp WHERE *EIGHT == *TWO**FOUR EMIT *];
STORE(out, OUTPUT);
""" % self.emp_key
self.check_result(query, self.emp_table)
def test_unbox_from_where_nary_name(self):
query = """
_CONST = [25 AS twenty_five, 1000 AS thousand];
emp = SCAN(%s);
out = [FROM emp WHERE salary == *_CONST.twenty_five *
*_CONST.thousand EMIT *];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[x for x in self.emp_table.elements() if x[3] == 25000])
self.check_result(query, expected)
def test_unbox_from_where_nary_pos(self):
query = """
_CONST = [25 AS twenty_five, 1000 AS thousand];
emp = SCAN(%s);
out = [FROM emp WHERE salary == *_CONST.$0 *
*_CONST.$1 EMIT *];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[x for x in self.emp_table.elements() if x[3] == 25000])
self.check_result(query, expected)
def test_unbox_from_emit_single(self):
query = """
THOUSAND = [1000];
emp = SCAN(%s);
out = [FROM emp EMIT salary * *THOUSAND AS salary];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[(x[3] * 1000,) for x in self.emp_table.elements()])
self.check_result(query, expected)
def test_unbox_kitchen_sink(self):
query = """
C1 = [25 AS a, 100 AS b];
C2 = [50 AS a, 1000 AS b];
emp = SCAN(%s);
out = [FROM emp WHERE salary==*C1.a * *C2.b OR $3==*C1.b * *C2
EMIT dept_id * *C1.b // *C2.a];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[(x[1] * 2,) for x in self.emp_table.elements() if
x[3] == 5000 or x[3] == 25000])
self.check_result(query, expected)
def test_unbox_arbitrary_expression(self):
query = """
emp = SCAN(%s);
dept = SCAN(%s);
out = [FROM emp, COUNTALL(dept) as size WHERE id > *size EMIT emp.id];
STORE(out, OUTPUT);
""" % (self.emp_key, self.dept_key)
expected = collections.Counter(
[(x[0],) for x in self.emp_table.elements() if
x[0] > len(self.dept_table)])
self.check_result(query, expected)
def test_inline_table_literal(self):
query = """
emp = SCAN(%s);
dept = SCAN(%s);
out = [FROM emp, [1,2,3] as tl WHERE id > tl.$2 EMIT emp.id];
STORE(out, OUTPUT);
""" % (self.emp_key, self.dept_key)
expected = collections.Counter(
[(x[0],) for x in self.emp_table.elements() if
x[0] > 3])
self.check_result(query, expected)
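# Helper: group the employee fixture by `grouping_col`, apply `apply_func` to
# the `agg_col` values within each group, and return the expected
# (group, aggregate) pairs as a Counter for comparison with query output.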
def __aggregate_expected_result(self, apply_func, grouping_col=1,
agg_col=3):
result_dict = collections.defaultdict(list)
for t in self.emp_table.elements():
result_dict[t[grouping_col]].append(t[agg_col])
tuples = [(key, apply_func(values)) for key, values in
result_dict.iteritems()]
return collections.Counter(tuples)
def test_max(self):
query = """
out = [FROM SCAN(%s) AS X EMIT dept_id, MAX(salary)];
STORE(out, OUTPUT);
""" % self.emp_key
self.check_result(query, self.__aggregate_expected_result(max))
def test_min(self):
query = """
out = [FROM SCAN(%s) AS X EMIT dept_id, MIN(salary)];
STORE(out, OUTPUT);
""" % self.emp_key
self.check_result(query, self.__aggregate_expected_result(min))
def test_sum(self):
query = """
out = [FROM SCAN(%s) as X EMIT dept_id, SUM(salary)];
STORE(out, OUTPUT);
""" % self.emp_key
self.check_result(query, self.__aggregate_expected_result(sum))
def test_avg(self):
query = """
out = [FROM SCAN(%s) AS X EMIT dept_id, AVG(salary)];
STORE(out, OUTPUT);
""" % self.emp_key
def avg(it):
sum = 0
cnt = 0
for val in it:
sum += val
cnt += 1
return sum / cnt
self.check_result(query, self.__aggregate_expected_result(avg))
self.check_result(query, self.__aggregate_expected_result(avg),
test_logical=True)
def test_stdev(self):
query = """
out = [FROM SCAN(%s) AS X EMIT STDEV(salary)];
STORE(out, OUTPUT);
""" % self.emp_key
res = self.execute_query(query)
tp = res.elements().next()
self.assertAlmostEqual(tp[0], 34001.8006726)
res = self.execute_query(query, test_logical=True)
tp = res.elements().next()
self.assertAlmostEqual(tp[0], 34001.8006726)
def test_count(self):
query = """
out = [FROM SCAN(%s) AS X EMIT dept_id, COUNT(salary)];
STORE(out, OUTPUT);
""" % self.emp_key
self.check_result(query, self.__aggregate_expected_result(len))
self.check_result(query, self.__aggregate_expected_result(len),
test_logical=True)
def test_countall(self):
query = """
out = [FROM SCAN(%s) AS X EMIT dept_id, COUNTALL()];
STORE(out, OUTPUT);
""" % self.emp_key
self.check_result(query, self.__aggregate_expected_result(len))
self.check_result(query, self.__aggregate_expected_result(len),
test_logical=True)
def test_count_star(self):
query = """
out = [FROM SCAN(%s) AS X EMIT dept_id, COUNT(*)];
STORE(out, OUTPUT);
""" % self.emp_key
self.check_result(query, self.__aggregate_expected_result(len))
self.check_result(query, self.__aggregate_expected_result(len),
test_logical=True)
def test_count_star_sql(self):
query = """
out = SELECT dept_id, COUNT(*) FROM SCAN(%s) AS X;
STORE(out, OUTPUT);
""" % self.emp_key
self.check_result(query, self.__aggregate_expected_result(len))
self.check_result(query, self.__aggregate_expected_result(len),
test_logical=True)
def test_max_reversed(self):
query = """
out = [FROM SCAN(%s) AS X EMIT MAX(salary) AS max_salary, dept_id];
STORE(out, OUTPUT);
""" % self.emp_key
ex = self.__aggregate_expected_result(max)
ex = collections.Counter([(y, x) for (x, y) in ex])
self.check_result(query, ex)
self.check_result(query, ex, test_logical=True)
def test_compound_aggregate(self):
query = """
out = [FROM SCAN(%s) AS X
EMIT (2 * (MAX(salary) - MIN(salary))) AS range,
dept_id AS did];
out = [FROM out EMIT did AS dept_id, range AS rng];
STORE(out, OUTPUT);
""" % self.emp_key
result_dict = collections.defaultdict(list)
for t in self.emp_table.elements():
result_dict[t[1]].append(t[3])
tuples = [(key, 2 * (max(values) - min(values))) for key, values in
result_dict.iteritems()]
expected = collections.Counter(tuples)
self.check_result(query, expected)
self.check_result(query, expected, test_logical=True)
def test_aggregate_with_unbox(self):
query = """
C = [1 AS one, 2 AS two];
out = [FROM SCAN(%s) AS X
EMIT MAX(*C.two * salary) - MIN( *C.$1 * salary) AS range,
dept_id AS did];
out = [FROM out EMIT did AS dept_id, range AS rng];
STORE(out, OUTPUT);
""" % self.emp_key
result_dict = collections.defaultdict(list)
for t in self.emp_table.elements():
result_dict[t[1]].append(2 * t[3])
tuples = [(key, (max(values) - min(values))) for key, values in
result_dict.iteritems()]
expected = collections.Counter(tuples)
self.check_result(query, expected)
self.check_result(query, expected, test_logical=True)
def test_nary_groupby(self):
query = """
out = [FROM SCAN(%s) AS X EMIT dept_id, salary, COUNT(name)];
STORE(out, OUTPUT);
""" % self.emp_key
result_dict = collections.defaultdict(list)
for t in self.emp_table.elements():
result_dict[(t[1], t[3])].append(t[2])
tuples = [key + (len(values),)
for key, values in result_dict.iteritems()]
expected = collections.Counter(tuples)
self.check_result(query, expected)
def test_empty_groupby(self):
query = """
out = [FROM SCAN(%s) AS X EMIT MAX(salary), COUNT($0), MIN(dept_id*4)];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter([(90000, len(self.emp_table), 4)])
self.check_result(query, expected)
def test_compound_groupby(self):
query = """
out = [FROM SCAN(%s) AS X EMIT id+dept_id, AVG(salary), COUNT(salary)];
STORE(out, OUTPUT);
""" % self.emp_key
result_dict = collections.defaultdict(list)
for t in self.emp_table.elements():
result_dict[t[0] + t[1]].append(t[3])
tuples1 = [(key, sum(values), len(values)) for key, values
in result_dict.iteritems()]
tuples2 = [(t[0], t[1] / t[2], t[2]) for t in tuples1]
expected = collections.Counter(tuples2)
self.check_result(query, expected)
def test_impure_aggregate_colref(self):
"""Test of aggregate column that refers to a grouping column"""
query = """
out = [FROM SCAN(%s) AS X EMIT
( X.dept_id + (MAX(X.salary) - MIN(X.salary))) AS val,
X.dept_id AS did];
out = [FROM out EMIT did AS dept_id, val AS rng];
STORE(out, OUTPUT);
""" % self.emp_key
result_dict = collections.defaultdict(list)
for t in self.emp_table.elements():
result_dict[t[1]].append(t[3])
tuples = [(key, key + (max(values) - min(values))) for key, values in
result_dict.iteritems()]
expected = collections.Counter(tuples)
self.check_result(query, expected)
def test_impure_aggregate_unbox(self):
"""Test of an aggregate column that contains an unbox."""
query = """
TWO = [2];
out = [FROM SCAN(%s) AS X
EMIT (*TWO * (MAX(salary) - MIN(salary))) AS range,
dept_id AS did];
out = [FROM out EMIT did AS dept_id, range AS rng];
STORE(out, OUTPUT);
""" % self.emp_key
result_dict = collections.defaultdict(list)
for t in self.emp_table.elements():
result_dict[t[1]].append(t[3])
tuples = [(key, 2 * (max(values) - min(values))) for key, values in
result_dict.iteritems()]
expected = collections.Counter(tuples)
self.check_result(query, expected)
def test_aggregate_illegal_colref(self):
query = """
out = [FROM SCAN(%s) AS X EMIT
X.dept_id + COUNT(X.salary) AS val];
STORE(out, OUTPUT);
""" % self.emp_key
with self.assertRaises(raco.myrial.groupby.NonGroupedAccessException): # noqa
self.check_result(query, None)
def test_nested_aggregates_are_illegal(self):
query = """
out = [FROM SCAN(%s) AS X
EMIT id+dept_id, MIN(53 + MAX(salary)) AS foo];
STORE(out, OUTPUT);
""" % self.emp_key
with self.assertRaises(NestedAggregateException):
self.check_result(query, collections.Counter())
def test_standalone_countall(self):
query = """
out = COUNTALL(SCAN(%s));
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter([(len(self.emp_table),)])
self.check_result(query, expected)
def test_multiway_bagcomp_with_unbox(self):
"""Return all employees in accounting making less than 30000"""
query = """
Salary = [30000];
Dept = ["accounting"];
out = [FROM SCAN(%s) AS E, SCAN(%s) AS D
WHERE E.dept_id == D.id AND D.name == *Dept
AND E.salary < *Salary EMIT E.$2 AS name];
STORE(out, OUTPUT);
""" % (self.emp_key, self.dept_key)
expected = collections.Counter([
("<NAME>",),
("<NAME>",),
("<NAME>",)])
self.check_result(query, expected)
def test_duplicate_bagcomp_aliases_are_illegal(self):
query = """
X = SCAN(%s);
out = [FROM X, X EMIT *];
STORE(out, OUTPUT);
""" % (self.emp_key,)
with self.assertRaises(interpreter.DuplicateAliasException):
self.check_result(query, collections.Counter())
def test_bagcomp_column_index_out_of_bounds(self):
query = """
E = SCAN(%s);
D = SCAN(%s);
out = [FROM E, D WHERE E.$1 == D.$77
EMIT E.name AS emp_name, D.$1 AS dept_name];
STORE(out, OUTPUT);
""" % (self.emp_key, self.dept_key)
with self.assertRaises(ColumnIndexOutOfBounds):
self.check_result(query, collections.Counter())
def test_abs(self):
query = """
out = [FROM SCAN(%s) AS X EMIT id, ABS(val)];
STORE(out, OUTPUT);
""" % self.numbers_key
expected = collections.Counter(
[(a, abs(b)) for a, b in self.numbers_table.elements()])
self.check_result(query, expected)
def test_ceil(self):
query = """
out = [FROM SCAN(%s) AS X EMIT id, CEIL(val)];
STORE(out, OUTPUT);
""" % self.numbers_key
expected = collections.Counter(
[(a, math.ceil(b)) for a, b in self.numbers_table.elements()])
self.check_result(query, expected)
def test_cos(self):
query = """
out = [FROM SCAN(%s) AS X EMIT id, COS(val)];
STORE(out, OUTPUT);
""" % self.numbers_key
expected = collections.Counter(
[(a, math.cos(b)) for a, b in self.numbers_table.elements()])
self.check_result(query, expected)
def test_floor(self):
query = """
out = [FROM SCAN(%s) AS X EMIT id, FLOOR(val)];
STORE(out, OUTPUT);
""" % self.numbers_key
expected = collections.Counter(
[(a, math.floor(b)) for a, b in self.numbers_table.elements()])
self.check_result(query, expected)
def test_log(self):
query = """
out = [FROM SCAN(%s) AS X WHERE val > 0 EMIT id, LOG(val)];
STORE(out, OUTPUT);
""" % self.numbers_key
expected = collections.Counter(
[(a, math.log(b)) for a, b in self.numbers_table.elements()
if b > 0])
self.check_result(query, expected)
def test_sin(self):
query = """
out = [FROM SCAN(%s) AS X EMIT id, SIN(val)];
STORE(out, OUTPUT);
""" % self.numbers_key
expected = collections.Counter(
[(a, math.sin(b)) for a, b in self.numbers_table.elements()])
self.check_result(query, expected)
def test_sqrt(self):
query = """
out = [FROM SCAN(%s) X WHERE val >= 0 EMIT id, SQRT(val)];
STORE(out, OUTPUT);
""" % self.numbers_key
expected = collections.Counter(
[(a, math.sqrt(b)) for a, b in self.numbers_table.elements()
if b >= 0])
self.check_result(query, expected)
def test_tan(self):
query = """
out = [FROM SCAN(%s) AS X EMIT id, TAN(val)];
STORE(out, OUTPUT);
""" % self.numbers_key
expected = collections.Counter(
[(a, math.tan(b)) for a, b in self.numbers_table.elements()])
self.check_result(query, expected)
def test_md5(self):
query = """
out = [FROM SCAN(%s) AS X EMIT id, md5(name)];
STORE(out, OUTPUT);
""" % self.emp_key
def md5_as_long(x):
m = md5.new()
m.update(x)
return int(m.hexdigest(), 16) >> 64
expected = collections.Counter(
[(x[0], md5_as_long(x[2])) for x in self.emp_table.elements()])
self.check_result(query, expected)
def test_pow(self):
query = """
THREE = [3];
out = [FROM SCAN(%s) X EMIT id, POW(X.val, *THREE)];
STORE(out, OUTPUT);
""" % self.numbers_key
expected = collections.Counter(
[(a, pow(b, 3)) for a, b in self.numbers_table.elements()])
self.check_result(query, expected)
def test_no_such_relation(self):
query = """
out = [FROM SCAN(foo:bar:baz) x EMIT id, TAN(val)];
STORE(out, OUTPUT);
"""
with self.assertRaises(NoSuchRelationException):
self.check_result(query, collections.Counter())
def test_bad_relation_name(self):
query = """
y = empty(a:int);
z = [from s y -- bug: s does not exist
emit y.a];
store(z, debug);
"""
with self.assertRaises(NoSuchRelationException):
self.check_result(query, collections.Counter())
def test_bad_alias(self):
query = """
y = empty(a:int);
z = [from y s -- bug: extra s
emit y.a];
store(z, debug);
"""
with self.assertRaises(NoSuchRelationException):
self.check_result(query, collections.Counter())
def test_bad_alias_wildcard(self):
query = """
y = empty(a:int);
z = [from y s -- bug: errant s
emit y.*];
store(z, debug);
"""
with self.assertRaises(NoSuchRelationException):
self.check_result(query, collections.Counter())
def test_scan_error(self):
query = """
out = [FROM SCAN(%s) AS X EMIT id, !!!FROG(val)];
STORE(out, OUTPUT);
""" % self.emp_key
with self.assertRaises(MyrialCompileException):
self.check_result(query, collections.Counter())
def test_relation_scope_error(self):
query = """
out = [FROM EMPTY(x:INT) AS X EMIT z.*];
STORE(out, OUTPUT);
"""
with self.assertRaises(NoSuchRelationException):
self.check_result(query, collections.Counter())
def test_relation_scope_error2(self):
query = """
z = EMPTY(z:INT);
out = [FROM EMPTY(x:INT) AS X EMIT z.*];
STORE(out, OUTPUT);
"""
with self.assertRaises(NoSuchRelationException):
self.check_result(query, collections.Counter())
def test_parse_error(self):
query = """
out = [FROM SCAN(%s) AS X EMIT $(val)];
STORE(out, OUTPUT);
""" % self.emp_key
with self.assertRaises(MyrialCompileException):
self.check_result(query, collections.Counter())
def test_no_such_udf(self):
query = """
out = [FROM SCAN(%s) AS X EMIT FooFunction(X.salary)];
STORE(out, OUTPUT);
""" % self.emp_key
with self.assertRaises(NoSuchFunctionException):
self.check_result(query, collections.Counter())
def test_reserved_udf(self):
query = """
DEF avg(x, y): (x + y) / 2;
out = [FROM SCAN(%s) AS X EMIT avg(X.salary)];
STORE(out, OUTPUT);
""" % self.emp_key
with self.assertRaises(ReservedTokenException):
self.check_result(query, collections.Counter())
def test_duplicate_udf(self):
query = """
DEF foo(x, y): x + y;
DEF bar(): 7;
DEF foo(x): -1 * x;
out = [FROM SCAN(%s) AS X EMIT foo(X.salary)];
STORE(out, OUTPUT);
""" % self.emp_key
with self.assertRaises(DuplicateFunctionDefinitionException):
self.check_result(query, collections.Counter())
def test_invalid_argument_udf(self):
query = """
DEF Foo(x, y): cos(x) * sin(y);
out = [FROM SCAN(%s) AS X EMIT Foo(X.salary)];
STORE(out, OUTPUT);
""" % self.emp_key
with self.assertRaises(InvalidArgumentList):
self.check_result(query, collections.Counter())
def test_undefined_variable_udf(self):
query = """
DEF Foo(x, y): cos(x) * sin(z);
out = [FROM SCAN(%s) AS X EMIT Foo(X.salary)];
STORE(out, OUTPUT);
""" % self.emp_key
with self.assertRaises(UndefinedVariableException):
self.check_result(query, collections.Counter())
def test_duplicate_variable_udf(self):
query = """
DEF Foo(x, x): cos(x) * sin(x);
out = [FROM SCAN(%s) AS X EMIT Foo(X.salary, X.dept_id)];
STORE(out, OUTPUT);
""" % self.emp_key
with self.assertRaises(DuplicateVariableException):
self.check_result(query, collections.Counter())
def test_nary_udf(self):
query = """
DEF Foo(a,b): [a + b, a - b];
out = [FROM SCAN(%s) AS X EMIT id, Foo(salary, dept_id) as [x, y]];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter([(t[0], t[1] + t[3], t[3] - t[1])
for t in self.emp_table])
self.check_result(query, expected)
def test_nary_udf_name_count(self):
query = """
DEF Foo(a,b): [a + b, a - b];
out = [FROM SCAN(%s) AS X EMIT id, Foo(salary, dept_id) as [x, y, z]];
STORE(out, OUTPUT);
""" % self.emp_key
with self.assertRaises(IllegalColumnNamesException):
self.check_result(query, None)
def test_nary_udf_illegal_nesting(self):
query = """
DEF Foo(x): [x + 3, x - 3];
DEF Bar(a,b): [Foo(x), Foo(b)];
out = [FROM SCAN(%s) AS X EMIT id, Bar(salary, dept_id) as [x, y]];
STORE(out, OUTPUT);
""" % self.emp_key
with self.assertRaises(NestedTupleExpressionException):
self.check_result(query, None)
def test_nary_udf_illegal_wildcard(self):
query = """
DEF Foo(x): [x + 3, *];
out = [FROM SCAN(%s) AS X EMIT id, Foo(salary, dept_id) as [x, y]];
STORE(out, OUTPUT);
""" % self.emp_key
with self.assertRaises(IllegalWildcardException):
self.check_result(query, None)
def test_triangle_udf(self):
query = """
DEF Triangle(a,b): (a*b)//2;
out = [FROM SCAN(%s) AS X EMIT id, Triangle(X.salary, dept_id) AS t];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter([(t[0], t[1] * t[3] / 2) for t in self.emp_table]) # noqa
self.check_result(query, expected)
def test_noop_udf(self):
expr = "30 + 15 // 7 + -45"
query = """
DEF Noop(): %s;
out = [Noop() AS t];
STORE(out, OUTPUT);
""" % expr
val = eval(expr)
expected = collections.Counter([(val,)])
self.check_result(query, expected)
def test_const(self):
expr = "30 + 15 // 7 + -45"
query = """
CONST myconstant: %s;
out = [myconstant AS t];
STORE(out, OUTPUT);
""" % expr
val = eval(expr)
expected = collections.Counter([(val,)])
self.check_result(query, expected)
def test_composition_udf(self):
query = """
DEF Add7(x): x + 7;
DEF Add6(x): x + 6;
out = [FROM SCAN(%s) AS X EMIT id, Add6(Add7(Add6(X.salary)))];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter([(t[0], t[3] + 19)
for t in self.emp_table])
self.check_result(query, expected)
def test_nested_udf(self):
query = """
DEF Add7(x): x + 7;
DEF Add10(x): Add7(x) + 3;
out = [FROM SCAN(%s) AS X EMIT id, Add10(X.salary)];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter([(t[0], t[3] + 10)
for t in self.emp_table])
self.check_result(query, expected)
def test_regression_150(self):
"""Repeated invocation of a UDF."""
query = """
DEF transform(x): pow(10, x/pow(2,16)*3.5);
out = [FROM SCAN(%s) AS X EMIT id, transform(salary),
transform(dept_id)];
STORE(out, OUTPUT);
""" % self.emp_key
def tx(x):
return pow(10, float(x) / pow(2, 16) * 3.5)
expected = collections.Counter([(t[0], tx(t[3]), tx(t[1]))
for t in self.emp_table])
self.check_result(query, expected)
def test_safediv_2_function(self):
query = """
out = [FROM SCAN(%s) AS X EMIT SafeDiv(X.salary,X.dept_id-1)];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[(t[3] / (t[1] - 1) if t[1] - 1 > 0 else 0,)
for t in self.emp_table])
self.check_result(query, expected)
def test_safediv_3_function(self):
query = """
out = [FROM SCAN(%s) AS X EMIT SafeDiv(X.salary,X.dept_id-1,42)];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[(t[3] / (t[1] - 1) if t[1] - 1 > 0 else 42,)
for t in self.emp_table])
self.check_result(query, expected)
def test_answer_to_everything_function(self):
query = """
out = [TheAnswerToLifeTheUniverseAndEverything()];
STORE(out, OUTPUT);
"""
expected = collections.Counter([(42,)])
self.check_result(query, expected)
def test_least_function(self):
query = """
out = [FROM SCAN(%s) AS X EMIT least(X.id,X.dept_id,1)];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[(min(t[0], t[1], 1),)
for t in self.emp_table])
self.check_result(query, expected)
def test_greatest_function(self):
query = """
out = [FROM SCAN(%s) AS X EMIT greatest(X.id,X.dept_id,3)];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[(max(t[0], t[1], 3),)
for t in self.emp_table])
self.check_result(query, expected)
def test_lesser_function(self):
query = """
out = [FROM SCAN(%s) AS X EMIT lesser(X.id,X.dept_id)];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[(min(t[0], t[1]),)
for t in self.emp_table])
self.check_result(query, expected)
def test_greater_function(self):
query = """
out = [FROM SCAN(%s) AS X EMIT greater(X.id,X.dept_id)];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[(max(t[0], t[1],),)
for t in self.emp_table])
self.check_result(query, expected)
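# UDA (user-defined aggregate) tests. A Myrial UDA body consists of three
# statements: an initializer list of state columns, an update list applied to
# each input tuple, and an optional emit expression over the final state.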
def test_uda_illegal_init(self):
query = """
uda Foo(x,y) {
[0 as A, *];
[A + x, A + y];
A;
};
out = [FROM SCAN(%s) AS X EMIT dept_id, Foo(salary, id)];
STORE(out, OUTPUT);
""" % self.emp_key
with self.assertRaises(IllegalWildcardException):
self.check_result(query, None)
def test_uda_illegal_update(self):
query = """
uda Foo(x,y) {
[0 as A, 1 as B];
[A + x + y, *];
A + B;
};
out = [FROM SCAN(%s) AS X EMIT dept_id, Foo(salary, id)];
STORE(out, OUTPUT);
""" % self.emp_key
with self.assertRaises(MyrialCompileException):
self.check_result(query, None)
def test_uda_nested_emitter(self):
query = """
uda Foo(x) {
[0 as A];
[A + x];
[A];
};
uda Bar(x) {
[0 as B];
[B + x];
Foo(B);
};
out = [FROM SCAN(%s) AS X EMIT dept_id, Bar(salary)];
STORE(out, OUTPUT);
""" % self.emp_key
with self.assertRaises(NestedAggregateException):
self.check_result(query, None)
def test_uda_nested_init(self):
query = """
uda Foo(x) {
[0 as A];
[A + x];
[A];
};
uda Bar(x) {
[Foo(0) as B];
[B + x];
B;
};
out = [FROM SCAN(%s) AS X EMIT dept_id, Bar(salary)];
STORE(out, OUTPUT);
""" % self.emp_key
with self.assertRaises(NestedAggregateException):
self.check_result(query, None)
def test_uda_nested_update(self):
query = """
uda Foo(x) {
[0 as A];
[A + x];
[A];
};
uda Bar(x) {
[0 as B];
[Foo(B)];
B;
};
out = [FROM SCAN(%s) AS X EMIT dept_id, Bar(salary)];
STORE(out, OUTPUT);
""" % self.emp_key
with self.assertRaises(NestedAggregateException):
self.check_result(query, None)
def test_uda_unary_emit_arg_list(self):
query = """
uda MyAvg(val) {
[0 as _sum, 0 as _count];
[_sum + val, _count + 1];
[_sum / _count];
};
out = [FROM SCAN(%s) AS X EMIT dept_id, MyAvg(salary)];
STORE(out, OUTPUT);
""" % self.emp_key
def agg_func(x):
return float(sum(x)) / len(x)
expected = self.__aggregate_expected_result(agg_func)
self.check_result(query, expected)
def test_second_max_uda(self):
"""UDA to compute the second largest element in a collection."""
query = """
uda SecondMax(val) {
[0 as _max, 0 as second_max];
[case when val > _max then val else _max end,
case when val > _max then _max when val > second_max then val
else second_max end];
second_max;
};
out = [FROM SCAN(%s) AS X EMIT dept_id, SecondMax(salary)];
STORE(out, OUTPUT);
""" % self.emp_key
def agg_func(x):
if len(x) < 2:
return 0
else:
return sorted(x, reverse=True)[1]
expected = self.__aggregate_expected_result(agg_func)
self.check_result(query, expected)
def test_multi_invocation_uda(self):
query = """
uda MaxDivMin(val) {
[9999999 as _min, 0 as _max];
[case when val < _min then val else _min end,
case when val > _max then val else _max end];
_max / _min;
};
out = [FROM SCAN(%s) AS X EMIT
MaxDivMin(id) + dept_id + MaxDivMin(salary), dept_id];
STORE(out, OUTPUT);
""" % self.emp_key
d = collections.defaultdict(list)
for t in self.emp_table.elements():
d[t[1]].append(t)
results = []
for k, tpls in d.iteritems():
max_salary = max(t[3] for t in tpls)
min_salary = min(t[3] for t in tpls)
max_id = max(t[0] for t in tpls)
min_id = min(t[0] for t in tpls)
results.append((k + float(max_salary) / min_salary +
float(max_id) / min_id, k))
self.check_result(query, collections.Counter(results))
def test_multiple_uda(self):
query = """
uda MyMax1(val) {
[0 as _max];
[case when val > _max then val else _max end];
_max;
};
uda MyMax2(val) {
[0 as _max];
[case when val > _max then val else _max end];
_max;
};
out = [FROM SCAN(%s) AS X EMIT dept_id, MyMax1(salary), MyMax2(id)];
STORE(out, OUTPUT);
""" % self.emp_key
d = collections.defaultdict(list)
for t in self.emp_table.elements():
d[t[1]].append(t)
results = []
for k, tpls in d.iteritems():
max_salary = max(t[3] for t in tpls)
max_id = max(t[0] for t in tpls)
results.append((k, max_salary, max_id))
self.check_result(query, collections.Counter(results))
def test_uda_no_emit_clause(self):
query = """
uda MyCount() {
[0 as _count];
[_count + 1];
};
out = [FROM SCAN(%s) AS X EMIT dept_id, MyCount()];
STORE(out, OUTPUT);
""" % self.emp_key
self.check_result(query, self.__aggregate_expected_result(len))
def test_uda_no_emit_clause_many_cols(self):
query = """
uda MyAggs(x) {
[0 as _count, 0 as _sum, 0 as _sumsq];
[_count + 1, _sum + x, _sumsq + x*x];
};
out = [FROM SCAN(%s) AS X EMIT MyAggs(salary) as [a, b, c]];
STORE(out, OUTPUT);
""" % self.emp_key
c = len(list(self.emp_table.elements()))
s = sum(d for a, b, c, d in self.emp_table.elements())
sq = sum(d * d for a, b, c, d in self.emp_table.elements())
expected = collections.Counter([(c, s, sq)])
self.check_result(query, expected)
# Test with two different column orders in case the undefined
# order used by Python is correct by chance.
query = """
uda MyAggs(x) {
[0 as _count, 0 as _sumsq, 0 as _sum];
[_count + 1, _sumsq + x*x, _sum + x];
};
out = [FROM SCAN(%s) AS X EMIT MyAggs(salary) as [a, b, c]];
STORE(out, OUTPUT);
""" % self.emp_key
c = len(list(self.emp_table.elements()))
sq = sum(d * d for a, b, c, d in self.emp_table.elements())
s = sum(d for a, b, c, d in self.emp_table.elements())
expected = collections.Counter([(c, sq, s)])
self.check_result(query, expected)
def test_uda_with_udf(self):
query = """
def foo(x, y): x + y;
uda max2(x, y) {
[0 as _max];
[case when foo(x, y) > _max then foo(x, y) else _max end];
_max;
};
out = [FROM SCAN(%s) AS X EMIT dept_id, max2(salary, id)];
STORE(out, OUTPUT);
""" % self.emp_key
d = collections.defaultdict(list)
for t in self.emp_table.elements():
d[t[1]].append(t)
results = []
for k, tpls in d.iteritems():
results.append((k, max(t[3] + t[0] for t in tpls)))
self.check_result(query, collections.Counter(results))
def test_uda_with_subsequent_project_0(self):
query = """
def foo(x, y): x + y;
uda max2(x, y) {
[0 as _max];
[case when foo(x, y) > _max then foo(x, y) else _max end];
_max;
};
inter = [FROM SCAN(%s) AS X EMIT dept_id, max2(salary, id)];
out = [from inter emit $0];
STORE(out, OUTPUT);
""" % self.emp_key
d = collections.defaultdict(list)
for t in self.emp_table.elements():
d[t[1]].append(t)
results = []
for k, tpls in d.iteritems():
results.append((k, max(t[3] + t[0] for t in tpls)))
results = [(t[0],) for t in results]
self.check_result(query, collections.Counter(results))
def test_uda_with_subsequent_project_1(self):
query = """
def foo(x, y): x + y;
uda max2(x, y) {
[0 as _max];
[case when foo(x, y) > _max then foo(x, y) else _max end];
_max;
};
inter = [FROM SCAN(%s) AS X EMIT dept_id, max2(salary, id)];
out = [from inter emit $1];
STORE(out, OUTPUT);
""" % self.emp_key
d = collections.defaultdict(list)
for t in self.emp_table.elements():
d[t[1]].append(t)
results = []
for k, tpls in d.iteritems():
results.append((k, max(t[3] + t[0] for t in tpls)))
results = [(t[1],) for t in results]
self.check_result(query, collections.Counter(results))
def test_uda_with_subsequent_project_2(self):
query = """
def foo(x, y): x + y;
uda max2(x, y) {
[0 as _max];
[case when foo(x, y) > _max then foo(x, y) else _max end];
_max;
};
inter = [FROM SCAN(%s) AS X EMIT dept_id, max2(salary, id)
, max2(dept_id, id)];
out = [from inter emit $1];
STORE(out, OUTPUT);
""" % self.emp_key
d = collections.defaultdict(list)
for t in self.emp_table.elements():
d[t[1]].append(t)
results = []
for k, tpls in d.iteritems():
results.append((k,
max(t[3] + t[0] for t in tpls),
max(t[1] + t[0] for t in tpls)))
results = [(t[1],) for t in results]
self.check_result(query, collections.Counter(results))
def __run_multiple_emitter_test(self, include_column_names):
if include_column_names:
names = " AS [mysum, mycount, myavg]"
else:
names = ""
query = """
uda SumCountMean(x) {
[0 as _sum, 0 as _count];
[_sum + x, _count + 1];
[_sum, _count, _sum/_count];
};
out = [FROM SCAN(%s) AS X EMIT dept_id, SumCountMean(salary) %s,
dept_id+3, max(id) as max_id];
STORE(out, OUTPUT);
""" % (self.emp_key, names)
d = collections.defaultdict(list)
for t in self.emp_table.elements():
d[t[1]].append(t)
results = []
for k, tpls in d.iteritems():
_sum = sum(x[3] for x in tpls)
_count = len(tpls)
_avg = float(_sum) / _count
_max_id = max(x[0] for x in tpls)
results.append((k, _sum, _count, _avg, k + 3, _max_id))
self.check_result(query, collections.Counter(results))
def test_uda_multiple_emitters_default_names(self):
self.__run_multiple_emitter_test(False)
def test_uda_multiple_emitters_provided_names(self):
self.__run_multiple_emitter_test(True)
scheme_actual = self.db.get_scheme('OUTPUT')
scheme_expected = scheme.Scheme([
('dept_id', types.LONG_TYPE), ('mysum', types.LONG_TYPE),
('mycount', types.LONG_TYPE), ('myavg', types.FLOAT_TYPE),
('_COLUMN4_', types.LONG_TYPE), ('max_id', types.LONG_TYPE)])
self.assertEquals(scheme_actual, scheme_expected)
def test_emit_arg_bad_column_name_length(self):
query = """
out = [FROM SCAN(%s) AS X EMIT dept_id AS [dept_id1, dept_id2]];
STORE(out, OUTPUT);
""" % self.emp_key
with self.assertRaises(IllegalColumnNamesException):
self.check_result(query, None)
def test_uda_bad_column_name_length(self):
query = """
uda Fubar(x, y, z) {
[0 as Q];
[Q + 1];
[1,2,3];
};
out = [FROM SCAN(%s) AS X EMIT dept_id, Fubar(1, salary, id)
AS [A, B, C, D]];
STORE(out, OUTPUT);
""" % self.emp_key
with self.assertRaises(IllegalColumnNamesException):
self.check_result(query, None)
def test_uda_init_tuple_valued(self):
query = """
uda Foo(x) {
[0 as Q];
[Q + 1];
[1,2,3];
};
uda Bar(x) {
[Foo(0) as [A, B, C]];
[Q * 8];
[1,2,3];
};
out = [FROM SCAN(%s) AS X EMIT dept_id, Bar(salary)];
STORE(out, OUTPUT);
""" % self.emp_key
with self.assertRaises(NestedTupleExpressionException):
self.check_result(query, None)
def test_uda_update_tuple_valued(self):
query = """
uda Foo(x) {
[0 as Q];
[Q + 1];
[1,2,3];
};
uda Bar(x) {
[0 as Q];
[Foo(Q + 1)];
[1,2,3];
};
out = [FROM SCAN(%s) AS X EMIT dept_id, Bar(salary)];
STORE(out, OUTPUT);
""" % self.emp_key
with self.assertRaises(NestedTupleExpressionException):
self.check_result(query, None)
def test_uda_result_tuple_valued(self):
query = """
uda Foo(x) {
[0 as Q];
[Q + 1];
[1,2,3];
};
uda Bar(x) {
[0 as Q];
[Q + 2];
[1,2,Foo(3)];
};
out = [FROM SCAN(%s) AS X EMIT dept_id, Bar(salary)];
STORE(out, OUTPUT);
""" % self.emp_key
with self.assertRaises(NestedTupleExpressionException):
self.check_result(query, None)
def test_uda_multiple_emitters_nested(self):
"""Test that we raise an Exception if a tuple-valued UDA doesn't appear
by itself in an emit expression."""
query = """
uda SumCountMean(x) {
[0 as _sum, 0 as _count];
[_sum + x, _count + 1];
[_sum, _count, _sum/_count];
};
out = [FROM SCAN(%s) AS X EMIT dept_id, SumCountMean(salary) + 5];
STORE(out, OUTPUT);
""" % self.emp_key
with self.assertRaises(NestedTupleExpressionException):
self.check_result(query, None)
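# Myrial snippet shared by the decomposable-UDA tests below. The logical
# aggregate deliberately emits a bogus value (float(_sum)), so these tests can
# only pass when the optimizer substitutes the Local/Remote decomposition.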
__DECOMPOSED_UDA = """
uda LogicalAvg(x) {
[0 as _sum, 0 as _count];
[_sum + x, _count + 1];
float(_sum); -- Note bogus return value
};
uda LocalAvg(x) {
[0 as _sum, 0 as _count];
[_sum + x, _count + 1];
};
uda RemoteAvg(_local_sum, _local_count) {
[0 as _sum, 0 as _count];
[_sum + _local_sum, _count + _local_count];
[_sum/_count];
};
uda* LogicalAvg {LocalAvg, RemoteAvg};
"""
__ARG_MAX_UDA = """
def pickval(id, salary, val, _id, _salary, _val):
case when salary > _salary then val
when salary = _salary and id > _id then val
else _val end;
uda ArgMax(id, dept_id, name, salary) {
[0 as _id, 0 as _dept_id, "" as _name, 0 as _salary];
[pickval(id, salary, id, _id, _salary, _id),
pickval(id, salary, dept_id, _id, _salary, _dept_id),
pickval(id, salary, name, _id, _salary, _name),
pickval(id, salary, salary, _id, _salary, _salary)];
[_id, _dept_id, _name, _salary];
};
"""
__ARG_MAX_UDA_UNNECESSARY_EXPR = """
def pickval(id, salary, val, _id, _salary, _val):
case when salary > _salary then val
when salary = _salary and id > _id then val
else _val end;
uda ArgMax(id, dept_id, name, salary) {
[0 as _id, 0 as _dept_id, "" as _name, 0 as _salary];
[pickval(id, salary, greater(id, id), _id, _salary, _id),
pickval(id, salary, lesser(dept_id, dept_id), _id, _salary,
_dept_id),
pickval(id, salary, case when name="" then name else name end, _id,
_salary, _name),
pickval(id, salary, salary * 1, _id, _salary, _salary)];
[_id, _dept_id, _name, _salary];
};
"""
def test_decomposable_average_uda(self):
"""Test of a decomposed average UDA.
Note that the logical aggregate returns a broken value, so
this test only passes if we decompose the aggregate properly.
"""
query = """%s
out = [FROM SCAN(%s) AS X EMIT dept_id, LogicalAvg(salary)];
STORE(out, OUTPUT);
""" % (TestQueryFunctions.__DECOMPOSED_UDA, self.emp_key)
result_dict = collections.defaultdict(list)
for t in self.emp_table.elements():
result_dict[t[1]].append(t[3])
tuples = []
for key, vals in result_dict.iteritems():
_cnt = len(vals)
_sum = sum(vals)
tuples.append((key, float(_sum) / _cnt))
self.check_result(query, collections.Counter(tuples))
def test_decomposable_nary_uda(self):
query = """
uda Sum2(x, y) {
[0 as sum_x, 0 as sum_y];
[sum_x + x, sum_y + y];
};
uda* Sum2 {Sum2, Sum2};
out = [FROM SCAN(%s) AS X EMIT
Sum2(id, salary) AS [id_sum, salary_sum]];
STORE(out, OUTPUT);
""" % self.emp_key
result_dict = collections.defaultdict(list)
for t in self.emp_table.elements():
result_dict[t[1]].append(t)
id_sum = sum(t[0] for t in self.emp_table.elements())
salary_sum = sum(t[3] for t in self.emp_table.elements())
tuples = [(id_sum, salary_sum)]
self.check_result(query, collections.Counter(tuples))
def test_arg_max_uda(self):
"""Test of an arg_max UDA.
"""
query = """
{arg}
emp = scan({emp});
out = [from emp emit ArgMax(id, dept_id, name, salary)];
store(out, OUTPUT);
""".format(arg=self.__ARG_MAX_UDA, emp=self.emp_key)
tuples = [(a, b, c, d) for (a, b, c, d) in self.emp_table
if all(d > d1 or d == d1 and a >= a1
for a1, b1, c1, d1 in self.emp_table)]
self.check_result(query, collections.Counter(tuples))
def test_arg_max_uda_with_references(self):
"""Test of an arg_max UDA with named, unnamed, and dotted
attribute references.
"""
query = """
{arg}
emp = scan({emp});
out = [from emp emit ArgMax(id, emp.dept_id, $2, emp.$3)];
store(out, OUTPUT);
""".format(arg=self.__ARG_MAX_UDA, emp=self.emp_key)
tuples = [(a, b, c, d) for (a, b, c, d) in self.emp_table
if all(d > d1 or d == d1 and a >= a1
for a1, b1, c1, d1 in self.emp_table)]
self.check_result(query, collections.Counter(tuples))
def test_arg_max_uda_with_functions(self):
"""Test of an arg_max UDA with expressions as inputs.
"""
query = """
{arg}
emp = scan({emp});
out = [from emp emit ArgMax(id,
greater(dept_id, dept_id),
case when id=1 then name else name end,
salary)];
store(out, OUTPUT);
""".format(arg=self.__ARG_MAX_UDA, emp=self.emp_key)
tuples = [(a, b, c, d) for (a, b, c, d) in self.emp_table
if all(d > d1 or d == d1 and a >= a1
for a1, b1, c1, d1 in self.emp_table)]
self.check_result(query, collections.Counter(tuples))
def test_decomposable_arg_max_uda(self):
"""Test of a decomposable arg_max UDA.
"""
query = """
{arg}
uda* ArgMax {{ArgMax, ArgMax}};
emp = scan({emp});
out = [from emp emit ArgMax(id, dept_id, name, salary)
as [a, b, c, d]];
store(out, OUTPUT);
""".format(arg=self.__ARG_MAX_UDA, emp=self.emp_key)
tuples = [(a, b, c, d) for (a, b, c, d) in self.emp_table
if all(d > d1 or d == d1 and a >= a1
for a1, b1, c1, d1 in self.emp_table)]
self.check_result(query, collections.Counter(tuples))
"""Test of an arg_max UDA with named, unnamed, and dotted
attribute references.
"""
def test_decomposable_arg_max_uda_with_references(self):
"""Test of a decomposable arg_max UDA with named, unnamed, and dotted
attribute references.
"""
query = """
{arg}
uda* ArgMax {{ArgMax, ArgMax}};
emp = scan({emp});
out = [from emp emit ArgMax(id, emp.dept_id, $2, emp.$3)
as [a, b, c, d]];
store(out, OUTPUT);
""".format(arg=self.__ARG_MAX_UDA, emp=self.emp_key)
tuples = [(a, b, c, d) for (a, b, c, d) in self.emp_table
if all(d > d1 or d == d1 and a >= a1
for a1, b1, c1, d1 in self.emp_table)]
self.check_result(query, collections.Counter(tuples))
def test_decomposable_arg_max_uda_with_functions(self):
"""Test of a decomposable arg_max UDA with expressions as inputs.
"""
query = """
{arg}
uda* ArgMax {{ArgMax, ArgMax}};
emp = scan({emp});
out = [from emp emit ArgMax(id,
greater(dept_id, dept_id),
case when id=1 then name else name end,
salary)];
store(out, OUTPUT);
""".format(arg=self.__ARG_MAX_UDA, emp=self.emp_key)
tuples = [(a, b, c, d) for (a, b, c, d) in self.emp_table
if all(d > d1 or d == d1 and a >= a1
for a1, b1, c1, d1 in self.emp_table)]
self.check_result(query, collections.Counter(tuples))
def test_arg_max_uda_internal_exprs(self):
"""Test of an arg_max UDA.
"""
query = """
{arg}
emp = scan({emp});
out = [from emp emit ArgMax(id, dept_id, name, salary)];
store(out, OUTPUT);
""".format(arg=self.__ARG_MAX_UDA_UNNECESSARY_EXPR, emp=self.emp_key)
tuples = [(a, b, c, d) for (a, b, c, d) in self.emp_table
if all(d > d1 or d == d1 and a >= a1
for a1, b1, c1, d1 in self.emp_table)]
self.check_result(query, collections.Counter(tuples))
def test_arg_max_uda_internal_exprs_with_references(self):
"""Test of an arg_max UDA with named, unnamed, and dotted
attribute references.
"""
query = """
{arg}
emp = scan({emp});
out = [from emp emit ArgMax(id, emp.dept_id, $2, emp.$3)];
store(out, OUTPUT);
""".format(arg=self.__ARG_MAX_UDA_UNNECESSARY_EXPR, emp=self.emp_key)
tuples = [(a, b, c, d) for (a, b, c, d) in self.emp_table
if all(d > d1 or d == d1 and a >= a1
for a1, b1, c1, d1 in self.emp_table)]
self.check_result(query, collections.Counter(tuples))
def test_arg_max_uda_internal_exprs_with_functions(self):
"""Test of an arg_max UDA with expressions as inputs.
"""
query = """
{arg}
emp = scan({emp});
out = [from emp emit ArgMax(id,
greater(dept_id, dept_id),
case when id=1 then name else name end,
salary)];
store(out, OUTPUT);
""".format(arg=self.__ARG_MAX_UDA_UNNECESSARY_EXPR, emp=self.emp_key)
tuples = [(a, b, c, d) for (a, b, c, d) in self.emp_table
if all(d > d1 or d == d1 and a >= a1
for a1, b1, c1, d1 in self.emp_table)]
self.check_result(query, collections.Counter(tuples))
def test_decomposable_arg_max_uda_internal_exprs(self):
"""Test of a decomposable arg_max UDA.
"""
query = """
{arg}
uda* ArgMax {{ArgMax, ArgMax}};
emp = scan({emp});
out = [from emp emit ArgMax(id, dept_id, name, salary)
as [a, b, c, d]];
store(out, OUTPUT);
""".format(arg=self.__ARG_MAX_UDA_UNNECESSARY_EXPR, emp=self.emp_key)
tuples = [(a, b, c, d) for (a, b, c, d) in self.emp_table
if all(d > d1 or d == d1 and a >= a1
for a1, b1, c1, d1 in self.emp_table)]
self.check_result(query, collections.Counter(tuples))
"""Test of an arg_max UDA with named, unnamed, and dotted
attribute references.
"""
def test_decomposable_arg_max_uda_internal_exprs_with_references(self):
"""Test of a decomposable arg_max UDA with named, unnamed, and dotted
attribute references.
"""
query = """
{arg}
uda* ArgMax {{ArgMax, ArgMax}};
emp = scan({emp});
out = [from emp emit ArgMax(id, emp.dept_id, $2, emp.$3)
as [a, b, c, d]];
store(out, OUTPUT);
""".format(arg=self.__ARG_MAX_UDA_UNNECESSARY_EXPR, emp=self.emp_key)
tuples = [(a, b, c, d) for (a, b, c, d) in self.emp_table
if all(d > d1 or d == d1 and a >= a1
for a1, b1, c1, d1 in self.emp_table)]
self.check_result(query, collections.Counter(tuples))
def test_decomposable_arg_max_uda_internal_exprs_with_functions(self):
"""Test of a decomposable arg_max UDA with expressions as inputs.
"""
query = """
{arg}
uda* ArgMax {{ArgMax, ArgMax}};
emp = scan({emp});
out = [from emp emit ArgMax(id,
greater(dept_id, dept_id),
case when id=1 then name else name end,
salary)];
store(out, OUTPUT);
""".format(arg=self.__ARG_MAX_UDA_UNNECESSARY_EXPR, emp=self.emp_key)
tuples = [(a, b, c, d) for (a, b, c, d) in self.emp_table
if all(d > d1 or d == d1 and a >= a1
for a1, b1, c1, d1 in self.emp_table)]
self.check_result(query, collections.Counter(tuples))
def test_decomposable_average_uda_repeated(self):
"""Test of repeated invocations of decomposed UDAs."""
query = """%s
out = [FROM SCAN(%s) AS X EMIT dept_id,
LogicalAvg(salary) + LogicalAvg($0)];
STORE(out, OUTPUT);
""" % (TestQueryFunctions.__DECOMPOSED_UDA, self.emp_key)
result_dict = collections.defaultdict(list)
for t in self.emp_table.elements():
result_dict[t[1]].append(t)
tuples = []
for key, vals in result_dict.iteritems():
_cnt = len(vals)
_salary_sum = sum(t[3] for t in vals)
_id_sum = sum(t[0] for t in vals)
tuples.append((key, (float(_salary_sum) + float(_id_sum)) / _cnt))
self.check_result(query, collections.Counter(tuples))
def test_decomposable_sum_uda(self):
"""Test of a decomposed sum UDA.
Note that the logical aggregate returns a broken value, so
this test only passes if we decompose the aggregate properly.
"""
query = """
uda MySumBroken(x) {
[0 as _sum];
[_sum + x];
17; -- broken
};
uda MySum(x) {
[0 as _sum];
[_sum + x];
};
uda* MySumBroken {MySum, MySum};
out = [FROM SCAN(%s) AS X EMIT dept_id, MySumBroken(salary)];
STORE(out, OUTPUT);
""" % self.emp_key
self.check_result(query, self.__aggregate_expected_result(sum))
def test_decomposable_uda_with_builtin_agg(self):
"""Test of a decomposed UDA + builtin aggregate.
Note that the logical aggregate returns a broken value, so
this test only passes if we decompose the aggregate properly.
"""
query = """
uda MySumBroken(x) {
[0 as _sum];
[_sum + x];
17; -- broken
};
uda MySum(x) {
[0 as _sum];
[_sum + x];
};
uda* MySumBroken {MySum, MySum};
out = [FROM SCAN(%s) AS X EMIT dept_id, MySumBroken(salary), SUM(id)];
STORE(out, OUTPUT);
""" % self.emp_key
result_dict = collections.defaultdict(list)
for t in self.emp_table.elements():
result_dict[t[1]].append(t)
tuples = []
for key, vals in result_dict.iteritems():
_salary_sum = sum(t[3] for t in vals)
_id_sum = sum(t[0] for t in vals)
tuples.append((key, _salary_sum, _id_sum))
self.check_result(query, collections.Counter(tuples))
def test_duplicate_decomposable_uda(self):
query = """
uda Agg1(x) {
[0 as _sum];
[_sum + x];
};
uda* Agg1 {Agg1, Agg1};
uda* Agg1 {Agg1, Agg1};
"""
with self.assertRaises(DuplicateFunctionDefinitionException):
self.check_result(query, None)
def test_decomposable_uda_type_check_fail1(self):
query = """
uda Logical(x) {
[0 as _sum];
[_sum + x];
};
uda Local(x, y) {
[0 as _sum];
[_sum + x];
};
uda* Logical {Local, Logical};
"""
with self.assertRaises(InvalidArgumentList):
self.check_result(query, None)
def test_decomposable_uda_type_check_fail2(self):
query = """
uda Logical(x) {
[0 as _sum];
[_sum + x];
};
uda Remote(x, y) {
[0 as _sum];
[_sum + x];
};
uda* Logical {Logical, Remote};
"""
with self.assertRaises(InvalidArgumentList):
self.check_result(query, None)
def test_decomposable_uda_type_check_fail3(self):
query = """
uda Logical(x) {
[0 as _sum];
[_sum + x];
};
uda Remote(x) {
[0 as _sum];
[_sum + x];
[1, 2, 3];
};
uda* Logical {Logical, Remote};
"""
with self.assertRaises(InvalidEmitList):
self.check_result(query, None)
def test_running_mean_sapply(self):
query = """
APPLY RunningMean(value) {
[0 AS _count, 0 AS _sum];
[_count + 1, _sum + value];
_sum / _count;
};
out = [FROM SCAN(%s) AS X EMIT id, RunningMean(X.salary)];
STORE(out, OUTPUT);
""" % self.emp_key
tps = []
_sum = 0
_count = 0
for emp in self.emp_table:
_sum += emp[3]
_count += 1
tps.append((emp[0], float(_sum) / _count))
self.check_result(query, collections.Counter(tps))
def test_sapply_multi_invocation(self):
query = """
APPLY RunningSum(x) {
[0 AS _sum];
[_sum + x];
_sum;
};
out = [FROM SCAN(%s) AS X
EMIT id, RunningSum(X.salary), RunningSum(id)];
STORE(out, OUTPUT);
""" % self.emp_key
tps = []
_sum1 = 0
_sum2 = 0
for emp in self.emp_table:
_sum1 += emp[3]
_sum2 += emp[0]
tps.append((emp[0], _sum1, _sum2))
self.check_result(query, collections.Counter(tps))
def test_118_regression(self):
"""Regression test for https://github.com/uwescience/datalogcompiler/issues/118""" # noqa
query = """
out = [FROM SCAN(%s) AS X WHERE dept_id = 2 AND salary = 5000 EMIT id];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[(x[0],) for x in self.emp_table.elements()
if x[1] == 2 and x[3] == 5000])
self.check_result(query, expected)
def test_scan_emp_empty_statement(self):
"""Test with an empty statement."""
query = """
;;;
emp = SCAN(%s);
STORE(emp, OUTPUT);;;
""" % self.emp_key
self.check_result(query, self.emp_table)
def test_empty_statement_parse(self):
"""Program that contains nothing but empty statements."""
with self.assertRaises(MyrialCompileException):
self.check_result(";", None)
def test_case_binary(self):
query = """
emp = SCAN(%s);
rich = [FROM emp EMIT id, CASE WHEN salary > 15000
THEN salary // salary
ELSE 0 // salary END];
STORE(rich, OUTPUT);
""" % self.emp_key
def func(y):
if y > 15000:
return 1
else:
return 0
expected = collections.Counter(
[(x[0], func(x[3])) for x in self.emp_table.elements()])
self.check_result(query, expected)
def test_case_ternary(self):
query = """
emp = SCAN(%s);
rich = [FROM emp EMIT id,
CASE WHEN salary <= 5000 THEN "poor"
WHEN salary <= 25000 THEN "middle class"
ELSE "rich"
END];
STORE(rich, OUTPUT);
""" % self.emp_key
def func(y):
if y <= 5000:
return 'poor'
elif y <= 25000:
return 'middle class'
else:
return 'rich'
expected = collections.Counter(
[(x[0], func(x[3])) for x in self.emp_table.elements()])
self.check_result(query, expected)
def test_case_aggregate(self):
query = """
emp = SCAN(%s);
rich = [FROM emp EMIT SUM(3 * CASE WHEN salary > 15000
THEN 1 ELSE 0 END)];
STORE(rich, OUTPUT);
""" % self.emp_key
_sum = 3 * len([x for x in self.emp_table.elements()
if x[3] > 15000])
self.check_result(query, collections.Counter([(_sum,)]))
def test_case_unbox(self):
query = """
TH = [15000];
A = [1 AS one, 2 AS two, 3 AS three];
emp = SCAN(%s);
rich = [FROM emp EMIT SUM(*A.three * CASE WHEN salary > *TH
THEN 1 ELSE 0 END)];
STORE(rich, OUTPUT);
""" % self.emp_key
_sum = 3 * len([x for x in self.emp_table.elements()
if x[3] > 15000])
self.check_result(query, collections.Counter([(_sum,)]))
def test_default_column_names(self):
with open('examples/groupby1.myl') as fh:
query = fh.read()
self.execute_query(query)
scheme = self.db.get_scheme('OUTPUT')
self.assertEquals(scheme.getName(0), "_COLUMN0_")
self.assertEquals(scheme.getName(1), "id")
def test_worker_id(self):
query = """
X = [FROM SCAN(%s) AS X EMIT X.id, WORKER_ID()];
STORE(X, OUTPUT);
""" % self.emp_key
expected = collections.Counter([(x[0], 0) for x
in self.emp_table.elements()])
self.check_result(query, expected)
def test_flip_zero(self):
"""flip(0) should always evaluate to false"""
query = """
X = [FROM SCAN(%s) AS X WHERE flip(0) EMIT *];
STORE(X, OUTPUT);
""" % self.emp_key
expected = collections.Counter()
self.check_result(query, expected)
def test_flip_one(self):
"""flip(1) should always evaluate to true"""
query = """
X = [FROM SCAN(%s) AS X WHERE flip(1) EMIT *];
STORE(X, OUTPUT);
""" % self.emp_key
expected = collections.Counter(self.emp_table.elements())
self.check_result(query, expected)
def test_substr(self):
query = """
ZERO = [0];
THREE = [3];
out = [FROM SCAN(%s) AS X EMIT X.id, substr(X.name, *ZERO, *THREE)];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[(x[0], x[2][0:3]) for x in self.emp_table.elements()])
self.check_result(query, expected)
def test_concat(self):
query = """
STRS = ["a" as a, "b" as b];
out = [FROM STRS EMIT concat(a, b)];
STORE(out, OUTPUT);
"""
expected = collections.Counter({('ab',): 1})
self.check_result(query, expected)
def test_byterange(self):
query = r"""
BYTES = [b'\xDE\xAD\xBE\xEF' AS bytes];
out = [FROM BYTES AS X EMIT byterange(X.bytes, 2, 4) as res];
STORE(out, OUTPUT);
"""
expected = collections.Counter({(b'\xBE\xEF',): 1})
self.check_result(query, expected)
def test_len(self):
query = """
out = [FROM SCAN(%s) AS X EMIT X.id, len(X.name)];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[(x[0], len(x[2])) for x in self.emp_table.elements()])
self.check_result(query, expected)
def test_head(self):
query = """
out = [FROM SCAN(%s) AS X EMIT X.id, head(X.name, 10)];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[(x[0], x[2][0:10]) for x in self.emp_table.elements()])
self.check_result(query, expected)
def test_tail(self):
query = """
ZERO = [0];
THREE = [3];
out = [FROM SCAN(%s) AS X EMIT X.id, tail(X.name, 10)];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[(x[0], (lambda i: i if len(i) <= 10 else i[len(i) - 10:])(x[2]))
for x in self.emp_table.elements()])
self.check_result(query, expected)
def test_column_name_reserved(self):
query = """
T = EMPTY(x:INT);
A = [FROM T EMIT SafeDiv(x, 3) AS SafeDiv];
STORE (A, BadProgram);
"""
with self.assertRaises(ReservedTokenException):
self.check_result(query, None)
def test_bug_226(self):
query = """
T = scan({emp});
A = select id, salary from T where 1=1;
B = select id, salary from A where salary=90000;
C = select A.* from B, A where A.salary < B.salary;
STORE (C, OUTPUT);
""".format(emp=self.emp_key)
expected = collections.Counter(
(i, s) for (i, d, n, s) in self.emp_table
for (i2, d2, n2, s2) in self.emp_table
if s2 == 90000 and s < s2)
self.assertEquals(expected, self.execute_query(query))
def test_column_mixed_case_reserved(self):
query = """
T = EMPTY(x:INT);
A = [FROM T EMIT MAX(x) AS maX];
STORE (A, BadProgram);
"""
with self.assertRaises(ReservedTokenException):
self.check_result(query, None)
def test_variable_name_reserved(self):
query = """
T = EMPTY(x:INT);
avg = COUNTALL(T);
STORE (countall, BadProgram);
"""
with self.assertRaises(ReservedTokenException):
self.check_result(query, None)
def test_empty_query(self):
query = """
T1 = empty(x:INT);
"""
with self.assertRaises(MyrialCompileException):
self.check_result(query, None)
def test_sink(self):
query = """
ZERO = [0];
A = [from ZERO emit *];
SINK(A);
"""
self.evaluate_sink_query(query)
def test_string_cast(self):
query = """
emp = SCAN(%s);
bc = [FROM emp EMIT STRING(emp.dept_id) AS foo];
STORE(bc, OUTPUT);
""" % self.emp_key
ex = collections.Counter((str(d),) for (i, d, n, s) in self.emp_table)
ex_scheme = scheme.Scheme([('foo', types.STRING_TYPE)])
self.check_result(query, ex, ex_scheme)
def test_float_cast(self):
query = """
emp = SCAN(%s);
bc = [FROM emp EMIT float(emp.dept_id) AS foo];
STORE(bc, OUTPUT);
""" % self.emp_key
ex = collections.Counter((float(d),) for (i, d, n, s) in self.emp_table) # noqa
ex_scheme = scheme.Scheme([('foo', types.DOUBLE_TYPE)])
self.check_result(query, ex, ex_scheme)
def test_scientific_notation(self):
literals = ["1.0e20", "3e40", "5e-6", ".7e8", ".9e-12",
"-3e45", "-6e-78", "9e+12", "3E4"]
query = """
emp = SCAN({});
bc = [FROM emp EMIT {}];
STORE(bc, OUTPUT);
""".format(self.emp_key, ','.join(literals))
ex = collections.Counter(
(tuple(map(float, literals)),) * len(self.emp_table)) # noqa
ex_scheme = scheme.Scheme([('$%d' % i, types.DOUBLE_TYPE)
for i in xrange(len(literals))])
self.check_result(query, ex, ex_scheme)
def test_sequence(self):
query = """
T1 = scan({rel});
store(T1, OUTPUT);
T2 = scan({rel});
store(T2, OUTPUT2);
""".format(rel=self.emp_key)
physical_plan = self.get_physical_plan(query)
self.assertIsInstance(physical_plan, raco.algebra.Sequence)
self.check_result(query, self.emp_table, output='OUTPUT')
self.check_result(query, self.emp_table, output='OUTPUT2')
def test_238_dont_renumber_columns(self):
# see https://github.com/uwescience/raco/issues/238
query = """
x = [1 as a, 2 as b];
y = [from x as x1, x as x2
emit x2.a, x2.b];
z = [from y emit a];
store(z, OUTPUT);"""
self.check_result(query, collections.Counter([(1,)]))
def test_implicit_column_names(self):
query = """
x = [1 as a, 2 as b];
y = [from x as x1, x as x2
emit $0, $1];
store(y, OUTPUT);"""
expected_scheme = scheme.Scheme([('a', types.LONG_TYPE),
('b', types.LONG_TYPE)])
self.check_result(query, collections.Counter([(1, 2)]),
scheme=expected_scheme)
def test_implicit_column_names2(self):
query = """
x = [1 as a, 2 as b];
y = [from x as x1, x as x2
emit $2, $3];
store(y, OUTPUT);"""
expected_scheme = scheme.Scheme([('a', types.LONG_TYPE),
('b', types.LONG_TYPE)])
self.check_result(query, collections.Counter([(1, 2)]),
scheme=expected_scheme)
def test_implicit_column_names3(self):
query = """
x = [1 as a, 2 as b];
y = [from x as x1, x as x2
emit $2, $1];
store(y, OUTPUT);"""
expected_scheme = scheme.Scheme([('a', types.LONG_TYPE),
('b', types.LONG_TYPE)])
self.check_result(query, collections.Counter([(1, 2)]),
scheme=expected_scheme)
def test_unbox_index_column_names(self):
query = """
x = [1 as a, 2 as b];
y = [from x as x1, x as x2
emit x2.$0, x2.$1];
store(y, OUTPUT);"""
expected_scheme = scheme.Scheme([('a', types.LONG_TYPE),
('b', types.LONG_TYPE)])
self.check_result(query, collections.Counter([(1, 2)]),
scheme=expected_scheme)
def test_duplicate_column_names(self):
query = """
x = [1 as a, 2 as b];
y = [from x as x1, x as x2 emit x1.a, x2.a];
store(y, OUTPUT);"""
expected_scheme = scheme.Scheme([('a', types.LONG_TYPE),
('a1', types.LONG_TYPE)])
self.check_result(query, collections.Counter([(1, 1)]),
scheme=expected_scheme)
def test_distinct_aggregate_combinations(self):
"""Test to make sure that aggregates of different columns are not
combined together by the optimizer."""
query = """
emp = scan(%s);
ans = [from emp emit sum(dept_id) as d, sum(salary) as s];
store(ans, OUTPUT);""" % self.emp_key
sum_dept_id = sum([e[1] for e in self.emp_table])
sum_salary = sum([e[3] for e in self.emp_table])
expected = collections.Counter([(sum_dept_id, sum_salary)])
self.check_result(query, expected)
def test_bug_245_dead_code_with_do_while_plan(self):
"""Test to make sure that a dead program (no Stores) with a DoWhile
throws the correct parse error."""
with open('examples/deadcode2.myl') as fh:
query = fh.read()
with self.assertRaises(MyrialCompileException):
self.check_result(query, None)
def test_simple_do_while(self):
"""count to 32 by powers of 2"""
with open('examples/iteration.myl') as fh:
query = fh.read()
expected = collections.Counter([(32, 5)])
self.check_result(query, expected, output="powersOfTwo")
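# The pyUDF tests below only inspect the compiled physical plan; 'test' and
# 'test_uda' are registered as Python UDFs returning LONG_TYPE via the `udas`
# argument to get_physical_plan.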
def test_pyUDF_dotted_arguments(self):
query = """
T1=scan(%s);
out = [from T1 emit test(T1.id, T1.dept_id) As output];
store(out, OUTPUT);
""" % self.emp_key
plan = self.get_physical_plan(query, udas=[('test', LONG_TYPE)])
apply = [op for op in plan.walk() if isinstance(op, Apply)][0]
ref = apply.emitters[0][1]
assert str(ref) == "PYUDF(test, ['id', 'dept_id'], LONG_TYPE)"
def test_pyUDF_with_positional_arguments(self):
query = """
T1=scan(%s);
out = [from T1 emit test($0, $1) As output];
store(out, OUTPUT);
""" % self.emp_key
plan = self.get_physical_plan(query, udas=[('test', LONG_TYPE)])
apply = [op for op in plan.walk() if isinstance(op, Apply)][0]
ref = apply.emitters[0][1]
assert str(ref) == "PYUDF(test, ['$0', '$1'], LONG_TYPE)"
def test_pyUDF_uda(self):
query = """
uda Foo(x){
[0 as _count, 0 as _sum];
[ _count+1, test_uda(_sum, x)];
[ test_uda(_sum,_count) ];
};
T1 = [from scan(%s) as t emit Foo(t.id) As mask];
store(T1, out);
""" % self.emp_key
self.get_physical_plan(query, udas=[('test_uda', LONG_TYPE)])
| 2.171875
| 2
|
testing/cdutil/test_gen_mask.py
|
xylar/cdat
| 62
|
12778475
|
import cdms2,sys,cdutil,os,cdat_info
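# Exercise cdutil.generateLandSeaMask against several targets: the clt.nc
# grid, the clt variable itself, a 64-latitude Gaussian grid, and a regional
# (lat/lon-subset) grid, using the navy land fraction (sftlf/100) as the
# source mask for the first two calls.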
f=cdms2.open(os.path.join(cdat_info.get_sampledata_path(),"navy_land.nc"))
navy_frac = f("sftlf")/100.
target = cdms2.open(os.path.join(cdat_info.get_sampledata_path(),'clt.nc'))("clt",slice(0,1)).getGrid()
mask = cdutil.generateLandSeaMask(target,navy_frac)
target = cdms2.open(os.path.join(cdat_info.get_sampledata_path(),'clt.nc'))("clt",slice(0,1))
mask = cdutil.generateLandSeaMask(target,navy_frac)
target=cdms2.createGaussianGrid(64)
mask = cdutil.generateLandSeaMask(target)
target = cdms2.open(os.path.join(cdat_info.get_sampledata_path(),'clt.nc'))("clt",slice(0,1),latitude=(15,85),longitude=(-175,-65)).getGrid()
mask = cdutil.generateLandSeaMask(target)
#import vcs
#x=vcs.init()
#x.plot(mask)
#raw_input()
| 2.109375
| 2
|
iRODS/scripts/python/validate_json.py
|
wtsi-npg/irods
| 0
|
12778476
|
from __future__ import print_function
import json
import sys
import requests
if len(sys.argv) != 3:
sys.exit('Usage: {0} <configuration_file> <schema_url>'.format(sys.argv[0]))
config_file = sys.argv[1]
schema_uri = sys.argv[2]
def print_error(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
try:
import jsonschema
except ImportError:
print_error('WARNING: Validation Failed for [%s] -- jsonschema not installed' % config_file)
sys.exit(0)
try:
e = jsonschema.exceptions
except AttributeError:
print_error('WARNING: Validation Failed for [%s] -- jsonschema too old v[%s]' %
(config_file, jsonschema.__version__))
sys.exit(0)
try:
# load configuration file
with open(config_file, 'r') as f:
config = json.load(f)
# load the schema url
response = requests.get(schema_uri)
# check response values
try:
# modern requests
schema = json.loads(response.text)
except AttributeError:
# requests pre-v1.0.0
response.encoding = 'utf8'
schema = json.loads(response.content)
# validate
jsonschema.validate(config, schema)
except (jsonschema.exceptions.RefResolutionError) as e:
# could not resolve recursive schema $ref
print_error('WARNING: Validation Failed for [%s]' % config_file)
print_error(' : against [%s]' % schema_uri)
print_error(" {0}: {1}".format(e.__class__.__name__, e))
sys.exit(0)
except (ValueError) as e:
# most network errors and 404s
print_error('WARNING: Validation Failed for [%s]' % config_file)
print_error(' : against [%s]' % schema_uri)
print_error(" {0}: {1}".format(e.__class__.__name__, e))
sys.exit(0)
except (
jsonschema.exceptions.ValidationError,
jsonschema.exceptions.SchemaError
) as e:
print_error('ERROR: Validation Failed for [%s]' % config_file)
print_error(' : against [%s]' % schema_uri)
print_error(" {0}: {1}".format(e.__class__.__name__, e))
sys.exit(1)
except Exception as e:
print_error('ERROR: Validation Failed for [%s]' % config_file)
print_error(' : against [%s]' % schema_uri)
print_error(" {0}: {1}".format(e.__class__.__name__, e))
sys.exit(1)
except:
sys.exit(1)
else:
print("Validating [" + sys.argv[1] + "]... Success")
sys.exit(0)
| 2.328125
| 2
|
saleor/plugins/webhook/const.py
|
victor-abz/saleor
| 1,392
|
12778477
|
CACHE_EXCLUDED_SHIPPING_KEY = "webhook_exclude_shipping_id_"
CACHE_EXCLUDED_SHIPPING_TIME = 60 * 3
EXCLUDED_SHIPPING_REQUEST_TIMEOUT = 2
| 0.75
| 1
|
pvn3d/eval_icp.py
|
hajar2016/hajar
| 0
|
12778478
|
<reponame>hajar2016/hajar
#!/usr/bin/env python3
import os
import cv2
import random
from random import shuffle
import os.path
import nori2
import numpy as np
import pickle as pkl
from PIL import Image
from queue import Queue
from common import Config
from argparse import ArgumentParser
import sys
from tqdm import tqdm
from lib.utils.my_utils import my_utils
from sklearn.neighbors import NearestNeighbors
from sklearn.cluster import MeanShift
import concurrent.futures
# from cv2 import imshow, waitKey
from lib.utils.icp.icp import my_icp, best_fit_transform
cls_type = open('./cls_type.txt').readline().strip()
config = Config(cls_type)
DEBUG=False #True
SHOW=False
tst_nid_lst = my_utils.read_lines(config.val_nid_ptn.format('allobj'))
xmap = np.array([[j for i in range(640)] for j in range(480)])
ymap = np.array([[i for i in range(640)] for j in range(480)])
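# Note: xmap holds the row index and ymap the column index of every pixel in a
# 480x640 image; they are flattened later to back-project masked depth pixels
# into 3D camera coordinates.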
n_sample_points = 2000
mininum_cnt = 1500
nf = nori2.Fetcher()
cls_lst = my_utils.read_lines(config.ycb_cls_lst_p)
obj_dict = {}
for cls_id, cls in enumerate(cls_lst, start=1):
obj_dict[cls] = cls_id
pvn3d_poses = np.load(open('./pvn3d_poses.npy', 'rb'))
n_cls = 22
gb_cls_add_dis = [list() for i in range(n_cls)]
gb_cls_adds_dis = [list() for i in range(n_cls)]
gb_cls_add_dis_icp = [list() for i in range(n_cls)]
gb_cls_adds_dis_icp = [list() for i in range(n_cls)]
radius = 0.06
def get_cld_bigest_clus(p3ds):
n_clus_jobs = 8
ms = MeanShift(
bandwidth=radius, bin_seeding=True, n_jobs=n_clus_jobs
)
ms.fit(p3ds)
clus_labels = ms.labels_
bg_clus = p3ds[np.where(clus_labels == 0)[0], :]
return bg_clus
def cal_adds_dis(cls_ptsxyz, pred_pose, gt_pose):
pred_pts = np.dot(cls_ptsxyz.copy(), pred_pose[:, :3].T) + pred_pose[:, 3]
gt_pts = np.dot(cls_ptsxyz.copy(), gt_pose[:, :3].T) + gt_pose[:, 3]
neigh = NearestNeighbors(n_neighbors=1)
neigh.fit(gt_pts)
distances, _ = neigh.kneighbors(pred_pts, return_distance=True)
return np.mean(distances)
def cal_add_dis(cls_ptsxyz, pred_pose, gt_pose):
pred_pts = np.dot(cls_ptsxyz.copy(), pred_pose[:, :3].T) + pred_pose[:, 3]
gt_pts = np.dot(cls_ptsxyz.copy(), gt_pose[:, :3].T) + gt_pose[:, 3]
mean_dist = np.mean(np.linalg.norm(pred_pts - gt_pts, axis=-1))
return mean_dist
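# Note: cal_adds_dis computes the symmetric ADD-S metric (mean nearest-neighbour
# distance from predicted to ground-truth model points), while cal_add_dis computes
# the standard ADD metric (mean distance between corresponding model points).
# These are the metrics commonly used for YCB-Video pose evaluation.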
sv_icp_msk_dir = 'train_log/eval_result/icp'
if not os.path.exists(sv_icp_msk_dir):
os.mkdir(sv_icp_msk_dir)
def sv_mesh(p3ds, sv_pth):
with open(sv_pth, 'w') as f:
for p3d in p3ds:
print('v', p3d[0], p3d[1], p3d[2], file=f)
def eval_item(nid_pvn3d_poses):
nid, pvn3d_poses = nid_pvn3d_poses[0], nid_pvn3d_poses[1]
# print(nid, pvn3d_poses)
data = pkl.loads(nf.get(nid))
obj_info_lst = data['obj_info_lst']
dpt = data['depth'].astype(np.float32).copy()
labels = data['label']
cam_scale = data['meta']['factor_depth'].astype(np.float32)[0][0]
msk_dp = dpt > 1e-6
cls_add_dis = [list() for i in range(n_cls)]
cls_adds_dis = [list() for i in range(n_cls)]
cls_add_dis_icp = [list() for i in range(n_cls)]
cls_adds_dis_icp = [list() for i in range(n_cls)]
K = obj_info_lst[0]['K']
for i, obj_info in enumerate(obj_info_lst):
cls_id = obj_dict[obj_info['cls_typ']]
has_pose = False
for cid, pose in pvn3d_poses:
if cid == cls_id:
has_pose = True
break
if not has_pose:
pose = np.zeros((3, 4), dtype=np.float32)
init_pose = np.identity(4, dtype=np.float32)
init_pose[:3, :] = pose
cls_msk = msk_dp & (labels == cls_id)
if DEBUG and SHOW:
cv2.imshow('cls_msk', cls_msk.astype('uint8') * 255)
# if cls_msk.sum() < n_sample_points:
# print("num pts:", cls_msk.sum())
choose = cls_msk.flatten().nonzero()[0].astype(np.uint32)
if len(choose) > n_sample_points:
c_mask = np.zeros(len(choose), dtype=int)
c_mask[:n_sample_points] = 1
np.random.shuffle(c_mask)
choose = choose[c_mask.nonzero()]
# else:
# choose = np.pad(choose, (0, n_sample_points-len(choose)), 'wrap')
cls_ptsxyz = my_utils.get_pointxyz(cls_lst[cls_id-1])
adds_dis = cal_adds_dis(cls_ptsxyz.copy(), pose, obj_info['RT'])
add_dis = cal_add_dis(cls_ptsxyz.copy(), pose, obj_info['RT'])
cls_adds_dis[cls_id].append(adds_dis)
cls_add_dis[cls_id].append(add_dis)
cls_adds_dis[0].append(adds_dis)
cls_add_dis[0].append(add_dis)
if len(choose) < mininum_cnt:
cls_adds_dis_icp[cls_id].append(adds_dis)
cls_add_dis_icp[cls_id].append(add_dis)
cls_adds_dis_icp[0].append(adds_dis)
cls_add_dis_icp[0].append(add_dis)
continue
if DEBUG:
pvn3d_p3d = np.dot(cls_ptsxyz.copy(), pose[:3, :3].T) + pose[:3, 3]
pvn3d_p2d = my_utils.project_p3d(pvn3d_p3d, 1)
show_pvn3d_pose = np.zeros((480, 640, 3), dtype='uint8')
show_pvn3d_pose = my_utils.draw_p2ds(show_pvn3d_pose, pvn3d_p2d)
if SHOW:
cv2.imshow('pvn3d', show_pvn3d_pose)
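        # Back-project the masked depth pixels to 3D camera coordinates with the pinhole
        # model: X = (u - cx) * Z / fx, Y = (v - cy) * Z / fy, Z = depth / cam_scale.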
dpt_mskd = dpt.flatten()[choose][:, np.newaxis].astype(np.float32).copy()
xmap_mskd = xmap.flatten()[choose][:, np.newaxis].astype(np.float32)
ymap_mskd = ymap.flatten()[choose][:, np.newaxis].astype(np.float32)
pt2 = dpt_mskd / cam_scale
cam_cx, cam_cy = K[0][2], K[1][2]
cam_fx, cam_fy = K[0][0], K[1][1]
pt0 = (ymap_mskd - cam_cx) * pt2 / cam_fx
pt1 = (xmap_mskd - cam_cy) * pt2 / cam_fy
cld = np.concatenate( (pt0, pt1, pt2), axis=1 )
cld = get_cld_bigest_clus(cld)
if DEBUG:
cld_p2d = my_utils.project_p3d(cld, 1)
show_cld = np.zeros((480, 640, 3), dtype='uint8')
show_cld = my_utils.draw_p2ds(show_cld, cld_p2d)
if SHOW:
cv2.imshow('cld', show_cld)
icp_pose, dis, _ = my_icp(
cls_ptsxyz.copy(), cld, init_pose=init_pose,
max_iterations=500,
tolerance=1e-9
)
# print('dis final icp:', np.mean(dis))
pose = icp_pose[:3, :]
adds_dis_icp = cal_adds_dis(cls_ptsxyz.copy(), pose, obj_info['RT'])
add_dis_icp = cal_add_dis(cls_ptsxyz.copy(), pose, obj_info['RT'])
cls_adds_dis_icp[cls_id].append(adds_dis_icp)
cls_add_dis_icp[cls_id].append(add_dis_icp)
cls_adds_dis_icp[0].append(adds_dis_icp)
cls_add_dis_icp[0].append(add_dis_icp)
# print(adds_dis, adds_dis_icp, add_dis, add_dis_icp)
if DEBUG:
icp_p3d = np.dot(cls_ptsxyz.copy(), pose[:3, :3].T) + pose[:3, 3]
icp_p2d = my_utils.project_p3d(icp_p3d, 1)
show_icp_pose = np.zeros((480, 640, 3), dtype='uint8')
show_icp_pose = my_utils.draw_p2ds(show_icp_pose, icp_p2d)
if adds_dis_icp - adds_dis > 0.05:
item_name = '{}_{}_{}_'.format(nid, adds_dis_icp, adds_dis)
sv_mesh(
icp_p3d,
os.path.join(sv_icp_msk_dir, item_name+'icp.obj'),
)
sv_mesh(
pvn3d_p3d,
os.path.join(sv_icp_msk_dir, item_name+'pvn3d.obj'),
)
sv_mesh(
cld,
os.path.join(sv_icp_msk_dir, item_name+'cld.obj'),
)
if SHOW:
cv2.imshow('icp', show_icp_pose)
cmd = cv2.waitKey(0)
if cmd == ord('q'):
exit()
return (cls_add_dis, cls_adds_dis, cls_add_dis_icp, cls_adds_dis_icp)
def eval_item_pvn3d_msk(ipic_nid):
ipic, nid, pvn3d_poses = ipic_nid[0], ipic_nid[1], ipic_nid[2]
info_fd = 'train_log/eval_result/004_sugar_box/torch_res/our_msk_info/'
data = pkl.loads(nf.get(nid))
dpt = data['depth'].astype(np.float32).copy()
obj_info_lst = data['obj_info_lst']
labels = cv2.imread(info_fd+'{}_fillpredmsk.png'.format(ipic))[:, :, 0]
cls_add_dis = [list() for i in range(n_cls)]
cls_adds_dis = [list() for i in range(n_cls)]
cls_add_dis_icp = [list() for i in range(n_cls)]
cls_adds_dis_icp = [list() for i in range(n_cls)]
K = obj_info_lst[0]['K']
cam_scale = data['meta']['factor_depth'].astype(np.float32)[0][0]
all_cld, choose = my_utils.dpt_2_cld(dpt, cam_scale, K)
labels = labels.reshape(-1)[choose]
for i, obj_info in enumerate(obj_info_lst):
cls_id = obj_dict[obj_info['cls_typ']]
        has_pose = False
        for cid, pose in pvn3d_poses:
if cid == cls_id:
has_pose = True
break
if not has_pose:
pose = np.zeros((3, 4), dtype=np.float32)
init_pose = np.identity(4, dtype=np.float32)
init_pose[:3, :] = pose
cls_ptsxyz = my_utils.get_pointxyz(cls_lst[cls_id-1])
adds_dis = cal_adds_dis(cls_ptsxyz.copy(), pose, obj_info['RT'])
add_dis = cal_add_dis(cls_ptsxyz.copy(), pose, obj_info['RT'])
cls_adds_dis[cls_id].append(adds_dis)
cls_add_dis[cls_id].append(add_dis)
cls_adds_dis[0].append(adds_dis)
cls_add_dis[0].append(add_dis)
choose = np.where(labels == cls_id)[0]
if len(choose) > n_sample_points:
c_mask = np.zeros(len(choose), dtype=int)
c_mask[:n_sample_points] = 1
np.random.shuffle(c_mask)
choose = choose[c_mask.nonzero()]
cld = all_cld[choose, :]
if cld.shape[0] < 1500:
cls_adds_dis_icp[cls_id].append(adds_dis)
cls_add_dis_icp[cls_id].append(add_dis)
cls_adds_dis_icp[0].append(adds_dis)
cls_add_dis_icp[0].append(add_dis)
continue
cld = get_cld_bigest_clus(cld)
icp_pose, dis, _ = my_icp(
cls_ptsxyz.copy(), cld, init_pose=init_pose,
max_iterations=500,
tolerance=1e-9
)
pose = icp_pose[:3, :]
adds_dis_icp = cal_adds_dis(cls_ptsxyz.copy(), pose, obj_info['RT'])
add_dis_icp = cal_add_dis(cls_ptsxyz.copy(), pose, obj_info['RT'])
cls_adds_dis_icp[cls_id].append(adds_dis_icp)
cls_add_dis_icp[cls_id].append(add_dis_icp)
cls_adds_dis_icp[0].append(adds_dis_icp)
cls_add_dis_icp[0].append(add_dis_icp)
return (cls_add_dis, cls_adds_dis, cls_add_dis_icp, cls_adds_dis_icp)
max_workers= 10
label_type='predmsk'
# label_type='gtmsk'
def cal_pose_icp():
pvn3d_poses = np.load(open('./pvn3d_poses.npy', 'rb'))
idx = 0
with concurrent.futures.ProcessPoolExecutor(
max_workers=max_workers
) as executor:
if label_type == 'predmsk':
exc_map = executor.map(
eval_item_pvn3d_msk, tqdm(
zip(list(range(len(tst_nid_lst))), tst_nid_lst, pvn3d_poses)
)
)
else:
exc_map = executor.map(
eval_item, tqdm(zip(tst_nid_lst, pvn3d_poses))
)
for data in exc_map:
"""
data: (cls_add_dis, cls_adds_dis, cls_add_dis_icp, cls_adds_dis_icp)
"""
for cls_id in range(n_cls):
gb_cls_add_dis[cls_id] += data[0][cls_id]
gb_cls_adds_dis[cls_id] += data[1][cls_id]
gb_cls_add_dis_icp[cls_id] += data[2][cls_id]
gb_cls_adds_dis_icp[cls_id] += data[3][cls_id]
idx += 1
print(idx)
cls_add_auc_icp = []
cls_add_auc = []
cls_adds_auc_icp = []
cls_add_s_auc_icp = []
cls_adds_auc = []
gb_cls_add_s_dis_icp = [list() for i in range(22)]
for cls_id in range(1, 22):
if cls_id in config.ycb_sym_cls_ids:
gb_cls_add_s_dis_icp[cls_id] = gb_cls_adds_dis_icp[cls_id]
else:
gb_cls_add_s_dis_icp[cls_id] = gb_cls_add_dis_icp[cls_id]
gb_cls_add_s_dis_icp[0] += gb_cls_add_s_dis_icp[cls_id]
for cls_id in range(0, 22):
cls_add_auc_icp.append(my_utils.cal_auc(gb_cls_add_dis_icp[cls_id]))
cls_add_auc.append(my_utils.cal_auc(gb_cls_add_dis[cls_id]))
cls_adds_auc_icp.append(my_utils.cal_auc(gb_cls_adds_dis_icp[cls_id]))
cls_adds_auc.append(my_utils.cal_auc(gb_cls_adds_dis[cls_id]))
cls_add_s_auc_icp.append(my_utils.cal_auc(gb_cls_add_s_dis_icp[cls_id]))
if cls_id == 0:
print("all obj:")
else:
print(cls_lst[cls_id-1], ":")
print(
"########## add_icp:\t", cls_add_auc_icp[-1], "\n",
"########## add:\t", cls_add_auc[-1], "\n",
"########## adds_icp:\t", cls_adds_auc_icp[-1], "\n",
"########## adds:\t", cls_adds_auc[-1], "\n"
)
print("icp:")
print_screen("icp_add_auc: ", cls_add_auc_icp)
print_screen("icp_adds_auc: ", cls_adds_auc_icp)
print_screen("icp_add_s_auc: ", cls_add_s_auc_icp)
sv_info = dict(
adds_auc_icp=cls_adds_auc_icp,
adds_auc=cls_adds_auc,
add_auc_icp=cls_add_auc_icp,
add_auc=cls_add_auc,
cls_add_dis_icp=gb_cls_add_dis_icp,
cls_adds_dis_icp=gb_cls_adds_dis_icp,
cls_add_dis=gb_cls_add_dis,
cls_adds_dis=gb_cls_adds_dis,
)
pkl.dump(
sv_info,
open(
'./train_log/eval_result/icp_sv_info_{}_{}_{}_{}_{}_{}.pkl'.format(
n_sample_points, radius,
cls_adds_auc_icp[0], cls_add_auc_icp[0],
label_type, mininum_cnt
),
'wb'
)
)
def print_screen(title, aucs):
print(title)
for i in range(22):
print(aucs[i])
def fill_label_item(ipic_nid):
ipic, nid= ipic_nid[0], ipic_nid[1]
info_fd = 'train_log/eval_result/004_sugar_box/torch_res/our_msk_info/'
info_ptn = info_fd + '{}.pkl'
data = pkl.loads(nf.get(nid))
obj_info_lst = data['obj_info_lst']
dpt = data['depth'].astype(np.float32).copy()
cam_scale = data['meta']['factor_depth'].astype(np.float32)[0][0]
K = obj_info_lst[0]['K']
all_cld, all_choose = my_utils.dpt_2_cld(dpt, cam_scale, K)
data_pred = pkl.load(open(info_ptn.format(ipic), 'rb'))
if 'p3ds' in data_pred.keys():
key_pcld = 'p3ds'
else:
key_pcld = 'pcld'
sample_cld = data_pred[key_pcld]
if 'pred_label' in data_pred.keys():
key_lb = 'pred_label'
else:
key_lb = 'labels'
pred_labels = data_pred[key_lb]
if type(sample_cld) != np.ndarray:
sample_cld = sample_cld.cpu().numpy()
pred_labels = pred_labels.cpu().numpy()
neigh = NearestNeighbors(n_neighbors=1)
neigh.fit(sample_cld)
distances, indices = neigh.kneighbors(all_cld, return_distance=True)
all_labels = pred_labels[indices]
all_msk = np.zeros((480, 640), dtype="uint8")
all_msk = all_msk.reshape(-1)
all_msk[all_choose] = all_labels[:, 0]
all_msk = all_msk.reshape((480, 640))
cv2.imwrite(info_fd+'{}_fillpredmsk.png'.format(ipic), all_msk)
# cv2.imshow("pred_msk", all_msk * (255 // 22))
# cmd = cv2.waitKey(0)
# if cmd == ord('q'):
# exit()
def fill_label():
idx = 0
with concurrent.futures.ProcessPoolExecutor(
max_workers=max_workers
# max_workers=1
) as executor:
exc_map = executor.map(
fill_label_item, tqdm(enumerate(tst_nid_lst))
)
for data in exc_map:
idx += 1
print(idx)
def main():
cal_pose_icp()
# fill_label()
if __name__ == "__main__":
main()
# vim: ts=4 sw=4 sts=4 expandtab
| 1.703125
| 2
|
tests/load_balancer.py
|
NaHCO314/api-client
| 38
|
12778479
|
<reponame>NaHCO314/api-client<filename>tests/load_balancer.py
#!/usr/bin/env python3
import argparse
import pathlib
def main():
parser = argparse.ArgumentParser(description='Categorize and list test files in the tests/ directory')
parser.add_argument('keyword', choices=('stable', 'unstable'))
args = parser.parse_args()
tests = pathlib.Path('tests')
unstable = [
tests / 'get_problem_codeforces.py',
tests / 'get_contest_codeforces.py',
tests / 'get_problem_poj.py',
tests / 'get_problem_topcoder.py',
tests / 'service_codeforces.py',
tests / 'service_codechef.py',
]
if args.keyword == 'unstable':
files = unstable
elif args.keyword == 'stable':
files = [file for file in tests.glob('*.py') if file not in unstable]
else:
assert False
print('::set-output name=files::', *map(str, files))
if __name__ == '__main__':
main()
| 2.65625
| 3
|
tests/test_api_request.py
|
wkgreen/python-idex
| 0
|
12778480
|
<reponame>wkgreen/python-idex
#!/usr/bin/env python
# coding=utf-8
from idex.client import Client
from idex.exceptions import IdexAPIException, IdexRequestException
import pytest
import requests_mock
client = Client()
def test_invalid_json():
"""Test Invalid response Exception"""
with pytest.raises(IdexRequestException):
with requests_mock.mock() as m:
m.post('https://api.idex.market/returnTicker', text='<head></html>')
client.get_tickers()
def test_api_exception():
"""Test API response Exception"""
with pytest.raises(IdexAPIException):
with requests_mock.mock() as m:
json_obj = {
"error": "Signature verification failed"
}
m.post('https://api.idex.market/returnOrderBook', json=json_obj, status_code=200)
client.get_order_books()
| 2.53125
| 3
|
ooobuild/lo/awt/x_toolkit.py
|
Amourspirit/ooo_uno_tmpl
| 0
|
12778481
|
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Interface Class
# this is an auto-generated file produced by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.awt
import typing
from abc import abstractmethod
from ..uno.x_interface import XInterface as XInterface_8f010a43
if typing.TYPE_CHECKING:
from .rectangle import Rectangle as Rectangle_84b109e9
from .window_descriptor import WindowDescriptor as WindowDescriptor_d61e0ceb
from .x_device import XDevice as XDevice_70ba08fc
from .x_region import XRegion as XRegion_70f30910
from .x_window_peer import XWindowPeer as XWindowPeer_99760ab0
class XToolkit(XInterface_8f010a43):
"""
specifies a factory interface for the window toolkit.
This is similar to the abstract window toolkit (AWT) in Java.
See Also:
`API XToolkit <https://api.libreoffice.org/docs/idl/ref/interfacecom_1_1sun_1_1star_1_1awt_1_1XToolkit.html>`_
"""
__ooo_ns__: str = 'com.sun.star.awt'
__ooo_full_ns__: str = 'com.sun.star.awt.XToolkit'
__ooo_type_name__: str = 'interface'
__pyunointerface__: str = 'com.sun.star.awt.XToolkit'
@abstractmethod
def createRegion(self) -> 'XRegion_70f30910':
"""
creates a region.
"""
@abstractmethod
def createScreenCompatibleDevice(self, Width: int, Height: int) -> 'XDevice_70ba08fc':
"""
creates a virtual device that is compatible with the screen.
"""
@abstractmethod
def createWindow(self, Descriptor: 'WindowDescriptor_d61e0ceb') -> 'XWindowPeer_99760ab0':
"""
creates a new window using the given descriptor.
Raises:
com.sun.star.lang.IllegalArgumentException: ``IllegalArgumentException``
"""
@abstractmethod
def createWindows(self, Descriptors: 'typing.Tuple[WindowDescriptor_d61e0ceb, ...]') -> 'typing.Tuple[XWindowPeer_99760ab0, ...]':
"""
returns a sequence of windows which are newly created using the given descriptors.
Raises:
com.sun.star.lang.IllegalArgumentException: ``IllegalArgumentException``
"""
@abstractmethod
def getDesktopWindow(self) -> 'XWindowPeer_99760ab0':
"""
returns the desktop window.
"""
@abstractmethod
def getWorkArea(self) -> 'Rectangle_84b109e9':
"""
For LibreOffice versions < 4.1, this method just returned an empty rectangle.
After that, it started returning a valid value.
"""
__all__ = ['XToolkit']
| 1.71875
| 2
|
crispy_forms/tests/urls.py
|
iedparis8/crispy-forms
| 3
|
12778482
|
import django
if django.get_version() >= '1.5':
from django.conf.urls import patterns, url
else:
from django.conf.urls.defaults import patterns, url
urlpatterns = patterns('',
url(r'^simple/action/$', 'simpleAction', name = 'simpleAction'),
)
| 1.953125
| 2
|
resc/resc.py
|
hboshnak/pipenv-template
| 1
|
12778483
|
<reponame>hboshnak/pipenv-template
"""
Some math functions.
"""
def add(a_num, b_num):
"""Adds two numbers."""
return a_num + b_num
def sub(a_num, b_num):
"""Subtract two numbers."""
return a_num - b_num
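# Illustrative usage:
#   add(2, 3)  # -> 5
#   sub(5, 2)  # -> 3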
| 2.15625
| 2
|
scripts/emit_step08_postprocess_linearity.py
|
emit-sds/emit-sds-l1b
| 0
|
12778484
|
# <NAME>
import numpy as np
import pylab as plt
from spectral.io import envi
import os, sys
sys.path.append('../utils')
from fpa import FPA
I = envi.open('../data/EMIT_LinearityMap_20220117.hdr').load()
thresh = 20
fpa = FPA('../config/tvac2_config.json')
for band in range(I.shape[2]):
x = np.squeeze(I[:,:,band])
# Remove anomalously high or low values
for row in range(1,x.shape[0]):
for col in range(x.shape[1]):
if abs(x[row,col])>thresh:
x[row,col] = x[row-1,col]
    # Copy and paste linearity columns over the first acquisition zone,
# which is anomalous
for col in range(24,44):
x[:,col] = x[:,44]
# Copy and paste linearity columns over the goober zone,
# which is anomalous
for col in range(1020,1027):
x[:,col] = x[:,1027]
# Copy and paste linearity rows over the OSF filter,
# which is anomalous
for lo, hi in fpa.osf_seam_positions:
for row in range(lo, hi+1):
x[row,:] = x[lo-1,:]
I[:,:,band] = x.reshape((x.shape[0],x.shape[1],1))
envi.save_image('../data/EMIT_LinearityMap_20220117.hdr',I,ext='',force=True)
| 2.171875
| 2
|
tools/main.py
|
reduf/Headquarter
| 1
|
12778485
|
from process import *
import sys
import time
import signal
def main(argv):
# proc = Process.from_name('Gw.exe')
proc = Process(3356)
dbg = ProcessDebugger(proc)
scanner = ProcessScanner(proc)
def signal_handler(signal, frame):
dbg.detach()
signal.signal(signal.SIGINT, signal_handler)
@Hook.fastcall(LPVOID, DWORD, LPVOID)
def OnSendPacket(ctx, size, packet):
header = proc.read(packet, 'I')[0]
print('Packet {%-3d, %d, 0x%X}' % (size, header, header))
@Hook.fastcall(LPVOID)
def OnRecvPacket(packet):
header = proc.read(packet, 'I')[0]
print(header)
@Hook.fastcall(LPVOID, DWORD)
def OnWriteChatLog(msg, channel):
print(f'New message in channel {channel}')
addr = scanner.find(b'\x55\x8B\xEC\x83\xEC\x2C\x53\x56\x57\x8B\xF9\x85')
dbg.add_hook(addr, OnSendPacket)
# dbg.add_hook(0x007DE540, OnWriteChatLog)
"""
addr = scanner.find(b'\x50\x52\x8B\x55\x0C\xC7\x45\xF8', -0x23)
addr = proc.read(addr)[0] # 0xA2B294
addr = proc.read(addr)[0] # gs = *(GameServer **)0xA2B294, 0xa2b294
addr = proc.read(addr + 8)[0] # gs->consts
gs_srv_codecs, _, gs_srv_codecs_count = proc.read(addr + 44, 'III')
addr_gs_srv = range(gs_srv_codecs, gs_srv_codecs + (12 * gs_srv_codecs_count), 12) # GAME_SERVER
for id, addr in enumerate(addr_gs_srv):
fields_addr, count, handler = proc.read(addr, 'III')
if not handler:
continue
if id in ignored_stoc:
continue
# dbg.add_hook(handler, OnRecvPacket)
"""
print(f'Start debugging process {proc.name}, {proc.id}')
return dbg.run(frequency=250)
if __name__ == '__main__':
error = main(sys.argv[1:])
sys.exit(error)
| 2.265625
| 2
|
tests/__init__.py
|
shizhao/pywikibot-core
| 0
|
12778486
|
<filename>tests/__init__.py<gh_stars>0
# -*- coding: utf-8 -*-
#
# (C) Pywikipedia bot team, 2007
#
# Distributed under the terms of the MIT license.
#
__version__ = '$Id: 0487fc48520b7ec1775d23b30a33c986a52eba49 $'
import os
import pywikibot.data.api
from pywikibot.data.api import Request as _original_Request
from pywikibot.data.api import CachedRequest
class TestRequest(CachedRequest):
def __init__(self, *args, **kwargs):
super(TestRequest, self).__init__(0, *args, **kwargs)
def _get_cache_dir(self):
path = os.path.join(os.path.split(__file__)[0], 'apicache')
self._make_dir(path)
return path
def _expired(self, dt):
return False
def submit(self):
cached_available = self._load_cache()
if not cached_available:
            print(str(self))
return super(TestRequest, self).submit()
def patch_request():
pywikibot.data.api.Request = TestRequest
def unpatch_request():
pywikibot.data.api.Request = _original_Request
| 2.421875
| 2
|
gpio_device_tools/config.py
|
alexeiken/gpio-device-tools
| 0
|
12778487
|
import ConfigParser
import os
import re
# date format
dt_format = "%Y-%m-%dT%H:%M:%S"
# boolean states
_boolean_states = {'1': True, 'yes': True, 'y': True, 'true': True, 't': True, 'on': True, 'high': True,
'0': False, 'no': False, 'n': False, 'false': False, 'f': False, 'off': False, 'low': False}
def is_boolean_value(v):
return v.lower() in _boolean_states
def get_boolean_value(v):
if is_boolean_value(v):
return _boolean_states[v.lower()]
else:
        raise ValueError, 'Not a boolean value: {}'.format(v)
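# Illustrative usage: get_boolean_value('yes') -> True, get_boolean_value('off') -> False;
# any string not listed in _boolean_states raises ValueError.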
# Output format constants
TEXT_FORMAT = 'txt'
CSV_FORMAT = 'csv'
XML_FORMAT = 'xml'
JSON_FORMAT = 'json'
MQTT_FORMAT = 'mqtt'
_output_formats = {TEXT_FORMAT, CSV_FORMAT, XML_FORMAT, JSON_FORMAT, MQTT_FORMAT}
# Channel constants
CONSOLE_CHANNEL = 1
FILE_CHANNEL = 2
CSV_FILE_CHANNEL = 3
MQTT_CHANNEL = 4
tpl_file_path_name = "templates/{0}/template.{1}"
# 1-wire DS1820 defaults
default_w1_dir = '/sys/bus/w1/devices/'
default_w1_file = 'w1_slave'
chip_w1_dir = '/sys/bus/w1/devices'
chip_w1_file = 'eeprom'
def detect_chip():
""" Detect the CHIP computer from Next Thing Co, this could also be used to other
Allwinner Based SBCs
"""
# Open cpuinfo
with open('/proc/cpuinfo','r') as infile:
cpuinfo = infile.read()
# Match a line like 'Hardware : Allwinner sun4i/sun5i Families'
match = re.search('^Hardware\s+:.*$', cpuinfo,
flags=re.MULTILINE | re.IGNORECASE)
if not match:
return False
if "sun4i/sun5i" in match.group(0):
return True
else:
return False
chip_platform = detect_chip()
sensor_defaults = {
'label': 'Label',
'id': 'ID'
}
# MqttChannel defaults
mqtt_defaults = {
'host': 'localhost',
'port': '1883',
'client_id': 'gpio-device-tools',
'qos': '0',
'retain': 'True',
'keepalive': '60'
}
mqtt_bindings_defaults = {
'inverted': '0'
}
mqtt_sensor_topic_format = 'sensor/{}/{}/{}'
mqtt_actuator_topic_format = 'actuator/{}/{}/{}'
class ConfigurationError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class ConfigItem():
pass
class SensorConfigReader():
def __init__(self, cfg_file=None, mqtt_cfg_file=None, verbose=False):
if cfg_file is None:
raise ConfigurationError('Config file not defined')
if os.path.isabs(cfg_file):
abs_file= cfg_file
else:
abs_file = os.path.abspath(cfg_file)
conf = ConfigParser.SafeConfigParser(
defaults=sensor_defaults,
allow_no_value=True)
if abs_file is not None and os.path.isfile(abs_file):
with open(abs_file) as f:
conf.readfp(f)
else:
raise ConfigurationError('Config file not found: {} '.format(
abs_file))
self.sensors = []
try:
if conf.has_option('default', 'output'):
output = conf.get('default', 'output')
else:
raise ConfigurationError('Value \'output\' not defined in section [default]')
if output.lower() not in _output_formats:
raise ConfigurationError('Unsupported value \'{}\' for option \'output\' in section [default]'
.format(output))
if conf.has_option('default', 'file'):
out_file = conf.get('default', 'file')
else:
out_file = None
if conf.has_option('default', 'hostname'):
hostname = conf.get('default', 'hostname')
else:
hostname = None
if conf.has_option('default', 'verbose'):
verbose = conf.getboolean('default', 'verbose')
if output == CSV_FORMAT and out_file is None:
raise ConfigurationError('Value \'file\' needs to be defined for CSV output')
elif output == MQTT_FORMAT and hostname is None:
raise ConfigurationError('Value \'hostname\' needs to be defined for MQTT output')
if output == CSV_FORMAT:
out_channel = CSV_FILE_CHANNEL
elif output == MQTT_FORMAT:
out_channel = MQTT_CHANNEL
elif out_file is not None:
out_channel = FILE_CHANNEL
else:
out_channel = CONSOLE_CHANNEL
if out_channel == MQTT_CHANNEL and mqtt_cfg_file is None:
mqtt_cfg_file = cfg_file
else:
mqtt_cfg_file = None
i = 1
section_prefix = 'sensor'
while conf.has_section(section_prefix + str(i)):
section = section_prefix + str(i)
if conf.has_option(section, 'type'):
type = conf.get(section, 'type')
else:
raise ConfigurationError('Value \'type\' not defined in section [{}]'.format(
section))
if conf.has_option(section, 'param'):
param = conf.get(section, 'param')
else:
param = None
if conf.has_option(section, 'id'):
id = conf.get(section, 'id')
if conf.has_option(section, 'label'):
label = conf.get(section, 'label')
if conf.has_option(section, 'topic'):
topic = conf.get(section, 'topic')
else:
topic = None
sensor = ConfigItem()
sensor.out_channel = out_channel
sensor.out_file = out_file
sensor.mqtt_cfg_file = mqtt_cfg_file
sensor.hostname = hostname
sensor.sensor_type = type
sensor.sensor_param = param
sensor.id = id
sensor.label = label
sensor.topic = topic
sensor.verbose = verbose
self.sensors.append(sensor)
i += 1
except ConfigParser.NoOptionError:
pass
class MqttServerConfigReader():
def __init__(self, cfg_file=None):
conf = ConfigParser.SafeConfigParser(
defaults=mqtt_defaults,
allow_no_value=True)
abs_file = None
if os.path.isabs(cfg_file):
abs_file= cfg_file
else:
abs_file = os.path.abspath(cfg_file)
if abs_file is not None and os.path.isfile(abs_file):
with open(abs_file) as f:
conf.readfp(f)
section = 'mqtt'
if not conf.has_section('mqtt'):
section = 'DEFAULT'
if conf.has_option(section, 'host'):
self.mqtt_host = conf.get(section, 'host')
else:
raise ConfigurationError('Value \'host\' is not defined')
if conf.has_option(section, 'port'):
self.port = conf.get(section, 'port')
else:
raise ConfigurationError('Value \'port\' is not defined')
if conf.has_option(section, 'client_id'):
self.client_id = conf.get(section, 'client_id')
else:
raise ConfigurationError('Value \'client_id\' is not defined')
self.qos = conf.getint(section, 'qos')
self.retain = conf.getboolean(section, 'retain')
self.keepalive = conf.getint(section, 'keepalive')
class MqttBindingConfigReader():
def __init__(self, cfg_file=None):
if cfg_file is None:
raise ConfigurationError('Config file not defined')
if os.path.isabs(cfg_file):
abs_file= cfg_file
else:
abs_file = os.path.abspath(cfg_file)
conf = ConfigParser.SafeConfigParser(
defaults=mqtt_bindings_defaults,
allow_no_value=True)
if abs_file is not None and os.path.isfile(abs_file):
with open(abs_file) as f:
conf.readfp(f)
else:
raise ConfigurationError('Config file not found: {}'.format(
abs_file))
self.bindings = []
try:
i = 1
section_prefix = 'binding'
while conf.has_section(section_prefix + str(i)):
section = section_prefix + str(i)
if conf.has_option(section, 'topic'):
topic = conf.get(section, 'topic')
else:
raise ConfigurationError('Value \'topic\' not defined in section [{}]'.format(
section))
if conf.has_option(section, 'pin'):
pin = conf.get(section, 'pin')
else:
raise ConfigurationError('Value \'pin\' not defined in section [{}]'.format(
section))
if conf.has_option(section, 'invert'):
inverted = conf.getboolean(section, 'invert')
else:
inverted = False
self.bindings.append(
create_binding_cfg(topic, pin, inverted)
)
i += 1
except ConfigParser.NoOptionError:
pass
def create_binding_cfg(topic, pin, invert):
binding = ConfigItem()
binding.topic = topic
binding.pin = pin
binding.invert = invert
return binding
| 2.515625
| 3
|
subfinder/subfinder_gevent.py
|
ttimasdf/subfinder
| 3
|
12778488
|
# -*- coding: utf8 -*-
""" SunFinder 的协程版本
"""
from __future__ import unicode_literals
from gevent.pool import Pool
from .subfinder import SubFinder
class SubFinderGevent(SubFinder):
""" SubFinder Thread version
"""
def _init_pool(self):
self.pool = Pool(10)
| 1.710938
| 2
|
bot/cogs/birthday.py
|
janaSunrise/HotWired-Bot
| 16
|
12778489
|
<reponame>janaSunrise/HotWired-Bot
import asyncio
import datetime
import itertools
import contextlib
import json
from discord import Color, Embed, Channel, Forbidden, HTTPException, Role, utils
from discord.ext.commands import Cog, Context, group, has_permissions
from bot.core import Bot
BDAY_JSON_PATH = "assets/bday.json"
JSON_CONFIG = {
"roles": {},
"channels": {},
"birthdays": {},
"yesterday": []
}
ROLE_SET = ":white_check_mark: The birthday role on **{s}** has been set to: **{r}**."
BDAY_INVALID = ":x: The birthday date you entered is invalid. It must be `MM-DD`."
BDAY_SET = ":white_check_mark: Your birthday has been set to: **{}**."
CHANNEL_SET = ":white_check_mark: The channel for announcing birthdays on **{s}** has been set to: **{c}**."
BDAY_REMOVED = ":put_litter_in_its_place: Your birthday has been removed."
class Birthday(Cog):
"""Announces birthdays and gives them a special role for the whole day."""
def __init__(self, bot: Bot) -> None:
self.bot = bot
self.load_data()
self.bday_loop = asyncio.ensure_future(self.initialise())
async def initialise(self) -> None:
await self.bot.wait_until_ready()
with contextlib.suppress(RuntimeError):
while self == self.bot.get_cog(self.__class__.__name__):
now = datetime.datetime.utcnow()
tomorrow = (now + datetime.timedelta(days=1)).replace(hour=0, minute=0, second=0, microsecond=0)
await asyncio.sleep((tomorrow - now).total_seconds())
self.clean_yesterday_bdays()
self.do_today_bdays()
self.save_data()
def __unload(self) -> None:
self.bday_loop.cancel()
@group(pass_context=True)
async def bday(self, ctx: Context) -> None:
"""birthday setup."""
pass
@bday.command(no_pm=True)
@has_permissions(manage_roles=True)
async def channel(self, ctx: Context, channel: Channel) -> None:
"""Sets the birthday announcement channel for this server."""
self.config["channels"][ctx.guild.id] = channel.id
self.save_data()
await ctx.send(CHANNEL_SET.format(s=ctx.guild.name, c=channel.name))
@bday.command()
@has_permissions(manage_roles=True)
async def role(self, ctx: Context, role: Role) -> None:
"""Sets the birthday role for this server."""
self.config["roles"][ctx.guild.id] = role.id
self.save_data()
await ctx.send(ROLE_SET.format(s=ctx.guild.name, r=role.name))
@bday.command(aliases=["del", "clear"])
async def remove(self, ctx: Context) -> None:
"""Unsets your birthday date."""
self.remove_user_bday(ctx.author.id)
self.save_data()
        await ctx.send(BDAY_REMOVED)
@bday.command()
async def set(self, ctx: Context, date: str, year: int = None) -> None:
"""
Sets your birthday date
The given date must be given as: MM-DD
        Year is optional. If not given, the age won't be displayed.
"""
birthday = self.parse_date(date)
if birthday is None:
await ctx.send(BDAY_INVALID)
else:
self.remove_user_bday(ctx.author.id)
self.config["birthdays"].setdefault(str(birthday.toordinal()), {})[ctx.author.id] = year
self.save_data()
bday_month_str = birthday.strftime("%B")
bday_day_str = birthday.strftime("%d").lstrip("0")
            await ctx.send(BDAY_SET.format(f"{bday_month_str} {bday_day_str}"))
@bday.command()
async def list(self, ctx: Context) -> None:
"""
Lists all the birthdays
If a user has their year set, it will display the age they'll get after their birthday this year.
"""
self.clean_bdays()
self.save_data()
bdays = self.config["birthdays"]
this_year = datetime.date.today().year
embed = Embed(title="Birthday List", color=Color.blue())
for k, g in itertools.groupby(
sorted(datetime.datetime.fromordinal(int(o)) for o in bdays.keys()),
lambda i: i.month):
value = "\n".join(date.strftime("%d").lstrip("0") + ": " + ", ".join(f"<@!{u_id}>" + (""
if year is None else f" ({this_year - int(year)})")
for u_id, year in bdays.get(str(date.toordinal()), {}).items())
for date in g if len(bdays.get(str(date.toordinal()))) > 0)
if not value.isspace():
embed.add_field(name=datetime.datetime(year=1, month=k, day=1).strftime("%B"), value=value)
await ctx.send(embed=embed)
async def clean_bday(self, user_id: int) -> None:
for server_id, role_id in self.config["roles"].items():
server = self.bot.get_server(server_id)
if server is not None:
role = utils.find(lambda role: role.id == role_id, server.roles)
member = server.get_member(user_id)
if member is not None and role is not None and role in member.roles:
await self.bot.remove_roles(member, role)
async def handle_bday(self, user_id: int, year: int) -> None:
embed = Embed(color=Color.gold())
if year is not None:
age = datetime.date.today().year - int(year)
embed.description = f"<@!{user_id}> is now **{age} years old**. :tada: "
else:
embed.description = f"It's <@!{user_id}>'s birthday today! :tada: "
for server_id, channel_id in self.config["channels"].items():
server = self.bot.get_server(server_id)
if server is not None:
member = server.get_member(user_id)
if member is not None:
role_id = self.config["roles"].get(server_id)
if role_id is not None:
role = utils.find(lambda r: r.id == role_id, server.roles)
if role is not None:
try:
await self.bot.add_roles(member, role)
except (Forbidden, HTTPException):
pass
else:
self.config["yesterday"].append(member.id)
channel = server.get_channel(channel_id)
if channel is not None:
await channel.send(embed=embed)
def clean_bdays(self) -> None:
"""
Cleans the birthday entries with no user's birthday
Also removes birthdays of users who aren't in any visible server anymore
Happens when someone changes their birthday and there's nobody else in the same day.
"""
birthdays = self.config["birthdays"]
for date, bdays in birthdays.copy().items():
for user_id, _year in bdays.copy().items():
if not any(s.get_member(user_id) is not None for s in self.bot.servers):
del birthdays[date][user_id]
if len(bdays) == 0:
del birthdays[date]
def remove_user_bday(self, user_id: int) -> None:
for date, user_ids in self.config["birthdays"].items():
if user_id in user_ids:
del self.config["birthdays"][date][user_id]
def clean_yesterday_bdays(self) -> None:
for user_id in self.config["yesterday"]:
asyncio.ensure_future(self.clean_bday(user_id))
self.config["yesterday"].clear()
def do_today_bdays(self) -> None:
this_date = datetime.datetime.utcnow().date().replace(year=1)
for user_id, year in self.config["birthdays"].get(str(this_date.toordinal()), {}).items():
asyncio.ensure_future(self.handle_bday(user_id, year))
def parse_date(self, date_str: str) -> datetime.datetime:
result = None
try:
result = datetime.datetime.strptime(date_str, "%m-%d").date().replace(year=1)
except ValueError:
pass
return result
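    # Illustrative: parse_date("03-07") returns a date with the year forced to 1
    # (only month/day are meaningful); malformed input returns None.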
def load_data(self) -> None:
with open(BDAY_JSON_PATH, "r") as file:
self.config = json.load(file)
def save_data(self) -> None:
with open(BDAY_JSON_PATH, "w") as file:
json.dump(self.config, file, indent=4)
def setup(bot: Bot) -> None:
bot.add_cog(Birthday(bot))
| 2.59375
| 3
|
Controls/env/Lib/site-packages/PySide/examples/hyperui/hyperuilib/pageview.py
|
LoicBoileau/Projet-S4---Robot-Delta
| 32
|
12778490
|
"""
/*
* This file is part of PySide: Python for Qt
*
* Copyright (C) 2009 Nokia Corporation and/or its subsidiary(-ies).
*
* Contact: PySide team <<EMAIL>>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
* version 2.1 as published by the Free Software Foundation.
*
* This library is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*
*/
"""
from PySide.QtCore import *
from PySide.QtGui import *
from hyperuilib.shared.dataresource import *
from hyperuilib.view import *
from hyperuilib.shared.button import *
from hyperuilib.pagemenu import *
class PageSlot(QGraphicsWidget):
def __init__(self, parent=None):
QGraphicsWidget.__init__(self, parent)
self._contents = None
self.setFlags(QGraphicsItem.ItemHasNoContents)
def contents(self):
return self._contents
def setContents(self, contents):
if self._contents and self._contents.parentItem() == self:
self._contents.setParentItem(None)
self._contents = contents
if contents:
contents.setParentItem(self)
contents.setGeometry(0, 0, self.size().width(), self.size().height())
def resizeEvent(self, event):
QGraphicsWidget.resizeEvent(self, event)
if self._contents:
self._contents.resize(event.newSize())
class PageView(QGraphicsWidget):
def __init__(self, parent=None):
QGraphicsWidget.__init__(self, parent)
self._views = []
self._keepAlive = {}
self._isBack = False
self._isAnimating = False
self._topOffset = Resource.intValue("page-view/margin-top")
self.setFlag(QGraphicsItem.ItemHasNoContents)
layout = QGraphicsLinearLayout(Qt.Vertical)
layout.setContentsMargins(20 * 0.75, 40, 20 * 0.75, 0)
topLayout = QGraphicsLinearLayout(Qt.Horizontal)
topLayout.setContentsMargins(0, 0, 0, 0)
self.setLayout(layout)
layout.addItem(topLayout)
layout.addStretch(1)
self._menu = PageMenu()
self._backButton = Button(Resource.pixmap("top_bt_back.png"),
QPixmap(),
Resource.pixmap("top_bt_back_disabled.png"))
self._optionsButton = Button(Resource.pixmap("top_bt_options.png"),
QPixmap(),
Resource.pixmap("top_bt_options_disabled.png"))
self.connect(self._backButton, SIGNAL("clicked()"), SLOT("backClicked()"))
self.connect(self._optionsButton, SIGNAL("clicked()"), SLOT("optionsClicked()"))
topLayout.addItem(self._optionsButton)
topLayout.addStretch(1)
topLayout.addItem(self._menu)
topLayout.addStretch(1)
topLayout.addItem(self._backButton)
self._optionsButton.setEnabled(False)
self._oldSlot = PageSlot(self)
self._newSlot = PageSlot(self)
self._oldSlot.setPos(0, self._topOffset)
self._newSlot.setPos(0, self._topOffset)
def add(self, view, keepAlive=False):
if not view or self.isAnimating():
return False
view.setPageView(self)
self._keepAlive[view] = keepAlive
if len(self._views) == 0:
self._views.append(view)
self._menu.setText(view.title())
self._oldSlot.setContents(view)
else:
self.animateTransition(self._views[-1], view, False)
return True
def back(self):
if len(self._views) < 2 or self.isAnimating():
return False
oldView = self._views.pop()
newView = self._views[-1]
self.animateTransition(oldView, newView, True)
return True
def isAnimating(self):
return self._isAnimating
def backClicked(self):
if self.isAnimating():
return
if len(self._views) < 2:
QApplication.quit()
else:
self.back()
def optionsClicked(self):
pass
def transitionFinished(self):
newView = self._newSlot.contents()
oldView = self._oldSlot.contents()
self.disconnect(newView, SIGNAL("transitionInFinished()"),
self.transitionFinished)
self.disconnect(oldView, SIGNAL("transitionOutFinished()"),
newView.doTransitionOut)
if self._isBack:
            self._oldSlot.setContents(None)
keepAlive = self._keepAlive[oldView]
del self._keepAlive[oldView]
if not keepAlive:
oldView = None
else:
oldView.hide()
self._views.append(newView)
self._isAnimating = False
self._menu.setText(newView.title())
def animateTransition(self, oldView, newView, isBack):
self._isAnimating = True
self._isBack = isBack
self._oldSlot.setContents(oldView)
self._newSlot.setContents(newView)
newView.show()
self.connect(newView, SIGNAL("transitionInFinished()"),
self.transitionFinished)
self.connect(oldView, SIGNAL("transitionOutFinished()"),
newView.doTransitionIn)
oldView.doTransitionOut()
def resizeEvent(self, event):
QGraphicsWidget.resizeEvent(self, event)
newSize = event.newSize()
newSize.setHeight(newSize.height() - self._topOffset)
self._oldSlot.resize(QSizeF(newSize))
self._newSlot.resize(QSizeF(newSize))
| 1.84375
| 2
|
leetcode/ag_213.py
|
baobei813214232/common-alglib
| 4
|
12778491
|
<filename>leetcode/ag_213.py
import copy
class Solution(object):
def roobbb(self, nums):
dp = []
for i in range(0,len(nums)):
dp.append(0)
if len(nums) == 0: return 0
if len(nums) == 1: return nums[0]
if len(nums) == 2: return max(nums[0],nums[1])
last = len(nums)-1
dp[0] = nums[0]
dp[1] = max(nums[0] ,nums[1])
for i in range(2 , last+1):
dp[i] = max(dp[i-2]+nums[i] , dp[i-1])
return dp[last]
def rob(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
aa = copy.deepcopy(nums)
if len(nums) == 0:
return 0
if len(nums) == 1:
return nums[0]
del nums[0]
aa.pop()
a1 = self.roobbb(nums)
a2 = self.roobbb(aa)
return max(a1, a2)
def run():
nums = [2,1,1,2]
ns = Solution()
    print(ns.rob(nums))
| 3.140625
| 3
|
enote.py
|
mperbil/Pretvornik-enot
| 0
|
12778492
|
import bottle
import model
result = ''
kolicina2 = 0
st = 0
vhodna = ''
izhodna = ''
@bottle.get('/')
def index():
return bottle.template('index.tpl', result=result)
@bottle.get('/pretvori/')
def pretvori():
kolicina = kolicina2
global st
st = float(bottle.request.query['st'])
global vhodna
vhodna = bottle.request.query['enota1']
global izhodna
izhodna = bottle.request.query['enota2']
result = model.pretvarjanje(kolicina, st, vhodna, izhodna)
return bottle.template('rezultat.tpl', result = result, st=st, vhodna=vhodna, izhodna=izhodna)
@bottle.get('/na_dolzino/')
def na_dolzino():
global kolicina2
kolicina2 = 1
return bottle.template('dolzina.tpl', result=result)
@bottle.get('/na_prostornino/')
def na_prostornino():
global kolicina2
kolicina2 = 2
return bottle.template('prostornina.tpl', result=result)
@bottle.get('/na_tezo/')
def na_tezo():
global kolicina2
kolicina2 = 3
return bottle.template('teza.tpl', result=result)
@bottle.get('/na_cas/')
def na_cas():
global kolicina2
kolicina2 = 4
return bottle.template('cas.tpl', result=result)
@bottle.get('/nazaj/')
def nazaj():
return bottle.template('index.tpl')
bottle.run(reloader=True, debug=True)
| 2.109375
| 2
|
python/serpent.py
|
niccokunzmann/ping
| 1
|
12778493
|
<reponame>niccokunzmann/ping
"""
ast.literal_eval() compatible object tree serialization.
Serpent serializes an object tree into bytes (utf-8 encoded string) that can
be decoded and then passed as-is to ast.literal_eval() to rebuild it as the
original object tree. As such it is safe to send serpent data to other
machines over the network for instance (because only 'safe' literals are
encoded).
Compatible with Python 2.6+ (including 3.x), IronPython 2.7+, Jython 2.7+.
Serpent handles several special Python types to make life easier:
- str --> promoted to unicode (see below why this is)
- bytes, bytearrays, memoryview, buffer --> string, base-64
(you'll have to manually un-base64 them though)
- uuid.UUID, datetime.{datetime, time, timespan} --> appropriate string/number
- decimal.Decimal --> string (to not lose precision)
- array.array typecode 'c'/'u' --> string/unicode
- array.array other typecode --> list
- Exception --> dict with some fields of the exception (message, args)
- all other types --> dict with __getstate__ or vars() of the object
Note: all str will be promoted to unicode. This is done because it is the
default anyway for Python 3.x, and it solves the problem of the str/unicode
difference between different Python versions. Also it means the serialized
output doesn't have those problematic 'u' prefixes on strings.
Note: the serializer is not thread-safe. Make sure you're not making changes
to the object tree that is being serialized, and don't use the same
serializer in different threads.
Caveat: Python 2.6 cannot deserialize complex numbers (limitation of
ast.literal_eval in 2.6)
Note: because the serialized format is just valid Python source code, it can
contain comments.
Note: set literals are not supported on python <3.2 (ast.literal_eval
limitation). If you need Python < 3.2 compatibility, you'll have to use
set_literals=False when serializing.
Copyright 2013, <NAME> (irm<EMAIL>)
Software license: "MIT software license". See http://opensource.org/licenses/MIT
"""
from __future__ import print_function, division
import __future__
import ast
import base64
import sys
import types
import os
import gc
__version__ = "1.5"
__all__ = ["dump", "dumps", "load", "loads", "register_class", "unregister_class"]
def dumps(obj, indent=False, set_literals=True, module_in_classname=False):
"""Serialize object tree to bytes"""
return Serializer(indent, set_literals, module_in_classname).serialize(obj)
def dump(obj, file, indent=False, set_literals=True, module_in_classname=False):
"""Serialize object tree to a file"""
file.write(dumps(obj, indent=indent, set_literals=set_literals, module_in_classname=module_in_classname))
def loads(serialized_bytes):
"""Deserialize bytes back to object tree. Uses ast.literal_eval (safe)."""
serialized = serialized_bytes.decode("utf-8")
if sys.version_info < (3, 0) and sys.platform != "cli":
if os.name == "java":
# Because of a bug in Jython we have to manually convert all Str nodes to unicode. See http://bugs.jython.org/issue2008
serialized = ast.parse(serialized, "<serpent>", mode="eval")
for node in ast.walk(serialized):
if isinstance(node, ast.Str) and type(node.s) is str:
node.s = node.s.decode("utf-8")
else:
# python 2.x: parse with unicode_literals (promotes all strings to unicode)
serialized = compile(serialized, "<serpent>", mode="eval", flags=ast.PyCF_ONLY_AST | __future__.unicode_literals.compiler_flag)
try:
if os.name != "java" and sys.platform != "cli":
gc.disable()
return ast.literal_eval(serialized)
finally:
gc.enable()
def load(file):
"""Deserialize bytes from a file back to object tree. Uses ast.literal_eval (safe)."""
data = file.read()
return loads(data)
_special_classes_registry = {}
def unregister_class(clazz):
"""Unregister the specialcase serializer for the given class."""
if clazz in _special_classes_registry:
del _special_classes_registry[clazz]
def register_class(clazz, serializer):
"""
Register a specialcase serializer function for objects of the given class.
The function will be called with (object, serpent_serializer, outputstream, indentlevel) arguments.
The function must write the serialized data to outputstream. It doesn't return a value.
"""
_special_classes_registry[clazz] = serializer
class BytesWrapper(object):
"""Wrapper for bytes, bytearray etc. to make them appear as base-64 encoded data."""
def __init__(self, data):
self.data = data
def __getstate__(self):
if sys.platform == "cli":
b64 = base64.b64encode(str(self.data)) # weird IronPython bug?
elif (os.name == "java" or sys.version_info < (2, 7)) and type(self.data) is bytearray:
b64 = base64.b64encode(bytes(self.data)) # Jython bug http://bugs.jython.org/issue2011
else:
b64 = base64.b64encode(self.data)
return {
"data": b64 if type(b64) is str else b64.decode("ascii"),
"encoding": "base64"
}
@staticmethod
def from_bytes(data):
return BytesWrapper(data)
@staticmethod
def from_bytearray(data):
return BytesWrapper(data)
@staticmethod
def from_memoryview(data):
return BytesWrapper(data.tobytes())
@staticmethod
def from_buffer(data):
return BytesWrapper(data)
if sys.version_info < (3, 0):
_repr = repr # python <3.0 won't need explicit encoding to utf-8, so we optimize this
else:
def _repr(obj):
return repr(obj).encode("utf-8")
class Serializer(object):
"""
Serialize an object tree to a byte stream.
It is not thread-safe: make sure you're not making changes to the
object tree that is being serialized.
"""
# noinspection PySetFunctionToLiteral
repr_types = set([
str,
int,
float,
complex,
bool,
type(None)
])
translate_types = {
bytes: BytesWrapper.from_bytes,
bytearray: BytesWrapper.from_bytearray
}
# do some dynamic changes to the types configuration if needed
if bytes is str:
del translate_types[bytes]
if hasattr(types, "BufferType"):
translate_types[types.BufferType] = BytesWrapper.from_buffer
try:
translate_types[memoryview] = BytesWrapper.from_memoryview
except NameError:
pass
if sys.platform == "cli":
repr_types.remove(str) # IronPython needs special str treatment
if sys.version_info < (2, 7):
repr_types.remove(float) # repr(float) prints floating point roundoffs in Python < 2.7
def __init__(self, indent=False, set_literals=True, module_in_classname=False):
"""
Initialize the serializer.
indent=indent the output over multiple lines (default=false)
setLiterals=use set-literals or not (set to False if you need compatibility with Python < 3.2)
module_in_classname = include module prefix for class names or only use the class name itself
"""
self.indent = indent
self.set_literals = set_literals
self.module_in_classname = module_in_classname
def serialize(self, obj):
"""Serialize the object tree to bytes."""
header = "# serpent utf-8 "
if self.set_literals:
header += "python3.2\n" # set-literals require python 3.2+ to deserialize (ast.literal_eval limitation)
else:
header += "python2.6\n"
out = [header.encode("utf-8")]
try:
if os.name != "java" and sys.platform != "cli":
gc.disable()
self._serialize(obj, out, 0)
finally:
gc.enable()
if sys.platform == "cli":
return "".join(out)
return b"".join(out)
def _serialize(self, obj, out, level):
t = type(obj)
if t in self.translate_types:
obj = self.translate_types[t](obj)
t = type(obj)
if t in self.repr_types:
out.append(_repr(obj)) # just a simple repr() is enough for these objects
return
# check special registered types:
for clazz in _special_classes_registry:
if isinstance(obj, clazz):
_special_classes_registry[clazz](obj, self, out, level)
return
# exception?
if isinstance(obj, BaseException):
self.ser_exception_class(obj, out, level)
else:
# serialize dispatch
module = t.__module__
if module == "__builtin__":
module = "builtins" # python 2.x compatibility
method_name = "ser_{0}_{1}".format(module, t.__name__)
getattr(self, method_name, self.ser_default_class)(obj, out, level) # dispatch
def ser_builtins_str(self, str_obj, out, level):
# special case str, for IronPython where str==unicode and repr() yields undesired result
self.ser_builtins_unicode(str_obj, out, level)
def ser_builtins_float(self, float_obj, out, level):
# special case float, for Python < 2.7, to not print the float roundoff errors
out.append(str(float_obj))
def ser_builtins_unicode(self, unicode_obj, out, level):
# for python 2.x
z = unicode_obj.encode("utf-8")
z = z.replace("\\", "\\\\") # double-escape the backslashes
z = z.replace("\a", "\\a")
z = z.replace("\b", "\\b")
z = z.replace("\f", "\\f")
z = z.replace("\n", "\\n")
z = z.replace("\r", "\\r")
z = z.replace("\t", "\\t")
z = z.replace("\v", "\\v")
if "'" not in z:
z = "'" + z + "'"
elif '"' not in z:
z = '"' + z + '"'
else:
z = z.replace("'", "\\'")
z = "'" + z + "'"
out.append(z)
def ser_builtins_long(self, long_obj, out, level):
# used with python 2.x
out.append(str(long_obj))
def ser_builtins_tuple(self, tuple_obj, out, level):
if self.indent and tuple_obj:
indent_chars = b" " * level
indent_chars_inside = indent_chars + b" "
out.append(b"(\n")
for elt in tuple_obj:
out.append(indent_chars_inside)
self._serialize(elt, out, level + 1)
out.append(b",\n")
out[-1] = out[-1].rstrip() # remove the last \n
if len(tuple_obj) > 1:
del out[-1] # undo the last ,
out.append(b"\n" + indent_chars + b")")
else:
out.append(b"(")
for elt in tuple_obj:
self._serialize(elt, out, level + 1)
out.append(b",")
if len(tuple_obj) > 1:
del out[-1] # undo the last ,
out.append(b")")
def ser_builtins_list(self, list_obj, out, level):
if self.indent and list_obj:
indent_chars = b" " * level
indent_chars_inside = indent_chars + b" "
out.append(b"[\n")
for elt in list_obj:
out.append(indent_chars_inside)
self._serialize(elt, out, level + 1)
out.append(b",\n")
del out[-1] # remove the last ,\n
out.append(b"\n" + indent_chars + b"]")
else:
out.append(b"[")
for elt in list_obj:
self._serialize(elt, out, level + 1)
out.append(b",")
if list_obj:
del out[-1] # remove the last ,
out.append(b"]")
def ser_builtins_dict(self, dict_obj, out, level):
if self.indent and dict_obj:
indent_chars = b" " * level
indent_chars_inside = indent_chars + b" "
out.append(b"{\n")
dict_items = dict_obj.items()
try:
sorted_items = sorted(dict_items)
except TypeError: # can occur when elements can't be ordered (Python 3.x)
sorted_items = dict_items
for k, v in sorted_items:
out.append(indent_chars_inside)
self._serialize(k, out, level + 1)
out.append(b": ")
self._serialize(v, out, level + 1)
out.append(b",\n")
del out[-1] # remove last ,\n
out.append(b"\n" + indent_chars + b"}")
else:
out.append(b"{")
for k, v in dict_obj.items():
self._serialize(k, out, level + 1)
out.append(b":")
self._serialize(v, out, level + 1)
out.append(b",")
if dict_obj:
del out[-1] # remove the last ,
out.append(b"}")
def ser_builtins_set(self, set_obj, out, level):
if not self.set_literals:
if self.indent:
set_obj = sorted(set_obj)
self._serialize(tuple(set_obj), out, level) # use a tuple instead of a set literal
return
if self.indent and set_obj:
indent_chars = b" " * level
indent_chars_inside = indent_chars + b" "
out.append(b"{\n")
try:
sorted_elts = sorted(set_obj)
except TypeError: # can occur when elements can't be ordered (Python 3.x)
sorted_elts = set_obj
for elt in sorted_elts:
out.append(indent_chars_inside)
self._serialize(elt, out, level + 1)
out.append(b",\n")
del out[-1] # remove the last ,\n
out.append(b"\n" + indent_chars + b"}")
elif set_obj:
out.append(b"{")
for elt in set_obj:
self._serialize(elt, out, level + 1)
out.append(b",")
del out[-1] # remove the last ,
out.append(b"}")
else:
# empty set literal doesn't exist unfortunately, replace with empty tuple
self.ser_builtins_tuple((), out, level)
def ser_builtins_frozenset(self, set_obj, out, level):
self.ser_builtins_set(set_obj, out, level)
def ser_decimal_Decimal(self, decimal_obj, out, level):
# decimal is serialized as a string to avoid losing precision
self._serialize(str(decimal_obj), out, level)
def ser_datetime_datetime(self, datetime_obj, out, level):
self._serialize(datetime_obj.isoformat(), out, level)
if os.name == "java" or sys.version_info < (2, 7): # jython bug http://bugs.jython.org/issue2010
def ser_datetime_timedelta(self, timedelta_obj, out, level):
secs = ((timedelta_obj.days * 86400 + timedelta_obj.seconds) * 10 ** 6 + timedelta_obj.microseconds) / 10 ** 6
self._serialize(secs, out, level)
else:
def ser_datetime_timedelta(self, timedelta_obj, out, level):
secs = timedelta_obj.total_seconds()
self._serialize(secs, out, level)
def ser_datetime_time(self, time_obj, out, level):
self._serialize(str(time_obj), out, level)
def ser_uuid_UUID(self, uuid_obj, out, level):
self._serialize(str(uuid_obj), out, level)
def ser_exception_class(self, exc_obj, out, level):
if self.module_in_classname:
class_name = "%s.%s" % (exc_obj.__class__.__module__, exc_obj.__class__.__name__)
else:
class_name = exc_obj.__class__.__name__
value = {
"__class__": class_name,
"__exception__": True,
"args": exc_obj.args,
"attributes": vars(exc_obj) # add any custom attributes
}
self._serialize(value, out, level)
def ser_array_array(self, array_obj, out, level):
if array_obj.typecode == 'c':
self._serialize(array_obj.tostring(), out, level)
elif array_obj.typecode == 'u':
self._serialize(array_obj.tounicode(), out, level)
else:
self._serialize(array_obj.tolist(), out, level)
def ser_default_class(self, obj, out, level):
try:
value = obj.__getstate__()
if isinstance(value, dict):
self.ser_builtins_dict(value, out, level)
return
except AttributeError:
if self.module_in_classname:
class_name = "%s.%s" % (obj.__class__.__module__, obj.__class__.__name__)
else:
class_name = obj.__class__.__name__
try:
value = dict(vars(obj)) # make sure we can serialize anything that resembles a dict
value["__class__"] = class_name
except TypeError:
if hasattr(obj, "__slots__"):
# use the __slots__ instead of the vars dict
value = {}
for slot in obj.__slots__:
value[slot] = getattr(obj, slot)
value["__class__"] = class_name
else:
raise TypeError("don't know how to serialize class " + str(obj.__class__) + ". Give it vars() or an appropriate __getstate__")
self._serialize(value, out, level)
| 2.078125
| 2
|
mbusi_surveys/surveys/build_survey.py
|
patrick310/flask-wtform-tutorial
| 0
|
12778494
|
<reponame>patrick310/flask-wtform-tutorial
import json
import os
import os.path
import uuid
from .forms import SelectForm, MultiForm, TextForm, EmailForm, PhoneForm
from dotenv import load_dotenv
load_dotenv()
# DATA_DIRECTORY = os.getenv('DATA_DIRECTORY')
DATA_DIRECTORY = os.environ.get("DATA_DIRECTORY")
SURVEY_DIRECTORY = os.path.join(DATA_DIRECTORY, "surveys/")
# Get data from file
def get_custom_questions(file):
path = os.path.join(SURVEY_DIRECTORY, file)
with open(path, "r") as f:
data = json.load(f)
return data
# Get phone questions
def get_phone_data_from_custom_questions(file):
type = "phone"
phone_data = []
custom_questions = get_custom_questions(file)
# store all phone entries from data
for field in custom_questions["fields"]:
if field["type"] == type:
phone_data.append({"key": field["key"],
"priority": field["priority"],
"required": field["required"]})
return phone_data
# Format phone questions for survey
def get_phone_entries(file):
phone_data = get_phone_data_from_custom_questions(file)
all_phone_items = []
# create phone form entries
for item in phone_data:
phone_id = uuid.uuid1()
phone_entry = PhoneForm()
phone_entry.phone.label = item["key"]
phone_entry.phone.name = item["key"]
phone_entry.phone.id = phone_id
phone_entry.priority = item["priority"]
if item["required"] == "True":
phone_entry.phone.flags.required = True
all_phone_items.append(phone_entry)
all_phone_items = sorted(all_phone_items, key=lambda k: k.priority, reverse=True)
return all_phone_items
# Get email questions
def get_email_data_from_custom_questions(file):
type = "email"
email_data = []
custom_questions = get_custom_questions(file)
# store all email entries from data
for field in custom_questions["fields"]:
if field["type"] == type:
email_data.append({"key": field["key"],
"priority": field["priority"],
"required": field["required"]})
return email_data
# Format email questions for survey
def get_email_entries(file):
email_data = get_email_data_from_custom_questions(file)
all_email_items = []
# create email form entries
for item in email_data:
email_id = uuid.uuid1()
email_entry = EmailForm()
email_entry.email.label = item["key"]
email_entry.email.name = item["key"]
email_entry.email.id = email_id
email_entry.priority = item["priority"]
if item["required"] == "True":
email_entry.email.flags.required = True
all_email_items.append(email_entry)
all_email_items = sorted(all_email_items, key=lambda k: k.priority, reverse=True)
return all_email_items
# Get text questions
def get_text_data_from_custom_questions(file):
type = "text"
text_data = []
custom_questions = get_custom_questions(file)
# store all text question entries from data
for field in custom_questions["fields"]:
if field["type"] == type:
text_data.append({"key": field["key"],
"priority": field["priority"],
"required": field["required"]})
return text_data
# Format text questions for survey
def get_text_entries(file):
text_data = get_text_data_from_custom_questions(file)
all_text_items = []
# create text question form entries
for item in text_data:
text_id = uuid.uuid1()
text_entry = TextForm()
text_entry.text.label = item["key"]
text_entry.text.name = item["key"]
text_entry.text.id = text_id
text_entry.priority = item["priority"]
if item["required"] == "True":
text_entry.text.flags.required = True
all_text_items.append(text_entry)
all_text_items = sorted(all_text_items, key=lambda k: k.priority, reverse=True)
return all_text_items
# Get multiselect questions
def get_multi_data_from_custom_questions(file):
type = "checkbox"
multi_data = []
custom_questions = get_custom_questions(file)
# store all multiselect question entries from data
for field in custom_questions["fields"]:
if field["type"] == type:
multi_data.append({"key": field["key"],
"data_list": field["data_list"],
"priority": field["priority"],
"required": field["required"]})
return multi_data
# Format multiselect questions for survey
def get_multi_entries(file):
multi_data = get_multi_data_from_custom_questions(file)
all_multi_items = []
# create multiselect question form entries
for multi_dict in multi_data:
multi_id = uuid.uuid1()
multi_entry = MultiForm()
multi_entry.multi.label = multi_dict["key"]
multi_entry.multi.name = multi_dict["key"]
multi_entry.id = multi_id
multi_entry.multi.choices = multi_dict["data_list"]
multi_entry.priority = multi_dict["priority"]
if multi_dict["required"] == "True":
multi_entry.multi.flags.required = True
all_multi_items.append(multi_entry)
all_multi_items = sorted(all_multi_items, key=lambda k: k.priority, reverse=True)
return all_multi_items
# Get select questions
def get_select_data_from_custom_questions(file):
type = "select"
select_data = []
custom_questions = get_custom_questions(file)
# store all select question entries from data
for field in custom_questions["fields"]:
if field["type"] == type:
select_data.append({"key": field["key"],
"data_list": field["data_list"],
"priority": field["priority"],
"required": field["required"]})
return select_data
# Format select questions for survey
def get_select_entries(file):
select_data = get_select_data_from_custom_questions(file)
all_select_items = []
# create select question form entries
for select_dict in select_data:
select_id = uuid.uuid1() # allows for multiple selects
select_entry = SelectForm()
select_entry.select.label = select_dict["key"]
select_entry.select.name = select_dict["key"]
select_entry.id = select_id
select_entry.select.choices = select_dict["data_list"]
select_entry.priority = select_dict["priority"]
if select_dict["required"] == "True":
select_entry.select.flags.required = True
all_select_items.append(select_entry)
all_select_items = sorted(all_select_items, key=lambda k: k.priority, reverse=True)
return all_select_items
# Get survey title
def get_survey_title(file):
entries = get_custom_questions(file)
for entry in entries["fields"]:
if entry["type"] == "title":
return entry["key"]
return "Survey"
| 3.0625
| 3
|
worktory/connection/__init__.py
|
renatoalmeidaoliveira/Worktory
| 4
|
12778495
|
<gh_stars>1-10
from worktory.connection.wrappers import *
| 1.109375
| 1
|
autoposter/bot.py
|
PhantomInsights/reddit-bots
| 13
|
12778496
|
<reponame>PhantomInsights/reddit-bots
"""
Takes the top 3 Google News results and posts them to Reddit.
"""
import xml.etree.ElementTree as ET
import praw
import requests
import config
LOG_FILE = "./processed_urls.txt"
NEWS_URL = "https://news.google.com/rss/search?q=ecatepec+when:1d&hl=es-419&gl=MX"
def load_log():
"""Loads the log file and creates it if it doesn't exist.
Returns
-------
list
A list of urls.
"""
try:
with open(LOG_FILE, "r", encoding="utf-8") as temp_file:
return temp_file.read().splitlines()
except FileNotFoundError:
with open(LOG_FILE, "w", encoding="utf-8") as temp_file:
return []
def update_log(url):
"""Updates the log file.
Parameters
----------
url : str
The url to log.
"""
with open(LOG_FILE, "a", encoding="utf-8") as temp_file:
temp_file.write(url + "\n")
def init_bot():
"""Reads the RSS feed."""
# We create the Reddit instance.
reddit = praw.Reddit(client_id=config.APP_ID, client_secret=config.APP_SECRET,
user_agent=config.USER_AGENT, username=config.REDDIT_USERNAME,
password=config.REDDIT_PASSWORD)
with requests.get(NEWS_URL) as response:
root = ET.fromstring(response.text)
# Only read the first 3 links.
for item in root.findall(".//item")[:3]:
log = load_log()
title = item.find("title").text.split(" - ")[0].split(" | ")[0].strip()
url = item.find("link").text
if url not in log and title not in log:
reddit.subreddit(config.SUBREDDIT).submit(
title=title, url=url)
update_log(url)
update_log(title)
print("Posted:", url)
if __name__ == "__main__":
init_bot()
| 2.921875
| 3
|
xinshuo_io/test/images_io/test_load_image.py
|
xinshuoweng/cv_ml_tool
| 31
|
12778497
|
# Author: <NAME>
# email: <EMAIL>
import numpy as np
import init_paths
from images_io import load_image
from xinshuo_visualization import visualize_image
def test_load_image():
image_path = '../lena.png'
print('basic')
img = load_image(image_path)
assert img.shape == (512, 512, 3)
print('testing for resizing')
img = load_image(image_path, resize_factor=2.0)
assert img.shape == (1024, 1024, 3)
print('testing for resizing')
img = load_image(image_path, target_size=[1033, 1033])
assert img.shape == (1033, 1033, 3)
print('testing for rotation')
img = load_image(image_path, input_angle=45)
visualize_image(img, vis=True)
assert img.shape == (726, 726, 3)
print('testing for rotation')
img = load_image(image_path, input_angle=450)
visualize_image(img, vis=True)
print('\n\nDONE! SUCCESSFUL!!\n')
if __name__ == '__main__':
test_load_image()
| 2.5625
| 3
|
utils/utils.py
|
mansum6/iNNfer
| 15
|
12778498
|
<reponame>mansum6/iNNfer
import os.path as osp
from os import walk as osw
import cv2
import numpy as np
import torch
import torch.nn as nn  # required by extract_patches_2d (nn.ConstantPad2d)
import torch.nn.functional as F
from typing import List  # used by the List type hints in filter2D
from .colors import *
try:
import rawpy
rawpy_available = True
except ImportError:
rawpy_available = False
MODEL_EXTENSIONS = ['.pth', '.pt']
IMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.webp',
                  '.tga', '.tif', '.tiff', '.dng']
MAX_VALUES_BY_DTYPE = {
np.dtype("int8"): 127,
np.dtype("uint8"): 255,
np.dtype("int16"): 32767,
np.dtype("uint16"): 65535,
np.dtype("int32"): 2147483647,
np.dtype("uint32"): 4294967295,
np.dtype("int64"): 9223372036854775807,
np.dtype("uint64"): 18446744073709551615,
np.dtype("float32"): 1.0,
np.dtype("float64"): 1.0,
}
def is_ext_file(filename, extensions=IMG_EXTENSIONS):
return any(filename.endswith(extension) for extension in extensions)
def scan_dir(path, extensions=IMG_EXTENSIONS):
if not osp.isdir(path):
raise AssertionError(f'{path:s} is not a valid directory')
files_list = []
for dirpath, _, fnames in sorted(osw(path)):
for fname in sorted(fnames):
if is_ext_file(fname, extensions):
img_path = osp.join(dirpath, fname)
files_list.append(img_path)
return files_list
def get_models_paths(path):
""" Get model path list from model folder"""
models = scan_dir(path, MODEL_EXTENSIONS)
if not models:
raise AssertionError(f'{path:s} has no valid model file')
return models
def get_images_paths(path):
""" Get image path list from image folder"""
images = scan_dir(path, IMG_EXTENSIONS)
if not images:
raise AssertionError(f'{path:s} has no valid image file')
return images
def read_img(path=None):
""" Reads an image using cv2 (or rawpy if dng)
Arguments:
path: image path to read
Output:
Numpy HWC, BGR, [0,255] by default
"""
img = None
if path:
if rawpy_available and path[-3:].lower() == 'dng':
# if image is a DNG
with rawpy.imread(path) as raw:
img = raw.postprocess()
else:
# if image can be read by cv2
img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
else:
raise AssertionError("Empty path provided.")
return img
def save_img(img, img_path, mode='RGB', scale=None):
""" Save a single image to the defined path """
if scale:
img = cv2.resize(img, dsize=None, fx=scale, fy=scale, interpolation=cv2.INTER_NEAREST)
cv2.imwrite(img_path, img)
def merge_imgs(img_list):
""" Auxiliary function to horizontally concatenate images in
a list using cv2.hconcat
"""
if isinstance(img_list, list):
img_h = 0
img_v = 0
for img in img_list:
            if img.shape[0] > img_h:
                img_h = img.shape[0]
if img.shape[1] > img_v:
img_v = img.shape[1]
img_list_res = []
for img in img_list:
if img.shape[1] < img_v or img.shape[0] < img_h:
img_res = cv2.resize(img, (img_v, img_h), interpolation=cv2.INTER_NEAREST)
img_list_res.append(img_res)
else:
img_list_res.append(img)
return cv2.hconcat(img_list_res)
elif isinstance(img_list, np.ndarray):
return img_list
else:
raise NotImplementedError('To merge images img_list should be a list of cv2 images.')
def save_img_comp(img_list, img_path, mode='RGB'):
""" Create a side by side comparison of multiple images in a list
to save to a defined path
"""
# lr_resized = cv2.resize(lr_img, (sr_img.shape[1], sr_img.shape[0]), interpolation=cv2.INTER_NEAREST)
# comparison = cv2.hconcat([lr_resized, sr_img])
comparison = merge_imgs(img_list)
save_img(img=comparison, img_path=img_path, mode=mode)
def denorm(x, min_max=(-1.0, 1.0)):
""" Denormalize from [-1,1] range to [0,1]
formula: xi' = (xi - mu)/sigma
Example: "out = (x + 1.0) / 2.0" for denorm
range (-1,1) to (0,1)
for use with proper act in Generator output (ie. tanh)
"""
out = (x - min_max[0]) / (min_max[1] - min_max[0])
if isinstance(x, torch.Tensor):
return out.clamp(0, 1)
elif isinstance(x, np.ndarray):
return np.clip(out, 0, 1)
else:
raise TypeError(
"Got unexpected object type, expected torch.Tensor or np.ndarray")
def norm(x):
""" Normalize (z-norm) from [0,1] range to [-1,1] """
out = (x - 0.5) * 2.0
if isinstance(x, torch.Tensor):
return out.clamp(-1, 1)
elif isinstance(x, np.ndarray):
return np.clip(out, -1, 1)
else:
raise TypeError(
"Got unexpected object type, expected torch.Tensor or np.ndarray")
def np2tensor(img, bgr2rgb=True, data_range=1., normalize=False,
change_range=True, add_batch=True):
""" Converts a numpy image array into a Tensor array.
Parameters:
img (numpy array): the input image numpy array
add_batch (bool): choose if new tensor needs batch dimension added
"""
if not isinstance(img, np.ndarray): #images expected to be uint8 -> 255
raise TypeError("Got unexpected object type, expected np.ndarray")
# check how many channels the image has, then condition. ie. RGB, RGBA, Gray
# if bgr2rgb:
# img = img[:, :, [2, 1, 0]] #BGR to RGB -> in numpy, if using OpenCV, else not needed. Only if image has colors.
if change_range:
dtype = img.dtype
maxval = MAX_VALUES_BY_DTYPE.get(dtype, 1.0)
t_dtype = np.dtype("float32")
img = img.astype(t_dtype)/maxval # ie: uint8 = /255
img = torch.from_numpy(np.ascontiguousarray(np.transpose(img, (2, 0, 1)))).float() #"HWC to CHW" and "numpy to tensor"
if bgr2rgb:
# BGR to RGB -> in tensor, if using OpenCV, else not needed. Only if image has colors.)
if img.shape[0] % 3 == 0: # RGB or MultixRGB (3xRGB, 5xRGB, etc. For video tensors.)
img = bgr_to_rgb(img)
elif img.shape[0] == 4: # RGBA
img = bgra_to_rgba(img)
if add_batch:
img.unsqueeze_(0) # Add fake batch dimension = 1 . squeeze() will remove the dimensions of size 1
if normalize:
img = norm(img)
return img
def tensor2np(img, rgb2bgr=True, remove_batch=True, data_range=255,
denormalize=False, change_range=True, imtype=np.uint8):
""" Converts a Tensor array into a numpy image array.
Parameters:
img (tensor): the input image tensor array
4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order
remove_batch (bool): choose if tensor of shape BCHW needs to be squeezed
denormalize (bool): Used to denormalize from [-1,1] range back to [0,1]
imtype (type): the desired type of the converted numpy array (np.uint8
default)
Output:
img (np array): 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default)
"""
if not isinstance(img, torch.Tensor):
raise TypeError("Got unexpected object type, expected torch.Tensor")
n_dim = img.dim()
# TODO: Check: could denormalize here in tensor form instead, but end result is the same
img = img.float().cpu()
if n_dim in (4, 3):
# if n_dim == 4, has to convert to 3 dimensions
if n_dim == 4 and remove_batch:
# remove a fake batch dimension
img = img.squeeze(dim=0)
if img.shape[0] == 3 and rgb2bgr: # RGB
# RGB to BGR -> in tensor, if using OpenCV, else not needed. Only if image has colors.
img_np = rgb_to_bgr(img).numpy()
elif img.shape[0] == 4 and rgb2bgr: # RGBA
# RGBA to BGRA -> in tensor, if using OpenCV, else not needed. Only if image has colors.
img_np = rgba_to_bgra(img).numpy()
else:
img_np = img.numpy()
img_np = np.transpose(img_np, (1, 2, 0)) # CHW to HWC
elif n_dim == 2:
img_np = img.numpy()
else:
raise TypeError(
f'Only support 4D, 3D and 2D tensor. But received with dimension: {n_dim:d}')
# if rgb2bgr:
#img_np = img_np[[2, 1, 0], :, :] #RGB to BGR -> in numpy, if using OpenCV, else not needed. Only if image has colors.
# TODO: Check: could denormalize in the begining in tensor form instead
if denormalize:
img_np = denorm(img_np) # denormalize if needed
if change_range:
img_np = np.clip(data_range*img_np, 0, data_range).round() # clip to the data_range
# has to be in range (0,255) before changing to np.uint8, else np.float32
return img_np.astype(imtype)
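# Illustrative round trip between np2tensor and tensor2np above (a sketch; assumes an
# 8-bit BGR image such as one returned by read_img / cv2.imread):
# >>> arr = np.random.randint(0, 256, (32, 32, 3), dtype=np.uint8)
# >>> t = np2tensor(arr)              # float tensor, shape (1, 3, 32, 32), RGB, [0, 1]
# >>> back = tensor2np(t)             # uint8 array, shape (32, 32, 3), BGR, [0, 255]
# >>> bool(np.array_equal(arr, back)) # should hold up to uint8 rounding
# True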
def modcrop(img_in, scale):
# img_in: Numpy, HWC or HW
img = np.copy(img_in)
if img.ndim == 2:
H, W = img.shape
H_r, W_r = H % scale, W % scale
img = img[:H - H_r, :W - W_r]
elif img.ndim == 3:
H, W, C = img.shape
H_r, W_r = H % scale, W % scale
img = img[:H - H_r, :W - W_r, :]
else:
raise ValueError('Wrong img ndim: [{:d}].'.format(img.ndim))
return img
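# Illustrative: modcrop trims both spatial dims down to a multiple of the scale factor:
# >>> modcrop(np.zeros((513, 769, 3)), 4).shape
# (512, 768, 3)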
def linear_resize(img, st=256):
h, w = img.shape[0:2]
if not (h % st == 0) or not (w % st == 0):
oh = -(-h // st) * st
ow = -(-w // st) * st
linear = srgb2linear(img)
linear = cv2.resize(linear, dsize=(ow, oh), interpolation=cv2.INTER_CUBIC)
img = linear2srgb(linear)
return img
def color_fix(imgA, imgB):
""" Fix coloration changes by adding the difference in
the low frequency of the original image
"""
kernel_size = 3
scaling = False
# convert images to linear space
imgA = srgb2linear(imgA)
imgB = srgb2linear(imgB)
# downscale imgB to imgA size if needed
hA, wA = imgA.shape[0:2]
hB, wB = imgB.shape[0:2]
if hA < hB and wA < wB:
scaling = True
imgB_ds = cv2.resize(
imgB, dsize=(wA, hA), interpolation=cv2.INTER_CUBIC)
else:
imgB_ds = imgB
# compute the difference (ie: LR - SR)
diff = imgA - imgB_ds
# gaussian blur for low frequency information (colors)
#TODO: test with guided filter
blurred = cv2.GaussianBlur(diff, (kernel_size, kernel_size), 0)
# upscale if needed and add diff back to the imgB
if scaling:
blurred = cv2.resize(
blurred, dsize=(wB, hB), interpolation=cv2.INTER_CUBIC)
rlt = blurred + imgB
# rlt = denorm(rlt, min_max=(rlt.min(), rlt.max()))
# back to srgb space and return
return linear2srgb(rlt)
def extract_patches_2d(img, patch_shape, step=None, batch_first=False):
""" Convert a 4D tensor into a 5D tensor of patches (crops) of
the original tensor. Uses unfold to extract sliding local blocks
from an batched input tensor.
Arguments:
img: the image batch to crop
patch_shape: tuple with the shape of the last two dimensions (H,W)
after crop
step: the size of the step used to slide the blocks in each dimension.
If each value 0.0 < step < 1.0, the overlap will be relative to the
patch size * step
batch_first: return tensor with batch as the first dimension or the
second
Reference:
https://gist.github.com/dem123456789/23f18fd78ac8da9615c347905e64fc78
"""
if step is None: step = [1.0, 1.0]
patch_H, patch_W = patch_shape[0], patch_shape[1]
# pad to fit patch dimensions
if(img.size(2) < patch_H):
num_padded_H_Top = (patch_H - img.size(2)) // 2
num_padded_H_Bottom = patch_H - img.size(2) - num_padded_H_Top
padding_H = nn.ConstantPad2d((0, 0, num_padded_H_Top, num_padded_H_Bottom), 0)
img = padding_H(img)
if(img.size(3) < patch_W):
num_padded_W_Left = (patch_W - img.size(3)) // 2
num_padded_W_Right = patch_W - img.size(3) - num_padded_W_Left
padding_W = nn.ConstantPad2d((num_padded_W_Left, num_padded_W_Right, 0, 0), 0)
img = padding_W(img)
# steps to overlay crops of the images
step_int = [0, 0]
step_int[0] = int(patch_H * step[0]) if(isinstance(step[0], float)) else step[0]
step_int[1] = int(patch_W * step[1]) if(isinstance(step[1], float)) else step[1]
patches_fold_H = img.unfold(2, patch_H, step_int[0])
if((img.size(2) - patch_H) % step_int[0] != 0):
patches_fold_H = torch.cat((patches_fold_H,
img[:, :, -patch_H:, :].permute(0, 1, 3, 2).unsqueeze(2)),dim=2)
patches_fold_HW = patches_fold_H.unfold(3, patch_W, step_int[1])
if((img.size(3) - patch_W) % step_int[1] != 0):
patches_fold_HW = torch.cat((patches_fold_HW,
patches_fold_H[:, :, :, -patch_W:, :].permute(0, 1, 2, 4, 3).unsqueeze(3)), dim=3)
patches = patches_fold_HW.permute(2, 3, 0, 1, 4, 5)
patches = patches.reshape(-1, img.size(0), img.size(1), patch_H, patch_W)
if(batch_first):
patches = patches.permute(1, 0, 2, 3, 4)
return patches
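# Illustrative shapes for extract_patches_2d above (a sketch; sizes are arbitrary):
# >>> batch = torch.rand(2, 3, 64, 64)
# >>> patches = extract_patches_2d(batch, (32, 32), step=[0.5, 0.5], batch_first=True)
# >>> patches.shape                   # 3x3 half-overlapping crops per image
# torch.Size([2, 9, 3, 32, 32])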
def recompose_tensor(patches, height, width, step=None, scale=1):
""" Reconstruct images that have been cropped to patches.
Unlike reconstruct_from_patches_2d(), this function allows to
use blending between the patches if they were generated a
step between 0.5 (50% overlap) and 1.0 (0% overlap),
relative to the original patch size
Arguments:
patches: the image patches
height: the original image height
width: the original image width
step: the overlap step factor, from 0.5 to 1.0
scale: the scale at which the patches are in relation to the
original image
References:
https://github.com/sunreef/BlindSR/blob/master/src/image_utils.py
https://gist.github.com/dem123456789/23f18fd78ac8da9615c347905e64fc78
"""
    if step is None: step = 1.0  # scalar; it is expanded to [step, step] below
    assert isinstance(step, float) and step >= 0.5 and step <= 1.0
full_height = scale * height
full_width = scale * width
batch_size, channels, patch_size, _ = patches.size()
overlap = scale * int(round((1.0 - step) * (patch_size / scale)))
effective_patch_size = int(step * patch_size)
patch_H, patch_W = patches.size(2), patches.size(3)
img_size = (patches.size(0), patches.size(1), max(full_height, patch_H), max(full_width, patch_W))
step = [step, step]
step_int = [0, 0]
step_int[0] = int(patch_H * step[0])
step_int[1] = int(patch_W * step[1])
nrow, ncol = 1 + (img_size[-2] - patch_H)//step_int[0], 1 + (img_size[-1] - patch_W)//step_int[1]
n_patches_height = nrow + 1 if((img_size[2] - patch_H) % step_int[0] != 0) else nrow
n_patches_width = ncol + 1 if((img_size[3] - patch_W) % step_int[1] != 0) else ncol
final_batch_size = batch_size // (n_patches_height * n_patches_width)
blending_in = torch.linspace(0.1, 1.0, overlap)
blending_out = torch.linspace(1.0, 0.1, overlap)
middle_part = torch.ones(patch_size - 2 * overlap)
blending_profile = torch.cat([blending_in, middle_part, blending_out], 0)
horizontal_blending = blending_profile[None].repeat(patch_size, 1)
vertical_blending = blending_profile[:, None].repeat(1, patch_size)
blending_patch = horizontal_blending * vertical_blending
blending_image = torch.zeros(1, channels, full_height, full_width)
for h in range(n_patches_height):
for w in range(n_patches_width):
patch_start_height = min(h * effective_patch_size, full_height - patch_size)
patch_start_width = min(w * effective_patch_size, full_width - patch_size)
blending_image[0, :, patch_start_height: patch_start_height + patch_size, patch_start_width: patch_start_width + patch_size] += blending_patch[None]
recomposed_tensor = torch.zeros(final_batch_size, channels, full_height, full_width)
if patches.is_cuda:
blending_patch = blending_patch.cuda()
blending_image = blending_image.cuda()
recomposed_tensor = recomposed_tensor.cuda()
patch_index = 0
for b in range(final_batch_size):
for h in range(n_patches_height):
for w in range(n_patches_width):
patch_start_height = min(h * effective_patch_size, full_height - patch_size)
patch_start_width = min(w * effective_patch_size, full_width - patch_size)
recomposed_tensor[b, :, patch_start_height: patch_start_height + patch_size, patch_start_width: patch_start_width + patch_size] += patches[patch_index] * blending_patch
patch_index += 1
recomposed_tensor /= blending_image
return recomposed_tensor
def normalize_kernel2d(x: torch.Tensor) -> torch.Tensor:
"""Normalizes kernel."""
if len(x.size()) < 2:
raise TypeError("input should be at least 2D tensor. Got {}"
.format(x.size()))
norm: torch.Tensor = x.abs().sum(dim=-1).sum(dim=-1)
return x / (norm.unsqueeze(-1).unsqueeze(-1))
def compute_padding(kernel_size):
""" Computes padding tuple. For square kernels, pad can be an
int, else, a tuple with an element for each dimension.
"""
# 4 or 6 ints: (padding_left, padding_right, padding_top, padding_bottom)
if isinstance(kernel_size, tuple):
kernel_size = list(kernel_size)
if isinstance(kernel_size, int):
return kernel_size//2
elif isinstance(kernel_size, list):
computed = [k // 2 for k in kernel_size]
out_padding = []
for i in range(len(kernel_size)):
computed_tmp = computed[-(i + 1)]
            # for even kernels we need to do asymmetric padding
if kernel_size[i] % 2 == 0:
padding = computed_tmp - 1
else:
padding = computed_tmp
out_padding.append(padding)
out_padding.append(computed_tmp)
return out_padding
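# Illustrative outputs of compute_padding (odd square kernels pad symmetrically):
# >>> compute_padding(5)
# 2
# >>> compute_padding((3, 3))         # (left, right, top, bottom) ordering for F.pad
# [1, 1, 1, 1]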
def filter2D(x: torch.Tensor, kernel: torch.Tensor,
border_type: str = 'reflect', dim: int =2,
normalized: bool = False) -> torch.Tensor:
r"""Function that convolves a tensor with a kernel.
The function applies a given kernel to a tensor. The kernel
is applied independently at each depth channel of the tensor.
Before applying the kernel, the function applies padding
according to the specified mode so that the output remains
in the same shape.
Args:
x: the input tensor with shape of :math:`(B, C, H, W)`.
kernel: the kernel to be convolved with the input tensor.
The kernel shape must be :math:`(1, kH, kW)`.
border_type: the padding mode to be applied before convolving.
The expected modes are: ``'constant'``, ``'reflect'``,
``'replicate'`` or ``'circular'``. Default: ``'reflect'``.
normalized: If True, kernel will be L1 normalized.
Return:
the convolved tensor of same size and numbers of channels
as the input.
"""
borders_list: List[str] = ['constant', 'reflect', 'replicate', 'circular']
if border_type not in borders_list:
raise ValueError("Invalid border_type, we expect the following: {0}."
"Got: {1}".format(borders_list, border_type))
# prepare kernel
b, c, h, w = x.shape
tmp_kernel: torch.Tensor = kernel.unsqueeze(0).to(x.device).to(x.dtype)
if normalized:
tmp_kernel = normalize_kernel2d(tmp_kernel)
# pad the input tensor
height, width = tmp_kernel.shape[-2:]
padding_shape: List[int] = compute_padding((height, width))
input_pad: torch.Tensor = F.pad(x, padding_shape, mode=border_type)
b, c, hp, wp = input_pad.shape
tmp_kernel = tmp_kernel.expand(c, -1, -1, -1)
# convolve the tensor with the kernel.
if dim == 1:
conv = F.conv1d
elif dim == 2:
conv = F.conv2d
elif dim == 3:
conv = F.conv3d
else:
raise RuntimeError(
f"Only 1, 2 and 3 dimensions are supported. Received {dim}.")
return conv(input_pad, tmp_kernel, groups=c, padding=0, stride=1)
def get_box_kernel(kernel_size: int = 5, dim=2):
if isinstance(kernel_size, (int, float)):
kernel_size = [kernel_size] * dim
kx = kernel_size[0]
ky = kernel_size[1]
box_kernel = torch.Tensor(np.ones((kx, ky)) / (float(kx)*float(ky)))
return box_kernel
def guided_filter(x: torch.Tensor, y: torch.Tensor,
x_HR: torch.Tensor = None, ks=None, r=None, eps:float=1e-2,
box_kernel=None, mode:str='regular', conv_a=None) -> torch.Tensor:
""" Guided filter / FastGuidedFilter function.
This is a kind of edge-preserving smoothing filter that can
filter out noise or texture while retaining sharp edges. One
key assumption of the guided filter is that the relation
between guidance x and the filtering output is linear.
Arguments:
x: guidance image with shape [b, c, h, w].
y: filtering input image with shape [b, c, h, w].
x_HR: optional high resolution guidance map for joint
upsampling (for 'fast' or 'conv' modes).
ks (int): kernel size for the box/mean filter. In reference to
the window radius "r": kx = ky = ks = (2*r)+1
r (int): optional radius for the window. Can use instead of ks.
box_kernel (tensor): precalculated box_kernel (optional).
mode: select between the guided filter types: 'regular',
'fast' or 'conv' (convolutional).
conv_a (nn.Sequential): the convolutional layers to use for
'conv' mode to calculate the 'A' parameter.
eps: regularization ε, penalizing large A values.
eps = 1e-8 in the original paper.
Returns:
output: filtered image
"""
if not isinstance(box_kernel, torch.Tensor):
# get the box_kernel if not provided
if not ks:
if r:
ks = (2*r)+1
else:
raise ValueError("Either kernel size (ks) or radius (r) "
"for the window are required.")
# mean filter. The window size is defined by the kernel size.
box_kernel = get_box_kernel(kernel_size = ks)
x_shape = x.shape
# y_shape = y.shape
if isinstance(x_HR, torch.Tensor):
x_HR_shape = x_HR.shape
box_kernel = box_kernel.to(x.device)
N = filter2D(torch.ones((1, 1, x_shape[-2], x_shape[-1])),
box_kernel).to(x.device)
# note: similar to SSIM calculation
mean_x = filter2D(x, box_kernel) / N
mean_y = filter2D(y, box_kernel) / N
cov_xy = (filter2D(x*y, box_kernel) / N) - mean_x*mean_y
var_x = (filter2D(x*x, box_kernel) / N) - mean_x*mean_x
# linear coefficients A, b
if mode == 'conv':
A = conv_a(torch.cat([cov_xy, var_x], dim=1))
else:
# regular or fast GuidedFilter
A = cov_xy / (var_x + eps)
b = mean_y - A * mean_x # according to original GF paper, needs to add: "+ x"
# mean_A; mean_b
if mode == 'fast' or mode == 'conv':
mean_A = F.interpolate(
A, (x_HR_shape[-2], x_HR_shape[-1]),
mode='bilinear', align_corners=True)
mean_b = F.interpolate(
b, (x_HR_shape[-2], x_HR_shape[-1]),
mode='bilinear', align_corners=True)
output = mean_A * x_HR + mean_b
else:
# regular GuidedFilter
mean_A = filter2D(A, box_kernel) / N
mean_b = filter2D(b, box_kernel) / N
output = mean_A * x + mean_b
return output
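# Minimal usage sketch for the regular guided filter above (names and sizes are arbitrary):
# >>> guide = torch.rand(1, 1, 64, 64)
# >>> noisy = (guide + 0.05 * torch.randn(1, 1, 64, 64)).clamp(0, 1)
# >>> out = guided_filter(guide, noisy, r=4, eps=1e-2)   # box window of ks = 2*r + 1 = 9
# >>> out.shape
# torch.Size([1, 1, 64, 64])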
def normal2mod(state_dict):
if 'model.0.weight' in state_dict:
print('Converting and loading an RRDB model to modified RRDB')
crt_net = {}
items = []
for k, v in state_dict.items():
items.append(k)
crt_net['conv_first.weight'] = state_dict['model.0.weight']
crt_net['conv_first.bias'] = state_dict['model.0.bias']
for k in items.copy():
if 'RDB' in k:
ori_k = k.replace('model.1.sub.', 'RRDB_trunk.')
if '.0.weight' in k:
ori_k = ori_k.replace('.0.weight', '.weight')
elif '.0.bias' in k:
ori_k = ori_k.replace('.0.bias', '.bias')
crt_net[ori_k] = state_dict[k]
items.remove(k)
crt_net['trunk_conv.weight'] = state_dict['model.1.sub.23.weight']
crt_net['trunk_conv.bias'] = state_dict['model.1.sub.23.bias']
crt_net['upconv1.weight'] = state_dict['model.3.weight']
crt_net['upconv1.bias'] = state_dict['model.3.bias']
crt_net['upconv2.weight'] = state_dict['model.6.weight']
crt_net['upconv2.bias'] = state_dict['model.6.bias']
crt_net['HRconv.weight'] = state_dict['model.8.weight']
crt_net['HRconv.bias'] = state_dict['model.8.bias']
crt_net['conv_last.weight'] = state_dict['model.10.weight']
crt_net['conv_last.bias'] = state_dict['model.10.bias']
state_dict = crt_net
return state_dict
def mod2normal(state_dict):
if 'conv_first.weight' in state_dict:
print('Converting and loading a modified RRDB model to normal RRDB')
crt_net = {}
items = []
for k, v in state_dict.items():
items.append(k)
crt_net['model.0.weight'] = state_dict['conv_first.weight']
crt_net['model.0.bias'] = state_dict['conv_first.bias']
for k in items.copy():
if 'RDB' in k:
ori_k = k.replace('RRDB_trunk.', 'model.1.sub.')
if '.weight' in k:
ori_k = ori_k.replace('.weight', '.0.weight')
elif '.bias' in k:
ori_k = ori_k.replace('.bias', '.0.bias')
crt_net[ori_k] = state_dict[k]
items.remove(k)
crt_net['model.1.sub.23.weight'] = state_dict['trunk_conv.weight']
crt_net['model.1.sub.23.bias'] = state_dict['trunk_conv.bias']
crt_net['model.3.weight'] = state_dict['upconv1.weight']
crt_net['model.3.bias'] = state_dict['upconv1.bias']
crt_net['model.6.weight'] = state_dict['upconv2.weight']
crt_net['model.6.bias'] = state_dict['upconv2.bias']
crt_net['model.8.weight'] = state_dict['HRconv.weight']
crt_net['model.8.bias'] = state_dict['HRconv.bias']
crt_net['model.10.weight'] = state_dict['conv_last.weight']
crt_net['model.10.bias'] = state_dict['conv_last.bias']
state_dict = crt_net
return state_dict
def swa2normal(state_dict):
if 'n_averaged' in state_dict:
print('Attempting to convert a SWA model to a regular model\n')
crt_net = {}
items = []
for k, v in state_dict.items():
items.append(k)
for k in items.copy():
if 'n_averaged' in k:
print('n_averaged: {}'.format(state_dict[k]))
elif 'module.module.' in k:
ori_k = k.replace('module.module.', '')
crt_net[ori_k] = state_dict[k]
items.remove(k)
state_dict = crt_net
return state_dict
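# Illustrative use of the converters above when loading a checkpoint (the file name is an
# assumption, not part of this module):
# >>> state_dict = torch.load("esrgan_checkpoint.pth", map_location="cpu")
# >>> state_dict = swa2normal(state_dict)  # unwrap SWA averaging, if present
# >>> state_dict = mod2normal(state_dict)  # map modified-RRDB keys back to the original layout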
| 2.21875
| 2
|
app/thirdparty/dropbox_upload.py
|
aroranipun04/CloudCV-Old
| 11
|
12778499
|
__author__ = 'dexter'
import dropbox
import os
import sys
path = os.environ.get('CLOUDCVPATH')
if path not in sys.path:
sys.path.append(path)
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
def upload_files_to_dropbox(userid, jobid, result_path, dropbox_token=None):
try:
response = ''
if dropbox_token is not None:
client = dropbox.client.DropboxClient(dropbox_token)
try:
client.file_create_folder('/jobs')
except:
print 'Error Response'
client.file_create_folder('/jobs/' + str(jobid))
for file_name in os.listdir(result_path):
if os.path.isfile(os.path.join(result_path, file_name)):
response += os.path.join(result_path, file_name)
response += '\n'
f = open(os.path.join(result_path, file_name), 'rb')
client.put_file('/jobs/' + str(jobid) + '/' + file_name, f)
f.close()
            response += 'Output has been stored in your Dropbox folder.'
url = 'http://www.dropbox.com/home/Apps/CloudCV/jobs/' + str(jobid)
return response, url
else:
return 'dropbox token not mentioned'
except Exception as e:
raise e
| 2.609375
| 3
|
learner_node.py
|
neka-nat/distributed_rl
| 72
|
12778500
|
<reponame>neka-nat/distributed_rl<filename>learner_node.py
# -*- coding: utf-8 -*-
import argparse
import gym
import torch
import torch.optim as optim
import visdom
from distributed_rl.ape_x.learner import Learner
from distributed_rl.libs import models, wrapped_env
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def main():
    parser = argparse.ArgumentParser(description='Learner process for distributed reinforcement learning.')
parser.add_argument('-e', '--env', type=str, default='MultiFrameBreakout-v0', help='Environment name.')
parser.add_argument('-a', '--algorithm', type=str, default='ape_x', choices=['ape_x', 'r2d2'], help='Select an algorithm.')
parser.add_argument('-r', '--redisserver', type=str, default='localhost', help="Redis's server name.")
parser.add_argument('-v', '--visdomserver', type=str, default='localhost', help="Visdom's server name.")
parser.add_argument('-d', '--actordevice', type=str, default='', help="Actor's device.")
parser.add_argument('-s', '--replaysize', type=int, default=100000, help="Replay memory size.")
args = parser.parse_args()
env = gym.make(args.env)
vis = visdom.Visdom(server='http://' + args.visdomserver)
actordevice = ("cuda" if torch.cuda.is_available() else "cpu") if args.actordevice == '' else args.actordevice
if args.algorithm == 'ape_x':
nstep_return = 3
model = models.DuelingDQN(env.action_space.n).to(device)
learner = Learner(model,
models.DuelingDQN(env.action_space.n).to(device),
optim.RMSprop(model.parameters(), lr=0.00025 / 4, alpha=0.95, eps=1.5e-7),
vis, replay_size=args.replaysize, hostname=args.redisserver,
use_memory_compress=True)
learner.optimize_loop(gamma=0.999**nstep_return, actor_device=torch.device(actordevice))
elif args.algorithm == 'r2d2':
batch_size = 64
nstep_return = 5
model = models.DuelingLSTMDQN(env.action_space.n, batch_size,
nstep_return=nstep_return).to(device)
learner = Learner(model,
models.DuelingLSTMDQN(env.action_space.n, batch_size,
nstep_return=nstep_return).to(device),
optim.Adam(model.parameters(), lr=1.0e-4, eps=1.0e-3),
vis, replay_size=args.replaysize, hostname=args.redisserver,
use_memory_compress=True)
learner.optimize_loop(batch_size=batch_size, gamma=0.997**nstep_return,
beta0=0.6, target_update=2500,
actor_device=torch.device(actordevice))
else:
raise ValueError('Unknown the algorithm: %s.' % args.algorithm)
if __name__ == '__main__':
main()
| 2.15625
| 2
|
modules/conditions/keywords.py
|
thevickypedia/jarvis
| 0
|
12778501
|
<gh_stars>0
# noinspection PyUnresolvedReferences
"""List of keywords for each variable which is condition matched in the main module.
>>> Keywords
"""
current_date = ["today's date", "current date", "what is the date", "what's the date", "todays date", "whats the date"]
current_time = ["current time", "time now", "time in", "what is the time", "what's the time", "whats the time"]
weather = ["weather", "temperature", "sunrise", "sun rise", "sunset", "sun set"]
system_info = ["configuration"]
ip_info = ["address"]
wikipedia_ = ["wikipedia", "info", "information"]
news = ["news"]
report = ["report"]
robinhood = ["robinhood", "investment", "portfolio", "summary"]
apps = ["launch"]
repeat = ["repeat", "train"]
location = ["location", "where are you"]
locate = ["locate", "where is my", "where's my", "wheres my"]
music = ["music", "songs", "play"]
read_gmail = ["email", "mail"]
meaning = ["meaning", "dictionary", "definition"]
add_todo = ["add"]
delete_todo = ["remove", "delete"]
todo = ["plan"]
distance = ["far", "distance", "miles"]
avoid = ["sun", "moon", "mercury", "venus", "earth", "mars", "jupiter", "saturn", "uranus", "neptune", "pluto",
"a.m.", "p.m.", "update my to do list", "launch", "safari", "body", "human", "centimeter", "server",
"cloud", "update"]
locate_places = ["where is", "where's", "which city", "which state", "which country", "which county", "wheres"]
directions = ["take me", "get directions"]
set_alarm = ["alarm", "wake me", "timer"]
kill_alarm = ["stop alarm", "stop my alarm", "stop another alarm", "stop an alarm",
"stop timer", "stop my timer", "stop another timer", "stop an timer",
"turn off my alarm", "turn my alarm off", "stop another alarm", "turn off alarm",
"turn off my timer", "turn my timer off", "stop another timer", "turn off timer",
"delete alarm", "delete my alarm", "delete another alarm", "delete an alarm",
"delete timer", "delete my timer", "delete another timer", "delete an timer"]
reminder = ["remind", "reminder"]
google_home = ["google home", "googlehome"]
jokes = ["joke", "jokes", "make me laugh"]
notes = ["notes", "note"]
github = ["git", "github", "clone", "GitHub", "update yourself", "update your self"]
send_sms = ["message", "text", "messages"]
google_search = ["google search"]
television = ["tv", "television"]
volume = ["volume", "mute"]
face_detection = ["face", "recognize", "who am i", "detect", "facial", "recognition", "detection"]
speed_test = ["speed", "fast"]
bluetooth = ["bluetooth"]
brightness = ["brightness", "bright", "dim"]
lights = ["light"]
guard_enable = ["turn on security mode", "enable security mode"]
guard_disable = ["turn off security mode", "disable security mode"]
flip_a_coin = ["head", "tail", "flip"]
facts = ["fact", "facts"]
meetings = ["meeting"]
events = ["event"]
voice_changer = ["voice", "module", "audio"]
system_vitals = ["vitals", "statistics", "readings", "stats"]
vpn_server = ["vpn"]
car = ["car", "vehicle"]
automation = ["automation"]
sprint = ["sprint"]
ok = ["yeah", "yes", "yep", "go ahead", "proceed", "continue", "carry on", "please", "keep going"]
restart_control = ["restart", "reboot"]
exit_ = ["exit", "quit", "no", "nope", "thank you", "Xzibit", "bye", "good bye", "see you later", "talk to you later",
"that's it", "that is it", "never mind", "nevermind", "thats it"]
sleep_control = ["exit", "quit", "lock", "sleep", "Xzibit", "activate sentry mode"]
kill = ["kill", "terminate yourself", "stop running"]
shutdown = ["shutdown", "shut down", "terminate"]
| 2.40625
| 2
|
tests/unit/api/decorators/test_validation.py
|
chadlung/pywebhooks
| 94
|
12778502
|
<gh_stars>10-100
# Standard lib imports
from http import client
import unittest
from unittest.mock import patch
# Third party imports
# None
# Project level imports
from pywebhooks.app import create_wsgi_app
from pywebhooks import DEFAULT_ACCOUNTS_TABLE
from pywebhooks.api.decorators.validation import validate_id_params,\
validate_username_in_header, validate_pagination_params
from pywebhooks.database.rethinkdb.interactions import Interactions
def suite():
test_suite = unittest.TestSuite()
test_suite.addTest(WhenTestingValidation())
return test_suite
class WhenTestingValidation(unittest.TestCase):
def setUp(self):
self.app = create_wsgi_app()
self.app.config['TESTING'] = True
def test_validate_id_params_bad_request(self):
@validate_id_params(None)
def test_func():
pass
with self.app.test_request_context():
response = test_func()
self.assertEqual(response.status_code, client.BAD_REQUEST)
def test_validate_username_in_header_bad_request(self):
@validate_username_in_header()
def test_func():
pass
with self.app.test_request_context():
response = test_func()
self.assertEqual(response.status_code, client.BAD_REQUEST)
def test_validate_username_in_header_not_found(self):
with patch.object(Interactions, 'query', return_value=False):
@validate_username_in_header()
def test_func():
pass
test_header = [('username', 'johndoe')]
with self.app.test_request_context(headers=test_header):
response = test_func()
self.assertEqual(response.status_code, client.NOT_FOUND)
def test_validate_username_in_header(self):
with patch.object(Interactions, 'query', return_value=True) as \
query_method:
@validate_username_in_header()
def test_func():
pass
test_header = [('username', 'johndoe')]
with self.app.test_request_context(headers=test_header):
test_func()
query_method.assert_called_with(
DEFAULT_ACCOUNTS_TABLE,
filters={'username': 'johndoe'}
)
def test_validate_pagination_params_invalid_start(self):
@validate_pagination_params()
def test_func():
pass
with self.app.test_request_context('/?limit=10&start=-1'):
response = test_func()
self.assertEqual(response.status_code, client.BAD_REQUEST)
with self.app.test_request_context('/?limit=10'
'&start=9999999999999991'):
response = test_func()
self.assertEqual(response.status_code, client.BAD_REQUEST)
def test_validate_pagination_params_invalid_limit(self):
@validate_pagination_params()
def test_func():
pass
with self.app.test_request_context('/?limit=-1&start=0'):
response = test_func()
self.assertEqual(response.status_code, client.BAD_REQUEST)
with self.app.test_request_context('/?limit=101&start=0'):
response = test_func()
self.assertEqual(response.status_code, client.BAD_REQUEST)
def test_validate_pagination_params(self):
@validate_pagination_params()
def test_func():
pass
with self.app.test_request_context('/?limit=1&start=0'):
self.assertIsNone(test_func())
| 2.265625
| 2
|
print.py
|
MileyWright/Intro-Python1-Notes
| 0
|
12778503
|
x = 2
y = 5
z = "Test"
# This way of printing is older
# The number after the '%' represents how much "padding"
# we want to print that value with
print("x is %2d, y is %6f, z is %13s" % (x, y, z))
# x is  2, y is 5.000000, z is          Test
# '%d' stands for 'decimal'
# '%f' stands for 'float'
# '%s' stands for 'string'
print("x is %d, y is %f, z is %s" % (x, y, z))
# x is 2, y is 5.000000, z is Test
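# The modern equivalents are str.format() and f-strings (Python 3.6+); the same
# width/precision specifiers go after a colon instead of the '%':
print("x is {:2d}, y is {:6f}, z is {:>13s}".format(x, y, z))
print(f"x is {x:2d}, y is {y:6f}, z is {z:>13s}")
# Both print: x is  2, y is 5.000000, z is          Test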
| 4.25
| 4
|
workflow/__main__.py
|
trecvt-oss/workflow
| 183
|
12778504
|
<gh_stars>100-1000
#!/usr/bin/python
# -*- coding: utf-8 -*-
# License: 3-clause BSD License
# Author: <NAME> <<EMAIL>>
# Read-more: https://github.com/mdipierro/workflow
import sys, os, shelve, glob, time, shlex, subprocess, logging, re, optparse
re_line = re.compile('(?P<n>\w+):\s*(?P<p>.+?)\s*(\[(?P<dt>\w+)\]\s*)?:\s*(?P<c>.*)\s*(?P<a>\&)?')
def daemonize():
if os.fork()==0:
os.setsid()
if os.fork()==0:
return
os._exit(0)
def load_config(config_filename,data):
if not os.path.exists(config_filename): return (None,0)
config_mt = os.path.getmtime(config_filename)
config = []
print '-'*10+' loading rules '+'-'*10
lines = open(config_filename,'r').read()
for line in lines.replace('\\\n','\n').split('\n'):
if not line.startswith('#') and ':' in line:
match = re_line.match(line)
if match:
print line
name = match.group('n')
pattern = match.group('p')
dt = eval((match.group('dt') or '1')\
.replace('s','*1').replace('m','*60')\
.replace('h','*3600').replace('d','*24*3600')\
.replace('w','*7*24*3600'))
command = match.group('c')
ampersand = match.group('a')
config.append((name,pattern,dt,command,ampersand))
if not name in data:
data[name]=[]
print '-'*35
return config, config_mt
def workflow(options):
folder = options.folder or './'
logging.basicConfig(level=logging.INFO,
format='%(asctime)s: %(levelname)-8s: %(message)s',
datefmt='%m-%d %H:%M',
filename=options.logfile)
config_filename = options.config or os.path.join(folder,'workflow.config')
cache_filename = options.cache or os.path.join(folder,'workflow.cache')
data = shelve.open(cache_filename)
config, config_mt = load_config(config_filename,data)
processes = {}
while config:
pause = True
if config_mt < os.path.getmtime(config_filename):
config, config_mt = load_config(config_filename,data)
if not config: return
for clear in glob.glob('.workflow.*.clear'):
rule = clear[10:-6]
logging.info('clearing rule "%s"' % rule)
for key in data.get(rule,[]):
if key in data: del data[key]
os.unlink(clear)
for name,pattern,dt,action,ampersand in config:
filenames = glob.glob(pattern)
for filename in filenames:
mt = os.path.getmtime(filename)
if mt > time.time()-dt: continue
pid_file = filename+'.%s.pid' % name
log_file = filename+'.%s.out' % name
err_file = filename+'.%s.err' % name
key = re.sub('\s+',' ',pattern+'='+filename+':'+action).strip()
if not (os.path.exists(pid_file) or os.path.exists(err_file)):
if data.get(key,None)!=mt:
command = action.replace(options.name,filename)
logging.info('%s -> %s' % (filename, command))
wlg = open(log_file,'wb')
process = subprocess.Popen(command,stdout=wlg,
stderr=wlg,shell=True)
open(pid_file,'w').write(str(process.pid))
processes[pid_file] = (filename,command,process)
if not ampersand: process.wait()
if pid_file in processes and processes[pid_file][2].poll()==0:
filename, command, process = processes[pid_file]
returncode = process.returncode
if returncode !=0:
open(err_file,'w').write(str(returncode))
logging.error('%s -> %s' % (filename, command))
else:
data[key] = mt
data[name] = data[name]+[key]
del processes[pid_file]
os.remove(pid_file)
pause = False
elif os.path.exists(pid_file) and not pid_file in processes:
os.remove(pid_file)
pause = False
if pause: time.sleep(options.sleep)
def main():
usage = """
1. read docs: https://github.com/mdipierro/workflow
2. create a file workflow.config
3. run workflow.py
"""
version = "0.1"
parser = optparse.OptionParser(usage, None, optparse.Option, version)
parser.add_option("-s", "--sleep", dest="sleep", default=1,
help="sleep interval")
parser.add_option("-c", "--clear", dest="clear", default=None,
help="clear rule")
parser.add_option("-n", "--name", dest="name", default='$0',
help="name")
parser.add_option("-f", "--folder", dest="folder", default='./',
help="folder for workflow")
parser.add_option("-d", "--daemonize", dest="daemonize", default=False,
action="store_true", help="runs as daemon")
parser.add_option("-x", "--config", dest="config", default=None,
help="path of the config filename "\
+"(default=workflow.config)")
parser.add_option("-y", "--cache", dest="cache", default=None,
help="path of the cache filename "\
+"(default=workflow.cache)")
parser.add_option("-l", "--logfile", dest="logfile", default=None,
help="path of the logfile "\
+"(default=/var/tmp/workflow.log when daemonized)")
(options, args) = parser.parse_args()
if options.clear:
open('.workflow.%s.clear' % options.clear,'wb').write(time.ctime())
return
if options.daemonize:
options.logfile = options.logfile or '/var/tmp/workflow.log'
daemonize()
try:
workflow(options)
except KeyboardInterrupt:
return
if __name__=='__main__': main()
| 2.09375
| 2
|
chapter9/django_admin/app01/admin.py
|
MMingLeung/Python_Study
| 3
|
12778505
|
<filename>chapter9/django_admin/app01/admin.py<gh_stars>1-10
from django.contrib import admin
from django.http import HttpResponse
from app01 import models
from django.utils.safestring import mark_safe
from types import FunctionType
# Register your models here.
from django.forms import widgets
from django.utils.html import format_html
from django.forms import ModelForm, fields
# to check whether something is a function:
# isinstance(xx, FunctionType)
# =====================model_form
class MyForm(ModelForm):
'''
    First gather all the fields from UserInfo into a dict, then look up the fields defined on the custom form and update with them
'''
other = fields.CharField(widget=widgets.TextInput())
user = fields.CharField(widget=widgets.TextInput(),error_messages={"required":"用户名不能为空"})
class Meta:
        model = models.UserInfo
fields = '__all__'
# =====================formfield_overrides
class MyTextarea(widgets.Widget):
def __init__(self, attrs=None):
default_attrs = {
'cols': '40',
'rows': '10'
}
if attrs:
default_attrs.update(attrs)
super(MyTextarea, self).__init__(default_attrs)
def render(self, name, value, attrs=None):
if value is None:
value = ''
final_attrs = self.build_attrs(attrs)
return format_html("<textarea {}>\r\n{}</textarea>", final_attrs, value)
# =====================formfield_overrides end
class UserInfoMolerAdmin(admin.ModelAdmin):
form = MyForm
def test(obj):
'''
obj 是循环的当前对象所有行字段
函数的返回就是页面的显示
:return:
'''
return mark_safe("<a href='http://www.baidu.com'>%s-%s</a>" %(obj.user, obj.email))
    # columns shown in the list view
list_display = ('id','user', 'email', test)
    # columns rendered as links to the change page
list_display_links = ('email',)
    # quick filters on the right-hand side, combined with AND
list_filter = ('user','email')
    # join related tables for FK fields
# list_select_related =
    # pagination
    # list_per_page = 10 # items per page
    # list_max_show_all = 200 # threshold for "show all"
    # paginator = Paginator # paginator plugin
    # columns editable directly from the list view
list_editable = ('user', )
    # fuzzy-search fields
search_fields = ('user', )
    # keep the current search/filter conditions after an action
preserve_filters = ('user', )
# def changelist_view(self, request, extra_context=None):
# return HttpResponse('changelistview')
    # "save as" button on the detail page
save_as = False
    # keep editing after saving
save_as_continue = True
    # show the save buttons at the top of the page
save_on_top = False
    # inlines are meant for tables linked in via foreign keys
    # actions are the entries in the dropdown on the list page
def func(self, request, queryset):
print(self, request, queryset)
print(request.POST.getlist('_selected_action'))
func.short_description = "中文显示自定义action"
actions = [func, ]
# for item in actions:
    # # capitalize the first letter of Func , item()
# if hasattr(item, 'short_description'):
# print(item.short_description)
# else:
# print(item.__name__.title())
# end action
    # custom template
# change_list_template = "my_change_list_template.html"
    # FK widgets default to a dropdown; this turns them into a search box
# raw_id_fields = ('group', )
    # what the detail edit page displays
    # fields = ('') show these fields
    # exclude = ('user') exclude these fields
    # readonly_fields = ('user') read-only fields
    # group the detail page into fieldsets
# fieldsets = ((
    #     ('Basic data'),{
# 'fields':('user', 'email')
    #     }),('Other', {
# 'classes':('collapse', 'wide', 'extrapretty'),
# 'fields':('user', 'email', 'group'),
# })
# )
# M2M
filter_vertical = ('m2m',)
    # default ordering of the list
ordering = ('-id', )
    # whether to show "view on site" in the top-right corner when editing
def view_on_site(self, obj):
return 'http://www.baidu.com'
    # radio_fields applies to FK fields
radio_fields = {'group':admin.VERTICAL}
    # show the full result count when searching the list
show_full_result_count = True
    # override the form widgets used on the detail page
# formfield_overrides = {
# models.models.CharField:{'widget':MyTextarea},
# }
    # on the add page, auto-populate: email mirrors the value typed into user
# prepopulated_fields = {'email':("user","user")}
    # value displayed when a field is empty
# empty_value_display = ""
    # customize error messages
# =====================inline: lets you add users directly from the group add page
class UserInfoInline(admin.StackedInline): #StackedInline/TabularInline
extra = 0
model = models.UserInfo
class UserGroupModelAdmin(admin.ModelAdmin):
list_display = ('id', 'title')
inlines = [UserInfoInline, ]
# =====================inline end
# admin.site.register(models.UserInfo,admin.ModelAdmin)
admin.site.register([models.UserInfo,],UserInfoMolerAdmin)
admin.site.register(models.UserGroup, UserGroupModelAdmin)
admin.site.register(models.Role,admin.ModelAdmin)
# alternative registration style
# @admin.register([models.UserInfo, ])
# class UserAdmin(admin.ModelAdmin):
# pass
'''
Internally this just builds a dict like this:
_registry = {
models.UserInfo:admin.ModelAdmin(models.UserInfo, self=admin.site)
....
}
'''
| 2.28125
| 2
|
pipe_code/scripts/junction_saturation.py
|
ShaopengLiu1/RNA_seq_analysis_pipe
| 3
|
12778506
|
<reponame>ShaopengLiu1/RNA_seq_analysis_pipe
#!/usr/bin/env python
'''
Check if sequencing depth is saturated or not, based on the idea that when sequencing depth is
approaching saturation, fewer new junctions will be detected.
See http://rseqc.sourceforge.net/ for details.
'''
#import built-in modules
import os,sys
if sys.version_info[0] != 2 or sys.version_info[1] != 7:
print >>sys.stderr, "\nYou are using python" + str(sys.version_info[0]) + '.' + str(sys.version_info[1]) + " RSeQC needs python2.7!\n"
sys.exit()
import re
import string
from optparse import OptionParser
import warnings
import string
import collections
import math
import sets
from time import strftime
import subprocess
#import third-party modules
from bx.bitset import *
from bx.bitset_builders import *
from bx.intervals import *
#import my own modules
from qcmodule import SAM
#changes to the paths
#changing history to this module
__author__ = "<NAME>"
__copyright__ = "Copyleft"
__credits__ = []
__license__ = "GPL"
__version__="2.6.4"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
def printlog (mesg):
'''print progress into stderr and log file'''
mesg="@ " + strftime("%Y-%m-%d %H:%M:%S") + ": " + mesg
LOG=open('class.log','a')
print >>sys.stderr,mesg
print >>LOG,mesg
def main():
usage="%prog [options]" + '\n' + __doc__ + "\n"
parser = OptionParser(usage,version="%prog " + __version__)
parser.add_option("-i","--input-file",action="store",type="string",dest="input_file",help="Alignment file in BAM or SAM format.[required]")
parser.add_option("-o","--out-prefix",action="store",type="string",dest="output_prefix",help="Prefix of output files(s). [required]")
parser.add_option("-r","--refgene",action="store",type="string",dest="refgene_bed",help="Reference gene model in bed fomat. This gene model is used to determine known splicing junctions. [required]")
parser.add_option("-l","--percentile-floor",action="store",type="int",dest="percentile_low_bound",default=5, help="Sampling starts from this percentile. A integer between 0 and 100. default=%default")
parser.add_option("-u","--percentile-ceiling",action="store",type="int",dest="percentile_up_bound",default=100, help="Sampling ends at this percentile. A integer between 0 and 100. default=%default")
parser.add_option("-s","--percentile-step",action="store",type="int",dest="percentile_step",default=5, help="Sampling frequency. Smaller value means more sampling times. A integer between 0 and 100. default=%default")
parser.add_option("-m","--min-intron",action="store",type="int",dest="minimum_intron_size",default=50, help="Minimum intron size (bp). default=%default")
parser.add_option("-v","--min-coverage",action="store",type="int",dest="minimum_splice_read",default=1, help="Minimum number of supportting reads to call a junction. default=%default")
parser.add_option("-q","--mapq",action="store",type="int",dest="map_qual",default=30,help="Minimum mapping quality (phred scaled) for an alignment to be called \"uniquely mapped\". default=%default")
(options,args)=parser.parse_args()
if not (options.output_prefix and options.input_file and options.refgene_bed):
parser.print_help()
sys.exit(0)
if options.percentile_low_bound <0 or options.percentile_low_bound >100:
print >>sys.stderr, "percentile_low_bound must be larger than 0 and samller than 100"
sys.exit(0)
if options.percentile_up_bound <0 or options.percentile_up_bound >100:
print >>sys.stderr, "percentile_up_bound must be larger than 0 and samller than 100"
sys.exit(0)
if options.percentile_up_bound < options.percentile_low_bound:
print >>sys.stderr, "percentile_up_bound must be larger than percentile_low_bound"
sys.exit(0)
if options.percentile_step <0 or options.percentile_step > options.percentile_up_bound:
print >>sys.stderr, "percentile_step must be larger than 0 and samller than percentile_up_bound"
sys.exit(0)
if os.path.exists(options.input_file):
obj = SAM.ParseBAM(options.input_file)
obj.saturation_junction(outfile=options.output_prefix, refgene=options.refgene_bed, sample_start=options.percentile_low_bound,sample_end=options.percentile_up_bound,sample_step=options.percentile_step,min_intron=options.minimum_intron_size,recur=options.minimum_splice_read, q_cut = options.map_qual)
try:
subprocess.call("Rscript " + options.output_prefix + '.junctionSaturation_plot.r', shell=True)
except:
print >>sys.stderr, "Cannot generate pdf file from " + '.junctionSaturation_plot.r'
pass
else:
		print >>sys.stderr, '\n\n' + options.input_file + " does NOT exist" + '\n'
sys.exit(0)
#parser.print_help()
if __name__ == '__main__':
main()
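# A usage sketch (not part of the original script; the script name below is assumed,
# and the BAM/BED/prefix names are placeholders). Only the option flags come from
# the parser defined above:
#
#   python junction_saturation.py -i sample.bam -r refseq_gene_model.bed -o sample_out
#
# With the default -l 5 -u 100 -s 5 this resamples alignments from 5% to 100% of the
# data in 5% steps and writes sample_out.junctionSaturation_plot.r, which is then
# passed to Rscript to draw the saturation plot.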
| 2.109375
| 2
|
pypy/module/cpyext/test/test_module.py
|
SeraphRoy/PyPy-Functional
| 0
|
12778507
|
<gh_stars>0
from pypy.module.cpyext.modsupport import PyModule_New
from pypy.module.cpyext.test.test_api import BaseApiTest
from rpython.rtyper.lltypesystem import rffi
class TestModuleObject(BaseApiTest):
def test_module_new(self, space):
with rffi.scoped_str2charp('testname') as buf:
w_mod = PyModule_New(space, buf)
assert space.eq_w(space.getattr(w_mod, space.newtext('__name__')),
space.newtext('testname'))
def test_module_getname(self, space, api):
w_sys = space.wrap(space.sys)
p = api.PyModule_GetName(w_sys)
assert rffi.charp2str(p) == 'sys'
p2 = api.PyModule_GetName(w_sys)
assert p2 == p
self.raises(space, api, SystemError, api.PyModule_GetName, space.w_True)
| 2
| 2
|
UrsinaShaderBuilder/shader_instructions.py
|
Werxzy/UrsinaShaderBuilder
| 3
|
12778508
|
# this may need to be represented in a better way
DataTypes = [
'float',
'vec2',
'vec3',
'vec4',
'int',
'ivec2',
'ivec3',
'ivec4',
'uint',
'uvec2',
'uvec3',
'uvec4',
'bool',
'bvec2',
'bvec3',
'bvec4',
'mat2',
'mat3',
'mat4',
'mat2x3',
'mat2x4',
'mat3x2',
'mat3x4',
'mat4x2',
'mat4x3',
'sampler1D',
'sampler2D',
'sampler3D',
'samplerCube',
'sampler1DShadow',
'sampler2DShadow',
]
DataTypeLayouts = {
'float' : {'x':'float'},
'vec2' : {'x':'float','y':'float'},
'vec3' : {'x':'float','y':'float','z':'float'},
'vec4' : {'x':'float','y':'float','z':'float','w':'float'},
'int' : {'x':'int'},
'ivec2' : {'x':'int','y':'int'},
'ivec3' : {'x':'int','y':'int','z':'int'},
'ivec4' : {'x':'int','y':'int','z':'int','w':'int'},
'bool' : {'x':'bool'},
'bvec2' : {'x':'bool','y':'bool'},
'bvec3' : {'x':'bool','y':'bool','z':'bool'},
'bvec4' : {'x':'bool','y':'bool','z':'bool','w':'bool'},
'uint' : {'x':'uint'},
'uvec2' : {'x':'uint','y':'uint'},
'uvec3' : {'x':'uint','y':'uint','z':'uint'},
'uvec4' : {'x':'uint','y':'uint','z':'uint','w':'uint'},
'mat2' : {
'[0][0]':'float', '[0][1]':'float',
'[1][0]':'float', '[1][1]':'float',
},
'mat3' : {
'[0][0]':'float', '[0][1]':'float', '[0][2]':'float',
'[1][0]':'float', '[1][1]':'float', '[1][2]':'float',
'[2][0]':'float', '[2][1]':'float', '[2][2]':'float',
},
'mat4' : {
'[0][0]':'float', '[0][1]':'float', '[0][2]':'float', '[0][3]':'float',
'[1][0]':'float', '[1][1]':'float', '[1][2]':'float', '[1][3]':'float',
'[2][0]':'float', '[2][1]':'float', '[2][2]':'float', '[2][3]':'float',
'[3][0]':'float', '[3][1]':'float', '[3][2]':'float', '[3][3]':'float',
},
}
DataMultiTypes = {
'vec' : ['vec2', 'vec3', 'vec4'],
'mat' : ['mat2', 'mat3', 'mat4'],
'ivec' : ['ivec2', 'ivec3', 'ivec4'],
'uvec' : ['uvec2', 'uvec3', 'uvec4'],
'bvec' : ['bvec2', 'bvec3', 'bvec4'],
'genType' : ['float', 'vec2', 'vec3', 'vec4'],
'genType3' : ['float', 'vec2', 'vec3'],
'intType' : ['int', 'ivec2', 'ivec3', 'ivec4'],
'uintType' : ['uint', 'uvec2', 'uvec3', 'uvec4'],
'boolType' : ['bool', 'bvec2', 'bvec3', 'bvec4'],
'intType3' : ['int', 'ivec2', 'ivec3'],
'samplerND' : ['sampler1D', 'sampler2D', 'sampler3D']
}
# http://mew.cx/glsl_quickref.pdf (bless) (probably outdated and a bit unrelated to panda3d)
# https://www.khronos.org/files/opengl-quick-reference-card.pdf (better)
'''
holds all base instructions
'''
# for key, value in GLSL.items: # probably not needed in this way, but just in case
# 1 input and 1 output that are the same type
def simple_func(desc, func, types = ['genType']):
return {
'description' : desc,
'inputs' : {'a_': list(types)},
'outputs': {'result': list(types)},
'function' : f'result={func}(a_);'
}
def build_func(desc, func, names = 'abcdef', inputTypes = [['genType'],], outputTypes = ['genType']):
re = {
'description' : desc,
'inputs' : {},
'outputs': {'result': list(outputTypes)},
'function' : f'result={func}('
}
for i in range(len(inputTypes)):
v = f'{names[i]}{"_" * (len(names[i]) == 1)}'
re['inputs'].update({v : list(inputTypes[i])})
re['function'] += f'{"," * (i > 0)}{v}'
re['function'] += ');'
return re
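# An illustrative note (not part of the original file): for example, the 'Power' entry
# defined below, build_func('Math function being a^b.', 'pow', inputTypes=[['genType'], ['genType']]),
# expands to roughly:
# {
#     'description' : 'Math function being a^b.',
#     'inputs' : {'a_': ['genType'], 'b_': ['genType']},
#     'outputs' : {'result': ['genType']},
#     'function' : 'result=pow(a_,b_);'
# }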
# instructions like swizzle might need special nodes
# how should loops be handled, if they turn out to be needed?
'''
be careful with input/output names in the function, as str.replace might substitute the wrong occurrences
'INSTRUCTION NAME' : {
'description' : '', # text describing the instruction
'inputs' : { # lists all the inputs and their name
'INPUT NAME' : ['TYPE'] # input name and list of possible types (!!! Nth value in each array is paired together, even in output )
},
'outputs' : { # lists all the outputs and their name (there's almost always only one, but with 'out' there could be multiple)
'OUTPUT NAME' : ['TYPE'] # output name and list of possible types
},
'function' : 'FUNCTION', # function used to put into GLSL (substituted via str.replace on the input names)
}
currently assumes there will be only one of DataMultiTypes in any ith inputs or outputs types
(there can be genType in all inputs and outputs, but no genType and vec together)
if there's only outputs, there should only be one
this can be changed, but would need reworking of ShaderNode.update_connections and what GLSL[]['function'] stores
'''
GLSL = {
    # Arithmetic instructions
'Add' : {
'description' : 'Add two values.',
'inputs' : {'a_': ['genType', 'intType', 'uintType'],'b_': ['genType', 'intType', 'uintType']},
'outputs': {'result': ['genType', 'intType', 'uintType']},
'function' : 'result=a_+b_;'
},
'Subtract' : {
'description' : 'Subtract a value by another.',
'inputs' : {'a_': ['genType', 'intType', 'uintType'],'b_': ['genType', 'intType', 'uintType']},
'outputs': {'result': ['genType', 'intType', 'uintType']},
'function' : 'result=a_-b_;'
},
'Multiply' : {
'description' : 'Multiply two values.',
'inputs' : {
'a_': ['genType', 'intType', 'uintType', 'mat',
'float', 'int', 'uint',
'vec', 'vec', 'vec',
'float', 'int', 'uint',
'mat', 'mat', 'mat',
'vec', 'ivec', 'uvec',
'mat', 'mat', 'mat'
],
'b_': ['genType', 'intType', 'uintType', 'mat',
'vec', 'vec', 'vec',
'float', 'int', 'uint',
'mat', 'mat', 'mat',
'float', 'int', 'uint',
'mat', 'mat', 'mat',
'vec', 'ivec', 'uvec'
]},
'outputs': {
'result': ['genType', 'intType', 'uintType', 'mat', # v*v, s*s, m*m (n by n)
'vec', 'vec', 'vec', # s*v
'vec', 'vec', 'vec', # v*s
'mat', 'mat', 'mat', # s*m
'mat', 'mat', 'mat', # m*s
'vec', 'vec', 'vec', # v*m
'vec', 'vec', 'vec', # m*v
]},
'function' : 'result=a_*b_;'
},
'Divide' : {
'description' : 'Divide a value by another.',
'inputs' : {'a_': ['genType', 'intType', 'uintType'],'b_': ['genType', 'intType', 'uintType']},
'outputs': {'result': ['genType', 'intType', 'uintType']},
'function' : 'result=a_/b_;'
},
# Angle and trigonometry functions
'Sine' : simple_func('Sine math function.', 'sin'),
'Cosine' : simple_func('Cosine math function.', 'cos'),
'Tangent' : simple_func('Tangent math function.', 'tan'),
'Arc Sine' : simple_func('Arc sine math function.', 'asin'),
'Arc Cosine' : simple_func('Arc cosine math function.', 'acos'),
'Arc Tangent' : simple_func('Arc tangent math function.', 'atan'),
'Arc Tangent 2' : {
        'description' : 'Arc tangent math function using\nseparate x and y inputs.',
'inputs' : {'y_': ['genType'], 'x_': ['genType']},
'outputs': {'result': ['genType']},
'function' : 'result=atan(y_,x_);'
},
'Hyperbolic Sine' : simple_func('Hyperbolic sine math function.', 'sinh'),
'Hyperbolic Cosine' : simple_func('Hyperbolic cosine math function.', 'cosh'),
'Hyperbolic Tangent' : simple_func('Hyperbolic tangent math function.', 'tanh'),
'Hyperbolic Arc Sine' : simple_func('Hyperbolic arc sine math function.', 'asinh'),
'Hyperbolic Arc Cosine' : simple_func('Hyperbolic arc cosine math function.', 'acosh'),
'Hyperbolic Arc Tangent' : simple_func('Hyperbolic arc tangent math function.', 'atanh'),
'Radians' : simple_func('Converts degrees to radians.', 'radians'),
'Degrees' : simple_func('Converts radians to degrees.', 'degrees'),
# Exponential functions
'Power' : build_func('Math function being a^b.', 'pow', inputTypes=[['genType'], ['genType']]),
'Exponent' : simple_func('Natural exponential.', 'exp'),
'Logarithm' : simple_func('Natural logarithm.', 'log'),
    'Exponent 2' : simple_func('Computes 2 raised to the\npower of the input.', 'exp2'),
    'Logarithm 2' : simple_func('Base-2 logarithm.', 'log2'),
'Square Root' : simple_func('Square root.', 'sqrt'),
'Inverse Square Root' : simple_func('Inverse square root.', 'inversesqrt'),
# Common functions
'Absolute' : simple_func('Removes the sign of a value.', 'abs', types=['genType', 'intType']),
'Sign' : simple_func('Gets the sign of a value', 'sign', types=['genType', 'intType']),
'Ceiling' : simple_func('Rounds the value(s) up.', 'ceil'),
'Floor' : simple_func('Rounds the value(s) down.', 'floor'),
'Round' : simple_func('Rounds the value(s)\nto nearest integer.', 'round'),
'Round Even' : simple_func('Rounds the value(s)\nto nearest even integer.', 'roundEven'),
'Truncate' : simple_func('Rounds towards zero', 'trunc'),
    'Fraction' : simple_func('Gets the fractional part of the value(s).', 'fract'),
'Modulous' : build_func('Gets the remainder of\n dividing by a value.', 'mod', inputTypes=[['genType','genType'], ['genType','float']], outputTypes=['genType','genType']),
# 'Modf' : { # currently out parameters have no funcitonality in instruction nodes
# 'description' : 'Seperates decimal from the value.',
# 'inputs' : {'a_': ['genType']},
# 'outputs' : {'integer_': ['genType'], 'decimal_': ['genType']},
# 'function' : 'integer_=modf(a_, out decimal_);'
# },
'Maximum' : build_func('Gets the maximum value(s).', 'max',
inputTypes=[['genType', 'genType', 'intType', 'intType', 'uintType', 'uintType'], ['genType', 'float', 'intType', 'int', 'uintType', 'uint']],
outputTypes=['genType', 'genType', 'intType', 'intType', 'uintType', 'uintType']),
'Minimum' : build_func('Gets the minimum value(s).', 'min',
inputTypes=[['genType', 'genType', 'intType', 'intType', 'uintType', 'uintType'], ['genType', 'float', 'intType', 'int', 'uintType', 'uint']],
outputTypes=['genType', 'genType', 'intType', 'intType', 'uintType', 'uintType']),
'Clamp' : build_func('Clamps a value or vector\nbetween a minimum and maximum.', 'clamp',
names=['input_', 'min_', 'max_'],
inputTypes=[['genType','genType', 'intType', 'intType', 'uintType', 'uintType'],
['genType','float', 'intType', 'int', 'uintType', 'uint'],
['genType','float', 'intType', 'int', 'uintType', 'uint']],
outputTypes=['genType','genType', 'intType', 'intType', 'uintType', 'uintType']),
'Interpolate' : build_func('Linearly interpolates between\n two values.', 'mix', names=['from_', 'to_', 'T_'],
inputTypes=[['genType','genType','genType','genType'], ['genType','genType','genType','genType'], ['genType','float','boolType','bool']],
outputTypes=['genType','genType','genType','genType']),
    'Smooth Step' : build_func('Smooth Hermite interpolation\nbetween 0 and 1 across the two edges.', 'smoothstep', names=['edge_a_', 'edge_b_', 'x_'],
inputTypes=[['genType','float'], ['genType','float'], ['genType','genType']],
outputTypes=['genType','genType']),
'Step' : build_func('Gives 0 where x is smaller,\n otherwise gives 1.', 'step', names=['edge_', 'x_'],
inputTypes=[['genType','float'], ['genType','genType']], outputTypes=['genType','genType']),
'Is Not A Number' : build_func('For each component,\nreturns true if not a number.', 'isnan', inputTypes=[['genType'],], outputTypes=['boolType']),
'Is Infinite' : build_func('For each component,\nreturns true if infinite', 'isinf', inputTypes=[['genType'],], outputTypes=['boolType']),
# Geometric functions
'Cross Product' : build_func('Cross product of two vectors.', 'cross', inputTypes=[['vec3'], ['vec3']], outputTypes=['vec3']),
'Distance' : build_func('Distance between two values.', 'distance', inputTypes=[['genType'], ['genType']], outputTypes=['float']),
'Dot Product' : build_func('Dot product of two values.', 'dot', inputTypes=[['genType'], ['genType']], outputTypes=['float']),
    'Face Forward' : build_func('Flips Direction of V\nif I and N face differently.', 'faceforward', names='VIN', inputTypes=[['genType'], ['genType'], ['genType']], outputTypes=['genType']),
'Length' : build_func('Length of the vector', 'length', inputTypes=[['genType']], outputTypes=['float']),
'Normalize' : build_func('Normalizes the vector.', 'normalize', inputTypes=[['genType']], outputTypes=['genType']),
'Reflect' : build_func('Reflects a vector.', 'reflect', names=['in_', 'normal_'], inputTypes=[['genType'], ['genType']], outputTypes=['genType']),
'Refract' : build_func('Refracts a vector.', 'refract', names=['in_', 'normal_', 'eta_'], inputTypes=[['genType'], ['genType'], ['float']], outputTypes=['genType']),
'ftransform':{
'description' : 'Invariant vertex transformation.',
'inputs' : {},
'outputs' : {'result': ['vec4']},
'function' : 'result=ftransform();'
},
# Fragment processing functions (Fragment shaders only)
'dFdx' : simple_func('Derivative in x.', 'dFdx'),
'dFdy' : simple_func('Derivative in y.', 'dFdy'),
'fwidth' : simple_func('Sum of the absolute derivative in x and y.', 'fwidth'),
# Matrix functions
    'Matrix Comp Multiply' : build_func('Multiplies the components\n of two matrices.', 'matrixCompMult', inputTypes=[['mat'], ['mat']], outputTypes=['mat']),
    'Outer Product Matrix' : build_func('Creates a matrix using\nthe outer product of two vectors.', 'outerProduct', names='NM',
inputTypes=[['vec', 'vec2', 'vec2', 'vec3', 'vec3', 'vec4', 'vec4'],
['vec', 'vec3', 'vec4', 'vec2', 'vec4', 'vec2', 'vec3']],
outputTypes=['mat', 'mat3x2', 'mat4x2', 'mat2x3', 'mat4x3', 'mat2x4', 'mat3x4']),
'Transpose Matrix' : build_func('Transposes a matrix.', 'transpose', names=['matrix',],
inputTypes=[['mat', 'mat2x3', 'mat2x4', 'mat3x2', 'mat3x4', 'mat4x2', 'mat4x3']],
outputTypes=['mat', 'mat3x2', 'mat4x2', 'mat2x3', 'mat4x3', 'mat2x4', 'mat3x4']),
    'Determinant' : build_func('Gets the determinant of a matrix.', 'determinant', names=['matrix',], inputTypes=[['mat'],], outputTypes=['float']),
'Inverse Matrix' : build_func('Inverses the matrix.', 'inverse', names=['matrix',], inputTypes=[['mat'],], outputTypes=['mat']),
# Vector Relational Functions
'Boolean All' : build_func('Returns true if all values are true.', 'all', inputTypes=[['bvec']], outputTypes=['bool']),
'Boolean Any' : build_func('Returns true if at least\none value is true.', 'any', inputTypes=[['bvec']], outputTypes=['bool']),
'Boolean Not' : build_func('Inverts the boolean vector.', 'not', inputTypes=[['bvec']], outputTypes=['bvec']),
'Equal' : build_func('For each component,\nreturns true if equal.', 'equal',
inputTypes=[['vec', 'ivec', 'bvec'], ['vec', 'ivec', 'bvec']], outputTypes=['bvec', 'bvec', 'bvec']),
'Not Equal' : build_func('For each component,\nreturns true if not equal.', 'notEqual',
inputTypes=[['vec', 'ivec', 'bvec'], ['vec', 'ivec', 'bvec']], outputTypes=['bvec', 'bvec', 'bvec']),
'Greater Than' : build_func('For each component,\nreturns true if greater than.', 'greaterThan',
inputTypes=[['vec', 'ivec'], ['vec', 'ivec']], outputTypes=['bvec', 'bvec']),
'Greater Than or Equal' : build_func('For each component,\nreturns true if greater than or equal to.', 'greaterThanEqual',
inputTypes=[['vec', 'ivec'], ['vec', 'ivec']], outputTypes=['bvec', 'bvec']),
'Less Than' : build_func('For each component,\nreturns true if less than.', 'lessThan',
inputTypes=[['vec', 'ivec'], ['vec', 'ivec']], outputTypes=['bvec', 'bvec']),
'Less Than or Equal' : build_func('For each component,\nreturns true if less than or equal to.', 'lessThanEqual',
inputTypes=[['vec', 'ivec'], ['vec', 'ivec']], outputTypes=['bvec', 'bvec']),
# noise functions (not supported by panda3d?)
# 'Noise Float' : build_func('Noise value as float', 'noise1', outputTypes=['float']),
# 'Noise Vec2' : build_func('Noise value as vec2', 'noise2', outputTypes=['vec2']),
# 'Noise Vec3' : build_func('Noise value as vec3', 'noise3', outputTypes=['vec3']),
# 'Noise Vec4' : build_func('Noise value as vec4', 'noise4', outputTypes=['vec4']),
# Texture lookup functions
# NOTE, currently only adding sampler#D and not usampler or isampler and missing some other types
    'Texture Size LOD' : build_func('Returns the texture size\nat the given LOD.', 'textureSize', names=['sampler_', 'lod_'],
inputTypes=[['samplerND', 'samplerCube', 'sampler1DShadow', 'sampler2DShadow'],
['int', 'int', 'int', 'int']],
outputTypes=['intType3', 'ivec2', 'int', 'ivec2']),
    'Texture Sample' : build_func('Samples texture at coordinate.', 'texture', names=['sampler_', 'uv_'],
inputTypes=[['samplerND', 'samplerCube', 'sampler1DShadow', 'sampler2DShadow'],
['genType3', 'vec3', 'vec3', 'vec3']],
outputTypes=['vec4', 'vec4', 'float', 'float']),
    'Texture Sample + Bias' : build_func('Samples texture at coordinate with bias.', 'texture', names=['sampler_', 'uv_', 'bias_'],
inputTypes=[['samplerND', 'samplerCube', 'sampler1DShadow', 'sampler2DShadow'],
['genType3', 'vec3', 'vec3', 'vec3'],
['float', 'float', 'float', 'float']],
outputTypes=['vec4', 'vec4', 'float', 'float']),
    'Tex Sample + Projection' : build_func('Samples texture at coordinate\nwith projection.', 'textureProj', names=['sampler_', 'uv_'],
inputTypes=[['sampler1D', 'sampler1D', 'sampler2D', 'sampler2D', 'sampler3D', 'sampler1DShadow', 'sampler2DShadow'],
['vec2', 'vec4', 'vec3', 'vec4', 'vec4', 'vec4', 'vec4']],
outputTypes=['vec4', 'vec4', 'vec4', 'vec4', 'vec4', 'float', 'float']),
    'Tex Sample + Proj + Bias' : build_func('Samples texture at coordinate\nwith projection and bias.', 'textureProj', names=['sampler_', 'uv_', 'bias_'],
inputTypes=[['sampler1D', 'sampler1D', 'sampler2D', 'sampler2D', 'sampler3D', 'sampler1DShadow', 'sampler2DShadow'],
['vec2', 'vec4', 'vec3', 'vec4', 'vec4', 'vec4', 'vec4'],
['float', 'float', 'float', 'float', 'float', 'float', 'float']],
outputTypes=['vec4', 'vec4', 'vec4', 'vec4', 'vec4', 'float', 'float']),
}
def _get_range(a, b):
_keys = list(GLSL.keys())
return _keys[_keys.index(a):(_keys.index(b) + 1)]
GLSL_catagorized = {
'Arithmatic' : _get_range('Add','Divide'),
'Trigonometry' : _get_range('Sine', 'Degrees'),
'Exponential' : _get_range('Power', 'Inverse Square Root'),
'Common' : _get_range('Absolute', 'Is Infinite'),
'Geometric' : _get_range('Cross Product', 'ftransform'),
'Matrix' : _get_range('Matrix Comp Multiply', 'Inverse Matrix'),
'Logic' : _get_range('Boolean All', 'Less Than or Equal'),
# 'Noise' : _get_range('Noise Float', 'Noise Vec4'),
'Texture' : _get_range('Texture Size LOD', 'Tex Sample + Proj + Bias'),
'Other' : _get_range('dFdx', 'fwidth'),
}
#expands the genType for every version of the function
for inst in GLSL.values():
c = 0
l = list(inst['inputs'].values())
if(len(l) > 0):
c = len(l[0])
else:
c = len(list(inst['outputs'].values())[0])
for _ in range(c):
inouts = [('inputs', k, v.pop(0)) for k,v in inst['inputs'].items()]
inouts += [('outputs', k, v.pop(0)) for k,v in inst['outputs'].items()]
found = ''
for i in inouts:
if i[2] in DataMultiTypes.keys():
found = i[2]
break
if found != '':
for _ in range(len(inouts)):
orig = inouts.pop(0)
if orig[2] == found:
inouts += [(orig[0], orig[1], v) for v in DataMultiTypes[found]]
elif orig[2] in DataMultiTypes.keys() and len(DataMultiTypes[found]) == len(DataMultiTypes[orig[2]]):
inouts += [(orig[0], orig[1], v) for v in DataMultiTypes[orig[2]]]
else:
inouts += [orig for _ in DataMultiTypes[found]]
for v in inouts:
inst[v[0]][v[1]].append(v[2])
l = list(inst['inputs'].values())
if(len(l) > 0):
c = len(l[0])
else:
c = len(list(inst['outputs'].values())[0])
# remove duplicates, if input[i] == input[j]: input.pop(i), output.pop(i)
# only checks inputs, because there shouldn't be any duplicate input data types with different outputs data types
for i in range(c - 1, 0, -1):
keys_i = list(inst['inputs'].keys())
for j in range(i - 1):
dupe = True
for k in keys_i:
if inst['inputs'][k][i] != inst['inputs'][k][j]:
dupe = False
break
if dupe:
for k in keys_i:
inst['inputs'][k].pop(i)
for k in inst['outputs'].keys():
inst['outputs'][k].pop(i)
'''
( example )
'Clamp' : {
'description': 'Clamps a value or vector\nbetween a minimum and maximum.',
'inputs': {
'input': ['genType','genType'],
'min': ['genType','float'],
'max': ['genType','float']},
'outputs': {
'result': ['genType','genType']},
'function' : 'result=clamp(input,min,max);'
}
( turns into )
'Clamp' : {
'description': 'Clamps a value or vector\nbetween a minimum and maximum.',
'inputs': {
'input' : ['float', 'vec2', 'vec3', 'vec4', 'vec2', 'vec3', 'vec4'],
'min': ['float', 'vec2', 'vec3', 'vec4', 'float', 'float', 'float'],
'max': ['float', 'vec2', 'vec3', 'vec4', 'float', 'float', 'float']},
'outputs': {
'result': ['float', 'vec2', 'vec3', 'vec4', 'vec2', 'vec3', 'vec4']},
'function': 'result=clamp(input,min,max);'
}
( there ARE duplicates, don't think it matters for now )
'''
| 2.015625
| 2
|
WrapBigARTM/model.py
|
JessikaSmith/WrapBigARTM
| 1
|
12778509
|
<reponame>JessikaSmith/WrapBigARTM<filename>WrapBigARTM/model.py
# Currently only main and back topics and text modality are supported
import os
import numpy as np
import artm
from WrapBigARTM.scores import return_all_tokens_coherence
class Topic_model:
def __init__(self, experiments_path, S=100, decor_test=False):
        # experiments_path: directory under which the best model snapshots are saved
self.model = None
self.S = S
self.specific = ['main{}'.format(i) for i in range(S)]
self.save_path = os.path.join(experiments_path, 'best_model')
self.decor_test = decor_test
if not os.path.exists(self.save_path):
os.makedirs(self.save_path)
def set_params(self, params_string):
self.decor = params_string[0]
self.n1 = params_string[1]
if self.decor_test:
return
self.spb = params_string[2]
self.stb = params_string[3]
self.n2 = params_string[4]
self.sp1 = params_string[5]
self.st1 = params_string[6]
self.n3 = params_string[7]
# self.sp2 = params_string[8]
# self.st2 = params_string[9]
# self.n4 = params_string[10]
# self.B = params_string[11]
# self.decor_2 = params_string[14]
self.B = params_string[8]
# self.decor_2 = params_string[8]
def init_model(self, params_string, dict_path):
self.set_params(params_string)
self.back = ['back{}'.format(i) for i in range(self.B)]
self.dictionary = artm.Dictionary()
self.dictionary.load_text(dictionary_path=dict_path)
self.model = artm.ARTM(num_topics=self.S + self.B,
class_ids=['@default_class'],
dictionary=self.dictionary,
show_progress_bars=False,
# cache_theta=True,
topic_names=self.specific + self.back,
num_processors=32)
self.set_scores()
def save_model(self, ii):
self.model.dump_artm_model(os.path.join(self.save_path, 'model_{}'.format(ii)))
def set_scores(self):
self.model.scores.add(artm.PerplexityScore(name='PerplexityScore', dictionary=self.dictionary))
self.model.scores.add(
artm.SparsityPhiScore(name='SparsityPhiScore', class_id='@default_class', topic_names=self.specific))
self.model.scores.add(artm.SparsityThetaScore(name='SparsityThetaScore', topic_names=self.specific))
# Fraction of background words in the whole collection
self.model.scores.add(
artm.BackgroundTokensRatioScore(name='BackgroundTokensRatioScore', class_id='@default_class'))
# Kernel characteristics
self.model.scores.add(
artm.TopicKernelScore(name='TopicKernelScore', class_id='@default_class', topic_names=self.specific,
probability_mass_threshold=0.5, dictionary=self.dictionary))
# Looking at top tokens
self.model.scores.add(artm.TopTokensScore(name='TopTokensScore', class_id='@default_class', num_tokens=100))
def train(self, batch_vectorizer):
if self.model is None:
print('Initialise the model first!')
return
self.model.regularizers.add(artm.DecorrelatorPhiRegularizer(name='decorr',
topic_names=self.specific, tau=self.decor))
# self.model.regularizers.add(artm.DecorrelatorPhiRegularizer(name='decorr_2',
# topic_names=self.back, tau=self.decor_2))
self.model.fit_offline(batch_vectorizer=batch_vectorizer, num_collection_passes=self.n1)
# if ((self.n2 != 0) and (self.B != 0)):
if (self.B != 0):
            self.model.regularizers.add(artm.SmoothSparsePhiRegularizer(name='SmoothPhi',
                                                                        topic_names=self.back, tau=self.spb))
            self.model.regularizers.add(artm.SmoothSparseThetaRegularizer(name='SmoothTheta',
                                                                          topic_names=self.back, tau=self.stb))
            self.model.fit_offline(batch_vectorizer=batch_vectorizer, num_collection_passes=self.n2)
        self.model.regularizers.add(artm.SmoothSparsePhiRegularizer(name='SparsePhi',
                                                                    topic_names=self.specific, tau=self.sp1))
        self.model.regularizers.add(artm.SmoothSparseThetaRegularizer(name='SparseTheta',
                                                                      topic_names=self.specific, tau=self.st1))
        self.model.fit_offline(batch_vectorizer=batch_vectorizer, num_collection_passes=self.n3)
# if (self.n4 != 0):
# self.model.regularizers['SparsePhi'].tau = self.sp2
# self.model.regularizers['SparseTheta'].tau = self.st2
# self.model.fit_offline(batch_vectorizer=batch_vectorizer, num_collection_passes=self.n4)
print('Training is complete')
def decor_train(self):
if self.model is None:
print('Initialise the model first')
return
self.model.regularizers.add(artm.DecorrelatorPhiRegularizer(name='decorr',
topic_names=self.specific, tau=self.decor))
def get_avg_coherence_score(self, mutual_info_dict, only_specific=True, for_individ_fitness=False):
coherences_main, coherences_back = return_all_tokens_coherence(self.model, S=self.S, B=self.B,
mutual_info_dict=mutual_info_dict)
if for_individ_fitness:
print('COMPONENTS: ', np.mean(list(coherences_main.values())), np.min(list(coherences_main.values())))
return np.mean(list(coherences_main.values())) + np.min(list(coherences_main.values()))
return np.mean(list(coherences_main.values()))
def print_topics(self):
res = self.model.score_tracker['TopTokensScore'].last_tokens
for i, topic in enumerate(self.model.topic_names):
print(topic)
print(" ".join(res[topic][:50]))
print()
def get_prob_mixture(self, batches_path):
theta_test = self.model.transform(batch_vectorizer=batches_path)
theta_test_trans = theta_test.T
theta_test_trans = theta_test_trans.sort_index()
return theta_test_trans
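# A hedged usage sketch (not part of the original module). It assumes batches and a
# dictionary were prepared beforehand with BigARTM's BatchVectorizer, and that
# params_string is laid out as [decor, n1, spb, stb, n2, sp1, st1, n3, B], matching
# set_params above. All paths and numeric values are illustrative only.
#
# batch_vectorizer = artm.BatchVectorizer(data_path='batches', data_format='batches')
# tm = Topic_model(experiments_path='experiments', S=100)
# tm.init_model([1e5, 10, 0.1, 0.1, 10, -0.5, -0.5, 10, 10], dict_path='dictionary.txt')
# tm.train(batch_vectorizer)
# tm.print_topics()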
| 2.203125
| 2
|
idaes/tests/test_headers.py
|
OOAmusat/idaes-pse
| 0
|
12778510
|
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
"""
Test that headers are on all files
"""
# stdlib
from pathlib import Path
import os
# third-party
import pytest
import yaml
addheader_add = pytest.importorskip("addheader.add", reason="`addheader` package is not available")
@pytest.fixture
def package_root():
"""Determine package root.
"""
import idaes
return Path(idaes.__file__).parent
@pytest.fixture
def patterns(package_root):
"""Grab glob patterns from config file.
"""
conf_file = package_root.parent / "addheader.yml"
if not conf_file.exists():
print(f"Cannot load configuration file from '{conf_file}'. Perhaps this is not development mode?")
return None
with open(conf_file) as f:
conf_data = yaml.safe_load(f)
print(f"Patterns for finding files with headers: {conf_data['patterns']}")
return conf_data["patterns"]
@pytest.mark.unit
def test_headers(package_root, patterns):
if patterns is None:
print(f"ERROR: Did not get glob patterns: skipping test")
else:
# modify patterns to match the files that should have headers
ff = addheader_add.FileFinder(package_root, glob_patterns=patterns)
has_header, missing_header = addheader_add.detect_files(ff)
# ignore empty files (probably should add option in 'detect_files' for this)
nonempty_missing_header = list(filter(lambda p: p.stat().st_size > 0, missing_header))
#
if len(nonempty_missing_header) > 0:
pfx = str(package_root.resolve())
pfx_len = len(pfx)
file_list = ", ".join([str(p)[pfx_len + 1:] for p in nonempty_missing_header])
print(f"Missing headers from files under '{pfx}{os.path.sep}': {file_list}")
# uncomment to require all files to have headers
assert len(nonempty_missing_header) == 0
| 1.96875
| 2
|
Competitive Programming/System Design/Design Parking System.py
|
shreejitverma/GeeksforGeeks
| 2
|
12778511
|
'''https://leetcode.com/problems/design-parking-system/
1603. Design Parking System
Easy
Design a parking system for a parking lot. The parking lot has three kinds of parking spaces:
big, medium, and small, with a fixed number of slots for each size.
Implement the ParkingSystem class:
• ParkingSystem(int big, int medium, int small) Initializes object of the ParkingSystem class.
The number of slots for each parking space are given as part of the constructor.
• bool addCar(int carType) Checks whether there is a parking space of carType
for the car that wants to get into the parking lot.
carType can be of three kinds: big, medium, or small,
which are represented by 1, 2, and 3 respectively.
A car can only park in a parking space of its carType.
If there is no space available, return false, else park the car in that size space and return true.
Example 1:
Input
["ParkingSystem", "addCar", "addCar", "addCar", "addCar"]
[[1, 1, 0], [1], [2], [3], [1]]
Output
[null, true, true, false, false]
Explanation
ParkingSystem parkingSystem = new ParkingSystem(1, 1, 0);
parkingSystem.addCar(1); // return true because there is 1 available slot for a big car
parkingSystem.addCar(2); // return true because there is 1 available slot for a medium car
parkingSystem.addCar(3); // return false because there is no available slot for a small car
parkingSystem.addCar(1); // return false because there is no available slot for a big car. It is already occupied.
Constraints:
• 0 <= big, medium, small <= 1000
• carType is 1, 2, or 3
• At most 1000 calls will be made to addCar'''
# Time: O(1)
# Space: O(1)
class ParkingSystem(object):
def __init__(self, big, medium, small):
"""
:type big: int
:type medium: int
:type small: int
"""
self.__space = [0, big, medium, small]
def addCar(self, carType):
"""
:type carType: int
:rtype: bool
"""
if self.__space[carType] > 0:
self.__space[carType] -= 1
return True
return False
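# A short usage sketch (not part of the original solution), mirroring Example 1
# from the problem statement above.
if __name__ == '__main__':
    parking_system = ParkingSystem(1, 1, 0)
    print(parking_system.addCar(1))  # True: the single big slot is free
    print(parking_system.addCar(2))  # True: the single medium slot is free
    print(parking_system.addCar(3))  # False: there are no small slots
    print(parking_system.addCar(1))  # False: the big slot is already occupied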
| 4.03125
| 4
|
aspects.py
|
alexeifigueroa/ASEPetSynthesizer
| 0
|
12778512
|
'''
Created on Feb 5, 2018
@author: <NAME>
'''
import CONSTANTS,time
def log_arguments(f):
"""
    @f: Function to be wrapped with the logging of its arguments
"""
def logger(*args,**kwargs):
"""
wrapping function, it logs the arguments of the decorated function
"""
if CONSTANTS.LOG_ARGUMENTS:
#print("Function "+f.__name__+" called:")
print("Positional Arguments ")
for a in args:
print(a)
print("keyword arguments ")
            for k,v in kwargs.items():
                print(k+" = "+str(v))
return f(*args,**kwargs)
return logger
def time_execution(f):
"""
    @f: Function to be wrapped with the logging of its execution time
"""
def timing_wrapper(*args,**kwargs):
"""
wrapping function, it logs the execution time of the decorated function
"""
        if CONSTANTS.TIME_EXECUTION:
            start=time.time()
        result=f(*args,**kwargs)
        if CONSTANTS.TIME_EXECUTION:
            end=time.time()
            print("Execution time: "+str(end-start)+" seconds.")
        return result
return timing_wrapper
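# A brief usage sketch (not part of the original module). It assumes the CONSTANTS
# module defines the LOG_ARGUMENTS and TIME_EXECUTION flags checked above.
if __name__ == '__main__':
    @log_arguments
    @time_execution
    def add(a, b=0):
        return a + b
    print(add(1, b=2))  # logs the arguments and the execution time when the flags are set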
| 3.609375
| 4
|
botlistbot/search.py
|
Rostgnom/BotListBot
| 66
|
12778513
|
<filename>botlistbot/search.py
import re
from peewee import fn
from botlistbot import settings
from botlistbot.models import Bot
from botlistbot.models import Category
from botlistbot.models import Keyword
from botlistbot.models.revision import Revision
def search_bots(query):
query = query.lower().strip()
split = query.split(' ')
# easter egg
if query in ('awesome bot', 'great bot', 'superb bot', 'best bot', 'best bot ever'):
return [Bot.by_username('@botlistbot')]
# exact results
    where_query = (
        (fn.lower(Bot.username).contains(query) |
         fn.lower(Bot.name) << split |
         fn.lower(Bot.extra) ** query) &
        (Bot.revision <= Revision.get_instance().nr) &
        (Bot.approved == True) &
        (Bot.disabled == False)
    )
results = set(Bot.select().distinct().where(where_query))
# keyword results
    keyword_results = Bot.select(Bot).join(Keyword).where(
        (fn.lower(Keyword.name) << split) &
        (Bot.revision <= Revision.get_instance().nr) &
        (Bot.approved == True) &
        (Bot.disabled == False)
    )
results.update(keyword_results)
# many @usernames
usernames = re.findall(settings.REGEX_BOT_ONLY, query)
if usernames:
try:
bots = Bot.many_by_usernames(usernames)
print([b.username for b in bots])
results.update(bots)
except Bot.DoesNotExist:
pass
return list(results)
def search_categories(query):
query = query.lower().strip()
categories = Category.select().where(
(fn.lower(Category.name).contains(query)) |
(fn.lower(Category.extra).contains(query))
)
return categories
| 2.390625
| 2
|
mae_envs/modules/__init__.py
|
bglick13/multi-agent-emergence-environments
| 1,317
|
12778514
|
from .module import *
from .util import *
| 1.140625
| 1
|
mopidy_mopify/mem.py
|
dirkgroenen/mopidy-mopify
| 504
|
12778515
|
<reponame>dirkgroenen/mopidy-mopify
queuemanager = None
localfiles = None
| 1.0625
| 1
|
nose_config.py
|
alazanman/py_epg_tests
| 0
|
12778516
|
# -*- coding: utf-8 -*-
# from nose.tools import *
# import nose
# from nose import *
# import pytest
import nose
import json
import os.path
import importlib
import jsonpickle
from time import sleep
from fixture.app import Application
from fixture.db import DbFixture
from fixture.rest import RestApi
config_file = None
config_file_name = "config_file.json"
app = None
db = None
rest = None
def load_config(file_name='config_file.json'):
global config_file
if config_file is None:
config_file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), file_name)
with open(config_file_path) as f:
config_file = json.load(f)
return config_file
def set_app():
global app
# browser = request.config_file.getoption("--browser")
# web_config = load_config(request.config.getoption("--config_file"))['web']
web_config = load_config(config_file_name)['web']
other_config = load_config(config_file_name)['other']
if app is None or not app.is_valid():
app = Application(browser=other_config['browser'], base_url=web_config['baseUrl'])
print 'SET_APP', app
app.session.ensure_login(web_config['username'], web_config['password'])
return app
def stop_app():
sleep(1)
if app:
app.session.ensure_logout()
app.destroy()
print 'STOP_APP', app
return app
def set_db():
global db
# db_config = load_config(request.config.getoption("--config"))['db']
db_config = load_config(config_file_name)['db']
if db is None or not db.is_valid():
db = DbFixture(database=db_config['database'], user=db_config['user'], password=db_config['password'], host=db_config['host'], port=db_config['port'])
print 'SET_DB', db
return db
def stop_db():
if db:
db.destroy()
print 'STOP_DB', db
return db
def set_rest():
global rest
# rest_config = load_config(request.config.getoption("--config"))['web']
web_config = load_config(config_file_name)['web']
# if restfixture is None or not restfixture.is_valid():
if rest is None:
rest = RestApi(base_url=web_config['baseUrl'])
print 'SET_REST', rest
rest.auth(web_config['username'], web_config['password'])
return rest
def stop_rest():
if rest:
rest.destroy()
print 'STOP_REST', rest
return rest
def check_ui():
# global config_file
return load_config(config_file_name)['other']['check_ui'] == 'True'
# def nosetests_addoption(parser):
# parser.add_option("--browser", action="store", default="chrome")
# parser.add_option("--target", action="store", default="target.json")
# parser.add_option("--check_ui", action="store_true")
# print nose.config.Config().options
# # nose.config.Config().options = {"--browser":"chrome"}
# print nose.config.Config().options
# parser = nose.config.Config().getParser()
# # parser.add_option("--browser")
# parser.add_option("--browser", action="store", default="chrome")
# parser.add_option("--config_file", action="store", default="config_file.json")
# parser.add_option("--check_ui", action="store_true", default=True)
# print nose.config.Config().options
def load_from_json(file):
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), "data/%s" % file)) as f:
return jsonpickle.decode(f.read())
def write_generated_data(file_path, data_list):
json_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), file_path)
with open(json_file, "w") as f:
jsonpickle.set_encoder_options("json", indent=2)
f.write(jsonpickle.encode(data_list))
| 1.953125
| 2
|
Project 1 - Data Modeling with PostgreSQL/sql_queries.py
|
gitHub882000/Data-Engineering-Nanodegree
| 2
|
12778517
|
# DROP TABLES
songplay_table_drop = "DROP TABLE IF EXISTS songplays;"
user_table_drop = "DROP TABLE IF EXISTS users;"
song_table_drop = "DROP TABLE IF EXISTS songs;"
artist_table_drop = "DROP TABLE IF EXISTS artists;"
time_table_drop = "DROP TABLE IF EXISTS time;"
# CREATE TABLES
songplay_table_create = ("""
CREATE TABLE IF NOT EXISTS songplays (
songplay_id SERIAL PRIMARY KEY,
start_time TIMESTAMP NOT NULL REFERENCES time(start_time),
user_id INT NOT NULL REFERENCES users(user_id),
level VARCHAR,
song_id VARCHAR REFERENCES songs(song_id),
artist_id VARCHAR REFERENCES artists(artist_id),
session_id INT,
location TEXT,
user_agent TEXT
);
""")
user_table_create = ("""
CREATE TABLE IF NOT EXISTS users (
user_id INT PRIMARY KEY,
first_name VARCHAR NOT NULL,
last_name VARCHAR NOT NULL,
gender CHAR(1) NOT NULL CHECK (gender = 'F' OR gender='M'),
level VARCHAR NOT NULL
);
""")
song_table_create = ("""
CREATE TABLE IF NOT EXISTS songs (
song_id VARCHAR PRIMARY KEY,
title VARCHAR NOT NULL,
artist_id VARCHAR NOT NULL REFERENCES artists(artist_id),
year INT NOT NULL,
duration FLOAT NOT NULL
);
""")
artist_table_create = ("""
CREATE TABLE IF NOT EXISTS artists (
artist_id VARCHAR PRIMARY KEY,
name VARCHAR NOT NULL,
location TEXT NOT NULL,
latitude FLOAT NOT NULL,
longitude FLOAT NOT NULL
);
""")
time_table_create = ("""
CREATE TABLE IF NOT EXISTS time (
start_time TIMESTAMP PRIMARY KEY,
hour INT NOT NULL,
day INT NOT NULL,
week INT NOT NULL,
month INT NOT NULL,
year INT NOT NULL,
weekday VARCHAR NOT NULL
);
""")
# INSERT RECORDS
songplay_table_insert = ("""
INSERT INTO songplays(start_time, user_id, level, song_id, artist_id, session_id, location, user_agent)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s)
ON CONFLICT DO NOTHING
""")
user_table_insert = ("""
INSERT INTO users(user_id, first_name, last_name, gender, level)
VALUES (%s, %s, %s, %s, %s)
ON CONFLICT (user_id) DO
UPDATE SET level = EXCLUDED.level
""")
song_table_insert = ("""
INSERT INTO songs(song_id, title, artist_id, year, duration)
VALUES (%s, %s, %s, %s, %s)
ON CONFLICT (song_id) DO NOTHING
""")
artist_table_insert = ("""
INSERT INTO artists(artist_id, name, location, latitude, longitude)
VALUES (%s, %s, %s, %s, %s)
ON CONFLICT (artist_id) DO NOTHING
""")
time_table_insert = ("""
INSERT INTO time(start_time, hour, day, week, month, year, weekday)
VALUES (%s, %s, %s, %s, %s, %s, %s)
ON CONFLICT (start_time) DO NOTHING
""")
# FIND SONGS
song_select = ("""
SELECT song_id, artist_id
FROM songs NATURAL JOIN artists
WHERE title=%s AND name=%s AND duration=%s
""")
# QUERY LISTS
create_table_queries = [user_table_create, artist_table_create, song_table_create, time_table_create, songplay_table_create]
drop_table_queries = [songplay_table_drop, user_table_drop, song_table_drop, artist_table_drop, time_table_drop]
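# A hedged usage sketch (not part of the original module): it assumes a local
# PostgreSQL database named "sparkifydb" reachable via psycopg2 with the usual
# project credentials, none of which are defined in this file.
#
# import psycopg2
# conn = psycopg2.connect("host=127.0.0.1 dbname=sparkifydb user=student password=student")
# cur = conn.cursor()
# for query in drop_table_queries + create_table_queries:
#     cur.execute(query)
# conn.commit()
# conn.close()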
| 2.625
| 3
|
examples/shadowing2.py
|
joeldentici/python_stepper
| 1
|
12778518
|
(lambda x: lambda x: x + x)(5)(5)
| 1.414063
| 1
|
tests/testDifferences.py
|
yusufsarikaya/Image_Comparison
| 19
|
12778519
|
<filename>tests/testDifferences.py<gh_stars>10-100
###############################
#
# (c) <NAME> 2017
# Student No: C14714071
# Course: DT228
# Date: 14-10-2017
#
# Title: Testing methods for finding differences between images
import numpy as np
import cv2
import easygui
imagesPath = 'images/'
outputPath = 'output/'
fileExtension = '.jpg'
pcb1 = cv2.imread(imagesPath + 'pcb1.jpg')
pcb2 = cv2.imread(imagesPath + 'pcb2.jpg')
# pcb1 = cv2.fastNlMeansDenoisingColored(pcb1, None, 10, 10, 7, 21)
# pcb2 = cv2.fastNlMeansDenoisingColored(pcb2, None, 10, 10, 7, 21)
# pcb1 = cv2.bilateralFilter(pcb1, 9, 75, 75)
# pcb2 = cv2.bilateralFilter(pcb2, 9, 75, 75)
img1Size = pcb1.shape
img2Size = pcb2.shape
def getDistance(x1, y1, x2, y2):
# return sqrt(pow(x1 - x2, 2) + pow(y1 - y2, 2))
return ((x1 - x2)**2 + (y1 - y2)**2)**(1/2.0)
def getMatches(img1, img2):
akaze = cv2.AKAZE_create()
kp1, desc1 = akaze.detectAndCompute(img1, None)
kp2, desc2 = akaze.detectAndCompute(img2, None)
bf = cv2.BFMatcher(normType = cv2.NORM_HAMMING, crossCheck = True)
matches = bf.match(desc1, desc2)
matches = sorted(matches, key = lambda match:match.distance)
matchedCoordinates = []
for match in matches:
keyPoint1 = kp1[match.queryIdx]
keyPoint2 = kp2[match.trainIdx]
currentMatch = {
'pt1': {
'x': keyPoint1.pt[0],
'y': keyPoint1.pt[1]
},
'pt2': {
'x': keyPoint2.pt[0],
'y': keyPoint2.pt[1]
},
'distance': match.distance
}
matchedCoordinates.append(currentMatch)
return matchedCoordinates
def getDifferences(img1, img2):
akaze = cv2.AKAZE_create()
kp1, desc1 = akaze.detectAndCompute(img1, None)
kp2, desc2 = akaze.detectAndCompute(img2, None)
# https://docs.opencv.org/3.0-beta/doc/py_tutorials/py_feature2d/py_matcher/py_matcher.html
bf = cv2.BFMatcher(normType = cv2.NORM_HAMMING, crossCheck = True)
matches = bf.match(desc1, desc2)
for match in matches:
kp1[match.queryIdx] = None
kp2[match.trainIdx] = None
differences1 = []
differences2 = []
for keyPoint in kp1:
if keyPoint != None:
currentKP = {
'x': keyPoint.pt[0],
'y': keyPoint.pt[1]
}
differences1.append(currentKP)
for keyPoint in kp2:
if keyPoint != None:
currentKP = {
'x': keyPoint.pt[0],
'y': keyPoint.pt[1]
}
differences2.append(currentKP)
return (differences1, differences2)
# matches = getMatches(pcb1, pcb2)
(img1Dif, img2Dif) = getDifferences(pcb1, pcb2)
mask = np.zeros((img1Size[0], img1Size[1], 1), np.uint8)
mask[:, :] = 0
for dif in img1Dif:
    mask[int(dif['y']), int(dif['x'])] = 255
cv2.imwrite(outputPath + 'mask1' + fileExtension, mask)
# shape = cv2.getStructuringElement(cv2.MORPH_RECT, (10, 10))
# mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, shape)
shape = cv2.getStructuringElement(cv2.MORPH_RECT, (10, 10))
mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, shape)
shape = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
mask = cv2.erode(mask, shape, iterations = 1)
mask = cv2.dilate(mask, shape, iterations = 10)
cv2.imshow('Mask', mask)
cv2.waitKey(0)
cv2.imwrite(outputPath + 'mask2' + fileExtension, mask)
_, contours, _ = cv2.findContours(image = mask.copy(), mode = cv2.RETR_TREE, method = cv2.CHAIN_APPROX_NONE)
bestContour = contours[0]
maxArea = 0
for contour in contours:
# [1] OpenCV, 'Contour Approximation', 2015. [Online].
# Available: http://docs.opencv.org/3.1.0/dd/d49/tutorial_py_contour_features.html
# [Accessed: 2017-10-20]
arcPercentage = 0.01
epsilon = cv2.arcLength(curve = contour, closed = True) * arcPercentage
corners = cv2.approxPolyDP(curve = contour, epsilon = epsilon, closed = True)
x, y, w, h = cv2.boundingRect(points = corners)
currentArea = w * h
if currentArea > maxArea:
maxArea = currentArea
bestContour = corners
# Ignore points
if currentArea > 1:
cv2.rectangle(pcb1, (x, y), (x + w, y + h), (0, 0, 255), 3)
# x, y, w, h = cv2.boundingRect(points = bestContour)
# cv2.rectangle(pcb1, (x, y), (x + w, y + h), (0, 0, 255), 3)
# cv2.imshow('mask', mask)
cv2.imshow('pcb1', pcb1)
cv2.imwrite(outputPath + 'diffs' + fileExtension, pcb1)
# cv2.imshow('pcb2', pcb2)
cv2.waitKey(0)
# Creating a mask with white pixels
# mask = np.zeros((img1Size[0], img1Size[1], 1), np.uint8)
# mask[:, :] = 0
# for match in matches:
# pt2 = match['pt2']
# mask[int(pt2['y']), int(pt2['x'])] = 255
# Useful for combining pixels
# shape = cv2.getStructuringElement(cv2.MORPH_RECT, (30, 10))
# mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, shape)
# shape = cv2.getStructuringElement(cv2.MORPH_RECT, (10, 30))
# mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, shape)
# shape = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (30, 10))
# mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, shape)
# shape = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 30))
# mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, shape)
# mask = cv2.bitwise_not(mask)
# _, contours, h = cv2.findContours(image = mask.copy(), mode = cv2.RETR_TREE, method = cv2.CHAIN_APPROX_NONE)
# cv2.drawContours(pcb2, contours, -1, (0, 0, 255), 3)
# WORKS ???
# TODO Invert colours so the differences are detected instead of the same things
# imgArea = img2Size[0] * img2Size[1]
# maxArea = 0
# bestContour = contours[0]
# for contour in contours:
# arcPercentage = 0.01
# epsilon = cv2.arcLength(curve = contour, closed = True) * arcPercentage
# corners = cv2.approxPolyDP(curve = contour, epsilon = epsilon, closed = True)
# x, y, w, h = cv2.boundingRect(points = corners)
# currentArea = w * h
#
# if currentArea < imgArea and maxArea < currentArea:
# maxArea = currentArea
# bestContour = corners
#
# cv2.rectangle(pcb2, (x, y), (x + w, y + h), (0, 0, 255), 3)
# cv2.drawContours(pcb2, bestContour, -1, (0, 0, 255), 3)
# x, y, w, h = cv2.boundingRect(points = bestContour)
# cv2.rectangle(pcb2, (x, y), (x + w, y + h), (0, 0, 255), 3)
# newImg = pcb2[y : y + h, x : x + w]
# newMask = mask[y : y + h, x : x + w]
#
# _, contours, h = cv2.findContours(image = newMask.copy(), mode = cv2.RETR_TREE, method = cv2.CHAIN_APPROX_NONE)
# for contour in contours:
# arcPercentage = 0.01
# epsilon = cv2.arcLength(curve = contour, closed = True) * arcPercentage
# corners = cv2.approxPolyDP(curve = contour, epsilon = epsilon, closed = True)
# x, y, w, h = cv2.boundingRect(points = corners)
# cv2.rectangle(newImg, (x, y), (x + w, y + h), (0, 0, 255), 3)
#
# cv2.imshow('mask', newMask)
# cv2.imshow('img', newImg)
# _, contours, hierarchy = cv2.findContours(image = mask.copy(), mode = cv2.RETR_CCOMP, method = cv2.CHAIN_APPROX_NONE)
# for i, contour in enumerate(contours):
# if hierarchy[0][i][0] > 0:
# cv2.drawContours(pcb2, contour, -1, (0, 0, 255), 3)
# cv2.imshow('mask', mask)
# # cv2.imshow('pcb1', pcb1)
# cv2.imshow('pcb2', pcb2)
# cv2.waitKey(0)
| 3
| 3
|
ctutils/driver/parallel_free_flame.py
|
Combustion-Zhen/pyutils
| 0
|
12778520
|
<reponame>Combustion-Zhen/pyutils
import numpy as np
from mpi4py import MPI
from pyutils.ctutils.driver.chem_uq import free_flame
import matplotlib.pyplot as plt
# parallel init
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
n = 10
sl, dl = free_flame(n, 'H2', T=300, p=101325., phi=0.6)
# data collection
sendbuf = sl
recvbuf = None
if rank == 0:
recvbuf = np.empty([size, n])
comm.Gather(sendbuf, recvbuf, root=0)
sl_all = recvbuf
sendbuf = dl
recvbuf = None
if rank == 0:
recvbuf = np.empty([size, n])
comm.Gather(sendbuf, recvbuf, root=0)
dl_all = recvbuf
if rank == 0:
np.savez('free_flame', sl=sl_all.flatten(), dl=dl_all.flatten())
"""
if rank == 0:
print(sl_all)
print(dl_all)
plt.scatter(dl_all, sl_all)
plt.savefig('fig_test.png')
"""
| 2.015625
| 2
|
util.py
|
samirsen/image-generator
| 10
|
12778521
|
'''
util.py
'''
import os.path
import h5py
import numpy as np
import constants
import skimage.io
import skimage.transform
from scipy.io import loadmat
import glob
import os
import cPickle as pickle
import torch
from itertools import izip_longest
from glove import Glove
import torch.nn as nn
import matplotlib.pyplot as plt
# Makes the directories of they don't already exist
def make_directories():
output_path = constants.SAVE_PATH
if not os.path.exists(output_path):
os.makedirs(output_path)
print("Made output directory")
else:
print("WARNING: starting training with an existing outputs directory")
if not os.path.exists(output_path + 'weights/'):
os.makedirs(output_path + 'weights/')
print("Made weights directory")
if not os.path.exists(output_path + 'images/'):
os.makedirs(output_path + 'images/')
print("Made images directory")
# Loads a map from image file names to 'test', 'train', or 'val'
# Used in other functions to split data
def load_dataset_map():
ids = loadmat('data_constants/setid.mat')
# Flip train and test examples since otherwise there would be 6000 test
train_ids = ids['tstid'][0] - 1
test_ids = ids['trnid'][0] - 1
val_ids = ids['valid'][0] - 1
print(len(train_ids), len(val_ids), len(test_ids), "Train, val, test examples, respectively")
filenames = [name for name in os.listdir('Data/' + constants.ENTIRE_DATASET) if name.endswith('.jpg')]
image_paths = sorted(filenames)
dataset_map = {}
for i, name in enumerate(image_paths):
if i in train_ids:
dataset_map[name] = 'train'
elif i in test_ids:
dataset_map[name] ='test'
elif i in val_ids:
dataset_map[name] ='val'
else:
print("Invalid ID!")
return dataset_map
def load_flowers_capt_dict():
"""Use pickle to load the flowers captions"""
flowers_capt_dict = pickle.load(open( constants.FLOWERS_CAP_DICT, "rb" ))
return flowers_capt_dict
def load_coco_capt_dict():
"""Use pickle to load the MSCOCO captions"""
coco_capt_dict = pickle.load(open(constants.COCO_CAP_DICT, "rb"))
return coco_capt_dict
# Adapted from https://github.com/paarthneekhara/text-to-image
# Takes the directoy and file name of the hdf5 file that contains the word vectors
# Returns a dict from image to list of captions
def load_text_vec(directory, file_name, dataset_map):
h = h5py.File(os.path.join(directory, file_name))
train_captions, val_captions, test_captions = {}, {}, {}
for item in h.iteritems():
name = item[0]
if dataset_map[name] == 'train':
train_captions[name] = np.array(item[1])
elif dataset_map[name] =='val':
val_captions[name] = np.array(item[1])
elif dataset_map[name] =='test':
test_captions[name] = np.array(item[1])
else:
print("Invalid name")
return train_captions, val_captions, test_captions
# Gets images for the main function
def get_images(directory, file_name, save_path):
if os.path.exists(save_path):
image_dicts = torch.load(save_path)
train_image_dict, val_image_dict, test_image_dict = image_dicts
print("Loaded images")
else:
print("Loading images and separating into train/val/test sets")
path = os.path.join(directory, file_name)
filenames = train_captions.keys() + val_captions.keys() + test_captions.keys()
train_image_dict, val_image_dict, test_image_dict = util.load_images(path, filenames, dataset_map)
image_dicts = [train_image_dict, val_image_dict, test_image_dict]
torch.save(image_dicts, save_path)
return train_image_dict, val_image_dict, test_image_dict
# Takes in the directory and a list of file names and returns a dict of file name -> images
def load_images(directory, filenames, dataset_map):
train_image_dict, val_image_dict, test_image_dict = {}, {}, {}
for name in filenames:
image_file = os.path.join(directory + name)
curr_image = skimage.io.imread(image_file)
# Resize image to correct size as float 32
resized_image = skimage.transform.resize(curr_image, (constants.IMAGE_SIZE, constants.IMAGE_SIZE)).astype('float32')
if dataset_map[name] =='train':
train_image_dict[name] = resized_image
elif dataset_map[name] =='val':
val_image_dict[name] = resized_image
elif dataset_map[name] =='test':
test_image_dict[name] = resized_image
else:
print("Invalid name")
return train_image_dict, val_image_dict, test_image_dict
# custom weights initialization called on netG and netD
# from https://github.com/pytorch/examples/blob/master/dcgan/main.py
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
elif classname.find('Embedding') != -1:
m.weight.data.fill_(1.0)
elif classname.find('LSTM') != -1:
nn.init.xavier_uniform(m.weight)
m.bias.data.fill_(0)
def preprocess2(batch_input):
"""Inputs for self.embeddings in TextModel(). Batch_input must be numpy padded"""
batch_size, sent_len = batch_input.shape
offsets = [sent_len * i for i in range(batch_size)]
return batch_input.flatten(), offsets
def preprocess(batch_input):
"""If batch_input isn't numpy"""
glove = Glove()
flatten, offsets = [], []
index = 0
for ex in batch_input:
ex = ex.replace(',', ' ')
words = ex.strip('.').split()
result = []
for w in words:
try:
idx = glove.get_index(w)
result.append(idx)
except:
continue
# words = [glove.get_index(word) for word in words]
offsets.append(index)
flatten.extend(result)
index += len(result)
return torch.LongTensor(flatten), torch.LongTensor(offsets)
# https://github.com/sunshineatnoon/Paper-Implementations/blob/master/BEGAN/began.py
def adjust_learning_rate(optimizer, niter):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = constants.LR * (0.95 ** (niter // constants.LR_DECAY_EVERY))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return optimizer
# From https://stackoverflow.com/questions/434287/what-is-the-most-pythonic-way-to-iterate-over-a-list-in-chunks
# Iterates over an array in chunks
def grouper(array, n):
args = [iter(array)] * n
return izip_longest(*args)
# Show the generated image improves over time
def print_images(generated):
for img in generated:
image_done = img.data.numpy()
swap_image = np.swapaxes(image_done,1,2)
swap_image = np.swapaxes(swap_image,2,3)
plt.imshow(swap_image[0])
plt.show()
def get_text_description(text_caption_dict, batch_keys):
g_idx = [np.random.randint(len(text_caption_dict[batch_keys[0]])) for i in range(len(batch_keys))]
g_text_des = np.array([text_caption_dict[k][i] for k,i in zip(batch_keys, g_idx)])
# g_text_des = np.expand_dims(g_text_des, axis=0) ONLY NEED FOR 1 DIM
return g_text_des
def choose_wrong_image(image_dict, batch_keys):
wrong_image = []
for k in batch_keys:
wrong_key = np.random.choice(image_dict.keys())
while wrong_key == k:
wrong_key = np.random.choice(image_dict.keys())
wrong_image.append(image_dict[wrong_key])
wrong_image = np.array(wrong_image)
wrong_image = augment_image_batch(wrong_image)
wrong_image = np.swapaxes(wrong_image, 2, 3)
wrong_image = np.swapaxes(wrong_image, 1, 2)
return wrong_image
# Finds the real image for the given batch data
def choose_real_image(image_dict, batch_keys):
real_img = np.array([image_dict[k] for k in batch_keys])
real_img = augment_image_batch(real_img)
real_img = np.swapaxes(real_img, 2, 3)
real_img = np.swapaxes(real_img, 1, 2)
return real_img
def augment_image_batch(images):
batch_size = images.shape[0]
for i in range(batch_size):
curr = images[i, :, :, :]
if np.random.rand() > .5:
curr = np.flip(curr, 1)
images[i, :, :, :] = curr
return images
| 2.1875
| 2
|
setup.py
|
flyingcircusio/pinner
| 0
|
12778522
|
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='pinner',
version='0.1.0',
description='',
long_description=long_description,
url='',
author='<NAME>',
author_email='<EMAIL>',
# For a list of valid classifiers, see https://pypi.org/classifiers/
classifiers=[
'Programming Language :: Python :: 3',
],
package_dir={'': 'src'},
packages=find_packages(where='src'),
python_requires='>=3.5, <4',
install_requires=[
'github-api-v3',
'py-dotenv'
],
extras_require={
'dev': ['check-manifest'],
'test': ['pytest']
},
tests_require=['pytest'],
setup_requires=['pytest-runner'],
entry_points={
'console_scripts': [
'pinner=pinner.main:main',
],
},
)
| 1.492188
| 1
|
erin/db/abc.py
|
DiscordFederation/Enigma
| 6
|
12778523
|
from abc import ABC, abstractmethod
class DatabaseDriverBase(ABC):
"""
Force implementation of some methods in the event a new driver is
added to the database package.
"""
@abstractmethod
async def upsert(self, entity, states):
pass
@abstractmethod
async def get(self, entity, state):
pass
@abstractmethod
async def increment(self, entity, state, value):
pass
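# A minimal sketch of a concrete driver satisfying the interface above; the
# 'InMemoryDriver' name and its dict-based storage are illustrative assumptions,
# not part of the package.
class InMemoryDriver(DatabaseDriverBase):
    def __init__(self):
        self._store = {}

    async def upsert(self, entity, states):
        # Merge the given states into the record kept for this entity.
        self._store.setdefault(entity, {}).update(states)

    async def get(self, entity, state):
        # Return a single stored state value, or None if it is missing.
        return self._store.get(entity, {}).get(state)

    async def increment(self, entity, state, value):
        # Add 'value' to a numeric state and return the new total.
        record = self._store.setdefault(entity, {})
        record[state] = record.get(state, 0) + value
        return record[state]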
| 3.234375
| 3
|
kodi_watched_2_plex.py
|
stevezau/kodi_watched_2_plex
| 0
|
12778524
|
#!/usr/bin/env python
import re
import logging
import argparse
import requests
from plexapi.myplex import MyPlexAccount
logging.basicConfig(format='%(message)s', level=logging.INFO)
logging.getLogger('plexapi').setLevel(logging.CRITICAL)
log = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
parser.add_argument("kodi_api_url", type=str, help="Kodi API URL IE: http://192.168.0.190:8080")
parser.add_argument("plex_username", type=str, help="Plex Account Username")
parser.add_argument("plex_password", type=str, help="Plex Account Password")
parser.add_argument("plex_server_name", type=str, help="Plex Server Name IE: media")
def get_json(rsp):
rsp.raise_for_status()
data = rsp.json()
if 'error' in data:
raise Exception('Kodi API Error: %s' % data['error']['message'])
return data.get('result', {})
def get_movies(api_url):
payload = {
'jsonrpc': '2.0', 'method': 'VideoLibrary.GetMovies',
'filter': {'field': 'playcount', 'operator': 'greaterthan', 'value': '0'},
'params': {'properties': ['playcount', 'imdbnumber', 'lastplayed']},
'id': 'libMovies'
}
data = get_json(requests.post(api_url, json=payload))
return dict((m['imdbnumber'], m) for m in data.get('movies', []))
def get_tv(api_url):
tv_shows = {}
payload_tv = {
'jsonrpc': '2.0', 'method': 'VideoLibrary.GetTVShows',
'params': {'properties': ['uniqueid']},
'id': 'libTVShows'
}
data = get_json(requests.post(api_url, json=payload_tv))
tv_shows_data = dict((m['tvshowid'], m) for m in data.get('tvshows', []))
payload_ep = {
'jsonrpc': '2.0', 'method': 'VideoLibrary.GetEpisodes',
'params': {'properties': ['season', 'episode', 'uniqueid', 'playcount', 'tvshowid']},
'id': 'libMovies'
}
data = get_json(requests.post(api_url, json=payload_ep))
for ep in data.get('episodes', []):
tvdb_id = tv_shows_data.get(ep['tvshowid'], {}).get('uniqueid', {}).get('unknown')
if not tvdb_id:
continue
if tvdb_id not in tv_shows:
tv_shows[tvdb_id] = {}
tv_show = tv_shows[tvdb_id]
if ep['season'] not in tv_show:
tv_show[ep['season']] = {}
tv_show_season = tv_show[ep['season']]
tv_show_season[ep['episode']] = ep
return tv_shows
if __name__ == '__main__':
args = parser.parse_args()
kodi_api_url = '%s/jsonrpc' % args.kodi_api_url.rstrip('/')
plex = None
try:
account = MyPlexAccount(args.plex_username, args.plex_password)
plex = account.resource(args.plex_server_name).connect()
except Exception as e:
log.exception('Error connecting to Plex %s' % str(e))
exit(1)
# TVShows
try:
log.info('Getting Kodi Episodes List')
kodi_episodes = get_tv(kodi_api_url)
log.info('Getting Plex TVShows')
plex_episodes = plex.library.section('TV Shows').search(unwatched=True, libtype='episode')
log.info('Sorting through Plex Episodes to detect watched from Kodi')
for episode in plex_episodes:
# Only support TheTVDB parsed shows
tvdb_match = re.search(r'thetvdb://([0-9]+)/', episode.guid)
if tvdb_match:
kodi_ep = kodi_episodes.get(tvdb_match.group(1), {}).get(episode.seasonNumber, {}).get(episode.index)
if kodi_ep:
if kodi_ep.get('playcount') > 0 and not episode.isWatched:
log.info('Marking episode %s S%sE%s as watched' %
(episode.grandparentTitle, episode.seasonNumber, episode.index))
episode.markWatched()
except Exception as e:
log.exception('Error processing TVShows %s' % str(e))
exit(1)
# Movies
try:
log.info('Getting Kodi Movie List')
kodi_movies = get_movies(kodi_api_url)
log.info('Getting Plex Movies')
plex_movies = plex.library.section('Movies').search(unwatched=True)
log.info('Sorting through Plex Movies to detect watched from Kodi')
for movie in plex_movies:
# Only support IMDB parsed movies
imdb_match = re.search(r'((?:nm|tt)[\d]{7})', movie.guid)
if imdb_match:
imdb_id = imdb_match.group(1)
kodi_movie = kodi_movies.get(imdb_id)
if kodi_movie:
if kodi_movie.get('playcount') > 0 and not movie.isWatched:
log.info('Marking movie %s as watched' % movie.title)
movie.markWatched()
except Exception as e:
log.critical('Error processing Movies %s' % str(e))
exit(1)
| 2.53125
| 3
|
code/Experiments/Tutorials/nn-from-scratch/Stanford_linearClassifier.py
|
matthijsvk/convNets
| 53
|
12778525
|
import numpy as np
from sklearn import datasets, linear_model
import matplotlib.pyplot as plt
N = 100 # number of points per class
D = 2 # dimensionality
K = 3 # number of classes
X = np.zeros((N*K,D)) # data matrix (each row = single example)
y = np.zeros(N*K, dtype='uint8') # class labels
for j in range(K):
ix = range(N*j,N*(j+1))
r = np.linspace(0.0,1,N) # radius
t = np.linspace(j*4,(j+1)*4,N) + np.random.randn(N)*0.2 # theta
X[ix] = np.c_[r*np.sin(t), r*np.cos(t)]
y[ix] = j
# let's visualize the data:
plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.Spectral)
# Train a Linear Classifier
# initialize parameters randomly
W = 0.01 * np.random.randn(D, K)
b = np.zeros((1, K))
# some hyperparameters
step_size = 1e-0
reg = 1e-3 # regularization strength
# gradient descent loop
num_examples = X.shape[0]
for i in range(200):
# evaluate class scores, [N x K]
scores = np.dot(X, W) + b
# compute the class probabilities
exp_scores = np.exp(scores)
probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True) # [N x K]
# compute the loss: average cross-entropy loss and regularization
correct_logprobs = -np.log(probs[range(num_examples), y])
data_loss = np.sum(correct_logprobs) / num_examples
reg_loss = 0.5 * reg * np.sum(W * W)
loss = data_loss + reg_loss
if i % 10 == 0:
print "iteration %d: loss %f" % (i, loss)
# compute the gradient on scores
dscores = probs
dscores[range(num_examples), y] -= 1
dscores /= num_examples
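# The three lines above implement the softmax cross-entropy gradient
# dL/dscores = probs - one_hot(y), averaged over the batch; the regularization
# gradient reg * W is added to dW separately below.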
# backpropagate the gradient to the parameters (W,b)
dW = np.dot(X.T, dscores)
db = np.sum(dscores, axis=0, keepdims=True)
dW += reg * W # regularization gradient
# perform a parameter update
W += -step_size * dW
b += -step_size * db
# evaluate training set accuracy
scores = np.dot(X, W) + b
predicted_class = np.argmax(scores, axis=1)
print('training accuracy: %.2f' % (np.mean(predicted_class == y)))
# plot the resulting classifier
h = 0.02
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = np.dot(np.c_[xx.ravel(), yy.ravel()], W) + b
Z = np.argmax(Z, axis=1)
Z = Z.reshape(xx.shape)
fig = plt.figure()
plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral, alpha=0.8)
plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.Spectral)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
#fig.savefig('spiral_linear.png')
| 3.46875
| 3
|
articles/imp/genfigs/flights_pdp.py
|
parrt/stratx
| 54
|
12778526
|
<reponame>parrt/stratx
from sklearn.utils import resample
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import normalize
from sklearn.ensemble import RandomForestRegressor
from timeit import default_timer as timer
from sklearn.utils import resample
import shap
from support import *
from stratx.featimp import *
from stratx.partdep import *
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
np.set_printoptions(precision=2, suppress=True, linewidth=300)#, threshold=1e10)
np.random.seed(3)
X, y, X_train, X_test, y_train, y_test = load_dataset("flights", "ARRIVAL_DELAY")
print(f"Avg arrival delay {y.mean()}")
# plt.scatter(X['DEPARTURE_TIME'], y, s=1, alpha=.5)
# plt.plot([0,np.max(X['DEPARTURE_TIME'])], [0,0], c='k', lw=.5)
# plt.xlabel("DEPARTURE_TIME")
# plt.ylabel("ARRIVAL_DELAY")
# plt.show()
#
# plt.scatter(X['SCHEDULED_DEPARTURE'], y, s=1, alpha=.5)
# plt.plot([0,np.max(X['SCHEDULED_DEPARTURE'])], [0,0], c='k', lw=.5)
# plt.xlabel("SCHEDULED_DEPARTURE")
# plt.ylabel("ARRIVAL_DELAY")
# plt.show()
#
I = importances(#X, y,
X_train, y_train,
catcolnames={'AIRLINE',
'ORIGIN_AIRPORT',
'DESTINATION_AIRPORT',
'FLIGHT_NUMBER',
'TAIL_NUMBER',
'DAY_OF_WEEK'},
normalize=False,
n_trials=1,
min_samples_leaf=20,
cat_min_samples_leaf=20
)
print(I)
exit()
#
# col = 'ORIGIN_AIRPORT'
# col = 'SCHEDULED_DEPARTURE'
# col = 'TAXI_OUT'
# col = 'FLIGHT_NUMBER'
# plot_stratpd_gridsearch(X, y, colname=col, targetname='delay',
# show_slope_lines=False,
# min_samples_leaf_values=(15,20,30),
# # min_slopes_per_x_values=(5,10,15,20),
# # min_samples_leaf=10,
# n_trials=10,
# show_impact=True,
# show_x_counts=True,
# # min_slopes_per_x=1
# )
# plt.show()
# df_test = pd.read_csv(f'data/flights-test.csv')
# X = df_test.drop('ARRIVAL_DELAY', axis=1)
# y = df_test['ARRIVAL_DELAY']
# print(f"Avg arrival delay {y.mean()}, sigma={np.std(y)}")
# col = 'AIR_TIME'
# plot_stratpd(X, y, colname=col, targetname='delay',
# min_samples_leaf=20,
# n_trials=1,
# show_impact=True,
# show_x_counts=True,
# yrange=(-20, 140)
# # min_slopes_per_x=1
# )
# plt.show()
# exit()
# for i in range(10):
# np.random.seed(i)
#
# col = 'ORIGIN_AIRPORT'
# uniq_catcodes, avg_per_cat, ignored, merge_ignored = \
# plot_catstratpd(X, y, colname=col, targetname='delay',
# leftmost_shifted_to_zero=False,
# min_y_shifted_to_zero=False,
# min_samples_leaf=5,
# n_trials=1,
# show_xticks=False,
# show_all_pdp=False,
# show_impact=True,
# yrange=(-50,450),
# figsize=(20,10))
#
# abs_avg = np.abs(avg_per_cat)
# a, b = np.nanmin(avg_per_cat), np.nanmax(avg_per_cat)
# m = np.nanmean(abs_avg)
# straddle_mean = np.nanmean(np.abs(avg_per_cat - np.nanmean(avg_per_cat)))
# print(f"mean {np.nanmean(avg_per_cat):6.1f}, abs mean {m:5.1f}, {straddle_mean :5.1f}, range {a:5.1f}..{b:5.1f} = {(b - a):5.1f}")
# plt.tight_layout()
# plt.savefig(f"/Users/parrt/Desktop/flight-{col}.pdf", pad_inches=0)
# plt.show()
# plot_catstratpd_gridsearch(X, y, 'ORIGIN_AIRPORT', 'ARRIVAL_DELAY',
# min_samples_leaf_values=(2, 5, 10, 15, 20, 30, 35, 40),
# show_all_cat_deltas=False, show_impact=True,
# show_xticks=False)
# plt.show()
# exit()
colname = 'TAIL_NUMBER'
uniq_catcodes, combined_avg_per_cat, ignored = \
plot_catstratpd(X, y, colname, 'ARRIVAL_DELAY',
min_samples_leaf=2,
yrange=(-125,125),
figsize=(14,4),
n_trials=1,
min_y_shifted_to_zero=False,
show_unique_cat_xticks=False,
show_impact=True,
verbose=False)
print("IGNORED", ignored)
plt.tight_layout()
# plt.savefig(f"/Users/parrt/Desktop/flight-fnum-cat-most_common.pdf", pad_inches=0)
plt.show()
# plot_stratpd_gridsearch(X, y, 'TAXI_IN', 'ARRIVAL_DELAY',
# min_samples_leaf_values=(3,5,10,15),
# min_slopes_per_x_values=(15,20,25,30,40,50),
# show_slope_lines=False,
# yrange=(-10,90)
# )
# I = importances(X, y,
# catcolnames={'AIRLINE',
# 'ORIGIN_AIRPORT',
# 'DESTINATION_AIRPORT',
# 'FLIGHT_NUMBER',
# 'DAY_OF_WEEK'},
# min_samples_leaf=5,
# cat_min_samples_leaf=2,
# n_trials=1,
# normalize=False)
# print(I)
# plot_stratpd(X, y, colname=col, targetname='delay',
# show_slope_lines=False,
# min_samples_leaf=5,
# n_trials=3,
# show_impact=False,
# show_x_counts=True,
# )
# # yrange=(-10,100))
# plt.tight_layout()
# plt.savefig(f"/Users/parrt/Desktop/{col}.pdf", pad_inches=0)
# plt.show()
# plot_stratpd_gridsearch(X, y, 'DEPARTURE_TIME', 'ARRIVAL_DELAY',
# show_slope_lines=False,
# show_impact=True)
# plt.tight_layout()
# plt.savefig(f"/Users/parrt/Desktop/flight-dep-time-4.pdf", pad_inches=0)
# plt.show()
# I = spearmans_importances(X, y)
# print(I)
# plot_stratpd_gridsearch(X, y, col, 'ARRIVAL_DELAY',
# min_samples_leaf_values=(10,15,20,30),
# min_slopes_per_x_values=(5,10,15,20,25),
# show_slope_lines=False,
# yrange=(-10,90)
# )
# #
# plot_catstratpd(X, y, 'SCHEDULED_DEPARTURE_HOUR', 'ARRIVAL_DELAY',
# min_samples_leaf=10,
# # sort=None,
# yrange=(-110,250),
# show_xticks=False,
# style='scatter')
# plt.title(f"X range {r[0]}..{r[1]} with {n} records")
# I = importances(X, y,
# min_samples_leaf=10, # default
# min_slopes_per_x=20,
# catcolnames={'AIRLINE',
# 'ORIGIN_AIRPORT', 'DESTINATION_AIRPORT',
# 'FLIGHT_NUMBER',
# 'DAY_OF_WEEK', 'dayofyear'},
# )
# print(I)
# plt.tight_layout()
# # rent_pdp()
# plt.show()
| 2.375
| 2
|
planner/web/api.py
|
anzodev/planner
| 0
|
12778527
|
<gh_stars>0
import logging
from flask import Blueprint
from flask import Flask
from flask import Response
from flask import abort
from flask import current_app
from flask import g
from flask import redirect
from flask import render_template
from flask import request
from flask import url_for
from werkzeug.exceptions import HTTPException
from planner.core.models import Task
from planner.core.models import User
from planner.core.usecase import tasks as task_usecase
from planner.core.usecase import users as user_usecase
from planner.web import app_runtime_helpers
from planner.web import auth
from planner.web import notify
def init_app(app: Flask) -> None:
pages_bp = Blueprint("pages", __name__)
forms_bp = Blueprint("forms", __name__, url_prefix="/forms")
# fmt: off
page = lambda url, ep: pages_bp.route(url, endpoint=ep, methods=["GET"]) # noqa: E731, E501
form = lambda url, ep: forms_bp.route(url, endpoint=ep, methods=["POST"]) # noqa: E731, E501
# fmt: on
@page("/login", "login")
def _():
if auth.is_user_in_session():
return redirect(url_for("pages.index"))
return render_template("pages/login.html")
@page("/sign-up", "sign_up")
def _():
return render_template("pages/sign-up.html")
@page("/", "index")
@auth.has_access
def _():
return redirect(url_for("pages.active_tasks"))
@page("/active-tasks", "active_tasks")
@auth.has_access
def _():
# fmt: off
active_tasks = (
Task
.select()
.where(
(Task.progress != 100)
& (Task.parent_task.is_null())
& (Task.user_id == g.user_id)
)
.order_by(Task.created_at.desc())
)
# fmt: on
return render_template("pages/active-tasks.html", tasks=active_tasks)
@page("/completed-tasks", "completed_tasks")
@auth.has_access
def _():
args = request.args
offset = int(args.get("offset", "0"))
limit = int(args.get("limit", current_app.config["COMPLETED_TASKS_LIMIT"]))
# fmt: off
completed_tasks_plus_one = list(
Task
.select()
.where(
(Task.progress == 100)
& (Task.parent_task.is_null())
& (Task.user_id == g.user_id)
)
.order_by(Task.created_at.desc())
.offset(offset)
.limit(limit + 1)
)
# fmt: on
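# Fetch one row beyond 'limit' so has_next can be derived without a separate
# COUNT query; only the first 'limit' rows are rendered.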
completed_tasks = completed_tasks_plus_one[:limit]
has_next = len(completed_tasks_plus_one) > limit
has_previous = offset > 0
return render_template(
"pages/completed-tasks.html",
tasks=completed_tasks,
has_next=has_next,
has_previous=has_previous,
offset=offset,
limit=limit,
)
@page("/tasks/<int:task_id>", "task")
@auth.has_access
def _(task_id: int):
task = Task.get_or_none(id=task_id, user_id=g.user_id)
if task is None:
abort(404)
# fmt: off
subtasks = (
Task
.select()
.where(
Task.parent_task == task
)
)
# fmt: on
return render_template(
"pages/task.html",
task=task,
parent_task=task.parent_task,
subtasks=subtasks,
)
@form("/sign-up", "sign_up")
def _():
form = request.form
username = form["username"]
password = form["password"]
password_copy = form["password_copy"]
if password != password_copy:
notify.error("Passwords missmatch!")
return redirect(url_for("pages.sign_up"))
if User.select(User.username).where(User.username == username).exists():
notify.error(f"User '{username}' exists already.")
return redirect(url_for("pages.sign_up"))
password_hasher = app_runtime_helpers.init_password_hasher()
user_usecase.create_user(username, password_hasher, password)
return redirect(url_for("pages.login"))
@form("/login", "login")
def _():
form = request.form
username = form["username"]
password = form["password"]
user = User.get_or_none(username=username)
if user is None:
notify.error("Username/password is invalid.")
return redirect(url_for("pages.login"))
password_hasher = app_runtime_helpers.init_password_hasher()
if not password_hasher.is_hash_correct(user.password_hash, password):
notify.error("Username/password is invalid.")
return redirect(url_for("pages.login"))
auth.authorize_user(user)
return redirect(url_for("pages.index"))
@form("/logout", "logout")
@auth.has_access
def _():
auth.forget_user()
return redirect(url_for("pages.login"))
@form("/tasks/create", "create_task")
@auth.has_access
def _():
form = request.form
name = form["name"]
note = form.get("note")
parent_task_id = form.get("parent_task_id")
user = User.get_by_id(g.user_id)
parent_task = None
if parent_task_id is not None:
parent_task = Task.get_or_none(id=parent_task_id, user=user)
task_usecase.create_task(
user, name, note=note, parent_task=parent_task
)
url = (
url_for("pages.task", task_id=parent_task_id)
if parent_task_id is not None
else url_for("pages.active_tasks")
)
return redirect(url)
@form("/tasks/remove", "remove_task")
@auth.has_access
def _():
form = request.form
task_id = form["task_id"]
task = Task.get_or_none(id=task_id, user_id=g.user_id)
if task is None:
abort(400)
parent_task = task.parent_task
task_usecase.remove_task(task)
return redirect(
url_for("pages.task", task_id=parent_task.id)
if parent_task is not None
else url_for("pages.active_tasks")
)
@form("/tasks/complete", "complete_task")
@auth.has_access
def _():
form = request.form
task_id = form["task_id"]
task = Task.get_or_none(id=task_id, user_id=g.user_id)
if task is None:
abort(400)
parent_task = task.parent_task
task_usecase.complete_task(task)
return redirect(
url_for("pages.task", task_id=parent_task.id)
if parent_task is not None
else url_for("pages.active_tasks")
)
@app.errorhandler(Exception)
def errorhandler(exc: Exception) -> Response:
if isinstance(exc, auth.NotAuthorized):
return redirect(url_for("pages.login"))
status_code = 500
description = (
"Sorry, something goes wrong."
" Try to repeat your request a few minutes later."
)
if isinstance(exc, HTTPException):
status_code = exc.code
description = exc.description
if status_code == 500:
logger = logging.getLogger(__name__)
logger.exception("unexpected error:")
return render_template(
"pages/error.html", status_code=status_code, description=description
)
app.register_blueprint(pages_bp)
app.register_blueprint(forms_bp)
| 2.03125
| 2
|
puppeteer/fileops.py
|
haani-niyaz/puppeteer
| 25
|
12778528
|
import os
import yaml
class FileOps(object):
def __init__(self, path):
"""Set path to file
Args:
path (str): file path
"""
self._infile = path
def _validate_path(self):
"""Validate path to file
Raises:
YAMLFileError: notify user that the file does not exist
"""
if not os.path.exists(self._infile):
raise YAMLFileError(
"'{0}' does not exist".format(self._infile))
def show(self):
"""Show contents of file
Returns:
str: returns contents of file
"""
self._validate_path()
with open(self._infile, 'r') as stream:
return stream.read()
def is_empty(self):
"""Return True if self.read() is None, else False."""
return self.read() is None
class YAMLFileError(Exception):
"""An exception that occurs when YAML file cannot load or has errors"""
pass
class YAMLFile(FileOps):
"""Load, validate, write and show YAML files"""
def __init__(self, path):
"""Set path to file
Args:
path (str): file path
"""
super(YAMLFile, self).__init__(path)
def _marker(self, error):
"""Helper to get yaml error positions in file
Args:
error (yaml.scanner.ScannerError): scannerError object from exception raised
Returns:
str: Error message string
"""
if hasattr(error, 'problem_mark'):
mark = error.problem_mark
return "{0} has errors in position in line {1}, column {2}".format(
self._infile, mark.line+1, mark.column+1)
else:
return "Something went wrong while attempting to read {0}".format(self._infile)
def read(self):
"""Read yaml file
Returns:
dict: contents of yaml file as a dictionary object
Raises:
YAMLFileError: notify user that the file has errors
"""
super(YAMLFile, self)._validate_path()
try:
with open(self._infile, 'r') as stream:
return yaml.load(stream, Loader=yaml.FullLoader)
except yaml.scanner.ScannerError as e:
raise YAMLFileError(self._marker(e))
except yaml.YAMLError as e:
raise YAMLFileError(self._marker(e))
def write(self, data):
"""Write to yaml file
Args:
data (dict): dictionary of contents to write to file
"""
with open(self._infile, 'w') as outfile:
yaml.dump(data, outfile, default_flow_style=False)
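# A brief usage sketch; 'config.yaml' and the 'managed' key are illustrative
# assumptions, not files or settings required by this module.
if __name__ == '__main__':
    cfg = YAMLFile('config.yaml')
    data = cfg.read() or {}
    data['managed'] = True
    cfg.write(data)
    print(cfg.show())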
| 3.5
| 4
|
tests/test_byol.py
|
kitfactory/tftk
| 6
|
12778529
|
from tftk.image.dataset import Mnist
from tftk.image.dataset import Food101
from tftk.image.dataset import ImageDatasetUtil
from tftk.image.model.classification import SimpleClassificationModel
from tftk.callback import CallbackBuilder
from tftk.optimizer import OptimizerBuilder
from tftk import Context
from tftk.image.model.representation import SimpleRepresentationModel, add_projection_layers
from tftk.train.image import ImageTrain
from tftk import ENABLE_SUSPEND_RESUME_TRAINING, ResumeExecutor
import tensorflow as tf
class MovingAverageCallback(tf.keras.callbacks.Callback):
def __init__(self, model):
self.model = model
def on_train_begin(self, logs=None):
print("Starting training")
def on_train_end(self, logs=None):
print("Stop training")
def on_epoch_begin(self, epoch, logs=None):
print("\nStart epoch")
def on_epoch_end(self, epoch, logs=None):
print("\nOn epoch end, updating moving average")
w1 = self.model.get_weights()
w2 = []
for a in w1:
print(type(a))
w2.append( a*0.8 )
self.model.set_weights(w2)
def get_moving_average_callback(model):
m = model
def moving_average(loss, acc):
print("on epoch end")
w1 = m.get_weights()
w2 = []
for a in w1:
print(type(a))
w2.append( a*0.8 )
m.set_weights(w2)
return moving_average
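# For reference, a BYOL-style update would blend the target network's weights
# toward the online network's rather than only scaling them; a rough sketch
# (tau and the two models are assumptions, not objects defined in this file):
#
#   def ema_update(online_model, target_model, tau=0.99):
#       new_weights = [tau * t + (1.0 - tau) * o
#                      for t, o in zip(target_model.get_weights(),
#                                      online_model.get_weights())]
#       target_model.set_weights(new_weights)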
def custom_loss(y_pred, y_true):
y_1, y_2 = y_pred
diff = y_1 - y_2
loss = tf.keras.backend.abs(diff)
return loss
def reinforcement(data):
img = data["image"]
label = data["label"]
return ([img,img],[img,img])
# supervised
def supervised_dataset(dataset:tf.data.Dataset, max_label:int)->tf.data.Dataset:
filtered = dataset.filter(lambda data:data['label'] < max_label)
def supervised_transform(data):
image = data['image']
image = tf.cast(image, tf.float32)
image = image / 255.0
label = data['label']
label = tf.one_hot(label, max_label)
return image, label
return filtered.map(supervised_transform, num_parallel_calls=tf.data.experimental.AUTOTUNE)
def pretext_dataset(dataset:tf.data.Dataset, start_label:int)->tf.data.Dataset:
filtered = dataset.filter(lambda data:data['label'] >= start_label)
def supervised_transform(data):
image = data['image']
image = tf.cast(image, tf.float32)
image = image / 255.0
def random_transform(image):
pass
if __name__ == '__main__':
context = Context.init_context(TRAINING_NAME='')
# ENABLE_SUSPEND_RESUME_TRAINING()
BATCH_SIZE = 500
CLASS_NUM = 10
IMAGE_SIZE = 28
EPOCHS = 2
SHUFFLE_SIZE = 1000
# if IS_SUSPEND_RESUME_TRAIN() == True and IS_ON_COLABOLATORY_WITH_GOOGLE_DRIVE()== True:
# train, train_len = Mnist.get_train_dataset()
# validation, validation_len = Mnist.get_test_dataset()
# train = train.map(ImageDatasetUtil.image_reguralization()).map(ImageDatasetUtil.one_hot(CLASS_NUM))
# validation = validation.map(ImageDatasetUtil.image_reguralization()).map(ImageDatasetUtil.one_hot(CLASS_NUM))
# train = train.map(reinforcement)
# online_model = SimpleRepresentationModel.get_representation_model(input_shape=(28,28,1))
# target_model = SimpleRepresentationModel.get_representation_model(input_shape=(28,28,1))
# print(online_model.layers)
# online_projection_model = add_projection_layers(online_model)
# target_projection_model = add_projection_layers(target_model)
# input_online = online_model.layers[0].input
# input_target = target_model.layers[0].input
# output_online = online_model.layers[-1].output
# output_target = target_model.layers[-1].output
# mearged_model = tf.keras.Model(inputs=[input_online,input_target], outputs=[output_online,output_target])
# mearged_model.summary()
# optimizer = OptimizerBuilder.get_optimizer(name="rmsprop")
# callbacks = CallbackBuilder.get_callbacks(tensorboard=False, reduce_lr_on_plateau=True,reduce_patience=5,reduce_factor=0.25,early_stopping_patience=16)
# mearged_model.compile(optimizer=optimizer, loss=custom_loss)
# train = train.take(10)
# y = mearged_model.predict(train)
# print(y)
# optimizer='rmsprop', loss=None, metrics=None, loss_weights=None, weighted_metrics=None, run_eagerly=None, steps_per_execution=None, **kwargs)
# online_projection = add_projection_layers(online_model)
# target_projection = add_projection_layers(target_model)
# inputs = [online_projection.input, target_projection.input]
# outputs = [online_projection.output, target_projection.output]
# total_model = tf.keras.Model(inputs=inputs, outputs=outputs)
# optimizer = OptimizerBuilder.get_optimizer(name="rmsprop")
# model = SimpleClassificationModel.get_model(input_shape=(IMAGE_SIZE,IMAGE_SIZE,1),classes=CLASS_NUM)
# callbacks = CallbackBuilder.get_callbacks(tensorboard=False, reduce_lr_on_plateau=True,reduce_patience=5,reduce_factor=0.25,early_stopping_patience=16)
# callbacks.append(MovingAverageCallback(model))
# ImageTrain.train_image_classification(train_data=train,train_size=train_len,batch_size=BATCH_SIZE,validation_data=validation,validation_size=validation_len,shuffle_size=SHUFFLE_SIZE,model=model,callbacks=callbacks,optimizer=optimizer,loss="categorical_crossentropy",max_epoch=EPOCHS)
# w1 = model.get_weights()
# # print(type(w1))
# w2 = []
# for a in w1:
# print(type(a))
# w2.append( a*0.8 )
# model.set_weights(w2)
| 2.5625
| 3
|
pkg/gmail.py
|
rmc8/bibliography_alert
| 0
|
12778530
|
<reponame>rmc8/bibliography_alert
import os
import ssl
import smtplib
from typing import Optional, List
from email.utils import formatdate
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.application import MIMEApplication
class Gmail:
def __init__(self, from_: str, to: Optional[str] = None, cc: Optional[str] = None,
bcc: Optional[str] = None, subject: str = "", body: str = "",
host: str = "smtp.gmail.com", port=465, sep: str = ",") -> None:
self.from_: str = from_
self.to: Optional[str] = to
self.cc: Optional[str] = cc
self.bcc: Optional[str] = bcc
self.subject: str = subject
self.body: str = body
self.attachment: List[str] = []
context = ssl.create_default_context()
self.server = smtplib.SMTP_SSL(host=host, port=port, context=context)
self.sep = sep
def login(self, user: str, password: str) -> None:
self.server.login(user=user, password=password)
def add_attachment(self, attachment_path: str) -> None:
self.attachment.append(attachment_path)
def attachment_len(self) -> int:
return len(self.attachment)
def _set_attachment(self, msg):
for file_path in self.attachment:
if not os.path.exists(file_path):
continue
file_name: str = os.path.basename(file_path)
with open(file_path, "rb") as f:
part = MIMEApplication(f.read(), Name=file_name)
part["Content-Disposition"] = f'attachment; filename="{file_name}"'
msg.attach(part)
return msg
def _create_msg(self, is_html: bool):
msg = MIMEMultipart()
msg.attach(MIMEText(self.body, "html" if is_html else "plain"))
msg["Subject"] = self.subject
msg["From"] = self.from_
msg["To"] = self.to
msg["Cc"] = self.cc
msg["Bcc"] = self.bcc
msg["Date"] = formatdate()
return self._set_attachment(msg)
@staticmethod
def _split_addrs(addrs: Optional[str], sep: str):
if type(addrs) is str:
return addrs.split(sep)
return []
def _get_recipients_list(self) -> list:
to: list = self._split_addrs(self.to, self.sep)
cc: list = self._split_addrs(self.cc, self.sep)
bcc: list = self._split_addrs(self.bcc, self.sep)
return to + cc + bcc
def send(self, is_html: bool = False) -> None:
msg = self._create_msg(is_html=is_html)
recipients_list: list = self._get_recipients_list()
self.server.sendmail(from_addr=self.from_, to_addrs=recipients_list, msg=msg.as_string())
def close(self) -> None:
self.server.close()
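# A minimal usage sketch; the addresses, subject, body and environment variable
# names below are illustrative assumptions, not values required by this module.
if __name__ == '__main__':
    mail = Gmail(from_="sender@example.com", to="recipient@example.com",
                 subject="Bibliography alert", body="<p>New entries found.</p>")
    mail.login(user=os.environ["GMAIL_USER"], password=os.environ["GMAIL_PASSWORD"])
    mail.send(is_html=True)
    mail.close()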
| 2.53125
| 3
|
modules/AR_Scheduler.py
|
deepsphere/deepsphere-weather
| 38
|
12778531
|
<gh_stars>10-100
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 23 11:31:21 2021
@author: ghiggi
"""
import numpy as np
from cycler import cycler
import matplotlib.pyplot as plt
##----------------------------------------------------------------------------.
### Check AR weights
def check_ar_weights(ar_weights):
"""Check AR weights validity."""
if isinstance(ar_weights, (int, float)):
ar_weights = [ar_weights]
if isinstance(ar_weights, list):
ar_weights = np.array(ar_weights)
if not isinstance(ar_weights, np.ndarray):
raise TypeError("Specify AR weights with a list or a numpy array.")
# Check that no AR weight is negative
if any(ar_weights < 0):
raise ValueError("AR weights must not contain negative weights.")
# Check that the last AR weight is not zero !
if ar_weights[-1] == 0:
raise ValueError("The last weight of ar_weights must not be 0.")
return ar_weights
#----------------------------------------------------------------------------.
# No AR weights update when .step()
def _ConstantStep(self):
return self.ar_weights
def _DiracDeltaStep(self):
return self.ar_weights
##----------------------------------------------------------------------------.
## Discrete weight update functions
def _StepwiseDecayStep(self):
weights = self.ar_absolute_weights[:-1]
weights = weights - self.factor
weights[weights < 0] = 0
self.ar_absolute_weights[:-1] = weights
def _StepwiseGrowthStep(self):
weight = self.ar_absolute_weights[-1]
weight = weight + self.factor
if weight > 1:
weight = 1
self.ar_absolute_weights[-1] = weight
def _StepwiseStep(self):
if self.temporary_step_count >= self.step_interval:
_StepwiseDecayStep(self)
if self.smooth_growth:
_StepwiseGrowthStep(self)
# Reset temporary_step_count
self.temporary_step_count = 0
def _HalfDecayStep(self):
weights = self.ar_absolute_weights[:-1]
weights = weights/2
self.ar_absolute_weights[:-1] = weights
def _HalfGrowthStep(self):
weight = self.ar_absolute_weights[-1]
if weight == 0:
weight = self.factor
weight = weight*2
if weight > 1:
weight = 1
self.ar_absolute_weights[-1] = weight
def _HalfStep(self):
if self.temporary_step_count >= self.step_interval:
_HalfDecayStep(self)
if self.smooth_growth:
_HalfGrowthStep(self)
# Reset temporary_step_count
self.temporary_step_count = 0
##----------------------------------------------------------------------------.
### Continuous weight update functions
def _LinearDecayStep(self):
initial_weights = self.ar_absolute_initial_weights[:-1]
weights = initial_weights - self.factor*self.global_step_count_arr[:-1]
weights[weights < 0] = 0
self.ar_absolute_weights[:-1] = weights
def _LinearGrowthStep(self):
initial_weight = self.ar_absolute_initial_weights[-1]
weight = initial_weight + self.factor*self.global_step_count_arr[-1]
if weight > 1:
weight = 1
self.ar_absolute_weights[-1] = weight
def _LinearStep(self):
_LinearDecayStep(self)
if self.smooth_growth:
_LinearGrowthStep(self)
def _ExponentialDecayStep(self):
initial_weights = self.ar_absolute_initial_weights[:-1]
weights = initial_weights * np.exp(-self.factor*self.global_step_count_arr[:-1])
self.ar_absolute_weights[:-1] = weights
def _ExponentialGrowthStep(self):
weight = self.factor * np.exp(self.factor*self.global_step_count_arr[-1])
if weight > 1:
weight = 1
self.ar_absolute_weights[-1] = weight
def _ExponentialStep(self):
_ExponentialDecayStep(self)
if self.smooth_growth:
_ExponentialGrowthStep(self)
#-----------------------------------------------------------------------------.
class AR_Scheduler():
"""Autoregressive (AR) weights scheduler."""
def __init__(self,
method = "LinearStep",
factor = 0.001,
step_interval = None,
smooth_growth = True,
fixed_ar_weights = None,
initial_ar_absolute_weights = None,
initial_ar_weights = None):
"""Autoregressive (AR) weights scheduler.
Parameters
----------
smooth_growth : bool, optional
Whether to set the new AR weight to 0 and grow it smoothly to avoid
training destabilization.
Do not apply to 'Constant' and 'DiracDelta' methods.
The default is True.
method : str, optional
Available methods: 'Constant', 'DiracDelta', 'StepwiseStep', 'HalfStep', 'LinearStep', 'ExponentialStep'.
The default method is "LinearStep".
Methods explanation:
Constant: Add an AR weight (with absolute value 1) when .update() is called.
DiracDelta: Add an AR weight when .update() is called and
reset the other AR weights to 0.
StepwiseStep: When a new AR weight is added with .update(), it starts to subtract
'factor' from the other AR absolute weights every 'step_interval' .step() calls.
If smooth_growth=True, the new AR weight grows stepwise from 0 every 'step_interval' .step() calls.
HalfStep: When a new AR weight is added with .update(), it starts to halve
the other AR absolute weights every 'step_interval' .step() calls.
If smooth_growth=True, the new AR weight grows by doubling from 'factor' every 'step_interval' .step() calls.
LinearStep: When a new AR weight is added with .update(), it starts to
decrease linearly (with slope '-factor') the other
AR absolute weights every .step() call.
If smooth_growth=True, the new AR weight grows linearly
starting from 0.
ExponentialStep: When a new AR weight is added with .update(), it starts to
decrease exponentially (with decay rate '-factor')
the other AR absolute weights every .step() call.
If smooth_growth=True, the new AR weight grows exponentially
starting from 'factor'.
factor : float, optional
Argument required by the following methods: 'StepwiseStep','HalfStep','LinearStep','ExponentialStep'.
Regulate the decay and growth of AR absolute weights when .step() is called.
For HalfStep and ExponentialStep, it is also used as the first value of the new ar_weight when smooth_growth=True.
step_interval : int, optional
Argument required by the following methods: 'StepwiseStep','HalfStep'.
Specify the frequency with which the AR weights are updated with methods 'StepwiseStep' and 'HalfStep'.
step_interval = 1 causes a weight update at every .step() call.
fixed_ar_weights : list, optional
List of AR iterations for which the AR weight values must not be
modified by the step functions.
The default is None. No AR weights is fixed.
initial_ar_absolute_weights : list, optional
Specify the initial absolute AR weights.
They will be rescaled so that the largest value is 1.
If specified, initial_ar_weights must not be specified.
The default is ar_weights = [1].
initial_ar_weights : list, optional
Specify the initial normalized AR weights. (must sum up to 1).
If specified, initial_ar_absolute_weights must not be specified.
The default is ar_weights = [1].
"""
# 'StepwiseDecay' and 'HalfDecay' factor is applied to the ar_absolute weights (not the normalized ar_weights)
# 'LinearDecay','ExponentialDecay' is applied from the initial ar_absolute_weights
# TODO:
# - Implement a min_ar_weight_option? (instead of decaying to 0)
# - Increasing-Decreasing Decay ... "
# Check smooth_growth
##--------------------------------------------------------------------.
if not isinstance(smooth_growth, bool):
raise TypeError("'smooth_growth' must be either True or False.")
##--------------------------------------------------------------------.
# Check valid method
valid_method = ['Constant','DiracDelta','StepwiseStep','HalfStep','LinearStep','ExponentialStep']
if method not in valid_method:
raise ValueError("Provide a valid 'method'.")
##--------------------------------------------------------------------.
# Check fixed_ar_weights
if not isinstance(fixed_ar_weights, (type(None), np.ndarray, list)):
raise TypeError("'fixed_ar_weights' must be specified as list.")
if isinstance(fixed_ar_weights, list):
fixed_ar_weights = np.array(fixed_ar_weights)
if fixed_ar_weights is not None:
if len(fixed_ar_weights) == 0:
fixed_ar_weights = None
##---------------------------------------------------------------------.
# Check initial_ar_weights and initial_ar_absolute_weights are not both specified.
if initial_ar_weights is not None and initial_ar_absolute_weights is not None:
raise ValueError("Specify either 'initial_ar_weights' or 'initial_ar_absolute_weights'.")
# Set default ar_weights if not specified
if initial_ar_weights is None and initial_ar_absolute_weights is None:
initial_ar_weights = [1]
# Check initial_ar_weights
if initial_ar_weights is not None:
# Check AR weights validity
initial_ar_weights = check_ar_weights(initial_ar_weights)
# Check ar_weights sum up to 1
if np.sum(initial_ar_weights) != 1:
raise ValueError("'initial_ar_weights' must sum up to 1.")
# Compute AR absolute weights
# - Force the largest values to be 1
initial_ar_absolute_weights = initial_ar_weights/initial_ar_weights.max()
# Check initial_ar_absolute_weights
elif initial_ar_absolute_weights is not None:
# Check AR weights validity
initial_ar_absolute_weights = check_ar_weights(initial_ar_absolute_weights)
# - Force the maximum values to be 1
initial_ar_absolute_weights = initial_ar_absolute_weights/initial_ar_absolute_weights.max()
# Compute the normalized AR weights
initial_ar_weights = initial_ar_absolute_weights/initial_ar_absolute_weights.sum()
else:
raise NotImplementedError("This option has been not considered.")
##--------------------------------------------------------------------.
# Check that factor and step_interval are not negative
if factor is not None:
if factor < 0:
raise ValueError("Provide a factor between 0 and 1.")
if step_interval is not None:
if step_interval <= 0:
raise ValueError("'step_interval' must be an integer value equal or larger than 1.")
##---------------------------------------------------------------------.
# Check required method arguments are specified
if method in ['StepwiseStep','HalfStep']:
if step_interval is None:
raise ValueError("'{}' method requires specification of the 'step_interval' argument".format(method))
if method in ['HalfStep','StepwiseStep','LinearStep','ExponentialStep']:
if factor is None:
raise ValueError("'{}' method requires specification of the 'factor' argument".format(method))
if method in ['Constant', 'DiracDelta']:
smooth_growth = False
##---------------------------------------------------------------------.
# Count the number of AR iteration (at start)
current_ar_iterations = len(initial_ar_weights) - 1
self.current_ar_iterations = current_ar_iterations
# Set absolute AR weights
self.ar_absolute_weights = initial_ar_absolute_weights
# Set ar_weights (normalized AR weights)
self.ar_weights = initial_ar_weights
# Set initial AR absolute weights (for fixed weights) and 'LinearDecay' and 'ExponentialDecay'
self.ar_absolute_initial_weights = self.ar_absolute_weights.copy()
##--------------------------------------------------------------------.
# Add method arguments
self.method = method
self.step_interval = step_interval
self.factor = factor
self.smooth_growth = smooth_growth
self.fixed_ar_weights = fixed_ar_weights
##--------------------------------------------------------------------.
# Initialize temporary step counter
# - For 'StepwiseDecay' and 'HalfDecay' method --> step_interval
self.temporary_step_count = 0
##--------------------------------------------------------------------.
# - Initialize global step counter
# - For 'LinearDecay' and 'ExponentialDecay'
self.global_step_count_arr = np.zeros(current_ar_iterations+1)
##--------------------------------------------------------------------.
### Define the update_weights function
fun_dict = {'Constant': _ConstantStep,
'DiracDelta': _DiracDeltaStep,
'StepwiseStep': _StepwiseStep,
'HalfStep': _HalfStep,
'LinearStep': _LinearStep,
'ExponentialStep': _ExponentialStep,
}
self.update_weights = fun_dict[method]
##--------------------------------------------------------------------.
def step(self):
"""Update AR weights."""
# Update step count
self.temporary_step_count = self.temporary_step_count + 1 # for 'StepwiseDecay' and 'HalfDecay'
self.global_step_count_arr = self.global_step_count_arr + 1 # for 'LinearDecay' and 'ExponentialDecay'
##---------------------------------------------------------------------.
if self.current_ar_iterations > 0:
# - Update weights
self.update_weights(self)
# - Refix the value of fixed AR weights
if self.fixed_ar_weights is not None:
tmp_fixed_ar_weights = self.fixed_ar_weights[self.fixed_ar_weights < self.current_ar_iterations]
self.ar_absolute_weights[tmp_fixed_ar_weights] = self.ar_absolute_initial_weights[tmp_fixed_ar_weights]
##---------------------------------------------------------------------.
# Retrieve normalized AR weights (summing up to 1)
self.ar_weights = np.array(self.ar_absolute_weights)/np.sum(self.ar_absolute_weights)
def update(self):
"""Add an ar_absolute_weight with value 1."""
# Update the number of AR iterations
self.current_ar_iterations = self.current_ar_iterations + 1
# Add a new AR weight
if not self.smooth_growth: # ... with (absolute) value 1
self.ar_absolute_weights = np.append(self.ar_absolute_weights, 1)
self.ar_absolute_initial_weights = np.append(self.ar_absolute_initial_weights, 1)
else: # start at 0 (or factor for ExponentialStep, HalfStep)
# Update current last weight value (for ExponentialStep and LinearStep)
self.ar_absolute_initial_weights[-1] = self.ar_absolute_weights[-1]
# Add new weight
self.ar_absolute_initial_weights = np.append(self.ar_absolute_initial_weights, 0)
self.ar_absolute_weights = np.append(self.ar_absolute_weights, 0)
##---------------------------------------------------------------------.
# If the DiracDelta update method is chosen, set the other weights to 0
if self.method == "DiracDelta":
self.ar_absolute_weights[:-1] = 0
##---------------------------------------------------------------------.
# Update normalization of AR weights
self.ar_weights = np.array(self.ar_absolute_weights)/np.sum(self.ar_absolute_weights)
##---------------------------------------------------------------------.
# Update the step count array (--> For LinearDecay and ExponentialDecay)
self.global_step_count_arr[-1] = 0 # Reset the last (because will start to decay)
self.global_step_count_arr = np.append(self.global_step_count_arr, 0)
#----------------------------------------------------------------------------.
def plot_AR_scheduler(ar_scheduler,
n_updates=4,
update_every=15,
plot_absolute_ar_weights=True,
plot_normalized_ar_weights=True):
n_initial_ar_weights = len(ar_scheduler.ar_weights)
n_final_ar_weights = n_initial_ar_weights + n_updates
### Initialize dictionary
ar_weights_per_ar_iteration = {}
for i in range(n_final_ar_weights + 1):
ar_weights_per_ar_iteration[i] = {}
ar_weights_per_ar_iteration[i]['iteration'] = []
ar_weights_per_ar_iteration[i]['ar_absolute_weights'] = []
ar_weights_per_ar_iteration[i]['ar_weights'] = []
# Simulate AR weights step() and update()
iteration = 0
for u in range(n_updates+1):
for i in range(update_every+1):
current_ar_iterations = len(ar_scheduler.ar_weights) - 1
for ar_iteration in range(current_ar_iterations+1):
ar_weights_per_ar_iteration[ar_iteration]['iteration'].append(iteration)
ar_weights_per_ar_iteration[ar_iteration]['ar_absolute_weights'].append(ar_scheduler.ar_absolute_weights[ar_iteration])
ar_weights_per_ar_iteration[ar_iteration]['ar_weights'].append(ar_scheduler.ar_weights[ar_iteration])
ar_scheduler.step()
iteration = iteration + 1
ar_scheduler.update()
##------------------------------------------------------------------------.
### Visualize AR weights
method = ar_scheduler.method
prop_cycle = plt.rcParams['axes.prop_cycle']
colors = prop_cycle.by_key()['color']
custom_cycler = cycler(linestyle=['-', '--', ':', '-.','-', '--', ':', '-.','-', '--'],
color=colors)
if plot_absolute_ar_weights:
fig, ax = plt.subplots()
ax.set_prop_cycle(custom_cycler)
for ar_iteration in range(n_final_ar_weights+1):
plt.plot(ar_weights_per_ar_iteration[ar_iteration]['iteration'],
ar_weights_per_ar_iteration[ar_iteration]['ar_absolute_weights'],
antialiased = True)
ax.set_xlabel("Iteration")
plt.title("Absolute AR weights ({})".format(method))
ax.legend(labels=list(range(n_final_ar_weights+1)), loc='upper right')
plt.show()
if plot_normalized_ar_weights:
fig, ax = plt.subplots()
ax.set_prop_cycle(custom_cycler)
for ar_iteration in range(n_final_ar_weights+1):
plt.plot(ar_weights_per_ar_iteration[ar_iteration]['iteration'],
ar_weights_per_ar_iteration[ar_iteration]['ar_weights'],
antialiased = True)
ax.set_xlabel("Iteration")
plt.title("Normalized AR weights ({})".format(method))
ax.legend(labels=list(range(n_final_ar_weights+1)), loc='upper right')
plt.show()
##----------------------------------------------------------------------------.
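# Minimal usage sketch of the scheduler (the numbers below are illustrative):
#
#   scheduler = AR_Scheduler(method="LinearStep", factor=0.001, smooth_growth=True)
#   for _ in range(3):
#       scheduler.update()           # enable one more AR iteration
#       for _ in range(10):
#           scheduler.step()         # decay/grow the absolute weights
#       print(scheduler.ar_weights)  # normalized weights, summing to 1
#
# plot_AR_scheduler(AR_Scheduler(method="LinearStep", factor=0.001)) visualizes
# the same behaviour over several updates.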
| 2.953125
| 3
|
ejercicio5-condicionales.py
|
raformatico/5-ejemplos-if-elif-else
| 0
|
12778532
|
<reponame>raformatico/5-ejemplos-if-elif-else
# SIGNALS SENT FROM THE CHAT
# > CHAT CALL TO CREATE THE PATH
def on_chat_crea_camino():
agent.teleport(world(44, 4, 7), SOUTH)
crea_camino()
player.on_chat("crea_camino", on_chat_crea_camino)
# > CHAT CALL TO WALK THE PATH
def on_chat_recorre_camino():
agent.teleport(world(44, 4, 7), SOUTH)
recorre_camino()
player.on_chat("recorre_camino", on_chat_recorre_camino)
# FUNCTION TO PLACE GOLD
def poner_oro(num: number):
for index5 in range(num):
agent.destroy(DOWN)
agent.set_item(GOLD_BLOCK, 1, 1)
agent.place(DOWN)
agent.move(FORWARD, 1)
agent.collect_all()
agent.teleport(world(14, 4, 7), SOUTH)
# FUNCTION TO CREATE THE PATH
def crea_camino():
poner_oro(10)
agent.turn(LEFT_TURN)
poner_oro(3)
agent.turn(RIGHT_TURN)
poner_oro(10)
agent.teleport(world(44, 4, 17), SOUTH)
agent.turn(RIGHT_TURN)
poner_oro(3)
agent.turn(LEFT_TURN)
poner_oro(10)
# FUNCTION TO WALK THE CORRECT PATH
# FUNCTION THAT IMPLEMENTS THE IF/ELIF/ELSE LOGIC
def recorre_camino():
agent.move(FORWARD, 10)
if agent.inspect(AgentInspection.BLOCK, DOWN) == IRON_BLOCK:
agent.turn_left()
agent.move(FORWARD, 3)
agent.turn_right()
agent.move(FORWARD, 9)
agent.move(UP, 5)
elif agent.inspect(AgentInspection.BLOCK, DOWN) == REDSTONE_BLOCK:
agent.turn_right()
agent.move(FORWARD, 3)
agent.turn_left()
agent.move(FORWARD, 9)
agent.move(UP, 5)
else:
agent.teleport_to_player()
| 3.375
| 3
|
tests/test_utils.py
|
logics4ai-sapienza/hoa2dot
| 0
|
12778533
|
<reponame>logics4ai-sapienza/hoa2dot<filename>tests/test_utils.py
# -*- coding: utf-8 -*-
# This file is part of hoa-utils.
#
# hoa-utils is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# hoa-utils is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with hoa-utils. If not, see <https://www.gnu.org/licenses/>.
#
"""This module contains test utils."""
import os
from contextlib import contextmanager
from pathlib import Path
@contextmanager
def cd(new_dir: Path):
"""
Change directory with a context manager.
:param new_dir: the new directory where to go.
:return: None
"""
old_dir = os.getcwd()
try:
os.chdir(str(new_dir))
yield
finally:
os.chdir(old_dir)
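# Usage sketch (the target directory is an illustrative assumption):
#
#   with cd(Path("/tmp")):
#       ...  # os.getcwd() is temporarily /tmp here
#   # the previous working directory is restored on exit, even after exceptions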
| 1.75
| 2
|
substra/sdk/rest_client.py
|
cupcicm/substra
| 0
|
12778534
|
<reponame>cupcicm/substra
# Copyright 2018 Owkin, inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import requests
from substra.sdk import exceptions, assets, utils
logger = logging.getLogger(__name__)
DEFAULT_RETRY_TIMEOUT = 5 * 60
class Client():
"""REST Client to communicate with Substra server."""
def __init__(self, config=None):
self._headers = {}
self._default_kwargs = {}
self._base_url = None
if config:
self.set_config(config)
def set_config(self, config):
"""Reset internal attributes from config."""
# get default requests keyword arguments from config
kwargs = {}
if config['auth']:
user, password = config['auth']['user'], config['auth']['password']
kwargs['auth'] = (user, password)
if config['insecure']:
kwargs['verify'] = False
# get default HTTP headers from config
headers = {'Accept': 'application/json;version={}'.format(config['version'])}
self._headers = headers
self._default_kwargs = kwargs
self._base_url = config['url'][:-1] if config['url'].endswith('/') else config['url']
def _request(self, request_name, url, **request_kwargs):
"""Base request helper."""
if request_name == 'get':
fn = requests.get
elif request_name == 'post':
fn = requests.post
else:
raise NotImplementedError
# override default request arguments with input arguments
kwargs = dict(self._default_kwargs)
kwargs.update(request_kwargs)
# do HTTP request and catch generic exceptions
try:
r = fn(url, headers=self._headers, **kwargs)
r.raise_for_status()
except requests.exceptions.ConnectionError as e:
raise exceptions.ConnectionError.from_request_exception(e)
except requests.exceptions.Timeout as e:
raise exceptions.Timeout.from_request_exception(e)
except requests.exceptions.HTTPError as e:
logger.error(f"Requests error status {e.response.status_code}: {e.response.text}")
if e.response.status_code == 400:
raise exceptions.InvalidRequest.from_request_exception(e)
if e.response.status_code == 401:
raise exceptions.AuthenticationError.from_request_exception(e)
if e.response.status_code == 403:
raise exceptions.AuthorizationError.from_request_exception(e)
if e.response.status_code == 404:
raise exceptions.NotFound.from_request_exception(e)
if e.response.status_code == 408:
raise exceptions.RequestTimeout.from_request_exception(e)
if e.response.status_code == 409:
raise exceptions.AlreadyExists.from_request_exception(e)
if e.response.status_code == 500:
raise exceptions.InternalServerError.from_request_exception(e)
raise exceptions.HTTPError.from_request_exception(e)
return r
def request(self, request_name, asset_name, path=None, json_response=True,
**request_kwargs):
"""Base request."""
path = path or ''
url = f"{self._base_url}/{assets.to_server_name(asset_name)}/{path}"
if not url.endswith("/"):
url = url + "/" # server requires a suffix /
response = self._request(
request_name,
url,
**request_kwargs,
)
if not json_response:
return response
try:
return response.json()
except ValueError as e:
msg = f"Cannot parse response to JSON: {e}"
raise exceptions.InvalidResponse(response, msg)
def get(self, name, key):
"""Get asset by key."""
return self.request(
'get',
name,
path=f"{key}",
)
def list(self, name, filters=None):
"""List assets by filters."""
request_kwargs = {}
if filters:
request_kwargs['params'] = utils.parse_filters(filters)
items = self.request(
'get',
name,
**request_kwargs,
)
# when filtering 'complex' assets the server responds with a list per filter
# item; these lists of lists must then be flattened
if isinstance(items, list) and all([isinstance(i, list) for i in items]):
items = utils.flatten(items)
return items
def add(self, name, retry_timeout=DEFAULT_RETRY_TIMEOUT, exist_ok=False,
**request_kwargs):
"""Add asset.
In case of timeout, block till resource is created.
If `exist_ok` is true, `AlreadyExists` exceptions will be ignored and the
existing asset will be returned.
"""
try:
return self.request(
'post',
name,
**request_kwargs,
)
except exceptions.RequestTimeout as e:
logger.warning(
'Request timeout, blocking till asset is created')
key = e.pkhash
is_many = isinstance(key, list) # timeout on many objects is not handled
if not retry_timeout or is_many:
raise e
retry = utils.retry_on_exception(
exceptions=(exceptions.NotFound),
timeout=float(retry_timeout),
)
return retry(self.get)(name, key)
except exceptions.AlreadyExists as e:
if not exist_ok:
raise
key = e.pkhash
is_many = isinstance(key, list)
if is_many:
logger.warning("Many assets not compatible with 'exist_ok' option")
raise
logger.warning(f"{name} already exists: key='{key}'")
return self.get(name, key)
def get_data(self, address, **request_kwargs):
"""Get asset data."""
return self._request(
'get',
address,
**request_kwargs,
)
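# A minimal usage sketch; the URL, credentials, version string and asset name
# below are illustrative assumptions, not values mandated by the SDK.
if __name__ == '__main__':
    client = Client(config={
        'url': 'http://substra-backend.example.com',
        'version': '0.0',
        'insecure': False,
        'auth': {'user': 'node-1', 'password': 'secret'},
    })
    algos = client.list('algo')
    print(len(algos))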
| 2
| 2
|
test/lv1/lv2/include_lv2.py
|
pavelkukov/fsimport
| 0
|
12778535
|
<gh_stars>0
def get_text():
return 'text from lv2'
| 1.054688
| 1
|
krules_core/base_functions/processing.py
|
airspot-dev/krules-core
| 4
|
12778536
|
# Copyright 2019 The KRules Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
from collections.abc import Mapping
from krules_core.route.router import DispatchPolicyConst
from krules_core.base_functions import RuleFunctionBase, Filter
class Process(Filter):
"""
*Like* `Filter <https://intro.krules.io/Filters.html#krules_core.base_functions.filters.Filter>`_ *, this evaluates a given expression but does not return its result.*
*The best way to exploit it is to use it in combination with* `Argument Processors <https://intro.krules.io/ArgumentProcessors.html>`_.
::
from krules_env import RULE_PROC_EVENT
#...
rulesdata = [
{
# Store processed events with Django ORM
rulename: "processed-rules",
subscribe_to: RULE_PROC_EVENT,
ruledata: {
processing: [
Process(
lambda payload:(
ProcessedEvent.objects.create(
rule_name=payload["name"],
type=payload["type"],
subject=payload["subject"],
event_info=payload["event_info"],
payload=payload["payload"],
time=payload["event_info"].get("time", datetime.now().isoformat()),
filters=payload["filters"],
processing=payload["processing"],
got_errors=payload["got_errors"],
processed=payload["processed"],
origin_id=payload["event_info"].get("originid", "-")
)
)
),
]
}
}
]
"""
def execute(self, value):
"""
"""
super().execute(value)
## PAYLOAD FUNCTIONS ##############################################################
class SetPayloadProperties(RuleFunctionBase):
"""
*Set the given properties in the payload; any that already exist will be overridden*
::
rulesdata = [
{
rulename: "on-admin-login-update-payload",
subscibre_to: "user-login",
ruledata: {
filters: [
...
# Check if user is admin
]
processing: [
SetPayloadProperties( # Definition with a dictionary
**{
"has_admin_access": True,
"last_login": lambda: datetime.now()
}
)
]
}
},
{
rulename: "on-user-login-update-payload",
subscibre_to: "user-login",
ruledata: {
filters: [
...
# Check if user has not admin privileges
]
processing: [
SetPayloadProperties( # Definition with named arguments
has_admin_access=False,
last_login=lambda:datetime.now()
)
]
}
},
# Thanks to ArgumentProcessor we can use a lambda; without it, last_login
# would always equal the Rule instantiation's datetime, while we need the execution's one.
]
"""
def execute(self, **kwargs):
"""
Args:
**kwargs: Each named parameter is the key and value to set in the payload.
"""
for k, v in kwargs.items():
self.payload[k] = v
class SetPayloadProperty(SetPayloadProperties):
"""
*Extends* `SetPayloadProperties <https://intro.krules.io/Processing.html#krules_core.base_functions.processing.SetPayloadProperties>`_
*expecting a single property to set*
::
rulesdata = [
{
rulename: "on-heather-onboarded-set-class",
subscibre_to: "device-onboarded",
ruledata: {
filters: [
...
# Check if device has characteristics of an heather
]
processing: [
SetPayloadProperty(
property_name="device_class",
value="heather"
)
]
}
},
]
"""
def execute(self, property_name, value):
"""
Args:
property_name: Name of the property to set.
value: Value to set.
"""
super().execute(**{property_name: value})
## SUBJECT FUNCTIONS ################################################################
class SetSubjectProperty(RuleFunctionBase):
"""
*Set a single property of the subject, supporting atomic operations.*
*By default, the property is reactive unless it is muted (muted=True) or extended (extended=True)*
::
rulesdata = [
{
rulename: "set-device-class",
subscibre_to: "device-onboarded",
ruledata: {
filters: [
...
# Check if device has characteristics of an heather
]
processing: [
SetSubjectProperty(
property_name="device_class",
value="heather"
)
]
}
},
{
rulename: "on-new-checkup-increment-counter",
subscibre_to: "checkup",
ruledata: {
processing: [
SetSubjectProperty(
property_name="checkup_cnt",
value=lambda x: x is None and 1 or x + 1 # Operation is atomic
)
]
}
}
]
"""
def execute(self, property_name, value, extended=False, muted=False, use_cache=True):
"""
Args:
property_name: Name of the property to set. It may or may not exist
value: Value to set. It can be a callable and receives (optionally) the current property value.
If the property does not exist yet, it receives None. Note that value setting is an atomic operation.
            extended: If True set an extended property instead of a standard one. [default False]
muted: If True no subject-property-changed will be raised after property setting. Note that extended
properties are always muted so, if extended is True, this parameter will be ignored. [default False]
            use_cache: If False store the property value immediately in the storage, otherwise wait for the end of the rule execution. [default True]
"""
if extended:
fn = lambda v: self.subject.set_ext(property_name, v, use_cache)
else:
fn = lambda v: self.subject.set(property_name, v, muted, use_cache)
return fn(value)
class SetSubjectPropertyImmediately(SetSubjectProperty):
"""
*Extends* `SetSubjectProperty <https://intro.krules.io/Processing.html#krules_core.base_functions.processing.SetSubjectProperty>`_
    *setting a property directly in the storage without using the cache (* **use_cache=False** *).
    This can be very helpful for avoiding concurrency issues and inconsistencies during the execution.
    The extension's aim is to make the code more readable.*
"""
def execute(self, property_name, value, extended=False, muted=False, **kwargs):
"""
"""
return super().execute(property_name, value, extended=extended, muted=muted, use_cache=False)
class SetSubjectExtendedProperty(SetSubjectProperty):
"""
*Extends* `SetSubjectProperty <https://intro.krules.io/Processing.html#krules_core.base_functions.processing.SetSubjectProperty>`_
    *setting an extended property of the subject (* **extended=True** *). Note that* **muted** *is no longer present
    in the arguments because an extended property is always muted.
    The extension's aim is to make the code more readable.*
"""
def execute(self, property_name, value, use_cache=True, **kwargs):
"""
"""
return super().execute(property_name, value, extended=True, muted=True, use_cache=use_cache)
class SetSubjectProperties(RuleFunctionBase):
"""
    *Set multiple properties on the subject from a dictionary. This is allowed only when using the cache and not for
    extended properties. Each property set this way is muted, but it is possible to unmute some of them using the*
    **unmuted** *parameter*
::
rulesdata = [
{
rulename: "on-device-oboarded-update",
subscibre_to: "device-onboarded",
ruledata: {
filters: [
...
                    # Check if device has characteristics of a heater
]
processing: [
SetSubjectProperties(
props=lambda: {
"device_class": "heather",
"on_boarding_tm": datetime.now(),
},
unmuted=["heather"]
)
                        # Thanks to ArgumentProcessor we can use a lambda; without it, on_boarding_tm
                        # would always equal the Rule instantiation's datetime, while we need the execution's one.
]
}
}
]
"""
def execute(self, props, unmuted=[]):
"""
Args:
props: The properties to set
            unmuted: List of property names for which property-changed events will be emitted
"""
for name, value in props.items():
self.subject.set(name, value, muted=name not in unmuted)
class StoreSubject(RuleFunctionBase):
"""
    *Store all subject properties in the subject storage and then flush the cache.
    Usually this happens at the end of the ruleset execution.*
"""
def execute(self):
self.subject.store()
class FlushSubject(RuleFunctionBase):
"""
    *Remove all of the subject's properties. It is important to recall that a subject exists only while it has at least one property,
    so* **removing all its properties means removing the subject itself**.
::
rulesdata = [
{
rulename: "on-user-unsubscribe-delete-subject",
subscibre_to: "user-unsubscribed",
ruledata: {
processing: [
DeleteProfileFromDB(user_id=lambda subject: subject.user_id),
FlushSubject()
]
}
},
{
rulename: "on-onboard-device-store-properties",
subscribe_to: "onboard-device",
ruledata: {
processing: [
FlushSubject(),
SetSubjectProperties(lambda payload: payload["data"]),
SetSubjectProperty('status', 'READY'),
],
},
},
]
"""
def execute(self):
self.subject.flush()
#####################################################################################
class Route(RuleFunctionBase):
"""
    *Produce an event inside and/or outside the ruleset; by "sending outside" the event we mean delivering it to the
    dispatcher component.
    By default an event is dispatched outside only if there is no handler defined in the current ruleset.
    However, it is possible to change this behavior using* **dispatch_policy**.
*Available choices are defined in* **krules_core.route.router.DispatchPolicyConst** *as:*
    - **DEFAULT**: *Dispatched outside only when no handler is found in the current ruleset;*
    - **ALWAYS**: *Always dispatched outside, even if a handler is found and processed in the current ruleset;*
    - **NEVER**: *Never dispatched outside;*
    - **DIRECT**: *Skip the search for a local handler and send outside directly.*
::
from krules_core.route.router.DispatchPolicyConst import DEFAULT, ALWAYS, NEVER, DIRECT
from krules_core.event_types import SUBJECT_PROPERTY_CHANGED
# ...
rulesdata = [
{
rulename: "on-device-onboarded-dispatch-added-event",
subscibre_to: "device-onboarded",
ruledata: {
processing: [
# ...
# do something with device
Route(
subject=lambda payload: payload["device_id"],
payload=lambda payload: payload["device_data"],
event_type="device-added",
                            # no dispatch_policy is provided, so the DEFAULT one will be used
),
]
}
},
{
rulename: "on-position-change-propagate-event",
                subscribe_to: SUBJECT_PROPERTY_CHANGED,
ruledata: {
filters: [
OnSubjectPropertyChanged("position")
]
processing: [
Route(
dispatch_policy=DIRECT
                            # In this case we specify neither the event type, nor the subject, nor the payload.
                            # We use dispatch_policy DIRECT to propagate the received event outside; this increases
                            # efficiency because we want to avoid useless checks in the other rules subscribed to SUBJECT_PROPERTY_CHANGED.
                            # Note that the rules are processed following the order in which they were defined.
)
]
}
},
{
rulename: "on-temp-change-propagate-event",
                subscribe_to: SUBJECT_PROPERTY_CHANGED,
ruledata: {
filters: [
OnSubjectPropertyChanged("temp", value=lambda v: v > 30)
]
processing: [
Route(
event_type="device-overheated"
dispatch_policy=ALWAYS
# We want to handle device-overheated event both in the current container and outside, for example to send an external notification
)
]
}
},
{
rulename: "on-device-overheated-schedule-check",
subscribe_to: "device-overheated",
ruledata: {
# ...
}
},
]
"""
def execute(self, event_type=None, subject=None, payload=None, dispatch_policy=DispatchPolicyConst.DEFAULT):
"""
Args:
event_type: The event type. If None use current processing event type [default None]
subject: The event subject. If None use the current subject [default None]
payload: The event payload. If None use the current payload [default None]
dispatch_policy: Define the event dispatch policy as explained before. [default DispatchPolicyConst.DEFAULT]
"""
from krules_core.providers import event_router_factory
if event_type is None:
event_type = self.event_type
if subject is None:
subject = self.subject
if payload is None:
payload = self.payload
self.router.route(event_type, subject, payload, dispatch_policy=dispatch_policy)
class RaiseException(RuleFunctionBase):
"""
*Force the given exception raising*
::
from .my_custom_exceptions import UnexpectedPayload # supposing we defined a module with custom exceptions
rulesdata = [
{
rulename: "on-unexpected-payload-raise-exception",
subscibre_to: "device-onboarded",
ruledata: {
filters: [
Return(lambda payload: "device_id" not in payload)
]
processing: [
RaiseException(
UnexpectedPayload("device_id missing!")
)
]
}
},
]
"""
def execute(self, ex):
"""
Args:
ex: The exception to be raised
"""
raise ex
| 1.757813
| 2
|
drepr/old_code/prototype/drepr/services/ra_reader/net_cdf_reader.py
|
scorpio975/d-repr
| 5
|
12778537
|
<reponame>scorpio975/d-repr<gh_stars>1-10
from pathlib import Path
from typing import List, Any, Union
import netCDF4
import numpy
from drepr.models import Variable, Location
from drepr.services.ra_reader.ra_reader import RAReader
from drepr.services.ra_reader.tensor_ra_reader import TensorRAReader
class NetCDFRAReader(RAReader):
def __init__(self, fpath: Path):
super().__init__()
net_cdf_file = netCDF4.Dataset(str(fpath))
self.tensors = {}
for var in net_cdf_file.variables.values():
data = numpy.asarray(var).tolist()
shape = var.shape
self.tensors[var.name] = TensorRAReader(data, shape)
def get_value(self, index: List[Union[str, int]], start_idx: int = 0) -> Any:
return self.tensors[index[0]].get_value(index, start_idx + 1)
def replace_value(self, index: List[Union[str, int]], value: Any, start_idx: int = 0):
self.tensors[index[0]].replace_value(index, value, start_idx + 1)
def insert_value(self, index: List[Union[str, int]], value: Any, start_idx: int = 0):
self.tensors[index[0]].insert_value(index, value, start_idx + 1)
def remove_value(self, index: List[Union[str, int]], start_idx: int = 0):
self.tensors[index[0]].remove_value(index, start_idx + 1)
def dump2json(self) -> Union[dict, list]:
return {k: v.dump2json() for k, v in self.tensors.items()}
def ground_location_mut(self, loc: Location, start_idx: int = 0) -> None:
self.tensors[loc.slices[start_idx].idx].ground_location_mut(loc, start_idx + 1)
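# Hedged usage sketch (not part of d-repr): the file name and variable name below are
# hypothetical; any NetCDF file with at least one variable would do.
if __name__ == "__main__":
    reader = NetCDFRAReader(Path("example.nc"))
    # Index format: [variable_name, index_along_dim_0, index_along_dim_1, ...]
    print(reader.get_value(["temperature", 0, 0]))
    print(list(reader.dump2json().keys()))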
| 2.078125
| 2
|
utils.py
|
laituan245/BLINK
| 0
|
12778538
|
<gh_stars>0
def visualize_el_preds(data_and_predictions, output_fp='visualization.html'):
f = open(output_fp, 'w+')
for data in data_and_predictions:
inst_type = data['type']
ctx_left = data['context_left']
mention = data['mention']
ctx_right = data['context_right']
# Input
f.write(f'<span style="color:red">[{inst_type}]</span> {ctx_left} <b>{mention}</b> {ctx_right}</br></br>\n')
# Predictions
for p in data['top_entities']:
eid, e_title, e_url, e_text = p['id'], p['title'], p['url'], p['text']
f.write(f'[<a href="{e_url}">{eid}</a> <i>{e_title}</i>] ')
f.write(f'{e_text[:200]} ...')
f.write('</br></br>\n')
# Separators
f.write('</br><hr>\n')
f.close()
print(f'Generated a visualization file {output_fp}')
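# Hedged usage sketch (not part of the original module): a minimal, hand-built input
# showing the dictionary keys visualize_el_preds reads; the entity values below are
# illustrative placeholders.
if __name__ == "__main__":
    sample = [{
        "type": "test",
        "context_left": "The capital of France is",
        "mention": "Paris",
        "context_right": ", a major European city.",
        "top_entities": [{
            "id": "Q90",
            "title": "Paris",
            "url": "https://en.wikipedia.org/wiki/Paris",
            "text": "Paris is the capital and most populous city of France.",
        }],
    }]
    visualize_el_preds(sample, output_fp="sample_visualization.html")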
| 2.671875
| 3
|
src/adobe/pdfservices/operation/pdfops/options/extractpdf/extract_pdf_options.py
|
hvntravel/pdfservices-python-sdk
| 2
|
12778539
|
# Copyright 2021 Adobe. All rights reserved.
# This file is licensed to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy
# of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS
# OF ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import logging
from typing import List
from adobe.pdfservices.operation.pdfops.options.extractpdf.extract_element_type import ExtractElementType
from adobe.pdfservices.operation.pdfops.options.extractpdf.extract_renditions_element_type import \
ExtractRenditionsElementType
from adobe.pdfservices.operation.pdfops.options.extractpdf.table_structure_type import TableStructureType
class ExtractPDFOptions():
""" An Options Class that defines the options for ExtractPDFOperation.
.. code-block:: python
extract_pdf_options: ExtractPDFOptions = ExtractPDFOptions.builder() \\
.with_elements_to_extract([ExtractElementType.TEXT, ExtractElementType.TABLES]) \\
.with_get_char_info(True) \\
.with_table_structure_format(TableStructureType.CSV) \\
.with_elements_to_extract_renditions([ExtractRenditionsElementType.FIGURES, ExtractRenditionsElementType.TABLES]) \\
.with_include_styling_info(True) \\
.build()
"""
def __init__(self, elements_to_extract, elements_to_extract_renditions, get_char_info, table_output_format,
include_styling_info=None):
self._elements_to_extract = elements_to_extract
self._elements_to_extract_renditions = elements_to_extract_renditions
self._get_char_info = get_char_info
self._table_output_format = table_output_format
self._include_styling_info = include_styling_info
self._logger = logging.getLogger(__name__)
@property
def elements_to_extract(self):
""" List of pdf element types to be extracted in a structured format from input file"""
return self._elements_to_extract
@property
def elements_to_extract_renditions(self):
""" List of pdf element types whose renditions needs to be extracted from input file"""
return self._elements_to_extract_renditions
@property
def get_char_info(self):
""" Boolean specifying whether to add character level bounding boxes to output json """
return self._get_char_info
@property
def table_output_format(self):
""" export table in specified format - currently csv supported """
return self._table_output_format
@property
def include_styling_info(self):
""" Boolean specifying whether to add PDF Elements Styling Info to output json """
return self._include_styling_info
@staticmethod
def builder():
"""Returns a Builder for :class:`ExtractPDFOptions`
:return: The builder class for ExtractPDFOptions
:rtype: ExtractPDFOptions.Builder
"""
return ExtractPDFOptions.Builder()
class Builder:
""" The builder for :class:`ExtractPDFOptions`.
"""
def __init__(self):
self._elements_to_extract = None
self._elements_to_extract_renditions = None
self._table_output_format = None
self._get_char_info = None
self._include_styling_info = None
def _init_elements_to_extract(self):
if not self._elements_to_extract:
self._elements_to_extract = []
def _init_elements_to_extract_renditions(self):
if not self._elements_to_extract_renditions:
self._elements_to_extract_renditions = []
def with_element_to_extract(self, element_to_extract: ExtractElementType):
"""
adds a pdf element type for extracting structured information.
:param element_to_extract: ExtractElementType to be extracted
:type element_to_extract: ExtractElementType
:return: This Builder instance to add any additional parameters.
:rtype: ExtractPDFOptions.Builder
:raises ValueError: if element_to_extract is None.
"""
if element_to_extract and element_to_extract in ExtractElementType:
self._init_elements_to_extract()
self._elements_to_extract.append(element_to_extract)
else:
raise ValueError("Only ExtractElementType enum is accepted for element_to_extract")
return self
def with_elements_to_extract(self, elements_to_extract: List[ExtractElementType]):
"""
adds a list of pdf element types for extracting structured information.
:param elements_to_extract: List of ExtractElementType to be extracted
:type elements_to_extract: List[ExtractElementType]
:return: This Builder instance to add any additional parameters.
:rtype: ExtractPDFOptions.Builder
:raises ValueError: if elements_to_extract is None or empty list.
"""
if elements_to_extract and all(element in ExtractElementType for element in elements_to_extract):
self._init_elements_to_extract()
self._elements_to_extract.extend(elements_to_extract)
else:
raise ValueError("Only ExtractElementType enum List is accepted for elements_to_extract")
return self
def with_element_to_extract_renditions(self, element_to_extract_renditions: ExtractRenditionsElementType):
"""
adds a pdf element type for extracting rendition.
:param element_to_extract_renditions: ExtractRenditionsElementType whose renditions have to be extracted
:type element_to_extract_renditions: ExtractRenditionsElementType
:return: This Builder instance to add any additional parameters.
:rtype: ExtractPDFOptions.Builder
:raises ValueError: if element_to_extract_renditions is None.
"""
if element_to_extract_renditions and element_to_extract_renditions in ExtractRenditionsElementType:
self._init_elements_to_extract_renditions()
self._elements_to_extract_renditions.append(element_to_extract_renditions)
else:
raise ValueError("Only ExtractRenditionsElementType enum is accepted for element_to_extract_renditions")
return self
def with_elements_to_extract_renditions(self, elements_to_extract_renditions: List[ExtractRenditionsElementType]):
"""
adds a list of pdf element types for extracting rendition.
:param elements_to_extract_renditions: List of ExtractRenditionsElementType whose renditions have to be extracted
:type elements_to_extract_renditions: List[ExtractRenditionsElementType]
:return: This Builder instance to add any additional parameters.
:rtype: ExtractPDFOptions.Builder
:raises ValueError: if elements_to_extract is None or empty list.
"""
if elements_to_extract_renditions and all(
element in ExtractRenditionsElementType for element in elements_to_extract_renditions):
self._init_elements_to_extract_renditions()
self._elements_to_extract_renditions.extend(elements_to_extract_renditions)
else:
raise ValueError("Only ExtractRenditionsElementType enum List is accepted for elements_to_extract_renditions")
return self
def with_table_structure_format(self, table_structure: TableStructureType):
"""
adds the table structure format (currently csv only) for extracting structured information.
:param table_structure: TableStructureType to be extracted
:type table_structure: TableStructureType
:return: This Builder instance to add any additional parameters.
:rtype: ExtractPDFOptions.Builder
:raises ValueError: if table_structure is None.
"""
if table_structure and table_structure in TableStructureType:
self._table_output_format = table_structure
else:
raise ValueError("Only TableStructureType enum is accepted for table_structure_format")
return self
def with_get_char_info(self, get_char_info: bool):
"""
sets the Boolean specifying whether to add character level bounding boxes to output json
:param get_char_info: Set True to extract character level bounding boxes information
:type get_char_info: bool
:return: This Builder instance to add any additional parameters.
:rtype: ExtractPDFOptions.Builder
"""
self._get_char_info = get_char_info
return self
def with_include_styling_info(self, include_styling_info: bool):
"""
sets the Boolean specifying whether to add PDF Elements Styling Info to output json
:param include_styling_info: Set True to extract PDF Elements Styling Info
:type include_styling_info: bool
:return: This Builder instance to add any additional parameters.
:rtype: ExtractPDFOptions.Builder
"""
self._include_styling_info = include_styling_info
return self
def build(self):
return ExtractPDFOptions(self._elements_to_extract, self._elements_to_extract_renditions,
self._get_char_info,
self._table_output_format, self._include_styling_info)
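# Hedged usage sketch (not part of the SDK file): building an options object one
# element at a time with the singular builder methods defined above; the enum members
# used here are the ones shown in the class docstring.
if __name__ == "__main__":
    options = ExtractPDFOptions.builder() \
        .with_element_to_extract(ExtractElementType.TEXT) \
        .with_element_to_extract_renditions(ExtractRenditionsElementType.TABLES) \
        .with_table_structure_format(TableStructureType.CSV) \
        .with_get_char_info(True) \
        .build()
    print(options.elements_to_extract, options.table_output_format)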
| 2.171875
| 2
|
equipment/framework/Mail/SMTPMail.py
|
didacelgueta/equipment
| 1
|
12778540
|
<reponame>didacelgueta/equipment
import mail1
from equipment.framework.Log.AbstractLog import AbstractLog
from equipment.framework.Config.AbstractConfig import AbstractConfig
from equipment.framework.Mail.AbstractMail import AbstractMail
from typing import Union
from equipment.framework.Mail.Email.Email import Email
from equipment.framework.Mail.Email.EmailFactory import EmailFactory
class SMTPMail(AbstractMail):
def __init__(self, config: AbstractConfig, log: AbstractLog):
self.config = config
self.log = log
def send(self, email: Union[Email, EmailFactory]) -> bool:
if isinstance(email, EmailFactory):
email = email.make()
try:
mail1.send(
subject=email.subject,
text=email.text,
text_html=email.html,
sender=email.sender,
recipients=email.recipients,
cc=email.cc,
bcc=email.bcc,
attachments=email.attachments,
smtp_host=self.config.get('MAIL_SMTP', 'host'),
smtp_port=int(self.config.get('MAIL_SMTP', 'port')),
username=self.config.get('MAIL_SMTP', 'user'),
password=self.config.get('MAIL_SMTP', 'password')
)
return True
except Exception as e:
self.log.error(e, exc_info=True)
return False
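# Hedged usage sketch (the wiring below is assumed, not defined in this file): SMTPMail
# only needs an AbstractConfig exposing a MAIL_SMTP section (host, port, user, password),
# an AbstractLog, and an Email-like object carrying the attributes read in send() above.
#
#     mailer = SMTPMail(config, log)
#     ok = mailer.send(email)   # email has subject, text, html, sender, recipients,
#                               # cc, bcc and attachments; returns True on success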
| 2.5625
| 3
|
rorolite/utils.py
|
anandology/rorolite
| 30
|
12778541
|
<filename>rorolite/utils.py
import logging
from fabric import io
OutputLooper = io.OutputLooper
class RoroliteOutputLooper(OutputLooper):
"""Replacement to OutputLooper of Fabric that doesn't print prefix
in the output.
"""
def __init__(self, *args, **kwargs):
OutputLooper.__init__(self, *args, **kwargs)
self.prefix = ""
def hijack_output_loop():
"""Hijacks the fabric's output loop to supress the '[hostname] out:'
prefix from output.
"""
io.OutputLooper = RoroliteOutputLooper
def setup_logger(verbose=False):
if verbose:
level = logging.DEBUG
else:
level = logging.INFO
logging.basicConfig(format='[%(name)s] %(message)s', level=level)
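# Hedged usage sketch (not part of rorolite): enable verbose logging and strip the
# "[hostname] out:" prefix from fabric's output before running remote commands.
if __name__ == "__main__":
    setup_logger(verbose=True)
    hijack_output_loop()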
| 2.609375
| 3
|
house_robber_ii/solution.py
|
mahimadubey/leetcode-python
| 528
|
12778542
|
"""
Note: This is an extension of House Robber.
After robbing those houses on that street, the thief has found himself a new
place for his thievery so that he will not get too much attention. This time,
all houses at this place are arranged in a circle. That means the first house
is the neighbor of the last one. Meanwhile, the security system for these
houses remains the same as for those in the previous street.
Given a list of non-negative integers representing the amount of money of each
house, determine the maximum amount of money you can rob tonight without
alerting the police.
"""
class Solution(object):
def rob(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
n = len(nums)
if n == 0:
return 0
elif n == 1:
return nums[0]
return max(self.rob_aux(nums, 0), self.rob_aux(nums, 1))
def rob_aux(self, nums, left):
n = len(nums) - 1
t = [0 for i in range(n + 1)]
if n == 0:
return t[n]
t[1] = nums[left]
if n <= 1:
return t[n]
t[2] = max(nums[left: left + 2])
for i in range(3, n + 1):
t[i] = max(t[i - 2] + nums[left + i - 1], t[i - 1])
return t[n]
a1 = [1]
a2 = [4, 1, 6, 10, 5, 13, 2, 7]
s = Solution()
print(s.rob(a1))
print(s.rob(a2))
| 3.8125
| 4
|
base.py
|
justicedundas/research-python
| 0
|
12778543
|
import datetime
import time
from urllib.parse import urlencode
import requests
from pandas import read_csv
from geodataimport.compat import StringIO, binary_type, bytes_to_str
from geodataimport.utils import RemoteDataError, _init_session, _sanitize_dates
class _GeoData(object):
"""
Parameters
----------
symbols : {str, List[str]}
        String symbol or list of symbols
    start : string, int, date, datetime, Timestamp
        Starting date. Parses many different kinds of date
representations (e.g., 'JAN-01-2010', '1/1/10', 'Jan, 1, 1980')
end : string, int, date, datetime, Timestamp
Ending date
retry_count : int, default 3
Number of times to retry query request.
pause : float, default 0.1
Time, in seconds, of the pause between retries.
session : Session, default None
requests.sessions.Session instance to be used
freq : {str, None}
Frequency to use in select readers
"""
_chunk_size = 1024 * 1024
_format = "string"
def __init__(
self,
symbols,
start=None,
end=None,
retry_count=5,
pause=0.1,
timeout=30,
session=None,
freq=None,
asynchronous=False,
**kwargs,
):
self.symbols = symbols
start, end = _sanitize_dates(start or self.default_start_date, end)
self.start = start
self.end = end
if not isinstance(retry_count, int) or retry_count < 0:
raise ValueError("'retry_count' must be integer larger than 0")
self.retry_count = retry_count
self.pause = pause
self.timeout = timeout
self.pause_multiplier = 1
self.session = _init_session(
session, retry=retry_count, asynchronous=asynchronous
)
self.freq = freq
def close(self):
"""Close network session"""
self.session.close()
@property
def default_start_date(self):
"""Default start date for reader. Defaults to 5 years before current date"""
today = datetime.date.today()
return today - datetime.timedelta(days=365 * 5)
@property
def url(self):
"""API URL"""
# must be overridden in subclass
raise NotImplementedError
@property
def params(self):
"""Parameters to use in API calls"""
return None
def _read_one_data(self, url, params):
""" read one data from specified URL """
if self._format == "string":
out = self._read_url_as_StringIO(url, params=params)
elif self._format == "json":
out = self._get_response(url, params=params).json()
else:
raise NotImplementedError(self._format)
return self._read_lines(out)
def _read_url_as_StringIO(self, url, params=None):
"""
Open url (and retry)
"""
response = self._get_response(url, params=params)
text = self._sanitize_response(response)
out = StringIO()
if len(text) == 0:
service = self.__class__.__name__
raise IOError(
"{} request returned no data; check URL for invalid "
"inputs: {}".format(service, self.url)
)
if isinstance(text, binary_type):
out.write(bytes_to_str(text))
else:
out.write(text)
out.seek(0)
return out
@staticmethod
def _sanitize_response(response):
"""
Hook to allow subclasses to clean up response data
"""
return response.content
def _get_response(self, url, params=None, headers=None):
""" send raw HTTP request to get requests.Response from the specified url
Parameters
----------
url : str
target URL
params : dict or None
parameters passed to the URL
"""
# initial attempt + retry
pause = self.pause
last_response_text = ""
for _ in range(self.retry_count + 1):
response = self.session.get(
url, params=params, headers=headers, timeout=self.timeout
)
if response.status_code == requests.codes["ok"]:
return response
if response.encoding:
last_response_text = response.text.encode(response.encoding)
time.sleep(pause)
# Increase time between subsequent requests, per subclass.
pause *= self.pause_multiplier
# Get a new breadcrumb if necessary, in case ours is invalidated
if isinstance(params, list) and "crumb" in params:
params["crumb"] = self._get_crumb(self.retry_count)
# If our output error function returns True, exit the loop.
if self._output_error(response):
break
if params is not None and len(params) > 0:
url = url + "?" + urlencode(params)
msg = "Unable to read URL: {0}".format(url)
if last_response_text:
msg += "\nResponse Text:\n{0}".format(last_response_text)
raise RemoteDataError(msg)
def _output_error(self, out):
"""If necessary, a service can implement an interpreter for any non-200
HTTP responses.
Parameters
----------
out: bytes
The raw output from an HTTP request
Returns
-------
boolean
"""
return False
def _get_crumb(self, *args):
""" To be implemented by subclass """
raise NotImplementedError("Subclass has not implemented method.")
def _read_lines(self, out):
rs = read_csv(out, index_col=0, parse_dates=True, na_values=("-", "null"))[::-1]
# Needed to remove blank space character in header names
rs.columns = list(map(lambda x: x.strip(), rs.columns.values.tolist()))
# Yahoo! Finance sometimes does this awesome thing where they
# return 2 rows for the most recent business day
if len(rs) > 2 and rs.index[-1] == rs.index[-2]: # pragma: no cover
rs = rs[:-1]
# Get rid of unicode characters in index name.
try:
rs.index.name = rs.index.name.decode("unicode_escape").encode(
"ascii", "ignore"
)
except AttributeError:
# Python 3 string has no decode method.
rs.index.name = rs.index.name.encode("ascii", "ignore").decode()
return rs
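# Hedged sketch (not part of the original module): a minimal subclass illustrating what
# a concrete reader is expected to provide; the endpoint and the read() helper below are
# hypothetical.
class _ExampleCSVReader(_GeoData):
    @property
    def url(self):
        return "https://example.com/api/data.csv"  # hypothetical endpoint

    @property
    def params(self):
        # Parameter names are illustrative only
        return {"symbol": self.symbols, "start": self.start, "end": self.end}

    def read(self):
        return self._read_one_data(self.url, params=self.params)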
| 3.078125
| 3
|
add_user.py
|
phaughney22/insightCloudSec_Scripts
| 0
|
12778544
|
import requests
import params
base_url = params.url
url = base_url+"v2/public/user/create"
user_email_domain = params.email_domain
# User Inputs
first_name = input("Type user's first name: ")
last_name = input("Type user's last name: ")
# Create user creds from inputs
full_name = first_name + " " + last_name
user_name = first_name + "." + last_name + user_email_domain
email_address = first_name + "." + last_name + user_email_domain
payload = {
"authentication_type": "saml",
"access_level": "BASIC_USER",
"username": user_name,
"authentication_server_id": 1,
"name": full_name,
"email": email_address
}
headers = {
"Accept": "application/json",
"content-type": "application/json",
"accept-encoding": "gzip",
"Api-Key": params.api_key
}
response = requests.request("POST", url, json=payload, headers=headers)
print(response.text)
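# Hedged sketch of the params module this script expects (values are placeholders):
#
#     # params.py
#     url = "https://example.insightcloudsec.com/"   # base API URL, must end with "/"
#     email_domain = "@example.com"
#     api_key = "<your-api-key>"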
| 3.015625
| 3
|
ckanext/datapusher/logic/auth.py
|
dassolkim/default-ckan
| 1
|
12778545
|
<reponame>dassolkim/default-ckan<filename>ckanext/datapusher/logic/auth.py
# encoding: utf-8
import ckanext.datastore.logic.auth as auth
import ckan.plugins as p
def datapusher_submit(context, data_dict):
return auth.datastore_auth(context, data_dict)
def datapusher_status(context, data_dict):
return auth.datastore_auth(context, data_dict)
def datapusher_hook(context, data_dict):
return {'success': True}
def resource_upload(context, data_dict):
# return p.toolkit.check_access('resource_show', context, data_dict)
return {'success': True}
| 1.507813
| 2
|
spyder_terminal/server/rest/__init__.py
|
mrclary/spyder-terminal
| 169
|
12778546
|
# -*- coding: utf-8 -*-
"""
rest module.
============
Provides:
1. Asynchronous execution of JSON services
How to use the documentation
----------------------------
Documentation is available in one form: docstrings provided
with the code
Copyright (c) 2016, <NAME>.
MIT, see LICENSE for more details.
"""
from . import term_rest
term_rest
| 1.625
| 2
|
Twitch.py
|
borgej/BrexBot
| 0
|
12778547
|
__author__ = "<NAME>, <NAME>"
__copyright__ = "Copyright 2019, Brexit boy and SaLmon king"
__credits__ = ["<NAME>, <NAME>"]
__license__ = "Apache License"
__version__ = "2.0"
__maintainer__ = "<NAME>, <NAME>"
__status__ = "Development"
import logging
from twitch.api import v3
| 1.039063
| 1
|
Ch02/kNN.py
|
uncarman2017/MLBeginner
| 0
|
12778548
|
'''
Created on Sep 16, 2010
kNN: k Nearest Neighbors
Input: inX: vector to compare to existing dataset (1xN)
dataSet: size m data set of known vectors (NxM)
labels: data set labels (1xM vector)
k: number of neighbors to use for comparison (should be an odd number)
Output: the most popular class label
@author: pbharrin
'''
'''
k-Nearest Neighbors algorithm example
'''
import matplotlib.pyplot as plt
import operator
from numpy import *
from numpy.ma.core import *
'''
Use the k-nearest neighbors algorithm to improve the matching results of a dating site. The algorithm steps are:
(1) Collect data: a text file is provided.
(2) Prepare data: parse the text file with Python.
(3) Analyze data: draw a 2D scatter plot with Matplotlib.
(4) Train the algorithm: this step does not apply to k-NN.
(5) Test the algorithm: use part of the data provided by Helen as test samples. Test samples differ from non-test samples in that they are already classified; if the predicted class differs from the actual class, count it as an error.
(6) Use the algorithm: build a simple command-line program so that Helen can enter some feature data to judge whether the other person is a type she likes.
'''
def dating_class_test():
    hoRatio = 0.50  # hold out 50% of the data as test samples
    # Load the data from the given file: frequent flyer miles earned per year, percentage of time spent playing video games, and litres of ice cream consumed per week
datingDataMat, datingLabels = file2matrix('datingTestSet2.txt')
normMat, ranges, minVals = autoNorm(datingDataMat)
# print(normMat[0:20])
# print(ranges)
# print(minVals)
# exit()
    m = normMat.shape[0]  # Number of rows in the matrix; shape returns a (rows, columns) tuple
    numTestVecs = int(m * hoRatio)  # Number of test rows
errorCount = 0.0
for i in range(numTestVecs):
        # Classify each test row against the non-test rows and their class labels
        classifierResult = classify0(normMat[i, :], normMat[numTestVecs:m, :], datingLabels[numTestVecs:m], 3)  # Predicted class
        print("the classifier came back with: %d, the real answer is: %d" % (classifierResult, datingLabels[i]))
        if (classifierResult != datingLabels[i]): errorCount += 1.0  # Count an error when the predicted class differs from the actual class
print("the total error rate is: %f, error count is %d" % (errorCount / float(numTestVecs), errorCount))
# Classifier: returns the predicted class label for inX
def classify0(inX, dataSet, labels, k):
    dataSetSize = dataSet.shape[0]  # Number of rows
    diffMat = tile(inX, (dataSetSize, 1)) - dataSet  # Difference between the test sample and every training sample
    sqDiffMat = diffMat ** 2  # Squared differences
    sqDistances = sqDiffMat.sum(axis=1)  # Sum over each row
distances = sqDistances ** 0.5
sortedDistIndicies = distances.argsort()
classCount = {}
for i in range(k):
voteIlabel = labels[sortedDistIndicies[i]]
classCount[voteIlabel] = classCount.get(voteIlabel, 0) + 1
sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)
return sortedClassCount[0][0]
def createDataSet():
group = array([[1.0, 1.1], [1.0, 1.0], [0, 0], [0, 0.1]])
labels = ['A', 'A', 'B', 'B']
return group, labels
# Convert a text file into a NumPy matrix
# Input: path to the text file
# Output: a NumPy matrix of training samples and the class label vector
def file2matrix(filename):
fr = open(filename)
    numberOfLines = len(fr.readlines())  # Number of lines in the file
    returnMat = zeros((numberOfLines, 3))  # Create the NumPy matrix initialised to zeros
    classLabelVector = []  # Class label vector holding the last column of each text line
fr = open(filename)
index = 0
for line in fr.readlines():
        line = line.strip()  # Strip the trailing newline
listFromLine = line.split('\t')
        returnMat[index, :] = listFromLine[0:3]  # Fill this matrix row with the first three fields
        classLabelVector.append(int(listFromLine[-1]))  # Store the last field as the class label
index += 1
return returnMat, classLabelVector
# Normalise feature values, e.g. mapping flight miles into the [0, 1] range
# newValue = (oldValue - min) / (max - min)
# dataSet: NumPy matrix
# Returns: the normalised NumPy matrix, the (max - min) range row, and the minimum-value row
def autoNorm(dataSet):
minVals = dataSet.min(0)
maxVals = dataSet.max(0)
    ranges = maxVals - minVals  # Range between max and min for each feature
normDataSet = zeros(shape(dataSet))
m = dataSet.shape[0]
    normDataSet = dataSet - tile(minVals, (m, 1))  # Subtract the minimum-value row from every row
    normDataSet = normDataSet / tile(ranges, (m, 1))  # element-wise divide: each row divided by the (max - min) range row
return normDataSet, ranges, minVals
# Read the feature values as a NumPy matrix and display them as a scatter plot
def test1():
datingDataMat, datingLabels = file2matrix('datingTestSet2.txt')
print(datingDataMat[0:20])
print(datingLabels[0:20])
fig = plt.figure()
ax = fig.add_subplot(111)
    # ax.scatter(datingDataMat[:, 1], datingDataMat[:, 2], 15.0 * array(datingLabels), 15.0 * array(datingLabels))  # percentage of time playing video games vs litres of ice cream per week
# plt.show()
    ax.scatter(datingDataMat[:, 0], datingDataMat[:, 1], 15.0 * array(datingLabels), 15.0 * array(datingLabels))  # frequent flyer miles vs percentage of time playing video games
plt.show()
def test2():
datingDataMat, datingLabels = file2matrix('datingTestSet2.txt')
normMat, ranges, minVals = autoNorm(datingDataMat)
print(normMat)
print(minVals)
print(ranges)
print(normMat.shape)
# print("========================================================")
# test1()
# print("========================================================")
# test2()
dating_class_test()
| 3.546875
| 4
|
CQS/util/verification.py
|
avijitshee/cartan-quantum-synthesizer
| 5
|
12778549
|
<reponame>avijitshee/cartan-quantum-synthesizer<gh_stars>1-10
#import sys
#sys.path.append('c:\\Users\\Thoma\\OneDrive\\Documents\\2021_ORNL\\CartanCodeGit\\cartan-quantum-synthesizer')
# -*- coding: utf-8 -*-
__docformat__ = 'google'
"""
A collection of functions useful for exact diagonalization and converting KHK decomposition to a matrix
"""
import numpy as np
from numpy import kron
from scipy.linalg import expm, norm
import CQS.util.IO as IO
#The Pauli Matricies in matrix form
X = np.array([[0,1],[1,0]])
#Pauli X
Y = np.array([[0,-1j],[1j,0]])
#Pauli Y
Z = np.array([[1,0],[0,-1]])
#PauliZ
I = np.array([[1,0],[0,1]])
#2x2 idenity
paulis = [I,X,Y,Z]
# Allows for indexing the Pauli Arrays (Converting from tuple form (0,1,2,3) to string form IXYZ)
def Nident (N):
""" Generates an N qubit Identity Matrix """
return np.diag(np.ones(2**N))
def PauliExpUnitary(N, co, PauliTuple):
"""
Generates the Unitary Matrix for a Pauli Exponential
Uses e^{i.co.Pauli} = I*cos(a) + i*sin(a)*Pauli
Args:
N (int): Number of qubits
co (float): The coefficient of the Pauli Matrix
PauliTuple (Tuple): (PauliString) to exp
Returns:
The result e<sup>i•co•PauliTuple</sup> = I•cos(co) + i•sin(co)•PauliTuple
"""
II = Nident(N)
U = paulis[PauliTuple[0]]
for pauli in PauliTuple[1:]:
U = kron(U,paulis[pauli]) #Generates the PauliTuple Matrix Element
return np.cos(co)*II + 1j*np.sin(co)*U
def exactU(HCos, HTups, time):
"""
Computes the exact matrix exponential for time evolution at the time t. Takes as an input the real component of the exponential.
Args:
HCos (List of complex numbers):
        HTups (List of (PauliStrings)):
time (float - time evolution final time):
"""
H = np.diag(np.zeros(2**len(HTups[0])))
for (co, term) in zip(HCos, HTups):
H = H + IO.tuplesToMatrix(co, term)
return expm(1j * time * H)
def Trotter(ham, time, N, steps):
"""
Prepares U_t, the Trotterized input U
Args:
ham (List of Tuples): Hamiltonian formatted as (co, (PauliString))
time (float): final time to evolve to
N (int): number of qubits
steps (int): Number of trotter steps to take
Returns:
The U<sub>trotter</sub>(t) that approximates U<sub>exact</sub>(t)
"""
timeStep = time/steps
U = Nident(N)
for (co, pauliTuple) in ham:
U = U @ PauliExpUnitary(N, 1*co*timeStep, pauliTuple)
finalU = Nident(N)
for i in range(steps):
finalU = finalU @ U
return finalU
def KHK(kCoefs, hCoefs, k, h):
"""
    Defines the Unitary for the KHK<sup>†</sup> decomposition
Specifically, performs ℿ<sub>i</sub> e<sup>i•k[l]•kCoefs[l]</sup> ℿ<sub>j</sub> e<sup>i•h[j]•hCoefs[j]</sup> ℿ<sub>l</sub> e<sup>i•k[(lenK - l)]•kCoefs[(lenK - l)]</sup>
Multiply by t before passing the coefficients for h. Do not multiply h by i, that is automatic. The coefficients should be real for k, imaginary for h
Args:
kCoefs (List): A list of (real) coefficients for k
hCoefs (List): The list of (imaginary) coefficients for the elements in h.
k (List of Tuples): The list of (PauliStrings)
h (List of Tuples): List of (PauliStrings) for h (in the same indexing)
"""
N = len(h[0])
KHK = Nident(N)
#First loop of K terms:
for (term, co) in zip(k, kCoefs):
KHK = KHK @ PauliExpUnitary(N, co, term)
#H terms
for (term, co) in zip(h, hCoefs):
KHK = KHK @ PauliExpUnitary(N, co, term)
for (term, co) in zip(k[::-1], kCoefs[::-1]):
KHK = KHK @ PauliExpUnitary(N, -1*co, term)
return KHK
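# Hedged usage sketch (not part of the module): a two-qubit Hamiltonian
# H = 0.5*ZZ + 0.2*XI written in the (coefficient, PauliString tuple) format used
# above, Trotterized over 100 steps and compared against the exact evolution.
if __name__ == "__main__":
    ham = [(0.5, (3, 3)), (0.2, (1, 0))]
    U_trot = Trotter(ham, time=1.0, N=2, steps=100)
    U_exact = exactU([0.5, 0.2], [(3, 3), (1, 0)], 1.0)
    print(norm(U_trot - U_exact))  # expected to be small for enough Trotter steps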
| 2.84375
| 3
|
newrelic.py
|
devopstguru/aws-infra
| 3
|
12778550
|
#!/usr/bin/python
from os import environ
import requests
import click
import databag
import base64
admin_key = environ.get("NEWRELIC_API_KEY")
def create_synthetics_monitor(monitor_name, monitor_url, monitor_type="BROWSER", monitor_frequency=15):
"""
Source: https://docs.newrelic.com/docs/apis/synthetics-rest-api/monitor-examples/manage-synthetics-monitors-via-rest-api
Command:
curl -v \
-X POST -H 'X-Api-Key:{Admin API key}' \
-H 'Content-Type: application/json' https://synthetics.newrelic.com/synthetics/api/v1/monitors \
-d '{ "name" : "Monitor Name", "frequency" : 15, "uri" : "Monitor URL", "locations" : [ "AWS_US_WEST_2" ], "type" : "SIMPLE"}'
"""
synthetics_endpoint = r'https://synthetics.newrelic.com/synthetics/api/v1/monitors'
headers = {
'X-Api-Key': admin_key,
'Content-Type': 'application/json'
}
payload = build_monitor(monitor_name, monitor_type, monitor_url=monitor_url, frequency=monitor_frequency)
r = requests.post(synthetics_endpoint, json=payload, headers=headers)
if r.status_code != 201:
print "Status Code is {code}".format(code=r.status_code)
print r.text
return False
print "\tSuccessfully added {monitor} -> {monitor_url}".format(monitor=monitor_name, monitor_url=monitor_url)
#return True
if monitor_type == "SCRIPT_BROWSER":
# Get the monitor ID
# Get the monitor location URL (which has the ID at the end of it)
location_url=r.headers['Location']
monitor_id = location_url.replace(synthetics_endpoint+"/", "")
add_script_to_synthetics_monitor(monitor_id, monitor_url)
def get_synthetics_monitors():
"""
curl -v
-H 'X-Api-Key:{Admin_User_Key}' https://synthetics.newrelic.com/synthetics/api/v1/monitors
"""
synthetics_endpoint = r"https://synthetics.newrelic.com/synthetics/api/v1/monitors"
headers = {
'X-Api-Key': admin_key,
}
r = requests.get(synthetics_endpoint, headers=headers)
if r.status_code != 200:
print "Failed to get list"
print "Status code is {code}".format(code=r.status_code)
print r.text
else:
return r.json()["monitors"]
def remove_synthetics_monitor(monitor_id):
"""
curl -v
-H 'X-Api-Key:{Admin_User_Key}'
-X DELETE https://synthetics.newrelic.com/synthetics/api/v1/monitors/{id}
"""
synthetics_endpoint = r"https://synthetics.newrelic.com/synthetics/api/v1/monitors/{id}".format(id=monitor_id)
headers = {
'X-Api-Key': admin_key,
}
r = requests.delete(synthetics_endpoint, headers=headers)
if r.status_code != 204:
print "Failed to delete monitor with id {id}".format(id=monitor_id)
print "Status code is {code}".format(code=r.status_code)
print r.text
return False
else:
print "\tSuccessfully removed monitor with id {id}".format(id=monitor_id)
return True
def update_existing_synthetics_monitor(monitor_id, new_monitor):
synthetics_endpoint=r"https://synthetics.newrelic.com/synthetics/api/v1/monitors/{id}".format(id=monitor_id)
headers = {
'X-Api-Key': admin_key,
'Content-Type': 'application/json'
}
r = requests.put(synthetics_endpoint, json=new_monitor, headers=headers)
if r.status_code != 204:
print "Failed to update monitor with id {id}".format(id=monitor_id)
print "Status code is {code}".format(code=r.status_code)
print r.text
return False
else:
print "\tSuccessfully updated monitor with id {id}".format(id=monitor_id)
return True
def add_script_to_synthetics_monitor(monitor_id, monitor_url):
synthetics_endpoint=r"https://synthetics.newrelic.com/synthetics/api/v1/monitors/{id}/script".format(id=monitor_id)
# Generate the base64 string for the basic HTTP authentication
decoded_auth_string = "{user}:{password}".format(user=environ.get("NEWRELIC_QUERY_USER"), password=environ.get("NEWRELIC_QUERY_PASSWORD"))
encoded_auth_string = base64.b64encode(decoded_auth_string)
# Here's a little Selenium/Node.JS to pass through the HTTP auth and query the site
scripted_browser="""
var assert = require('assert');
$browser.addHeader("Authorization", "Basic " + "%s");
$browser.get("%s").then(function(){
});
"""
scripted_browser = scripted_browser % (encoded_auth_string, monitor_url)
scripted_browser_encoded=base64.b64encode(scripted_browser)
payload = {
"scriptText": scripted_browser_encoded
}
headers = {
'X-Api-Key': admin_key,
'Content-Type': 'application/json'
}
r = requests.put(synthetics_endpoint, json=payload, headers=headers)
if r.status_code != 204:
print "Failed to add script to monitor with id {id}".format(id=monitor_id)
print "Status code is {code}".format(code=r.status_code)
print r.text
return False
else:
print "\tSuccessfully added script to monitor with id {id}".format(id=monitor_id)
return True
def build_monitor(monitor_name,
monitor_type="SIMPLE",
monitor_url=None,
frequency=1,
locations=[u'AWS_AP_NORTHEAST_1', u'AWS_AP_SOUTHEAST_1', u'AWS_AP_SOUTHEAST_2', u'AWS_EU_CENTRAL_1', u'AWS_EU_WEST_1', u'AWS_SA_EAST_1', u'AWS_US_EAST_1', u'AWS_US_WEST_1', u'AWS_US_WEST_2', u'LINODE_EU_WEST_1', u'LINODE_US_CENTRAL_1', u'LINODE_US_EAST_1', u'LINODE_US_SOUTH_1', u'LINODE_US_WEST_1'],
status="ENABLED",
validation_string="",
verify_ssl=""):
if verify_ssl == "":
verify_ssl = "true" if "https" in monitor_url else "false"
monitor = {
"name": monitor_name,
"type": monitor_type,
"frequency": frequency,
"locations": locations,
"status": status,
"options": {
"validationString": validation_string,
"verifySSL": verify_ssl
}
}
if monitor_url != None:
monitor["uri"] = monitor_url
return monitor
def has_synthetics_monitor_script(monitor_id):
"""
curl -v
-H 'X-Api-Key: {Admin_User_Key}'
https://synthetics.newrelic.com/synthetics/api/v1/monitors/{id}/script
"""
synthetics_endpoint=r"https://synthetics.newrelic.com/synthetics/api/v1/monitors/{id}/script".format(id=monitor_id)
headers = {
'X-Api-Key': admin_key
}
r = requests.get(synthetics_endpoint, headers=headers)
if int(r.status_code) not in [200, 403, 404]:
print "Invalid status code recieved".format(id=monitor_id)
print "Status code is {code}".format(code=r.status_code)
print r.text
return False
elif r.status_code == 200:
print "\tMonitor has a script associated with it already"
return True
elif r.status_code == 403:
print "The monitor is not the correct type"
return False
elif r.status_code == 404:
print "\tThe monitor has no script associated with it."
return False
@click.command()
@click.option("--operation", help="Add/list/remove/update")
@click.option("--add", 'operation', flag_value='add')
@click.option("--list", 'operation', flag_value='list')
@click.option("--remove", 'operation', flag_value='remove')
@click.option("--update", 'operation', flag_value='update')
@click.option("--add-script", 'operation', flag_value='add-script')
@click.option("--monitor-name", help="The human friendly name of the synthetics monitor")
@click.option("--monitor-url", help="The URL that you would like to monitor")
@click.option("--monitor-id", help="Monitor id is needed for remove/update")
@click.option("--monitor-type", default="BROWSER", help="The type of monitor", type=click.Choice(["SIMPLE", "BROWSER", "SCRIPT_API", "SCRIPT_BROWSER"]))
def synthetics_router(operation, monitor_name, monitor_url, monitor_id, monitor_type):
if operation == "add":
create_synthetics_monitor(monitor_name, monitor_url)
elif operation == "list":
get_synthetics_monitors()
elif operation == "remove":
remove_synthetics_monitor(monitor_id)
elif operation == "update":
new_monitor = build_monitor(monitor_name, monitor_type, monitor_url)
update_existing_synthetics_monitor(monitor_id, new_monitor)
elif operation == "add-script":
add_script_to_synthetics_monitor(monitor_id, monitor_url)
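# Hedged usage sketch (values below are placeholders); NEWRELIC_API_KEY must be set
# in the environment before running:
#   python newrelic.py --add --monitor-name "My site" --monitor-url "https://example.com"
#   python newrelic.py --list
#   python newrelic.py --remove --monitor-id <monitor-id>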
if __name__ == '__main__':
synthetics_router()
| 2.203125
| 2
|