max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
DynamicModel/io.py | horribleheffalump/AUVResearch | 4 | 12771051 | <reponame>horribleheffalump/AUVResearch<gh_stars>1-10
"""Save/load operations
"""
import numpy as np
import pandas as pd
from glob import glob
from sys import maxsize, float_info
def save_path(filename, x):
    """Write the trajectory array ``x`` to ``filename`` as plain float text."""
    np.savetxt(filename, np.asarray(x), fmt='%f')
def save_many(filename_template, x, start_from=0, total=None):
    """Save each slice ``x[m]`` to its own text file.

    The ``'*'`` placeholder in ``filename_template`` is replaced by the
    zero-padded index ``start_from + m``.

    :param filename_template: path containing a ``'*'`` placeholder
    :param x: array whose first axis enumerates the paths to save
    :param start_from: index offset used in the generated file names
    :param total: total number of paths, used only to choose the padding
        width; defaults to ``x.shape[0]``
    """
    if total is None:
        total = x.shape[0]
    # Pad to the number of digits of the largest expected index (total - 1).
    # The previous int(np.log10(total)) under-padded for totals that are not
    # powers of ten (e.g. total=50 gave width 1), breaking lexicographic order.
    width = len(str(max(total - 1, 0)))
    for m in range(x.shape[0]):
        filename = filename_template.replace('*', str(start_from + m).zfill(width))
        np.savetxt(filename, x[m], fmt='%f')
def load_path(file_name_path, column_names, estimate_errors_files_dict=None):
    """Load a path file into a DataFrame, optionally joined with estimates.

    :param file_name_path: text file readable by ``np.loadtxt``
    :param column_names: column labels for the loaded data
    :param estimate_errors_files_dict: optional mapping of column-name suffix
        to an error file; for each entry, columns ``<name><suffix>`` holding
        ``path - error`` are appended
    :return: pandas DataFrame
    """
    path_data = np.loadtxt(file_name_path)
    if estimate_errors_files_dict is None:
        return pd.DataFrame(path_data, columns=column_names)
    frames = [pd.DataFrame(path_data, columns=column_names)]
    for contents, file_name in estimate_errors_files_dict.items():
        error_data = np.loadtxt(file_name)
        # The estimate is the path minus the stored estimation error.
        estimate = pd.DataFrame(
            path_data - error_data,
            columns=[f"{n}{contents}" for n in column_names],
        )
        frames.append(estimate)
    return pd.concat(frames, axis=1, sort=False)
def load_stats(file_template, diverged_limit=float_info.max, max_paths=maxsize, silent=False):
    """Load an ensemble of path files matching ``file_template``.

    The ``'mean'`` and ``'std'`` companion files (written by
    ``calculate_stats``) are skipped.  Paths whose maximum value exceeds
    ``diverged_limit`` are printed and excluded from the ensemble.

    :param file_template: glob pattern containing a ``'*'`` placeholder
    :param diverged_limit: exclude paths containing values above this limit
    :param max_paths: stop after accepting this many paths
    :param silent: suppress progress output
    :return: array of shape (n_paths, n_steps, n_channels); an empty
        (0, 0, 0) array when no files match
    """
    ensemble = None
    m = 0  # number of accepted (non-diverged) paths
    files = glob(file_template)
    # Do not treat previously written aggregate files as ensemble members.
    for aggregate in (file_template.replace('*', 'mean'), file_template.replace('*', 'std')):
        try:
            files.remove(aggregate)
        except ValueError:
            pass
    for f in files:
        data = np.loadtxt(f)
        if ensemble is None:
            # Allocate once, based on the shape of the first file.
            ensemble = np.zeros((len(files), data.shape[0], data.shape[1]))
        # Write into slot m; if this path turns out to be diverged, the slot
        # is simply overwritten by the next file.
        ensemble[m, :, :] = data
        if np.max(data) > diverged_limit:
            print(f)
        else:
            m += 1
            if m % 1000 == 0 and not silent:
                print(f'estimate done {m} of {len(files)} {file_template}')
        if m == max_paths:
            break
    if ensemble is None:
        # No files matched: previously this crashed with a TypeError on
        # slicing None; return an empty ensemble instead.
        return np.zeros((0, 0, 0))
    return ensemble[:m, :, :]
def calculate_stats(file_template, diverged_limit=float_info.max, max_paths=maxsize, silent=False, save=False):
    """Compute the per-timestep mean and standard deviation of an ensemble.

    :param file_template: glob pattern with ``'*'`` placeholder, forwarded
        to ``load_stats``
    :param diverged_limit: forwarded to ``load_stats``
    :param max_paths: forwarded to ``load_stats``
    :param silent: forwarded to ``load_stats``
    :param save: when True, write the results next to the data as the
        ``'mean'`` and ``'std'`` files of the template
    :return: tuple (mean, std) of arrays averaged over the ensemble axis
    """
    ensemble = load_stats(file_template, diverged_limit, max_paths, silent)
    mean = np.mean(ensemble, axis=0)
    std = np.std(ensemble, axis=0)
    if save:
        save_path(file_template.replace('*', 'mean'), mean)
        save_path(file_template.replace('*', 'std'), std)
    return mean, std
| 2.125 | 2 |
ee/api/chalicelib/blueprints/bp_saml.py | champkeh/openreplay | 1 | 12771052 | from chalice import Blueprint
from chalicelib import _overrides
from chalicelib.utils.SAML2_helper import prepare_request, init_saml_auth
app = Blueprint(__name__)
_overrides.chalice_app(app)
from chalicelib.utils.helper import environ
from onelogin.saml2.auth import OneLogin_Saml2_Logout_Request
from onelogin.saml2.utils import OneLogin_Saml2_Utils
from chalice import Response
from chalicelib.core import users, tenants
@app.route("/saml2", methods=['GET'], authorizer=None)
def start_sso():
app.current_request.path = ''
req = prepare_request(request=app.current_request)
auth = init_saml_auth(req)
sso_built_url = auth.login()
return Response(
# status_code=301,
status_code=307,
body='',
headers={'Location': sso_built_url, 'Content-Type': 'text/plain'})
@app.route('/saml2/acs', methods=['POST'], content_types=['application/x-www-form-urlencoded'], authorizer=None)
def process_sso_assertion():
    """Assertion Consumer Service: validate the IdP response and sign the user in.

    Creates or migrates the user record when needed (based on the tenantKey
    attribute in the assertion) and returns the app's SSO authentication
    payload, or a redirect/error response on failure.
    """
    req = prepare_request(request=app.current_request)
    session = req["cookie"]["session"]
    request = req['request']
    auth = init_saml_auth(req)
    # Correlate the response with the AuthnRequest we issued, if any.
    request_id = None
    if 'AuthNRequestID' in session:
        request_id = session['AuthNRequestID']
    auth.process_response(request_id=request_id)
    errors = auth.get_errors()
    user_data = {}
    if len(errors) == 0:
        if 'AuthNRequestID' in session:
            del session['AuthNRequestID']
        user_data = auth.get_attributes()
        # session['samlUserdata'] = user_data
        # session['samlNameId'] = auth.get_nameid()
        # session['samlNameIdFormat'] = auth.get_nameid_format()
        # session['samlNameIdNameQualifier'] = auth.get_nameid_nq()
        # session['samlNameIdSPNameQualifier'] = auth.get_nameid_spnq()
        # session['samlSessionIndex'] = auth.get_session_index()
        # session['samlSessionExpiration'] = auth.get_session_expiration()
        # print('>>>>')
        # print(session)
        # Only follow RelayState if it points somewhere other than this URL
        # (open-redirect considerations are delegated to auth.redirect_to).
        self_url = OneLogin_Saml2_Utils.get_self_url(req)
        if 'RelayState' in request.form and self_url != request.form['RelayState']:
            print("====>redirect")
            return Response(
                status_code=307,
                body='',
                headers={'Location': auth.redirect_to(request.form['RelayState']), 'Content-Type': 'text/plain'})
    elif auth.get_settings().is_debug_active():
        error_reason = auth.get_last_error_reason()
        return {"errors": [error_reason]}
    email = auth.get_nameid()
    existing = users.get_by_email_only(auth.get_nameid())
    # internalId is an optional multi-valued SAML attribute; take the first value.
    internal_id = next(iter(user_data.get("internalId", [])), None)
    if len(existing) == 0 or existing[0].get("origin") != 'saml':
        # Unknown user, or a user that has not been linked to SAML yet.
        tenant_key = user_data.get("tenantKey", [])
        if len(tenant_key) == 0:
            print("tenantKey not present in assertion")
            # NOTE(review): request.form['RelayState'] is accessed here without
            # the 'RelayState' in request.form guard used above — verify it is
            # always present on this path.
            return Response(
                status_code=307,
                body={"errors": ["tenantKey not present in assertion"]},
                headers={'Location': auth.redirect_to(request.form['RelayState']), 'Content-Type': 'text/plain'})
        else:
            t = tenants.get_by_tenant_key(tenant_key[0])
            if t is None:
                return Response(
                    status_code=307,
                    body={"errors": ["Unknown tenantKey"]},
                    headers={'Location': auth.redirect_to(request.form['RelayState']), 'Content-Type': 'text/plain'})
            if len(existing) == 0:
                print("== new user ==")
                users.create_sso_user(tenant_id=t['tenantId'], email=email, admin=True, origin='saml',
                                      name=" ".join(user_data.get("firstName", []) + user_data.get("lastName", [])),
                                      internal_id=internal_id)
            else:
                existing = existing[0]
                if existing.get("origin") != 'saml':
                    print("== migrating user to SAML ==")
                    users.update(tenant_id=t['tenantId'], user_id=existing["id"],
                                 changes={"origin": 'saml', "internal_id": internal_id})
    return users.authenticate_sso(email=email, internal_id=internal_id, exp=auth.get_session_expiration())
@app.route('/saml2/slo', methods=['GET'])
def process_slo_request(context):
    """SP-initiated Single Log-Out: invalidate the app JWT and redirect to the IdP.

    Pulls the SAML name-id/session details stored in the cookie session (if
    present) so the IdP can match the logout request to its own session.
    """
    req = prepare_request(request=app.current_request)
    session = req["cookie"]["session"]
    # NOTE(review): `request` is unused in this handler.
    request = req['request']
    auth = init_saml_auth(req)
    name_id = session_index = name_id_format = name_id_nq = name_id_spnq = None
    if 'samlNameId' in session:
        name_id = session['samlNameId']
    if 'samlSessionIndex' in session:
        session_index = session['samlSessionIndex']
    if 'samlNameIdFormat' in session:
        name_id_format = session['samlNameIdFormat']
    if 'samlNameIdNameQualifier' in session:
        name_id_nq = session['samlNameIdNameQualifier']
    if 'samlNameIdSPNameQualifier' in session:
        name_id_spnq = session['samlNameIdSPNameQualifier']
    # Rotate the JWT issued-at so existing tokens for this user stop validating.
    users.change_jwt_iat(context["userId"])
    return Response(
        status_code=307,
        body='',
        headers={'Location': auth.logout(name_id=name_id, session_index=session_index, nq=name_id_nq,
                                         name_id_format=name_id_format,
                                         spnq=name_id_spnq), 'Content-Type': 'text/plain'})
@app.route('/saml2/sls', methods=['GET'], authorizer=None)
def process_sls_assertion():
    """Single Log-out Service: handle IdP logout requests/responses.

    For IdP-initiated logout, the affected user's JWT issued-at is rotated so
    their tokens stop validating.  Finishes with a redirect either to the URL
    returned by the SAML library or to the site root.
    """
    req = prepare_request(request=app.current_request)
    session = req["cookie"]["session"]
    # NOTE(review): `request` is unused in this handler.
    request = req['request']
    auth = init_saml_auth(req)
    request_id = None
    if 'LogoutRequestID' in session:
        request_id = session['LogoutRequestID']

    def dscb():
        # Delete-session callback invoked by the SAML library on success.
        session.clear()

    url = auth.process_slo(request_id=request_id, delete_session_cb=dscb)
    errors = auth.get_errors()
    if len(errors) == 0:
        if 'SAMLRequest' in req['get_data']:
            # IdP-initiated logout: extract the user from the LogoutRequest
            # and invalidate their tokens.
            logout_request = OneLogin_Saml2_Logout_Request(auth.get_settings(), req['get_data']['SAMLRequest'])
            user_email = logout_request.get_nameid(auth.get_last_request_xml())
            to_logout = users.get_by_email_only(user_email)
            if len(to_logout) > 0:
                to_logout = to_logout[0]['id']
                users.change_jwt_iat(to_logout)
            else:
                print("Unknown user SLS-Request By IdP")
        else:
            print("Preprocessed SLS-Request by SP")
        if url is not None:
            return Response(
                status_code=307,
                body='',
                headers={'Location': url, 'Content-Type': 'text/plain'})
    # Fall through on errors or when no redirect URL was produced.
    return Response(
        status_code=307,
        body='',
        headers={'Location': environ["SITE_URL"], 'Content-Type': 'text/plain'})
@app.route('/saml2/metadata', methods=['GET'], authorizer=None)
def saml2_metadata():
    """Serve the SAML service-provider metadata XML (500 on validation errors)."""
    req = prepare_request(request=app.current_request)
    sp_settings = init_saml_auth(req).get_settings()
    metadata = sp_settings.get_sp_metadata()
    validation_errors = sp_settings.validate_metadata(metadata)
    if validation_errors:
        return Response(
            status_code=500,
            body=', '.join(validation_errors))
    return Response(
        status_code=200,
        body=metadata,
        headers={'Content-Type': 'text/xml'})
| 2.15625 | 2 |
utils/ranked.py | Plux1/imagelab | 0 | 12771053 | import numpy as np
def rank5_accuracy(predictions, labels):
    """Compute rank-1 and rank-5 classification accuracy.

    :param predictions: iterable of per-class score vectors (one per sample)
    :param labels: iterable of ground-truth class indices
    :return: tuple ``(rank_1, rank_5)`` of floats in [0, 1]; ``(0.0, 0.0)``
        when ``labels`` is empty
    """
    rank_1 = 0
    rank_5 = 0
    for scores, ground_truth in zip(predictions, labels):
        # Class indices sorted by descending confidence.
        ranked = np.argsort(scores)[::-1]
        # Ground truth anywhere in the five most confident guesses.
        if ground_truth in ranked[:5]:
            rank_5 += 1
        # Ground truth is the single most confident guess.
        if ground_truth == ranked[0]:
            rank_1 += 1
    total = len(labels)
    if total == 0:
        # Guard against ZeroDivisionError on empty input.
        return 0.0, 0.0
    return rank_1 / float(total), rank_5 / float(total)
cogs/example.py | StvenYun/BBBot | 0 | 12771054 | <gh_stars>0
from discord.ext import commands
#######################KEY COMPONENT#########################
class Example(commands.Cog):
    """Example cog: one lifecycle listener and one latency command."""

    def __init__(self, bot):
        # Keep a handle to the bot so commands can read gateway state (latency).
        self.bot = bot

    #######################KEY COMPONENT#########################
    # Events
    @commands.Cog.listener()
    async def on_ready(self):
        # Fired once the gateway connection is ready; confirms the cog loaded.
        print('example.py injected')

    # Commands
    @commands.command(name='ping', help='Returns latency between bot,api, and database')
    async def ping(self, ctx):
        # bot.latency is in seconds; report it as whole milliseconds.
        await ctx.send(f'Pong! { round(self.bot.latency*1000)} ms')
#######################KEY COMPONENT#########################
def setup(bot):
    """Extension entry point used by ``bot.load_extension`` to register the cog."""
    bot.add_cog(Example(bot))
#######################KEY COMPONENT#########################
| 2.515625 | 3 |
sippycode/twitter/sippy_twitter.py | schreck2/sippycode | 0 | 12771055 | # Copyright 2008 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
import getpass
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import sippycode.twitter.core as twitter
import sippycode.auth.core as auth
class StatusClient(object):
    """Paged viewer over the friends timeline (Python 2 code).

    Twitter pages hold 20 statuses; this client shows 5 at a time, so each
    Twitter page maps to four local pages.
    """

    # Class-level defaults; assignment in get_page creates instance attributes.
    _twitter_page = 0
    _local_page = 0

    def __init__(self):
        # Cached (header, text) tuples for the currently fetched Twitter page.
        self._status_cache = []

    def print_current_page(self):
        """Print the five statuses of the current local page."""
        print '------------------------------'
        start_index = self._local_page * 5
        for result in self._status_cache[start_index:start_index+5]:
            print result[0]
            print result[1]
            print ''

    def get_page(self, number, client):
        """Fetch (if needed) the Twitter page containing local page ``number``.

        Only re-requests the timeline when the target Twitter page differs
        from the cached one.  ``number / 4`` relies on Python 2 integer
        division.
        """
        old_twitter_page = self._twitter_page
        if number > 0:
            self._twitter_page = (number / 4) + 1
            self._local_page = (number - 1) % 4
        else:
            self._twitter_page = 1
            self._local_page = 0
        if old_twitter_page != self._twitter_page:
            response = client.friends_timeline(page=self._twitter_page)
            if response.status == 200:
                self._status_cache = []
                tree = ElementTree.fromstring(response.read())
                for status in tree.findall('status'):
                    # Store a display header plus the status text.
                    self._status_cache.append(
                        ('%s - %s - %s' % (
                            status.findtext('user/screen_name'),
                            status.findtext('user/name'),
                            status.findtext('created_at')),
                         status.findtext('text')))
            else:
                print 'Sorry, we couldn\'t read the updates from your friends.'
def main():
    """Interactive command loop of the Twitter client (Python 2 code)."""
    command = ''
    # The pages start at 1, so we begin at zero so that the first page will
    # be fetched on the first command.
    current_page = 0
    print 'Welcome to the sippycode Twitter client.'
    client = get_credentialed_client()
    status_viewer = StatusClient()
    print_instructions()
    while not command.startswith('q'):
        command = raw_input(': ')
        if command.startswith('q'):
            break
        elif command.startswith('u'):
            # Post a status update (hard limit of 140 characters).
            message = raw_input('What are you doing?: ')
            while len(message) >= 140:
                print 'Too long!'
                message = raw_input('What are you doing?: ')
            if len(message) > 0:
                client.update(message)
        elif command.startswith('n'):
            # Newer updates: move one page back (never below page 0).
            if current_page > 0:
                current_page -= 1
            status_viewer.get_page(current_page, client)
            status_viewer.print_current_page()
        elif command.startswith('p'):
            # Jump to a specific page.
            current_page = int(raw_input('Start at page: '))
            status_viewer.get_page(current_page, client)
            status_viewer.print_current_page()
        else:
            # Default (including 'o'): show older updates.
            current_page += 1
            status_viewer.get_page(current_page, client)
            status_viewer.print_current_page()
def print_instructions():
    """Print the list of interactive commands understood by main()."""
    print 'Commands:'
    print '  u: to post an update.'
    print '  nothing or o: to see older updates.'
    print '  n: to see newer updates.'
    print '  p: to see a specific page of updates.'
    #print '  r: to read updates from a particular user.'
    print '  q: to quit.'
    # TODO add f command to follow a user.
def get_credentialed_client():
    """Create a TwitterClient, loading or saving credentials in .twitter.creds.

    NOTE(review): credentials are stored as plain text in the working
    directory and the private ``_credentials`` attribute of the client is
    accessed directly.
    """
    if os.path.exists('.twitter.creds'):
        choice = raw_input('Load credentials from file y/n (y): ')
        if not choice.startswith('n'):
            # Re-hydrate a client from the previously saved auth cookie.
            credential_file = open('.twitter.creds', 'r')
            client = twitter.TwitterClient('', '')
            credentials = auth.BasicAuth('', '')
            credentials.basic_cookie = credential_file.read()
            client._credentials = credentials
            credential_file.close()
            return client
    # No stored credentials (or user declined): prompt for them.
    username = raw_input('Username: ')
    password = getpass.getpass()
    client = twitter.TwitterClient(username, password)
    choice = raw_input('Save credentials in .twitter.creds y/n (y): ')
    if not choice.startswith('n'):
        credential_file = open('.twitter.creds', 'w')
        credential_file.write(client._credentials.basic_cookie)
        credential_file.close()
    return client
# Run the interactive client only when executed as a script.
if __name__ == '__main__':
    main()
| 2.5 | 2 |
yarnrunner_python/vm_std_lib.py | mapto/YarnRunner-Python | 2 | 12771056 | <gh_stars>1-10
# Operator tables of the Yarn VM standard library.
# Each entry maps an operator name to a (parameter_count, implementation)
# pair; the implementation receives its operands as a sequence ``p``.
functions = {
    'Add': (2, lambda p: p[0] + p[1]),
    'Minus': (2, lambda p: p[0] - p[1]),
    'UnaryMinus': (1, lambda p: -p[0]),
    'Divide': (2, lambda p: p[0] / p[1]),
    'Multiply': (2, lambda p: p[0] * p[1]),
    'Modulo': (2, lambda p: p[0] % p[1]),
    'EqualTo': (2, lambda p: p[0] == p[1]),
    'NotEqualTo': (2, lambda p: p[0] != p[1]),
    'GreaterThan': (2, lambda p: p[0] > p[1]),
    'GreaterThanOrEqualTo': (2, lambda p: p[0] >= p[1]),
    'LessThan': (2, lambda p: p[0] < p[1]),
    'LessThanOrEqualTo': (2, lambda p: p[0] <= p[1]),
    'And': (2, lambda p: p[0] and p[1]),
    'Or': (2, lambda p: p[0] or p[1]),
    'Xor': (2, lambda p: p[0] ^ p[1]),
    'Not': (1, lambda p: not p[0])
}

# Which of the generic operators each Yarn value type supports.
typeMethods = {
    'Boolean': ['EqualTo',
                'NotEqualTo',
                'And',
                'Or',
                'Xor',
                'Not'],
    'Number': ['EqualTo',
               'NotEqualTo',
               'Add',
               'Minus',
               'Divide',
               'Multiply',
               'Modulo',
               'UnaryMinus',
               'GreaterThan',
               'GreaterThanOrEqualTo',
               'LessThan',
               'LessThanOrEqualTo'],
    'String': ['EqualTo',
               'NotEqualTo',
               'Add']
}

# Register type-qualified aliases (e.g. 'Number.Add' -> functions['Add']) so
# compiled Yarn bytecode can dispatch by operand type.
# (Loop variable renamed from ``type`` to avoid shadowing the builtin.)
for type_name, methods in typeMethods.items():
    for method in methods:
        functions[f'{type_name}.{method}'] = functions[method]
| 2.875 | 3 |
graphic_trees.py | nelliesnoodles/PythonDLL | 0 | 12771057 | from DLL import *
# Demo script for the DLL (doubly linked list) module.

# Build a list by pushing values directly, then render it graphically.
mytrees = DLList()
mytrees.push('pine')
mytrees.push('maple')
mytrees.push('palm')
mytrees.push('redwood')
mytrees.push('baobab')
mytrees.graphical()

# Build a second list from pre-constructed nodes and dump its contents.
noodles = DLList()
node1 = DLLNode(None, 'ramen', None)
node2 = DLLNode(None, 'egg', None)
node3 = DLLNode(None, 'speghetti', None)
noodles.push_node(node1)
noodles.push_node(node2)
noodles.push_node(node3)
#print(noodles.get_node(1))
noodles.dump()
| 2.296875 | 2 |
scrape_name.py | OpenHacks-2020/wishbot | 3 | 12771058 | <gh_stars>1-10
# NOT implemented as part of bot.py
# Possible usage: ask user if they would like to add the suggested gift (by displaying the name and soliciting a 'yes' or 'no') before adding it to the wishlist
# #-*- coding: utf-8 -*-
from bs4 import BeautifulSoup
import requests
def getName(url):
    '''Takes a product URL as an input and isolates and prints the product's name/title.

    NOTE(review): tailored to Etsy pages whose <title> looks like
    "<title>NAME | Etsy</title>"; list.remove raises ValueError when the
    title has a different shape.  The name is printed, not returned.
    '''
    content = requests.get(url).content
    soup = BeautifulSoup(content, features='lxml')
    # Full <title> tag as a string, e.g. "<title>Some Gift | Etsy</title>".
    product_names = str(soup.findAll('title')[0])
    list_pn = product_names.split()
    list_pn.remove('|')
    list_pn.remove('Etsy</title>')
    # Strip the leading "<title>" (7 characters) from the first word.
    list_pn[0] = list_pn[0][7:]
    print(' '.join(list_pn))
| 3.453125 | 3 |
phi/physics/pressuresolver/geom.py | Neroware/PhiFlow | 13 | 12771059 | from numbers import Number
from phi import math
from phi.math.blas import conjugate_gradient
from phi.math.helper import _dim_shifted
from phi.physics.field import CenteredGrid
from .solver_api import PoissonDomain, PoissonSolver
class GeometricCG(PoissonSolver):
    """Conjugate-gradient Poisson solver that applies the Laplace operator
    geometrically (stencil-based) in every iteration."""

    def __init__(self, accuracy=1e-5, gradient_accuracy='same',
                 max_iterations=2000, max_gradient_iterations='same',
                 autodiff=False):
        """
        Conjugate gradient solver that geometrically calculates laplace pressure in each iteration.
        Unlike most other solvers, this algorithm is TPU compatible but usually performs worse than SparseCG.
        Obstacles are allowed to vary between examples but the same number of iterations is performed for each example in one batch.
        :param accuracy: the maximally allowed error on the divergence channel for each cell
        :param gradient_accuracy: accuracy applied during backpropagation, number or 'same' to use forward accuracy
        :param max_iterations: integer specifying maximum conjugent gradient loop iterations or None for no limit
        :param max_gradient_iterations: maximum loop iterations during backpropagation,
            'same' uses the number from max_iterations,
            'mirror' sets the maximum to the number of iterations that were actually performed in the forward pass
        :param autodiff: If autodiff=True, use the built-in autodiff for backpropagation.
            The intermediate results of each loop iteration will be permanently stored if backpropagation is used.
            If False, replaces autodiff by a forward pressure solve in reverse accumulation backpropagation.
            This requires less memory but is only accurate if the solution is fully converged.
        """
        PoissonSolver.__init__(self, 'Single-Phase Conjugate Gradient',
                               supported_devices=('CPU', 'GPU', 'TPU'),
                               supports_guess=True, supports_loop_counter=True, supports_continuous_masks=True)
        assert isinstance(accuracy, Number), 'invalid accuracy: %s' % accuracy
        assert gradient_accuracy == 'same' or isinstance(gradient_accuracy, Number), 'invalid gradient_accuracy: %s' % gradient_accuracy
        assert max_gradient_iterations in ['same', 'mirror'] or isinstance(max_gradient_iterations, Number), 'invalid max_gradient_iterations: %s' % max_gradient_iterations
        self.accuracy = accuracy
        self.gradient_accuracy = accuracy if gradient_accuracy == 'same' else gradient_accuracy
        self.max_iterations = max_iterations
        if max_gradient_iterations == 'same':
            self.max_gradient_iterations = max_iterations
        elif max_gradient_iterations == 'mirror':
            self.max_gradient_iterations = 'mirror'
        else:
            self.max_gradient_iterations = max_gradient_iterations
            # A custom backward iteration count is incompatible with built-in autodiff.
            assert not autodiff, 'Cannot specify max_gradient_iterations when autodiff=True'
        self.autodiff = autodiff

    def solve(self, divergence, domain, guess):
        """Solve the Poisson equation for ``divergence`` on ``domain``.

        :param divergence: right-hand side tensor
        :param domain: PoissonDomain providing the accessibility mask
        :param guess: initial pressure guess or None
        :return: (pressure, iteration_count)
        """
        assert isinstance(domain, PoissonDomain)
        fluid_mask = domain.accessible_tensor(extend=1)
        if self.autodiff:
            return solve_pressure_forward(divergence, fluid_mask, self.max_iterations, guess, self.accuracy, domain, back_prop=True)
        else:
            def pressure_gradient(op, grad):
                # Backward pass: re-run a forward solve on the incoming gradient.
                # NOTE(review): closes over max_gradient_iterations, which is
                # assigned only below — valid because the gradient function is
                # invoked later, during backpropagation.
                return solve_pressure_forward(grad, fluid_mask, max_gradient_iterations, None, self.gradient_accuracy, domain)[0]

            pressure, iteration = math.with_custom_gradient(
                solve_pressure_forward,
                [divergence, fluid_mask, self.max_iterations, guess, self.accuracy, domain],
                pressure_gradient,
                input_index=0, output_index=0, name_base='geom_solve'
            )
            # 'mirror' limits the backward solve to the iterations actually used forward.
            max_gradient_iterations = iteration if self.max_gradient_iterations == 'mirror' else self.max_gradient_iterations
            return pressure, iteration
def solve_pressure_forward(divergence, fluid_mask, max_iterations, guess, accuracy, domain, back_prop=False):
    """Run the conjugate-gradient iteration of the pressure solve.

    :param divergence: right-hand side of the Poisson equation
    :param fluid_mask: accessibility weights (extended by one cell)
    :param back_prop: whether intermediate iterations must be differentiable
    :return: (pressure, iterations) as returned by conjugate_gradient
    """
    from phi.physics.material import Material
    extrapolation = Material.extrapolation_mode(domain.domain.boundaries)

    def apply_A(pressure):
        # Matrix-free application of the masked Laplace operator.
        pressure = CenteredGrid(pressure, extrapolation=extrapolation)
        pressure_padded = pressure.padded([[1, 1]] * pressure.rank)
        return _weighted_sliced_laplace_nd(pressure_padded.data, weights=fluid_mask)

    return conjugate_gradient(divergence, apply_A, guess, accuracy, max_iterations, back_prop=back_prop)
def _weighted_sliced_laplace_nd(tensor, weights):
    """Laplace of ``tensor`` with per-face contributions scaled by ``weights``.

    ``tensor`` must have exactly one channel; ``weights`` typically holds the
    fluid/accessibility mask so faces blocked by obstacles do not contribute.
    """
    if tensor.shape[-1] != 1:
        raise ValueError('Laplace operator requires a scalar channel as input')
    dims = range(math.spatial_rank(tensor))
    components = []
    for dimension in dims:
        # Lower/center/upper neighbours along this axis; the other axes are
        # trimmed by one cell on each side to match the padded input.
        lower_weights, center_weights, upper_weights = _dim_shifted(weights, dimension, (-1, 0, 1), diminish_others=(1, 1))
        lower_values, center_values, upper_values = _dim_shifted(tensor, dimension, (-1, 0, 1), diminish_others=(1, 1))
        diff = math.mul(upper_values, upper_weights * center_weights) + math.mul(lower_values, lower_weights * center_weights) + math.mul(center_values, - lower_weights - upper_weights)
        components.append(diff)
    return math.sum(components, 0)
| 2.765625 | 3 |
COMET/ui_plugins/Alignment_window.py | dallaval5u/COMET | 0 | 12771060 | <filename>COMET/ui_plugins/Alignment_window.py
import ast
import json
import os
import os.path as osp
import sys, importlib, logging
import numpy as np
import pyqtgraph as pq
from PyQt5.QtCore import Qt
from PyQt5 import QtGui
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from random import randint
from time import sleep
import re
from ..utilities import transformation, connection_test
from .Table_widget import Table_widget
class Alignment_window(Table_widget):
def __init__(self, GUI, layout):
    """Build the alignment tab: state, UI, camera, signal wiring, pad files.

    :param GUI: framework object providing devices, settings and helpers
    :param layout: Qt layout the alignment widget is added to
    """
    self.log = logging.getLogger(__name__)
    # Last seen table coordinates.
    self.previous_xloc = 0
    self.previous_yloc = 0
    self.previous_zloc = 0
    # -1 means "no alignment procedure running"; steps count up from 0.
    self.alignment_step = -1
    self.alignment_started = False
    self.check_strip = 1
    self.project = None
    self.sensor = None
    self.new_reference_pads = []
    self.reference_pads = []
    self.alignment_pads_changed = True
    self.reference_pads_positions = []
    self.sensor_pad_file = None
    # Switching configurations exercised by the needle-contact test.
    self.connection_test_switchings = ["DC1Test", "DC2Test", "AC1Test", "AC2Test"]
    self.variables = GUI
    self.connection_test_device = self.variables.devices_dict.get("2410SMU", None)
    if not self.connection_test_device:
        self.log.warning("No connection test SMU could be loaded!!!")
    # Transformation from a previous alignment, if one was stored.
    self.transformation_matrix = self.variables.default_values_dict["settings"].get(
        "trans_matrix", None
    )
    self.V0 = self.variables.default_values_dict["settings"].get("V0", None)
    self.layout = layout
    self.child_layouts = {"Table": None}
    self.trans = transformation()
    # Alignment tab
    alignment_widget = QWidget()
    self.alignment = self.variables.load_QtUi_file("Alignment.ui", alignment_widget)
    self.layout.addWidget(alignment_widget)
    self.child_layouts["Table"] = self.alignment.Table_Layout
    # Init the other classes (Table_widget needs this instance as its main).
    super(Alignment_window, self).__init__(self)  # I need the main
    # Load camera; optional — a warning is logged when the module is missing.
    try:
        from .Ueye_camera_main import Ueye_main

        # self.alignment.layout_camera.setAlignment(Qt.AlignVertical_Mask)
        self.camera = Ueye_main(
            self.alignment.layout_camera, roi_width=1280, roi_height=1024
        )
    except ImportError:
        self.log.warning("Could not import camera module")
    # Assign the buttons.
    # NOTE(review): if the camera import failed above, self.camera does not
    # exist and the camera button connections below raise AttributeError.
    self.alignment.ref_1.valueChanged.connect(self.spin_box_action_1)
    self.alignment.ref_2.valueChanged.connect(self.spin_box_action_2)
    self.alignment.ref_3.valueChanged.connect(self.spin_box_action_3)
    self.alignment.StartAlignment_btn.clicked.connect(self.start_alignment_action)
    self.alignment.nextstep_btn.clicked.connect(lambda: self.next_step_action(None))
    self.alignment.abort_btn.clicked.connect(self.abort_action)
    self.alignment.move_to_strip_button.clicked.connect(self.move_to_strip_action)
    self.alignment.camera_on_Button.clicked.connect(self.camera.start)
    self.alignment.camera_off_Button.clicked.connect(self.camera.stop)
    self.alignment.test_needle_conn_pushButton.clicked.connect(
        self.test_needle_contact_action
    )
    self.variables.add_update_function(self.current_strip_lcd)
    # Find pad data in the additional files and parse them.
    self.pad_files = self.variables.framework_variables["Configs"][
        "additional_files"
    ].get("Pad_files", {})
    if self.pad_files:
        self.parse_pad_files(self.pad_files)
    else:
        self.log.error(
            "No pad files found! Please check if they are correctly defined in the configs!"
        )
    self.what_to_do_text(-1)  # Initializes the text
self.what_to_do_text(-1) # Initializes the text
def set_needle_contact_lamp(self, state):
    """Color the needle-connection indicator and show ``state`` as its text.

    Known states (case-insensitive): 'contact unclear' (yellow),
    'contact' (green), 'no contact' (red).
    NOTE(review): an unknown state passes None to setStyleSheet — verify Qt
    tolerates that, or guard the lookup.
    """
    states = {
        "contact unclear": "QFrame { background :rgb(255, 215, 0) }",
        "contact": "QFrame { background :rgb(36, 216, 93) }",
        "no contact": "QFrame { background :rgb(214, 40, 49) }",
    }
    self.alignment.Needle_connection_label.setStyleSheet(states.get(state.lower()))
    self.alignment.Needle_connection_label.setText(state.upper())
def test_needle_contact_action(self):
    """Run the needle-contact resistance test and update the indicator lamp.

    Requires both a switching system and the connection-test SMU; silently
    does nothing when either is missing.
    """
    if self.variables.switching and self.connection_test_device:
        res = connection_test(
            self.connection_test_switchings,
            self.variables.switching,
            self.variables.vcw,
            self.connection_test_device,
            target_resistance=2.5,
            abs_err=5.0,
        )
        # connection_test returns a bool on success; otherwise the list of
        # failing switchings — presumably; confirm against utilities.connection_test.
        if isinstance(res, bool):
            self.set_needle_contact_lamp("contact")
        else:
            self.log.critical("Needles {} have no contact!".format(res))
            self.set_needle_contact_lamp("no contact")
def parse_pad_files(self, parent_dict):
    """
    Parses the raw pad files in place; each entry's 'raw' value must be a str.

    For every parent_dict[project][sensor] dict the following keys are added:
      - 'data': {pad_name: (x, y, z)} from lines like "name  x  y  z"
      - 'additional_params': header "Key: value" pairs
      - 'reference_pads': pad numbers from "reference pad N: <number>" lines

    :param parent_dict: {project: {sensor: {'raw': str}}}; mutated in place
    :return: None
    """
    # Separate header and data via regex.
    data_pattern = re.compile(
        r"(\w+)\s+(-?\d+)\s+(-?\d+)\s+(-?\d+)\n", re.MULTILINE
    )  # Todo: Its possible to do all the work with regex
    for project, sensors in parent_dict.items():
        for sensor, raw_data in sensors.items():
            # parent_dict[project][sensor]["data"] = {k: v for d in [{str(line[0]): [float(x) for x in line[1:]]} for line in data_pattern.findall(raw_data["raw"])] for k, v in d.items()}
            Data = data_pattern.findall(raw_data["raw"])
            # Pad name -> (x, y, z) as floats.
            parent_dict[project][sensor]["data"] = dict(
                zip(
                    [line[0] for line in Data],
                    [tuple(float(x) for x in line[1:]) for line in Data],
                )
            )
            # Find reference pads and other "Key: value" header parameters.
            find_parameters = re.compile(r"^(\w+\s?\w+):\s+(.+)", re.MULTILINE)
            parent_dict[project][sensor]["additional_params"] = {
                str(x[1]).strip(): x[2].strip()
                for x in find_parameters.finditer(raw_data["raw"])
            }
            # Get reference pads alone.
            reference_pad_pattern = re.compile(
                r"(reference.?pad.?\d?):\s+(\d+)", re.MULTILINE
            )
            parent_dict[project][sensor]["reference_pads"] = [
                x[2] for x in reference_pad_pattern.finditer(raw_data["raw"])
            ]
def current_strip_lcd(self):
    """Periodic update hook: refresh the current-strip LCD when it changed."""
    current_lcd_value = self.alignment.current_strip_lcdNumber.intValue()
    current_strip = self.variables.default_values_dict["settings"].get(
        "current_strip", -1
    )
    # Only redraw when the value actually changed.
    if current_lcd_value != current_strip:
        self.alignment.current_strip_lcdNumber.display(current_strip)
def move_to_strip_action(self):
    """Handler for the 'move to strip' button.

    Moves the table to the strip selected in the spin box, provided the
    table is idle and an alignment has been performed; otherwise informs
    the user via a message box.
    """
    self.set_needle_contact_lamp("contact unclear")
    if not self.variables.default_values_dict["settings"]["table_is_moving"]:
        strip_to_move = str(self.alignment.move_to_strip_spin.value())
        if self.variables.default_values_dict["settings"]["Alignment"]:
            # Here a truthy return value signals an error message.
            error = self.variables.table.move_to_strip(
                self.sensor_pad_file,
                strip_to_move,
                self.trans,
                self.transformation_matrix,
                self.V0,
                self.variables.default_values_dict["settings"]["height_movement"],
            )
            if error:
                # self.variables.message_to_main.put(error)
                self.error_action(error)
                return
        else:
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Information)
            msg.setText(
                "No alignment is done, please make the alignment and try again..."
            )
            msg.setWindowTitle("Arrr, Pirate...")
            msg.exec_()
            return
    else:
        msg = QMessageBox()
        msg.setIcon(QMessageBox.Information)
        msg.setText(
            "Table is currently moving, please wait until movement is finished..."
        )
        msg.setWindowTitle("Arrr, Pirate...")
        msg.exec_()
        return
def start_alignment_action(self):
    """Start the alignment procedure after confirmation by the user.

    Refuses to start while a procedure is already running or when no table
    is connected; on confirmation, resets the step counter and triggers
    step 0 via next_step_action.
    """
    self.set_needle_contact_lamp("contact unclear")
    # First ask if you want to start the alignment.
    if self.alignment_started:
        msg = QMessageBox()
        msg.setIcon(QMessageBox.Information)
        msg.setText("The alignment procedure is currently running.")
        msg.setWindowTitle("Alignment in progress")
        msg.exec_()
        return
    try:
        if not self.variables.table.table_ready:
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Information)
            msg.setText("It seems that no table is connected to this machine...")
            msg.setWindowTitle("Sorry Bro...")
            msg.exec_()
            return
    # NOTE(review): bare except deliberately catches a missing/broken table
    # object; the procedure still continues after logging.
    except:
        self.log.error("Table seems to be missing", exc_info=True)
    reply = QMessageBox.question(
        None,
        "Warning",
        "Are you sure to start the alignment proceedure? A previous alignement will be deleted",
        QMessageBox.Yes,
        QMessageBox.No,
    )
    if reply == QMessageBox.Yes:
        # Update the GUI and kick off the first step.
        self.alignment_started = True
        self.alignment_step = 0
        self.next_step_action(self.alignment_step)
    else:
        pass
def next_step_action(self, step=None):
    """Advance the alignment procedure and update all GUI elements.

    :param step: explicit step to execute; None advances to the next step.
    Steps beyond the maximum (or an aborted procedure) reset the text and,
    if an alignment exists, move back to the first reference pad.
    """
    maximum_step = 5
    self.set_needle_contact_lamp("contact unclear")
    if step == None:
        self.alignment_step += 1  # so the next step is executed
        step = self.alignment_step
    if step > maximum_step or not self.alignment_started:
        self.what_to_do_text(-1)  # Resets the text
        if self.variables.default_values_dict["settings"]["Alignment"]:
            success = self.variables.table.move_to_strip(
                self.sensor_pad_file,
                self.reference_pads[0],
                self.trans,
                self.transformation_matrix,
                self.V0,
                self.variables.default_values_dict["settings"]["height_movement"],
            )
            # NOTE(review): move_to_strip_action treats a truthy return as an
            # error, while here a falsy return triggers error_action — one of
            # the two conventions looks inverted; verify against table.move_to_strip.
            if not success:
                self.error_action(success)
                return
        self.alignment_started = False
    if self.alignment_started:
        # First set the GUI, then execute the step's actions.
        self.what_to_do_text(step)
        self.do_alignment(step)
    if step == maximum_step:
        self.alignment_started = False
def set_checkboxes(self, list):
    """Set the checklist checkboxes ali_0..ali_N to the given boolean states.

    NOTE(review): the parameter name shadows the builtin ``list``; renaming
    would change the keyword interface, so it is only flagged here.
    """
    for i, state in enumerate(list):
        getattr(self.alignment, "ali_" + str(i)).setChecked(state)
def do_alignment(self, step):
    """Does the steps for the alignment.

    Performs the table movements and state bookkeeping for the given
    wizard step: -1 resets everything, 0..5 are the wizard steps.
    """
    if step == -1:
        # reset all elements
        self.set_checkboxes([False, False, False, False, False])
        self.variables.default_values_dict["settings"][
            "Alignment"
        ] = False  # So I cannot do a measuremnt until the alignment is done
    if step == 0:
        # Reset some elements and set new elements
        self.set_checkboxes([False, False, False, False, False])
        # Get sensor
        self.project = self.variables.default_values_dict["settings"][
            "Current_project"
        ]
        self.sensor = str(
            self.variables.default_values_dict["settings"]["Current_sensor"]
        )
        try:
            self.sensor_pad_file = self.pad_files[self.project][self.sensor].copy()
            self.reference_pads = self.sensor_pad_file["reference_pads"][:]
            self.update_reference_pad_positions()
            # self.adjust_alignment_points(2) should be here but the spin boxes get asignal and then they would change again- > therefore only spin boxes change this value
            self.number_of_pads = len(self.sensor_pad_file["data"])
            self.update_static()
        except Exception as err:
            self.log.error(
                "An error while accessing the pad files with error: {}".format(err)
            )
            self.error_action(
                "An error while accessing the pad files with error: {}".format(err)
            )
    if step == 1:
        # Get all changed values for alignment strips and move to first
        self.set_checkboxes([False, False, False, False, False])
        self.variables.table.set_axis([True, True, False])
        self.variables.table.set_joystick(True)
    if step == 2:
        # move to second alignment point
        self.set_checkboxes([True, False, False, False, False])
        self.variables.table.set_joystick(False)
        self.variables.table.set_axis([True, True, True])
        self.first_pos = self.variables.table.get_current_position()
        sensor_first_pos = self.reference_pads_positions[
            0
        ]  # this gives me absolute positions
        sensor_second_pos = self.reference_pads_positions[
            1
        ]  # this gives me absolute positions
        relative_movepos = [
            x1 - x2 for (x1, x2) in zip(sensor_second_pos, sensor_first_pos)
        ]
        # No add the strip length to the y axis for aliognement reasons
        # relative_movepos[1] = relative_movepos[1] + self.sensor_pad_file["strip_length"]
        success = self.variables.table.relative_move_to(
            relative_movepos,
            True,
            self.variables.default_values_dict["settings"]["height_movement"],
            clearance=self.variables.default_values_dict["settings"]["clearance"],
        )
        if not success:
            self.error_action(success)
            return
        self.variables.table.set_axis([True, True, False])
        self.variables.table.set_joystick(True)
    if step == 3:
        # move to third alignment point
        self.set_checkboxes([True, True, False, False, False])
        self.variables.table.set_joystick(False)
        self.variables.table.set_axis([True, True, True])
        self.second_pos = self.variables.table.get_current_position()
        sensor_first_pos = self.reference_pads_positions[
            1
        ]  # this gives me absolute positions
        sensor_second_pos = self.reference_pads_positions[
            2
        ]  # this gives me absolute positions
        relative_movepos = [
            x1 - x2 for (x1, x2) in zip(sensor_second_pos, sensor_first_pos)
        ]
        success = self.variables.table.relative_move_to(
            relative_movepos,
            True,
            self.variables.default_values_dict["settings"]["height_movement"],
            clearance=self.variables.default_values_dict["settings"]["clearance"],
        )
        if not success:
            self.error_action(success)
            return
        self.variables.table.set_axis([True, True, False])
        self.variables.table.set_joystick(True)
    if step == 4:
        # choose random strip and move to
        self.set_checkboxes([True, True, True, False, False])
        self.third_pos = self.variables.table.get_current_position()
        self.variables.table.set_joystick(False)
        # Resets the axis to all open
        self.variables.table.set_axis([True, True, True])
        # Now make alignment and move absolute to the last position
        sensorx = self.reference_pads_positions[0]
        sensory = self.reference_pads_positions[1]
        sensorz = self.reference_pads_positions[2]
        T, V0 = self.trans.transformation_matrix(
            sensorx,
            sensory,
            sensorz,
            self.first_pos,
            self.second_pos,
            self.third_pos,
        )
        # BUGFIX: the old check `type(T) == type(int)` compared type(T)
        # against the metaclass `type` and could never detect an integer
        # error code returned by the transformation.
        # NOTE(review): assumes the transform returns an int on failure —
        # confirm against the trans module.
        if isinstance(T, int):
            self.log.error(
                "There was an error while doing the transformation, please check error log."
            )
            self.error_action(
                "There was an error while doing the transformation, please check error log."
            )
            return
        self.transformation_matrix = T
        self.V0 = V0
        relative_check_pos = self.sensor_pad_file["data"][self.check_strip]
        table_abs_pos = self.trans.vector_trans(relative_check_pos, T, V0)
        success = self.variables.table.move_to(
            list(table_abs_pos),
            True,
            self.variables.default_values_dict["settings"]["height_movement"],
        )
        if not success:
            self.error_action(success)
            return
    if step == 5:
        # calculate the transformation and save it
        self.set_checkboxes(
            [True, True, True, True, True]
        )  # The last true only when alignemt was successful
        self.variables.default_values_dict["settings"][
            "trans_matrix"
        ] = self.transformation_matrix
        self.variables.default_values_dict["settings"]["V0"] = self.V0
        self.variables.default_values_dict["settings"]["Alignment"] = True
def abort_action(self):
    """Abort the alignment procedure after user confirmation."""
    if not self.alignment_started:
        return
    answer = QMessageBox.question(
        None,
        "Warning",
        "Are you sure to stop the alignment proceedure? Any progress will be deleted.",
        QMessageBox.Yes,
        QMessageBox.No,
    )
    if answer == QMessageBox.Yes:
        # Roll the GUI back to its initial state
        self.alignment_started = False
        self.next_step_action(-1)
        self.do_alignment(-1)
        self.set_needle_contact_lamp("contact unclear")
def error_action(self, error):
    """Aborts the alignement proceedure, without question.

    :param error: the error value/message that triggered the abort
                  (not used here; callers log it before calling)
    """
    if self.alignment_started:
        # Update the GUI back to the initial, non-aligned state
        self.alignment_started = False
        self.next_step_action(-1)
        self.do_alignment(-1)
def what_to_do_text(self, step):
    """This renders just the new text what to do.

    Shows the instruction text for the given wizard step; -1 resets the
    text and unlocks the reference-pad spin boxes.
    """
    if step == -1:
        self.lock_spinboxes(False)
        self.alignment.what_to_do_text.setText(
            "Press the start button to start the alignment process."
        )
    elif step == 0:
        # Lock the spin boxes while the user reviews the parameters
        self.lock_spinboxes(True)
        self.alignment.what_to_do_text.setText(
            "Please check if all informations are right and or change parameters. \n\n "
            "In the next step, the table will move directly to the first alignment point. "
            "Therefore, make sure that all obstacles are removed. \n \n"
            "When ready click on 'Next Step' button."
        )
    elif step == 1:
        self.lock_spinboxes(False)
        self.alignment.what_to_do_text.setText(
            "Please contact the needles on the FIRST alignment point. \n \n"
            "The Joystick has been activated, you can use it if you want, or just use the positioner, that is cool too.\n \n"
            "Uncontact the needles from the sensor before you hit the 'Next Step' button."
        )
    elif step == 2:
        self.alignment.what_to_do_text.setText(
            "Please contact the needles on the SECOND alignment point. \n \n"
            "Please DO NOT move the positioner in the xy-plane. The Joystick has been activated, use it!\n \n"
            "Uncontact the needles from the sensor before you hit the 'Next Step' button."
        )
    elif step == 3:
        self.alignment.what_to_do_text.setText(
            "Please contact the needles on the THIRD alignment point. \n \n"
            "Please DO NOT move the positioner in the xy-plane. The Joystick has been activated, use it!\n \n"
            "Please do not uncontact the needles from the sensor before you hit the 'Next Step' button. \n \n"
            "Warning: In the next step the table will move to a random strip to check the alignment"
        )
    elif step == 4:
        self.alignment.what_to_do_text.setText(
            "Please validate that the contact on strip: "
            + str(self.check_strip)
            + " is correct. "
            "Please hit the 'Next step' button if everything looks ok."
        )
    elif step == 5:
        self.alignment.what_to_do_text.setText(
            "The alignment proceedure is now finished. \n \n"
            "If you are interested: The transformation matrix is: "
            + str(self.transformation_matrix)
            + "."
            "\n \n "
            "Another hit on the 'Next button' will move the table to the first reference strip"
        )
def adjust_alignment_points(
    self, adjust_point=2, axis=2, variable="implant_length"
):
    """Shift one alignment point along one axis so a 3D alignment is possible.

    The offset is taken from the pad file's additional parameter
    *variable* (0 if absent).  Point and axis numbers are 1-based.
    """
    point_idx = adjust_point - 1
    axis_idx = axis - 1
    offset = float(self.sensor_pad_file["additional_params"].get(variable, 0))
    coords = list(self.reference_pads_positions[point_idx])
    coords[axis_idx] = coords[axis_idx] + offset
    self.reference_pads_positions[point_idx] = tuple(coords)
def lock_spinboxes(self, state):
    """Enable (state=True) or disable the three reference-pad spin boxes."""
    for box in (self.alignment.ref_1, self.alignment.ref_2, self.alignment.ref_3):
        box.setEnabled(state)
def spin_box_action_1(self):  # it has to be that way unfortunately (race conditions)
    """Handler for changes of the first reference-pad spin box."""
    if not self.alignment_started:
        return
    new_pad = int(self.alignment.ref_1.value())
    self.reference_pads[0] = new_pad
    self.update_reference_pad_positions()
    self.update_static()
def spin_box_action_2(self):
    """Handler for changes of the second reference-pad spin box."""
    if not self.alignment_started:
        return
    new_pad = int(self.alignment.ref_2.value())
    self.reference_pads[1] = new_pad
    self.update_reference_pad_positions()
    # self.adjust_alignment_points(2)
    self.update_static()
def spin_box_action_3(self):
    """Handler for changes of the third reference-pad spin box."""
    if not self.alignment_started:
        return
    new_pad = int(self.alignment.ref_3.value())
    self.reference_pads[2] = new_pad
    self.update_reference_pad_positions()
    self.update_static()
def update_reference_pad_positions(self):
    """Refresh the cached coordinates of the selected reference pads."""
    positions = []
    for pad in self.reference_pads:
        positions.append(self.sensor_pad_file["data"][str(pad)])
    self.reference_pads_positions = positions
    self.adjust_alignment_points(2, 1)  # not so good
def update_static(self):
    """This updates the static text of the gui, like sensor type."""
    # Set maxima and minima and value
    # Rare bug in spin boxes when a variable is assigned to the value and the range changes in a way that
    # it wont match with the range, the value in the variable get changed. Therefore, always increase the range
    # first and then change value, and then change range again.
    self.alignment.ref_1.setRange(1, 10000)
    self.alignment.ref_2.setRange(1, 10000)
    self.alignment.ref_3.setRange(1, 10000)
    self.alignment.ref_1.setValue(int(self.reference_pads[0]))
    self.alignment.ref_2.setValue(int(self.reference_pads[1]))
    self.alignment.ref_3.setValue(int(self.reference_pads[2]))
    self.alignment.ref_1.setRange(1, int(self.number_of_pads))
    self.alignment.ref_2.setRange(1, int(self.number_of_pads))
    self.alignment.ref_3.setRange(1, int(self.number_of_pads))
    self.first_ref = self.reference_pads_positions[0]
    self.secon_ref = self.reference_pads_positions[1]
    self.third_ref = self.reference_pads_positions[2]
    self.alignment.sensortype.setText("Sensor type: " + str(self.sensor))
    self.alignment.project.setText("Project: " + str(self.project))
    # Pick a random strip for the alignment sanity check.
    self.check_strip = str(randint(2, int(self.number_of_pads) - 1))
    # BUGFIX: membership must be tested against the pad data dictionary,
    # not the top-level pad-file dict (whose keys are "data",
    # "reference_pads", ...); the old test reset check_strip to "2" on
    # every call, so the random strip was never actually used.
    if self.check_strip not in self.sensor_pad_file["data"]:
        self.check_strip = "2"
    self.alignment.first_co_label.setText(
        "First alignment coord: " + str(self.first_ref)
    )
    self.alignment.second_co_label.setText(
        "Second alignment coord: " + str(self.secon_ref)
    )
    self.alignment.third_co_label.setText(
        "Third alignment coord: " + str(self.third_ref)
    )
    self.alignment.check_co_label.setText(
        "Check alignment coord: "
        + str(self.sensor_pad_file["data"][self.check_strip])
    )
| 2.0625 | 2 |
playground/playground.py | llv22/pyalgorithm | 0 | 12771061 | <gh_stars>0
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.3'
# jupytext_version: 0.8.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# language_info:
# codemirror_mode:
# name: ipython
# version: 3
# file_extension: .py
# mimetype: text/x-python
# name: python
# nbconvert_exporter: python
# pygments_lexer: ipython3
# version: 3.6.7
# ---
# # Solution for Global Minimial
from time import time
# $$ f(x)=x^3-60x^2-4x+6 $$, find the minimum of f(x) for x in [0, 100]
# $$f'(x) = 3x^2 - 120x - 4$$
#
# $$ x_{n+1} = x_{n} - \frac{f(x_n)}{f'(x_n)} $$
# # Newton for f'
start = time()
x0 = 100
x1 = x0 - (3*(x0**2) - 120 * x0 - 4) / (6 * x0 - 120)
# NOTE(review): f_val is initialised to f(x0) but inside the loop it is
# reassigned to f'(x0); the loop effectively iterates until |f'| is tiny.
f_val = (x0 ** 3) - 60 * (x0**2) - 4 * x0 + 6
while f_val > 1e-8:
    # Newton step applied to f': x <- x - f'(x)/f''(x)
    x1 = x0 - (3*(x0**2) - 120 * x0 - 4) / (6 * x0 - 120)
    x0 = x1
    f_val = 3*(x0**2) - 120 * x0 - 4
print("Newton method for f' is about : %0.2f seconds" % (time() - start))
# Notebook cells: show the root of f' and the value of f at that point
x0
(x0 ** 3) - 60 * (x0**2) - 4 * x0 + 6
# # Gradient Descent
#
# $$ f(x)=x^3-60x^2-4x+6 $$
# NOTE: this cell uses the TensorFlow 1.x graph/session API
# (tf.get_variable / tf.Session); it will not run on TF 2.x without
# tf.compat.v1 shims.
import tensorflow as tf
from tensorflow.train import AdamOptimizer
start = time()
x = tf.get_variable('x', initializer=tf.constant(100.0))
y = x * x * x - 60 * x * x - 4 * x + 6
# +
optimizer = AdamOptimizer(learning_rate=1e-2).minimize(y)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # 50k Adam steps starting from x=100
    for _ in range(50000):
        sess.run(optimizer)
    print(sess.run(x))
# -
print("Gradient Descent for y is about : %0.2f seconds" % (time() - start))
# # Binary Search
#
# similiar to Newton method from [0, 100]. Somehow simple, just skip it
# # MCTS
import random
start = time()
# Pure random search: sample x uniformly in [0, 100) and keep the best
# f(x); stop once the successive-value deltas stop improving 200k times.
min_val = (lambda x: x**3 + 6)(100)
solution = 100
best_step = 0
delta_value = 100000; delta_no_improve = 0
fv_pre = (lambda v: v**3 - 60 * (v**2) - 4 * v + 6)(solution)
i = 0
# for i in range(1000000):
# 1. only integer number
# v = random.randrange(0, 100.0)
# 2. sample uniform between [0, 1) * 100.0
while delta_no_improve < 200000:
    v = random.random() * 100
    fv = (lambda v: v**3 - 60 * (v**2) - 4 * v + 6)(v)
    if min_val > fv:
        # new incumbent: remember it and reset the stagnation counters
        min_val = fv
        solution = v
        best_step = i
        delta_value = 100000; delta_no_improve = 0
    else:
        if abs(fv_pre - fv) < delta_value:
            delta_value = abs(fv_pre - fv)
        else:
            delta_no_improve += 1
    fv_pre = fv; i += 1
print("Random MCTS is about : %0.2f seconds" % (time() - start))
# Notebook cells: inspect the search statistics
delta_value
delta_no_improve
min_val
best_step
solution
# # MCTS - Exploration with f' as penality
#
# Sample with **Uniform Distribution** to select better seed
import random
import numpy as np
start = time()
# +
min_val = (lambda x: x**3 + 6)(100)
solution = 100
best_penality = 1000000
best_step = 0
no_improve = 0
seed = 0
# Region of recent non-improving seeds, used to focus later sampling
minv_region = 1; maxv_region = -1; sample_region_n = 0
delta_value = 100000; delta_no_improve = 0
fv_pre = (lambda v: v**3 - 60 * (v**2) - 4 * v + 6)(solution)
i = 0
while delta_no_improve < 200000:
    # for i in range(1000000):
    if no_improve < 10000 or sample_region_n < 100:
        seed = random.random()
    else:
        # uniform seems still not very good, how about gaussion?
        seed = np.random.uniform(minv_region, maxv_region)
    v = seed * 100
    fv = (lambda v: v**3 - 60*(v**2) - 4 * v + 6)(v)
    # NOTE(review): the "penality" here is f(v) again, not f'(v) as the
    # section title suggests — confirm the intent.
    penality = (lambda x: x**3 - 60*(x**2) - 4*x + 6)(v)
    if abs(penality) < best_penality:
        best_penality = abs(penality)
        no_improve = 0
        minv_region = 1; maxv_region = -1; sample_region_n = 0
    else:
        no_improve += 1
        if no_improve > 10000:
            # NOTE(review): v (0..100) is compared against seed-scaled
            # region bounds (0..1) — looks inconsistent, verify.
            if v > maxv_region:
                maxv_region = seed
            if v < minv_region:
                minv_region = seed
            sample_region_n += 1
    if min_val > fv:
        min_val = fv
        solution = v
        best_step = i
    else:
        if abs(fv_pre - fv) < delta_value:
            delta_value = abs(fv_pre - fv)
        else:
            delta_no_improve += 1
    fv_pre = fv; i += 1
# -
print("MCTS with uniform sampling for exploration is about : %0.2f seconds" % (time() - start))
# Notebook cells: inspect the search statistics
delta_value
delta_no_improve
min_val
solution
best_step
# # MCTS - Exploration with f' as penality
#
# Sample with **Gaussion Distribution** to select better seed
#
# Too costy for calcuation of $\delta$ for Gaussian distribution, set $\delta$ = 0.01
import random
import numpy as np
start = time()
# +
min_val = (lambda x: x**3 + 6)(100)
solution = 100
best_penality = 1000000
best_step = 0
no_improve = 0
seed = 0
# Running mean of non-improving seeds drives the Gaussian proposals
minv_region = 1; maxv_region = -1; sample_region_n = 0; sample_mean = 0
delta_value = 100000; delta_no_improve = 0
fv_pre = (lambda v: v**3 - 60 * (v**2) - 4 * v + 6)(solution)
i = 0
while delta_no_improve < 200000:
    # for i in range(1000000):
    if no_improve < 10000 or sample_region_n < 1000:
        seed = random.random()
    else:
        # uniform seems still not very good, how about gaussion?
        # sigma fixed at 0.01 (see note above)
        seed = np.random.normal(sample_mean, 0.01, 1)[0]
    v = seed * 100
    fv = (lambda v: v**3 - 60*(v**2) - 4 * v + 6)(v)
    penality = (lambda x: x**3 - 60*(x**2) - 4*x + 6)(v)
    if abs(penality) < best_penality:
        best_penality = abs(penality)
        no_improve = 0
        minv_region = 1; maxv_region = -1; sample_region_n = 0
    else:
        no_improve += 1
        if no_improve > 10000:
            if v > maxv_region:
                maxv_region = seed
            if v < minv_region:
                minv_region = seed
            # incremental update of the mean of stagnating seeds
            sample_mean = (sample_region_n * sample_mean + seed) / (sample_region_n + 1)
            sample_region_n += 1
    if min_val > fv:
        min_val = fv
        solution = v
        best_step = i
    else:
        if abs(fv_pre - fv) < delta_value:
            delta_value = abs(fv_pre - fv)
        else:
            delta_no_improve += 1
    fv_pre = fv; i += 1
# -
print("MCTS with gaussion sampling for exploration is about : %0.2f seconds" % (time() - start))
# Notebook cells: inspect the search statistics
delta_value
delta_no_improve
min_val
solution
best_step
| 2.890625 | 3 |
djecommerce/settings/production.py | IHIMEKPEN/Pegasus | 0 | 12771062 | from .base import *
# Production settings overriding base.py.
# DEBUG is read from the environment via decouple's config().
DEBUG = config('DEBUG', cast=bool)
# NOTE(review): '*' accepts any Host header; production deployments should
# restrict this to the real domain name(s).
ALLOWED_HOSTS = ['*']
# Standard Django password strength validators
AUTH_PASSWORD_VALIDATORS = [
    {'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
    {'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator'},
    {'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator'},
    {'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator'}
]
| 1.679688 | 2 |
api.py | Wern-rm/UnitPay-PythonAPI | 1 | 12771063 | from flask import Flask
import locale
from flask_sqlalchemy import SQLAlchemy
from config import Config
# Application bootstrap: Flask app, config, locale and database handle.
app = Flask(__name__)
app.config.from_object(Config)
# Use the system default locale for number/currency formatting
locale.setlocale(locale.LC_ALL, '')
db = SQLAlchemy(app)
@app.route('/')
def index():
    """Root endpoint; acts as a simple liveness check."""
    message = 'UnitPay API'
    return message
from models import UnitpayPayments, AccountData
from unitpay import UnitPay
from flask import request
from sqlalchemy import exc
from datetime import datetime
import decimal
@app.route('/api/v1.0/unitpay/payment/', methods=['GET'])
def unitpay_processor():
    """Webhook endpoint for UnitPay payment notifications.

    Verifies the request signature, records the payment, credits the
    matching account's balance and marks the payment complete.  Always
    answers with the success/error envelope the UnitPay gateway expects.
    """
    unitpay = UnitPay('SECRET_KEY')  # Enter the UnitPay secret key here
    if unitpay.check_handler_request():
        try:
            # Amount to credit (UnitPay "profit" parameter)
            sum_count = decimal.Decimal(request.args.get('params[profit]'))
            account = db.session.query(AccountData).filter(AccountData.name == request.args.get('params[account]')).first()
            if request.args.get('method') == 'pay':
                if account:
                    # Persist the raw payment record first ...
                    pay = UnitpayPayments(unitpay_id=request.args.get('params[unitpayId]'),
                                          account=request.args.get('params[account]'),
                                          sum=request.args.get('params[payerSum]'),
                                          payment_type=request.args.get('params[paymentType]'),
                                          payer_currency=request.args.get('params[payerCurrency]'),
                                          signature=request.args.get('params[signature]'),
                                          profit=request.args.get('params[profit]'))
                    db.session.add(pay)
                    db.session.commit()
                    update_count = sum_count
                    # ... then credit the balance and mark the payment done
                    db.session.query(AccountData).filter(AccountData.name == request.args.get('params[account]')).update({
                        'balance': account.balance + update_count
                    })
                    db.session.query(UnitpayPayments).filter(UnitpayPayments.unitpay_id == request.args.get('params[unitpayId]')).update({
                        'date_complete': datetime.now(),
                        'status': 1
                    })
                    db.session.commit()
                    app.logger.info('The request was successfully processed by the system.')
                    return unitpay.get_success_handler_response("The request was successfully processed by the system.")
                else:
                    app.logger.info('Account with this email does not exist.')
                    return unitpay.get_error_handler_response("Account with this email does not exist.")
            else:
                # Non-"pay" callbacks are acknowledged without DB writes
                app.logger.info('The request was successfully processed by the system without writing to the database because of.')
                return unitpay.get_success_handler_response("The request was successfully processed by the system without writing to the database because of.")
        except exc.SQLAlchemyError as e:
            print(e)
            app.logger.error(e)
            db.session.rollback()
            return unitpay.get_error_handler_response("The request has been processed by the system.")
    else:
        # Signature check failed
        return unitpay.get_error_handler_response("The request has been processed by the system.")
if __name__ == '__main__':
    # Development server only; run behind a WSGI server in production
    app.run(host='127.0.0.1', port=8080)
| 2.3125 | 2 |
updates_to_drs/mf_ccf_mask_may2020.py | njcuk9999/apero-utils | 2 | 12771064 | import numpy as np
from astropy.io import fits
from astropy.table import Table
from scipy.interpolate import InterpolatedUnivariateSpline
import matplotlib.pyplot as plt
#from scipy.signal import medfilt
# Your input template
template = 'Template_s1d_Gl699_sc1d_v_file_AB.fits'
# template = 'Template_s1d_Gl15A_sc1d_v_file_AB.fits'
# template = 'Template_s1d_HD189733_sc1d_v_file_AB.fits'
c = 2.99792458e5  # speed of light (km/s)
# read wavelength and flux. The wavelength is expressed in Ang, we convert to µm
wave_phoenix = fits.getdata('WAVE_PHOENIX-ACES-AGSS-COND-2011.fits') / 10
# bit of code to download goettigen models. Use only you don't have the models locally and change the False to True
if False:
    import os as os
    for temperature in np.arange(3000, 6100, 100):
        # BUGFIX: np.int was removed in NumPy 1.24; use the builtin int.
        temperature = str(int(np.round(temperature, -2)))
        print(temperature)
        os.system(
            'wget ftp://phoenix.astro.physik.uni-goettingen.de/HiResFITS/PHOENIX-ACES-AGSS-COND-2011/Z-0.0/lte0' + temperature + '-4.50-0.0.PHOENIX-ACES-AGSS-COND-2011-HiRes.fits')
# read template and header
tbl, hdr = fits.getdata(template, ext=1, header=True)
# round temperature in header to nearest 100 and get the right model
if 'OBJTEMP' in hdr:
    temperature = hdr['OBJTEMP']
    # clamp to the range of available Goettingen models
    if temperature < 3000:
        temperature = 3000
    if temperature > 6000:
        temperature = 6000
    # BUGFIX: np.int was removed in NumPy 1.24; use the builtin int.
    temperature = str(int(np.round(temperature, -2)))
else:
    # if the header does not have a temperature value, assume it is an early-M. This does not really change much
    temperature = '3600'
# tell the user which model you are using
print('Temperature = ', temperature)
model_file = 'lte0' + temperature + '-4.50-0.0.PHOENIX-ACES-AGSS-COND-2011-HiRes.fits'
print('Model file = ', model_file)
flux_phoenix = fits.getdata(model_file)
flux_phoenix = fits.getdata(model_file)
# get wave and flux vectors for the template
w = np.array(tbl['wavelength'])
f = np.array(tbl['flux'])
# smooth the template with a 7 km/s boxcar. This avoids lines due to spurious noise excursions
f2 = np.array(f)
mask = np.isfinite(f)
f2[~mask] = 0
mask = mask*1.0
# smooth by a boxcar and divide by a weight vector to avoid discontinuities at the edge or regions with NaNs
f = np.convolve(f2,np.ones(7), mode = 'same')/np.convolve(mask,np.ones(7), mode = 'same')
# find the first and second derivative of the flux
df = np.gradient(f)
ddf = np.gradient(np.gradient(f))
# lines are regions there is a sign change in the derivative of the flux
# we also have some checks for NaNs
line = np.where((np.sign(df[1:]) != np.sign(df[:-1])) &
np.isfinite(ddf[1:])
& np.isfinite(df[1:])
& np.isfinite(df[:-1]))[0]
# create the output table: one row per detected line extremum
tbl = Table()
tbl['ll_mask_s'] = np.zeros_like(line, dtype=float)
tbl['ll_mask_e'] = np.zeros_like(line, dtype=float)
dv = 0  # mask width in km/s. Set to zero, but could be changed
for i in range(len(line)):
    # we perform a linear interpolation to find the exact wavelength
    # where the derivatives goes to zero
    wave_cen = (np.polyfit(df[line[i]:line[i] + 2], w[line[i]:line[i] + 2], 1))[1]
    # historically, masks are defined over a box of a given width (hence the 's' "start" and the 'e' "end" here)
    # here the two values are that same, but one could have a non-zero dv value
    corrv = np.sqrt((1 + (-dv / 2) / c) / (1 - (-dv / 2) / c))
    tbl['ll_mask_s'][i] = wave_cen * corrv
    # same but for the upper bound to the line position
    corrv = np.sqrt((1 + (dv / 2) / c) / (1 - (dv / 2) / c))
    tbl['ll_mask_e'][i] = wave_cen * corrv
# wavelength of lines is the mean of start and end.
wavelines = (tbl['ll_mask_s'] + tbl['ll_mask_e']) / 2.0
# the weight is the second derivative of the flux. The sharper the line,
# the more weight we give it
g = np.isfinite(ddf)
weight = InterpolatedUnivariateSpline(w[g], ddf[g])(wavelines)
# weight will be the second derivative
tbl['w_mask'] = weight
# create a spline of the model
model = InterpolatedUnivariateSpline(wave_phoenix, flux_phoenix)
# assume a 0 velocity and search
dv0 = 0.0 #
scale = 1.0
# Two-pass coarse-to-fine velocity scan: each pass re-centres on the CCF
# minimum of the previous pass and shrinks the step by 10x.
for ite in range(2):
    dvs = np.arange(400, dtype=float)
    dvs -= np.mean(dvs)
    dvs *= scale
    dvs += dv0
    # loop in velocity space and fill the CCF values for each velocity step
    ccf = np.zeros_like(dvs)
    # this is the line to change if you want to have positive or negative features
    mask = weight>0
    for i in range(len(dvs)):
        corrv = np.sqrt((1 + dvs[i] / c) / (1 - dvs[i] / c))
        # lines that will be used in the CCF mask
        ccf[i] = np.sum(model(wavelines[mask] / corrv))
    # just centering the cc around one and removing low-f trends.
    mini = np.argmin(ccf)
    dv0 = dvs[mini]
    scale /= 10.0
# NOTE(review): re-indented from a whitespace-mangled source — the plot
# calls could also plausibly sit inside the loop; confirm against upstream.
plt.plot(dvs, ccf)
plt.show()
# find the lowest point in the CCF
minpos = np.argmin(ccf)
# fit a 2nd order polynomial to the bottom pixels (-1 to +1 from bottom) and find minimimum point
fit = np.polyfit(dvs[minpos - 1:minpos + 2], ccf[minpos - 1:minpos + 2], 2)
# minimum of a 2nd order polynomial
systemic_velocity = -.5 * fit[1] / fit[0]
# return systemic velocity measured
print('systemic velocity : ', systemic_velocity, 'km/s')
# generate a nice plot to show positive/negative features
wavelines =tbl['ll_mask_s']
# find flux at sub-pixel position of lines
g = np.isfinite(f)
flux_lines = InterpolatedUnivariateSpline(w[g], f[g])(wavelines)
plt.plot(w,f, 'g-',label = 'spectrum')
# negative w_mask -> positive (emission-like) feature and vice versa
pos_lines = np.array(tbl['w_mask'] < 0)
plt.plot(np.array(wavelines[pos_lines]),flux_lines[pos_lines],'r.', label = 'positive features')
neg_lines = np.array(tbl['w_mask'] > 0)
plt.plot(np.array(wavelines[neg_lines]),flux_lines[neg_lines],'b.', label = 'negative features')
plt.legend()
plt.show()
# updating the table to account for systemic velocity of star
corrv = np.sqrt((1 + systemic_velocity / c) / (1 - systemic_velocity / c)) # relativistic Doppler
tbl['ll_mask_s'] = tbl['ll_mask_s'] / corrv
tbl['ll_mask_e'] = tbl['ll_mask_e'] / corrv
# write the output table: FITS plus ascii masks split by feature sign
fits.writeto(hdr['OBJECT'] + '.fits', tbl, hdr, overwrite=True)
tbl[tbl['w_mask'] < 0].write(hdr['OBJECT'] + '_pos.mas', format='ascii', overwrite=True)
tbl[tbl['w_mask'] > 0].write(hdr['OBJECT'] + '_neg.mas', format='ascii', overwrite=True)
tbl.write(hdr['OBJECT'] + '_full.mas', format='ascii', overwrite=True)
| 2.375 | 2 |
scrapperwheather101.py | srgautam01/Weather | 0 | 12771065 | <reponame>srgautam01/Weather
import requests
from bs4 import BeautifulSoup

# Scrape the current temperature from the NWS point-forecast page.
print("Downloading page...")
page = requests.get("https://forecast.weather.gov/MapClick.php?lat=30.504170000000045&lon=-90.45845999999995")
# BUGFIX: the previous `assert page.status_code == 200` is stripped when
# Python runs with -O; raise_for_status() always raises on HTTP errors.
page.raise_for_status()
print("Parsing HTML...")
soup = BeautifulSoup(page.content, 'html.parser')
print("Extracting temperature...")
# The current temperature lives in the large "myforecast-current-lrg" <p>
temp = soup.find('p', {'class': 'myforecast-current-lrg'}).get_text()
print()
print(f"The temperature is {temp}.")
| 3.453125 | 3 |
tests/test_ktbxsosdconfig.py | tmbx/kctl | 0 | 12771066 | <filename>tests/test_ktbxsosdconfig.py
# -*- mode: python; tab-width: 4; indent-tabs-mode: t; py-indent-offset: 4 -*-
import sys
# Make sure we test local classes first.
sys.path.insert(0, '.')
import unittest
from kctllib.ktbxsosdconfig import *
class KTbxsosdConfigTest(unittest.TestCase):
    """Unit tests for KTbxsosdConfig get/set/save round-tripping."""

    def test(self):
        # Read values from the checked-in sample configuration.
        config = KTbxsosdConfig(source_file="tests/test_ktbxsosdconfig.conf")
        # BUGFIX: the deprecated assert_ alias was removed in Python 3.12;
        # use the modern assert methods (better failure messages too).
        self.assertEqual(config.get("test.item1"), "value1")
        self.assertEqual(config.get("test.item2"), "value2")
        self.assertFalse(config.get("test.item3"))
        config.set("test.item3", "value3")
        self.assertEqual(config.get("test.item3"), "value3")
        # Persist the modified value, then reload with a user overlay file
        # and check all three items survive the round trip.
        config.save(target_file="/tmp/ktbxsosdconfig_test.conf")
        config = KTbxsosdConfig(source_file="tests/test_ktbxsosdconfig.conf",
                                user_file="/tmp/ktbxsosdconfig_test.conf")
        self.assertEqual(config.get("test.item1"), "value1")
        self.assertEqual(config.get("test.item2"), "value2")
        self.assertEqual(config.get("test.item3"), "value3")


if __name__ == "__main__":
    unittest.main()
| 2.578125 | 3 |
ocellaris_post/inspector/panel_files.py | TormodLandet/Ocellaris | 1 | 12771067 | <reponame>TormodLandet/Ocellaris
# Copyright (C) 2017-2019 <NAME>
# SPDX-License-Identifier: Apache-2.0
import yaml
import wx
from . import pub, TOPIC_METADATA, TOPIC_RELOAD
class OcellarisFilesPanel(wx.Panel):
    """wx panel showing the input file and log of the selected result set.

    Subscribes to the reload/metadata pub-sub topics and refreshes its
    file selector and text views lazily on idle.
    """

    def __init__(self, parent, inspector_state):
        super().__init__(parent)
        # Shared inspector state holding the loaded results
        self.istate = inspector_state
        self.layout_widgets()
        self.reload_soon()
        self.Bind(wx.EVT_IDLE, self.on_idle)
        pub.subscribe(self.reload_soon, TOPIC_RELOAD)
        pub.subscribe(self.reload_soon, TOPIC_METADATA)

    def reload_soon(self, evt=None):
        """Repopulate the file selector and schedule a lazy text refresh."""
        lables = [res.label for res in self.istate.results]
        self.results_selector.Set(lables)
        self.results_selector.SetSelection(0)
        # Defer the (possibly expensive) text update to the next idle event
        self.need_update = True
        # In case the wx.Choice was empty it must now grow a bit vertically
        self.GetSizer().Layout()

    def layout_widgets(self):
        """Build the static widget layout: selector, input view, log view."""
        v = wx.BoxSizer(wx.VERTICAL)
        self.SetSizer(v)
        # Choose results to plot
        h = wx.BoxSizer(wx.HORIZONTAL)
        v.Add(h, flag=wx.ALL | wx.EXPAND, border=5)
        h.Add(wx.StaticText(self, label='File:'), flag=wx.ALIGN_CENTER_VERTICAL)
        h.AddSpacer(5)
        self.results_selector = wx.Choice(self)
        self.results_selector.Bind(wx.EVT_CHOICE, self.switch_file)
        h.Add(self.results_selector, proportion=1)
        # Monospaced font for the two read-only text views
        font = wx.Font(10, wx.TELETYPE, wx.NORMAL, wx.NORMAL, False)
        st = wx.StaticText(self, label='Input:')
        st.SetFont(st.GetFont().Bold())
        v.Add(st, flag=wx.ALL, border=5)
        self.input_file = wx.TextCtrl(
            self, style=wx.TE_MULTILINE | wx.TE_READONLY | wx.TE_DONTWRAP
        )
        self.input_file.SetFont(font)
        v.Add(self.input_file, flag=wx.ALL | wx.EXPAND, proportion=1, border=10)
        st = wx.StaticText(self, label='Log:')
        st.SetFont(st.GetFont().Bold())
        v.Add(st, flag=wx.ALL, border=5)
        self.log_file = wx.TextCtrl(
            self, style=wx.TE_MULTILINE | wx.TE_READONLY | wx.TE_DONTWRAP
        )
        self.log_file.SetFont(font)
        v.Add(self.log_file, flag=wx.ALL | wx.EXPAND, proportion=2, border=10)
        v.Fit(self)

    def on_idle(self, evt=None):
        # Perform the deferred refresh requested by reload_soon()
        if self.need_update:
            self.switch_file()

    def switch_file(self, evt=None):
        """
        Switch which simulation is active
        """
        i = self.results_selector.GetSelection()
        if i == wx.NOT_FOUND:
            # Nothing selected: clear both views
            self.input_file.SetValue('')
            self.log_file.SetValue('')
            return
        results = self.istate.results[i]
        inp = yaml.dump(results.input, default_flow_style=False, default_style='')
        log = results.log
        # Truncate huge logs to keep the TextCtrl responsive
        if len(log) > 40000:
            log = (
                log[:20000]
                + '\n\n...\n... MIDDLE SECTION SKIPPED ...\n...\n\n'
                + log[-20000:]
            )
        self.input_file.SetValue(inp)
        self.log_file.SetValue(log)
        self.need_update = False
| 1.976563 | 2 |
src/werkzeug/wrappers/__init__.py | omerholz/werkzeug | 0 | 12771068 | <reponame>omerholz/werkzeug
from .accept import AcceptMixin
from .auth import AuthorizationMixin, WWWAuthenticateMixin
from .base_request import BaseRequest
from .base_response import BaseResponse
from .common_descriptors import (
CommonRequestDescriptorsMixin,
CommonResponseDescriptorsMixin,
)
from .etag import ETagRequestMixin, ETagResponseMixin
from .request import PlainRequest
from .request import Request as Request
from .request import StreamOnlyMixin
from .response import Response as Response
from .response import ResponseStream, ResponseStreamMixin
from .user_agent import UserAgentMixin
| 1.1875 | 1 |
letsencrypt/client/acme.py | rcoscali/lets-encrypt-preview | 1 | 12771069 | <gh_stars>1-10
#!/usr/bin/env python
# acme.py
# validate JSON objects as ACME protocol messages
import json, jsonschema
import pkg_resources
# Load every ACME message schema shipped with the package, keyed by the
# message type name.
# BUGFIX: the original dict comprehension called open() without ever
# closing the files; use a context manager so handles are released.
schemata = {}
for _schema in [
        "authorization", "authorizationRequest", "certificate",
        "certificateRequest", "challenge", "challengeRequest", "defer",
        "error", "revocation", "revocationRequest", "statusRequest"]:
    with open(pkg_resources.resource_filename(
            __name__, "schemata/%s.json" % _schema)) as _fp:
        schemata[_schema] = json.load(_fp)
def acme_object_validate(j):
    """Validate a JSON object against the ACME protocol using JSON Schema.

    Success will return None; failure to validate will raise a
    jsonschema.ValidationError exception describing the reason that the
    object could not be validated successfully."""
    obj = json.loads(j)
    if not isinstance(obj, dict):
        raise jsonschema.ValidationError("this is not a dictionary object")
    if "type" not in obj:
        raise jsonschema.ValidationError("missing type field")
    msg_type = obj["type"]
    if msg_type not in schemata:
        raise jsonschema.ValidationError("unknown type %s" % msg_type)
    jsonschema.validate(obj, schemata[msg_type])
def pretty(s):
    """Return a pretty-printed version of any JSON string (useful when
    printing out protocol messages for debugging purposes."""
    parsed = json.loads(s)
    return json.dumps(parsed, indent=4)
| 2.890625 | 3 |
testarch/unet/model.py | weihao94/deepdyn | 42 | 12771070 | <reponame>weihao94/deepdyn<gh_stars>10-100
import torch
import torch.nn.functional as F
from torch import nn
from utils.weights_utils import initialize_weights
class _DoubleConvolution(nn.Module):
def __init__(self, in_channels, middle_channel, out_channels, p=0):
super(_DoubleConvolution, self).__init__()
layers = [
nn.Conv2d(in_channels, middle_channel, kernel_size=3, padding=p),
nn.BatchNorm2d(middle_channel),
nn.ReLU(inplace=True),
nn.Conv2d(middle_channel, out_channels, kernel_size=3, padding=p),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)
]
self.encode = nn.Sequential(*layers)
def forward(self, x):
return self.encode(x)
class UNet(nn.Module):
    """Classic U-Net: a 4-level contracting encoder (A1_..A4_), a bottleneck
    (A_mid), and a 4-level expanding decoder (_A4.._A1) with center-cropped
    skip connections, ending in a 1x1 conv producing ``num_classes`` maps.

    ``reduce_by`` divides every channel width; 1 keeps the original widths.
    """
    def __init__(self, num_channels, num_classes):
        super(UNet, self).__init__()
        reduce_by = 1
        # Encoder (contracting path).
        self.A1_ = _DoubleConvolution(num_channels, int(64 / reduce_by), int(64 / reduce_by))
        self.A2_ = _DoubleConvolution(int(64 / reduce_by), int(128 / reduce_by), int(128 / reduce_by))
        self.A3_ = _DoubleConvolution(int(128 / reduce_by), int(256 / reduce_by), int(256 / reduce_by))
        self.A4_ = _DoubleConvolution(int(256 / reduce_by), int(512 / reduce_by), int(512 / reduce_by))
        # Bottleneck.
        self.A_mid = _DoubleConvolution(int(512 / reduce_by), int(1024 / reduce_by), int(1024 / reduce_by))
        # Decoder (expanding path): transpose-conv upsample, then double conv
        # on the concatenation of upsampled features + cropped skip features.
        self.A4_up = nn.ConvTranspose2d(int(1024 / reduce_by), int(512 / reduce_by), kernel_size=2, stride=2)
        self._A4 = _DoubleConvolution(int(1024 / reduce_by), int(512 / reduce_by), int(512 / reduce_by))
        self.A3_up = nn.ConvTranspose2d(int(512 / reduce_by), int(256 / reduce_by), kernel_size=2, stride=2)
        self._A3 = _DoubleConvolution(int(512 / reduce_by), int(256 / reduce_by), int(256 / reduce_by))
        self.A2_up = nn.ConvTranspose2d(int(256 / reduce_by), int(128 / reduce_by), kernel_size=2, stride=2)
        self._A2 = _DoubleConvolution(int(256 / reduce_by), int(128 / reduce_by), int(128 / reduce_by))
        self.A1_up = nn.ConvTranspose2d(int(128 / reduce_by), int(64 / reduce_by), kernel_size=2, stride=2)
        self._A1 = _DoubleConvolution(int(128 / reduce_by), int(64 / reduce_by), int(64 / reduce_by))
        # 1x1 conv mapping the final feature maps to class logits.
        self.final = nn.Conv2d(int(64 / reduce_by), num_classes, kernel_size=1)
        initialize_weights(self)

    def forward(self, x):
        # Encoder: double-conv then 2x2 max-pool at each level.
        a1_ = self.A1_(x)
        a1_dwn = F.max_pool2d(a1_, kernel_size=2, stride=2)
        a2_ = self.A2_(a1_dwn)
        a2_dwn = F.max_pool2d(a2_, kernel_size=2, stride=2)
        a3_ = self.A3_(a2_dwn)
        a3_dwn = F.max_pool2d(a3_, kernel_size=2, stride=2)
        a4_ = self.A4_(a3_dwn)
        # a4_ = F.dropout(a4_, p=0.2)
        a4_dwn = F.max_pool2d(a4_, kernel_size=2, stride=2)
        a_mid = self.A_mid(a4_dwn)
        # Decoder: upsample, fuse with the matching encoder output, double-conv.
        a4_up = self.A4_up(a_mid)
        _a4 = self._A4(UNet.match_and_concat(a4_, a4_up))
        # _a4 = F.dropout(_a4, p=0.2)
        a3_up = self.A3_up(_a4)
        _a3 = self._A3(UNet.match_and_concat(a3_, a3_up))
        a2_up = self.A2_up(_a3)
        _a2 = self._A2(UNet.match_and_concat(a2_, a2_up))
        # _a2 = F.dropout(_a2, p=0.2)
        a1_up = self.A1_up(_a2)
        _a1 = self._A1(UNet.match_and_concat(a1_, a1_up))
        final = self.final(_a1)
        return final

    @staticmethod
    def match_and_concat(bypass, upsampled, crop=True):
        """Center-crop ``bypass`` to ``upsampled``'s spatial size, then
        concatenate the two tensors along the channel axis."""
        if crop:
            c = (bypass.size()[2] - upsampled.size()[2]) // 2
            # Negative padding values crop c pixels from each spatial edge.
            bypass = F.pad(bypass, (-c, -c, -c, -c))
        return torch.cat((upsampled, bypass), 1)
# Module-level smoke check: build a 1-channel, 2-class UNet and print its
# trainable parameter count.
# NOTE(review): this runs on *import* of the module, which is likely
# unintended for library code — consider an ``if __name__ == '__main__'`` guard.
m = UNet(1, 2)
torch_total_params = sum(p.numel() for p in m.parameters() if p.requires_grad)
print('Total Params:', torch_total_params)
| 2.25 | 2 |
solution_10.py | johndunne2019/pands-problem-set | 1 | 12771071 | # This program displays a plot of the functions x, x2 and 2x in the range [0, 4]
# <NAME> 2019-03-24
# Plots f(x) = x, f(x) = x**2 and f(x) = 2**x over the closed range [0, 4].
# Reference: matplotlib pyplot tutorial: https://matplotlib.org/users/pyplot_tutorial.html
import numpy as np               # numerical range helper
import matplotlib.pyplot as pl   # plotting

print("The Plot should appear on your screen momentarily")

# BUG FIX: np.arange excludes `stop`, so stop=4 only produced [0, 1, 2, 3]
# and the point x = 4 required by the stated range [0, 4] was missing.
x = np.arange(start=0, stop=5)

a = x       # f(x) = x
b = x * x   # f(x) = x squared
c = 2 ** x  # f(x) = 2 to the power of x

pl.xlabel("x axis", fontsize=12, fontweight='bold')
pl.ylabel("y axis", fontsize=12, fontweight='bold')
pl.title("Plot Generated from Solution_10.py", fontsize=14, fontweight='bold')

# c= is the line colour, lw= the width, ls= the style (dashed);
# label= feeds the legend built below.
pl.plot(a, c='r', lw=4.0, ls='--', label='x')
pl.plot(b, c='g', lw=4.0, ls='--', label='x²')
pl.plot(c, c='y', lw=4.0, ls='--', label='2x')
pl.legend(loc='upper left')  # legend in the top-left corner of the plot
pl.grid(True)
pl.show() # This is the command used to show the plot created above | 4.375 | 4 |
single qubit/deep QL/environment.py | 93xiaoming/RL_state_preparation | 17 | 12771072 | import numpy as np
from scipy.linalg import expm
class Env( object):
    """Single-qubit state-preparation environment for reinforcement learning.

    The state is the qubit wavefunction packed into 4 reals:
    [Re(a0), Re(a1), Im(a0), Im(a1)].  An action selects a discrete
    control-field amplitude; the target state is |1>.
    """
    def __init__(self,
                 action_space=[0,1,2],
                 dt=0.1):
        # NOTE(review): mutable default argument `action_space` is shared
        # across instances; harmless here because it is never mutated.
        super(Env, self).__init__()
        self.action_space = action_space
        self.n_actions = len(self.action_space)
        self.n_features = 4  # length of the packed real-valued state vector
        self.state = np.array([1,0,0,0])  # start in |0>
        self.nstep=0
        self.dt=dt  # evolution time per step
    def reset(self):
        """Reset the qubit to |0> and return the initial state vector."""
        self.state = np.array([1,0,0,0])
        self.nstep = 0
        return self.state
    def step(self, action):
        """Apply one control step; return (state, reward, done, fidelity).

        NOTE(review): np.mat/np.matrix are deprecated in modern NumPy;
        this code relies on their `*` matrix product and `.H` adjoint.
        """
        # Rebuild the complex wavefunction from the packed real state:
        # first half = real parts, second half = imaginary parts.
        psi = np.array([self.state[0:int(len(self.state) / 2)] + self.state[int(len(self.state) / 2):int(len(self.state))] * 1j])
        psi = psi.T
        psi=np.mat(psi)
        J = 4 # control field strength
        sx = np.mat([[0, 1], [1, 0]], dtype=complex)
        sz = np.mat([[1, 0], [0, -1]], dtype=complex)
        U = np.matrix(np.identity(2, dtype=complex))  # NOTE(review): overwritten below; dead assignment
        # Hamiltonian: action-scaled sigma_z control plus a fixed sigma_x drive.
        H = J *float(action)/(self.n_actions-1)* sz + 1 * sx
        U = expm(-1j * H * self.dt)  # one-step unitary evolution operator
        psi = U * psi # final state
        target = np.mat([[0], [1]], dtype=complex)
        # Infidelity with respect to |1>: 1 - |<psi|target>|^2.
        err = 1 - (np.abs(psi.H * target) ** 2).item(0).real
        # Shaped reward: increasing bonuses as the error shrinks
        # (10e-3 == 0.01, i.e. the 1%-error success threshold).
        rwd = 10 * (err<0.5)+100 * (err<0.1)+5000*(err < 10e-3)
        # Episode ends on success or after roughly pi/dt steps.
        done =( (err < 10e-3) or self.nstep>=np.pi/self.dt )
        self.nstep +=1
        psi=np.array(psi)
        psi_T = psi.T
        # Re-pack the complex amplitudes into [real parts, imag parts].
        self.state = np.array(psi_T.real.tolist()[0] + psi_T.imag.tolist()[0])
        return self.state, rwd, done, 1 - err
| 2.328125 | 2 |
server/equationObject.py | petnica-rac-seminari/stunning-calculator-2021 | 1 | 12771073 | from pydantic import BaseModel
class EquationObject(BaseModel):
    """Pydantic request/response body carrying a single equation as a string."""
equation: str | 1.773438 | 2 |
girder/app/app/constants.py | bnmajor/mongochemserver | 14 | 12771074 | class Features:
NOTEBOOKS = 'app.features.notebooks'
class Deployment:
    """Setting keys for deployment-related configuration."""
    SITE = 'app.deployment.site'
class Branding:
    """Setting keys for site branding/configuration values (all plain strings)."""
    PRIVACY = 'app.configuration.privacy'
    LICENSE = 'app.configuration.license'
    HEADER_LOGO_ID = 'app.configuration.header.logo.file.id'
    FOOTER_LOGO_ID = 'app.configuration.footer.logo.file.id'
    # BUG FIX: a stray trailing comma previously made this a 1-tuple
    # instead of a string like every other key in this class.
    FOOTER_LOGO_URL = 'app.configuration.footer.logo.url'
    FAVICON_ID = 'app.configuration.favicon.file.id'
| 1.398438 | 1 |
vanilla_gan.py | Immocat/csc321_assignment4 | 0 | 12771075 | # CSC 321, Assignment 4
#
# This is the main training file for the vanilla GAN part of the assignment.
#
# Usage:
# ======
# To train with the default hyperparamters (saves results to checkpoints_vanilla/ and samples_vanilla/):
# python vanilla_gan.py
import os
import pdb
import pickle
import argparse
import warnings
# Globally silence warnings (e.g. deprecation noise from scipy/torch imports).
warnings.filterwarnings("ignore")
# Numpy & Scipy imports
import numpy as np
import scipy
import scipy.misc
# Torch imports
import torch
import torch.nn as nn
import torch.optim as optim
# Local imports
import utils
from data_loader import get_emoji_loader
from models import DCGenerator, DCDiscriminator
from models import WGANDiscriminator, WGANGenerator
from models import WGANGPDiscriminator, WGANGPGenerator
SEED = 11
# Set the random seed manually for reproducibility.
np.random.seed(SEED)
torch.manual_seed(SEED)
if torch.cuda.is_available():
    # Seed the GPU RNG as well so CUDA runs are reproducible.
    torch.cuda.manual_seed(SEED)
def print_models(G, D):
    """Print a labeled, divider-framed dump of the generator and discriminator."""
    divider = "---------------------------------------"
    for label, model in ((" G ", G), (" D ", D)):
        print(label)
        print(divider)
        print(model)
        print(divider)
def create_model(opts):
    """Build the generator/discriminator pair for the selected GAN variant.

    Returns:
        (G, D) modules, moved to ``opts.device``.
    """
    if opts.GAN_type == 'LSGAN':
        G = DCGenerator(noise_size=opts.noise_size, conv_dim=opts.conv_dim)
        D = DCDiscriminator(conv_dim=opts.conv_dim, batch_norm=not opts.disable_bn)
    elif opts.GAN_type == 'WGAN':
        G = WGANGenerator(noise_size=opts.noise_size, conv_dim=opts.conv_dim)
        D = WGANDiscriminator(conv_dim=opts.conv_dim, batch_norm=not opts.disable_bn)
    elif opts.GAN_type == 'WGANGP':
        G = WGANGPGenerator(noise_size=opts.noise_size, conv_dim=opts.conv_dim)
        # WGANGPDiscriminator takes no batch_norm flag, unlike the others.
        D = WGANGPDiscriminator(conv_dim=opts.conv_dim)
    #print_models(G, D)
    #move to device
    G.to(opts.device) # in-place
    D.to(opts.device) # in-place
    print_models(G, D)
    print('Models are at:'+str(opts.device))
    return G, D
def checkpoint(iteration, G, D, opts):
    """Persist the generator's and discriminator's weights as G.pkl / D.pkl
    under ``opts.checkpoint_dir`` (the ``iteration`` argument is unused)."""
    for model, fname in ((G, 'G.pkl'), (D, 'D.pkl')):
        torch.save(model.state_dict(), os.path.join(opts.checkpoint_dir, fname))
def create_image_grid(array, ncols=None):
    """Tile a batch of images into a single 2-D grid image.

    Args:
        array: images of shape (num_images, channels, cell_h, cell_w).
        ncols: grid columns; defaults to floor(sqrt(num_images)).

    Returns:
        Array of shape (cell_h * nrows, cell_w * ncols, channels); the
        channel axis is squeezed away for single-channel images.
    """
    num_images, channels, cell_h, cell_w = array.shape
    if not ncols:
        ncols = int(np.sqrt(num_images))
    # BUG FIX: ``np.math`` was deprecated and removed in NumPy 2.0; plain
    # integer floor division gives the identical row count.
    nrows = num_images // ncols
    result = np.zeros((cell_h * nrows, cell_w * ncols, channels), dtype=array.dtype)
    for i in range(0, nrows):
        for j in range(0, ncols):
            # Each cell is converted from CHW to HWC before placement.
            result[i * cell_h:(i + 1) * cell_h, j * cell_w:(j + 1) * cell_w, :] = \
                array[i * ncols + j].transpose(1, 2, 0)
    if channels == 1:
        result = result.squeeze()
    return result
def save_samples(G, fixed_noise, iteration, opts):
    """Generate images from ``fixed_noise`` and save them as one grid PNG."""
    generated_images = G(fixed_noise)
    generated_images = utils.to_data(generated_images)  # tensor -> numpy
    grid = create_image_grid(generated_images)
    # merged = merge_images(X, fake_Y, opts)
    path = os.path.join(opts.sample_dir, 'sample-{:06d}.png'.format(iteration))
    # NOTE(review): scipy.misc.imsave was removed in SciPy 1.2 — this call
    # requires an old SciPy (with Pillow installed); confirm the pinned
    # environment or migrate to imageio/PIL.
    scipy.misc.imsave(path, grid)
    print('Saved {}'.format(path))
def sample_noise(dim, num_samples=None):
    """Generate uniform random noise in (-1, 1) for the generator.

    Args:
        dim: dimension of each noise vector.
        num_samples: number of noise vectors to draw.  Defaults to the
            module-level global ``batch_size`` (set in ``__main__``),
            which keeps the original single-argument call sites working.

    Returns:
        A Variable of shape (num_samples, dim, 1, 1) with values in (-1, 1).
    """
    # NOTE(review): the original implicitly depended on the global
    # ``batch_size``; the optional parameter makes that dependency explicit
    # while remaining backward compatible with sample_noise(dim).
    n = batch_size if num_samples is None else num_samples
    return utils.to_var(torch.rand(n, dim) * 2 - 1).unsqueeze(2).unsqueeze(3)
def training_loop_LSGAN(train_dataloader, opts):
    """Run the LSGAN training loop.

    Least-squares losses: D pushes real scores toward 1 and fake scores
    toward 0; G pushes fake scores toward 1.

        * Saves checkpoints every opts.checkpoint_every iterations
        * Saves generated samples every opts.sample_every iterations

    NOTE(review): reads the module-level global ``batch_size`` set in
    ``__main__``; the last batch of an epoch may actually be smaller.
    """
    # Create generators and discriminators
    G, D = create_model(opts)
    # Create optimizers for the generators and discriminators
    if opts.optimizer == 'Adam':
        d_optimizer = optim.Adam(D.parameters(), opts.lr, [opts.beta1, opts.beta2])
        g_optimizer = optim.Adam(G.parameters(), opts.lr, [opts.beta1, opts.beta2])
    elif opts.optimizer == 'RMSProp' or opts.GAN_type == 'WGAN':
        d_optimizer = optim.RMSprop(D.parameters(), opts.lr)
        g_optimizer = optim.RMSprop(G.parameters(), opts.lr)
    print(d_optimizer)
    print(g_optimizer)
    # Fixed noise so saved sample grids are comparable across iterations.
    fixed_noise = sample_noise(opts.noise_size)  # batch_size x noise_size x 1 x 1
    iteration = 1
    total_train_iters = opts.num_epochs * len(train_dataloader)
    device = opts.device
    noise_dim = opts.noise_size
    #batch_size = opts.batch_size
    for epoch in range(opts.num_epochs):
        for batch in train_dataloader:
            real_images, _ = batch
            real_images = real_images.to(device)
            ################################################
            ###         TRAIN THE DISCRIMINATOR         ####
            ################################################
            d_optimizer.zero_grad()
            # 1. Discriminator loss on real images: 0.5 * E[(D(x) - 1)^2].
            D_real_loss = 0.5 * torch.sum((D(real_images) - 1)**2) / batch_size
            #D_real_loss = 0.5 * torch.sum((D(real_images) - 0.9)**2) / batch_size
            # 2. Sample noise uniformly in (-1, 1).
            noise = 2 * torch.rand(batch_size, noise_dim) - 1
            noise = noise.view(batch_size, noise_dim, 1, 1).to(device)
            # 3. Generate fake images from the noise.
            fake_images = G(noise)
            # 4. Discriminator loss on fakes: 0.5 * E[D(G(z))^2].
            D_fake_loss = 0.5 * torch.sum(D(fake_images)**2) / batch_size
            # 5. Total discriminator loss.
            D_total_loss = D_fake_loss + D_real_loss
            D_total_loss.backward()
            d_optimizer.step()
            ###########################################
            ###          TRAIN THE GENERATOR        ###
            ###########################################
            g_optimizer.zero_grad()
            # 1. Sample fresh noise.
            noise = 2 * torch.rand(batch_size, noise_dim) - 1
            noise = noise.view(batch_size, noise_dim, 1, 1).to(device)
            # 2. Generate fake images from the noise.
            fake_images = G(noise)
            # 3. Generator loss: E[(D(G(z)) - 1)^2].
            G_loss = torch.sum((D(fake_images) -1)**2)/ batch_size
            #G_loss = torch.sum((D(fake_images) -0.9)**2)/ batch_size
            G_loss.backward()
            g_optimizer.step()
            # Print the log info
            # NOTE(review): ``.data[0]`` was removed in modern PyTorch; this
            # requires an old version (or ``.item()`` on current ones).
            if iteration % opts.log_step == 0:
                print('Iteration [{:4d}/{:4d}] | D_real_loss: {:6.4f} | D_fake_loss: {:6.4f} | G_loss: {:6.4f}'.format(
                    iteration, total_train_iters, D_real_loss.data[0], D_fake_loss.data[0], G_loss.data[0]))
            # Save the generated samples
            if iteration % opts.sample_every == 0:
                save_samples(G, fixed_noise, iteration, opts)
            # Save the model parameters
            if iteration % opts.checkpoint_every == 0:
                checkpoint(iteration, G, D, opts)
            iteration += 1
def training_loop_WGAN(train_dataloader, opts):
    """Run the WGAN training loop (Wasserstein loss + weight clipping).

        * Saves checkpoints every opts.checkpoint_every iterations
        * Saves generated samples every opts.sample_every iterations

    NOTE(review): reads the module-level global ``batch_size`` set in
    ``__main__``.
    """
    # Create generators and discriminators
    G, D = create_model(opts)
    # Create optimizers for the generators and discriminators
    if opts.optimizer == 'Adam':
        d_optimizer = optim.Adam(D.parameters(), opts.lr, [opts.beta1, opts.beta2])
        g_optimizer = optim.Adam(G.parameters(), opts.lr, [opts.beta1, opts.beta2])
    elif opts.optimizer == 'RMSProp' or opts.GAN_type == 'WGAN':
        d_optimizer = optim.RMSprop(D.parameters(), opts.lr)
        g_optimizer = optim.RMSprop(G.parameters(), opts.lr)
    print(d_optimizer)
    print(g_optimizer)
    # Fixed noise so saved sample grids are comparable across iterations.
    fixed_noise = sample_noise(opts.noise_size)  # batch_size x noise_size x 1 x 1
    iteration = 1
    total_train_iters = opts.num_epochs * len(train_dataloader)
    device = opts.device
    noise_dim = opts.noise_size
    clip_value = 0.01  # hard clamp enforcing the critic's Lipschitz constraint
    for epoch in range(opts.num_epochs):
        for batch in train_dataloader:
            real_images, _ = batch
            real_images = real_images.to(device)
            ################################################
            ###         TRAIN THE DISCRIMINATOR         ####
            ################################################
            d_optimizer.zero_grad()
            # Sample noise uniformly in (-1, 1).
            noise = 2 * torch.rand(batch_size, noise_dim) - 1
            noise = noise.view(batch_size, noise_dim, 1, 1).to(device)
            # Generate fakes; detach so G gets no gradient from the D step.
            fake_images = G(noise).detach()
            # Critic loss: E[D(fake)] - E[D(real)] (minimizing it maximizes
            # the Wasserstein estimate).
            D_total_loss = torch.mean(D(fake_images)) - torch.mean(D(real_images))
            D_total_loss.backward()
            d_optimizer.step()
            # CLIP WEIGHTS!!!! of Dicriminator
            for p in D.parameters():
                p.data.clamp_(-clip_value, clip_value)
            ###########################################
            ###          TRAIN THE GENERATOR        ###
            ###########################################
            g_optimizer.zero_grad()
            # Sample fresh noise.
            noise = 2 * torch.rand(batch_size, noise_dim) - 1
            noise = noise.view(batch_size, noise_dim, 1, 1).to(device)
            # Generate fake images from the noise.
            fake_images = G(noise)
            # Generator loss: -E[D(G(z))].
            G_loss = -torch.mean(D(fake_images))
            #G_loss = torch.sum((D(fake_images) -0.9)**2)/ batch_size
            G_loss.backward()
            g_optimizer.step()
            # Print the log info
            # NOTE(review): ``.data[0]`` requires an old PyTorch (use
            # ``.item()`` on current versions).
            with torch.no_grad():
                if iteration % opts.log_step == 0:
                    print('Iteration [{:4d}/{:4d}] | D_total_loss: {:6.4f} |  G_loss: {:6.4f}'.format(
                        iteration, total_train_iters, D_total_loss.data[0], G_loss.data[0]))
                # Save the generated samples
                if iteration % opts.sample_every == 0:
                    save_samples(G, fixed_noise, iteration, opts)
                # Save the model parameters
                if iteration % opts.checkpoint_every == 0:
                    checkpoint(iteration, G, D, opts)
                iteration += 1
def training_loop_WGANGP(train_dataloader, opts):
    """Run the WGAN-GP training loop (Wasserstein loss + gradient penalty).

        * Saves checkpoints every opts.checkpoint_every iterations
        * Saves generated samples every opts.sample_every iterations
    """
    # Create generators and discriminators
    G, D = create_model(opts)
    # Create optimizers for the generators and discriminators
    if opts.optimizer == 'Adam':
        d_optimizer = optim.Adam(D.parameters(), opts.lr, [opts.beta1, opts.beta2])
        g_optimizer = optim.Adam(G.parameters(), opts.lr, [opts.beta1, opts.beta2])
    elif opts.optimizer == 'RMSProp':
        d_optimizer = optim.RMSprop(D.parameters(), opts.lr)
        g_optimizer = optim.RMSprop(G.parameters(), opts.lr)
    print(d_optimizer)
    print(g_optimizer)
    # Fixed noise so saved sample grids are comparable across iterations.
    fixed_noise = sample_noise(opts.noise_size)  # batch_size x noise_size x 1 x 1
    iteration = 1
    total_train_iters = opts.num_epochs * len(train_dataloader)
    device = opts.device
    noise_dim = opts.noise_size
    lambda_GP = 10  # gradient-penalty weight
    for epoch in range(opts.num_epochs):
        for batch in train_dataloader:
            real_images, _ = batch
            # Unlike the other loops, batch_size is taken from the actual
            # batch here, so partial final batches are handled correctly.
            batch_size = real_images.shape[0]
            real_images = real_images.to(device)
            ################################################
            ###         TRAIN THE DISCRIMINATOR         ####
            ################################################
            d_optimizer.zero_grad()
            # Sample noise uniformly in (-1, 1).
            noise = 2 * torch.rand(batch_size, noise_dim) - 1
            noise = noise.view(batch_size, noise_dim, 1, 1).to(device)
            # Generate fake images from the noise.
            fake_images = G(noise)
            D_fake_loss = torch.mean(D(fake_images))
            # Gradient penalty (GP): evaluate D on a random interpolation
            # between real and fake images and penalize the gradient norm's
            # deviation from 1.
            # NOTE(review): a single scalar eps is drawn for the whole
            # batch; the WGAN-GP paper samples one eps per example.
            random_eps = torch.rand(1, device=device)
            interpolates = (1 - random_eps) * fake_images + random_eps * real_images
            D_interpolates = D(interpolates)
            fake = torch.ones(D_interpolates.size(), device=device)
            # create_graph=True so the penalty itself is differentiable
            # w.r.t. D's parameters.
            gradients = torch.autograd.grad(
                outputs=D_interpolates, inputs=interpolates, grad_outputs=fake, create_graph=True, retain_graph=True, only_inputs=True)[0]
            # Total critic loss: E[D(fake)] - E[D(real)] + lambda * (||grad|| - 1)^2.
            D_total_loss = D_fake_loss - \
                torch.mean(D(real_images)) \
                + lambda_GP * \
                (gradients.norm(2) - 1)**2
            D_total_loss.backward()
            d_optimizer.step()
            ###########################################
            ###          TRAIN THE GENERATOR        ###
            ###########################################
            g_optimizer.zero_grad()
            # Sample fresh noise.
            noise = 2 * torch.rand(batch_size, noise_dim) - 1
            noise = noise.view(batch_size, noise_dim, 1, 1).to(device)
            # Generate fake images from the noise.
            fake_images = G(noise)
            # Generator loss: -E[D(G(z))].
            G_loss = -torch.mean(D(fake_images))
            #G_loss = torch.sum((D(fake_images) -0.9)**2)/ batch_size
            G_loss.backward()
            g_optimizer.step()
            # Print the log info
            # NOTE(review): ``.data[0]`` requires an old PyTorch (use
            # ``.item()`` on current versions).
            with torch.no_grad():
                if iteration % opts.log_step == 0:
                    print('Iteration [{:4d}/{:4d}] | D_total_loss: {:6.4f} |  G_loss: {:6.4f}'.format(
                        iteration, total_train_iters, D_total_loss.data[0], G_loss.data[0]))
                # Save the generated samples
                if iteration % opts.sample_every == 0:
                    save_samples(G, fixed_noise, iteration, opts)
                # Save the model parameters
                if iteration % opts.checkpoint_every == 0:
                    checkpoint(iteration, G, D, opts)
                iteration += 1
def main(opts):
    """Load the data, create output directories, and dispatch to the
    training loop matching ``opts.GAN_type``."""
    # Dataloader over the chosen emoji set.
    train_dataloader, _ = get_emoji_loader(opts.emoji, opts)
    # Make sure output locations exist before training writes into them.
    utils.create_dir(opts.checkpoint_dir)
    utils.create_dir(opts.sample_dir)
    # Dispatch table keyed by GAN variant; an unrecognized value is a
    # silent no-op, matching the original if/elif chain.
    loops = {
        'LSGAN': training_loop_LSGAN,
        'WGAN': training_loop_WGAN,
        'WGANGP': training_loop_WGANGP,
    }
    loop = loops.get(opts.GAN_type)
    if loop is not None:
        loop(train_dataloader, opts)
def create_parser():
    """Build and return the argparse parser for all command-line options."""
    parser = argparse.ArgumentParser()

    # Model hyper-parameters.
    parser.add_argument('--image_size', type=int, default=32, help='The side length N to convert images to NxN.')
    parser.add_argument('--conv_dim', type=int, default=32)
    parser.add_argument('--noise_size', type=int, default=100)
    parser.add_argument('--disable_bn', action='store_true', help='Disable Batch Normalization(BN)')

    # Training hyper-parameters.
    parser.add_argument('--num_epochs', type=int, default=40)
    parser.add_argument('--batch_size', type=int, default=16, help='The number of images in a batch.')
    parser.add_argument('--num_workers', type=int, default=0, help='The number of threads to use for the DataLoader.')
    parser.add_argument('--lr', type=float, default=0.0003, help='The learning rate (default 0.0003)')
    parser.add_argument('--beta1', type=float, default=0.5)
    parser.add_argument('--beta2', type=float, default=0.999)

    # Data source.
    parser.add_argument('--emoji', type=str, default='Apple', choices=['Apple', 'Facebook', 'Windows'], help='Choose the type of emojis to generate.')

    # Output directories and logging/sampling/checkpoint cadence.
    parser.add_argument('--checkpoint_dir', type=str, default='./checkpoints_vanilla')
    parser.add_argument('--sample_dir', type=str, default='./samples_vanilla')
    parser.add_argument('--log_step', type=int, default=10)
    parser.add_argument('--sample_every', type=int, default=200)
    parser.add_argument('--checkpoint_every', type=int, default=400)

    # Device selection (flag maps to opts.disable_cuda).
    parser.add_argument('--disable-cuda', action='store_true', help='Disable CUDA')

    # GAN variant and optimizer.
    parser.add_argument('--GAN_type', type=str, default='WGANGP', choices=['LSGAN','WGAN','WGANGP'], help='Choose the type of GAN')
    parser.add_argument('--optimizer', type=str, default='Adam', choices=['Adam','RMSProp'], help='Choose the type of Optimizer')

    return parser
if __name__ == '__main__':
    parser = create_parser()
    opts = parser.parse_args()
    opts.device = None
    # Prefer the GPU unless explicitly disabled or unavailable.
    if not opts.disable_cuda and torch.cuda.is_available():
        opts.device = torch.device('cuda')
    else:
        opts.device = torch.device('cpu')
    # NOTE(review): ``batch_size`` becomes a module-level global that
    # sample_noise() and the LSGAN/WGAN training loops read directly.
    batch_size = opts.batch_size
    print(opts)
    main(opts)
| 2.390625 | 2 |
python/examples/core/variables.py | chelini/iree-llvm-sandbox | 29 | 12771076 | <reponame>chelini/iree-llvm-sandbox
"""Utilities for search space exploration over linalg operations."""
import typing as tp
class Variable:
  """Abstract base class for all search variables."""

  def __init__(self, name):
    self.name = name

  def assign(self, assignments, value):
    """Record ``value`` under this variable's name in ``assignments``."""
    assignments[self.name] = value
class TypeVariable(Variable):
  """Search variable over the scalar component types of a linalg op."""

  def __init__(self, name, scalar_types):
    super().__init__(name)
    self.scalar_types = scalar_types

  def __repr__(self):
    return f'TypeVariable({self.name})'
class IntVariable(Variable):
  """Search variable over an integer dimension of a linalg op."""

  def __init__(self, name, value_range):
    super().__init__(name)
    self.value_range = value_range

  def __repr__(self):
    return f'IntVariable({self.name}, {self.value_range})'
class BoolVariable(Variable):
  """Search variable over a boolean flag."""

  def __repr__(self):
    return f'BoolVariable({self.name})'
class DimensionVariable(IntVariable):
  """Search variable over one of the operation's dimension sizes."""

  def __init__(self, name, value_range):
    super().__init__(name, value_range)

  def __repr__(self):
    return f'DimensionVariable({self.name}, {self.value_range})'
class TilingSizesVariable(Variable):
  """Search variable over tiling-size vectors.

  Length and value ranges are looked up by variable name, falling back
  to each mapping's 'default' entry when the name is absent.
  """

  def __init__(self, name, length_ranges, value_ranges):
    super().__init__(name)
    # Lazy fallback: 'default' is only accessed when the name is missing.
    self.length_range = (length_ranges[name] if name in length_ranges
                         else length_ranges['default'])
    self.value_range = (value_ranges[name] if name in value_ranges
                        else value_ranges['default'])

  def __repr__(self):
    return f'TilingSizesVariable({self.name}, {self.length_range}, {self.value_range})'
class InterchangeVariable(Variable):
  """Search variable over loop-dimension interchange permutations."""

  def __init__(self, name, length_ranges):
    super().__init__(name)
    # Lazy fallback: 'default' is only accessed when the name is missing.
    self.length_range = (length_ranges[name] if name in length_ranges
                         else length_ranges['default'])

  def __repr__(self):
    return f'InterchangeVariable({self.name}, {self.length_range})'
class PeelingVariable(Variable):
  """Search variable over which loops to peel."""

  def __init__(self, name, length_ranges):
    super().__init__(name)
    # Lazy fallback: 'default' is only accessed when the name is missing.
    self.length_range = (length_ranges[name] if name in length_ranges
                         else length_ranges['default'])

  def __repr__(self):
    return f'PeelingVariable({self.name}, {self.length_range})'
class PackPaddingVariable(Variable):
  """Search variable over pack-padding choices."""

  def __init__(self, name, length_ranges):
    super().__init__(name)
    # Lazy fallback: 'default' is only accessed when the name is missing.
    self.length_range = (length_ranges[name] if name in length_ranges
                         else length_ranges['default'])

  def __repr__(self):
    return f'PackPaddingVariable({self.name}, {self.length_range})'
class HoistPaddingVariable(IntVariable):
  """Search variable over hoist-padding depths.

  Note: intentionally calls Variable.__init__ directly (bypassing
  IntVariable.__init__) because the value range is resolved here from
  the ``value_ranges`` mapping rather than passed through.
  """

  def __init__(self, name, length_ranges, value_ranges):
    Variable.__init__(self, name)
    # Lazy fallback: 'default' is only accessed when the name is missing.
    self.length_range = (length_ranges[name] if name in length_ranges
                         else length_ranges['default'])
    self.value_range = (value_ranges[name] if name in value_ranges
                        else value_ranges['default'])

  def __repr__(self):
    return f'HoistPaddingVariable({self.name}, {self.length_range}, {self.value_range})'
class ChoiceVariableBase(Variable):
  """Base class for choice variables.

  Subclasses must define an ``options`` attribute holding the sequence
  of options to choose from.
  """
  options: tp.Sequence

  def __repr__(self):
    return f'{self.__class__.__name__}({self.name}, {self.options})'
| 2.8125 | 3 |
wordrank/feature.py | shibing624/term-weighting | 28 | 12771077 | <reponame>shibing624/term-weighting<gh_stars>10-100
# -*- coding: utf-8 -*-
"""
@author:XuMing(<EMAIL>)
@description:
"""
from wordrank import config
from wordrank.features.language_feature import LanguageFeature
from wordrank.features.statistics_feature import StatisticsFeature
from wordrank.features.text_feature import TextFeature
from wordrank.utils.logger import logger
class Feature(object):
    """Computes per-term and per-sentence features for term ranking.

    The three underlying extractors (text, statistics, language) are
    built lazily on first use so that constructing a Feature is cheap.
    """

    def __init__(self,
                 stopwords_path=config.stopwords_path,
                 person_name_path=config.person_name_path,
                 place_name_path=config.place_name_path,
                 common_char_path=config.common_char_path,
                 segment_sep=config.segment_sep,
                 domain_sample_path=config.domain_sample_path,
                 ngram=config.ngram,
                 pmi_path=config.pmi_path,
                 entropy_path=config.entropy_path,
                 sentence_delimiters=config.sentence_delimiters,
                 ):
        # Only paths/settings are stored here; the heavy data files are
        # loaded lazily by _init_feature().
        self.stopwords_path = stopwords_path
        self.person_name_path = person_name_path
        self.place_name_path = place_name_path
        self.common_char_path = common_char_path
        self.segment_sep = segment_sep
        self.domain_sample_path = domain_sample_path
        self.ngram = ngram
        self.pmi_path = pmi_path
        self.entropy_path = entropy_path
        self.sentence_delimiters = sentence_delimiters
        self.text_feature = None
        self.statistics_feature = None
        # CONSISTENCY FIX: language_feature was previously created only in
        # _init_feature(); declare it here like the other two extractors.
        self.language_feature = None
        self.feature_inited = False

    def _init_feature(self):
        """Load the three feature extractors and their data files (once).

        :return: None
        """
        self.text_feature = TextFeature(
            stopwords_path=self.stopwords_path,
            person_name_path=self.person_name_path,
            place_name_path=self.place_name_path,
            common_char_path=self.common_char_path,
            segment_sep=self.segment_sep
        )
        self.statistics_feature = StatisticsFeature(
            domain_sample_path=self.domain_sample_path,
            ngram=self.ngram,
            pmi_path=self.pmi_path,
            entropy_path=self.entropy_path)
        self.language_feature = LanguageFeature()
        self.feature_inited = True

    def check_feature_inited(self):
        """Ensure the extractors are loaded, initializing them on first call.

        :return: None
        """
        if not self.feature_inited:
            self._init_feature()

    def get_feature(self, query, is_word_segmented=False):
        """Get features from query.

        :param query: input query
        :param is_word_segmented: bool, whether the query is already word-segmented
        :return: (features, terms) — parallel lists: one numeric feature
            vector (sentence features + term features) per term.
        """
        features = []
        terms = []
        self.check_feature_inited()

        text_terms, text_sent = self.text_feature.get_feature(query, is_word_segmented=is_word_segmented)
        stat_terms, stat_sent = self.statistics_feature.get_feature(query, is_word_segmented=is_word_segmented)
        lang_terms, lang_sent = self.language_feature.get_feature(query, is_word_segmented=is_word_segmented)
        # Sentence-level features: merge the three extractors' outputs into one.
        text_sent.update(stat_sent)
        text_sent.update(lang_sent)
        logger.debug('sentence features: %s' % text_sent)
        sent_feature = [text_sent.query_length, text_sent.term_size, text_sent.ppl]
        # Term-level features: merge the three per-term records, then flatten
        # into a fixed-order numeric vector prefixed by the sentence features.
        for text, stat, lang in zip(text_terms, stat_terms, lang_terms):
            text.update(stat)
            text.update(lang)
            term_feature = [text.term_length, text.idx, text.offset, float(text.is_number),
                            float(text.is_chinese), float(text.is_alphabet), float(text.is_stopword),
                            float(text.is_name), float(text.is_common_char), text.embedding_sum, text.del_term_score,
                            text.idf, text.text_rank_score, text.tfidf_score, text.pmi_score, text.left_entropy_score,
                            text.right_entropy_score, text.del_term_ppl, text.term_ngram_score, text.left_term_score,
                            text.right_term_score]
            feature = sent_feature + term_feature
            features.append(feature)
            terms.append(text.term)
        logger.debug("[query]feature size: %s, term size: %s" % (len(features), len(terms)))
        return features, terms
| 2.421875 | 2 |
snapchat_problems/problem_5.py | loftwah/Daily-Coding-Problem | 129 | 12771078 | <filename>snapchat_problems/problem_5.py
"""This problem was asked by Snapchat.
You are given an array of length N, where each element i represents the number of ways
we can produce i units of change. For example, [1, 0, 1, 1, 2] would indicate that
there is only one way to make 0, 2, or 3 units, and two ways of making 4 units.
Given such an array, determine the denominations that must be in use.
In the case above, for example, there must be coins with value 2, 3, and 4.
""" | 4.0625 | 4 |
ex068.py | juniorpedroso/Exercicios-CEV-Python | 0 | 12771079 | <reponame>juniorpedroso/Exercicios-CEV-Python<gh_stars>0
from random import randint
venceu = 0 # iniciando a variável de quantas vezes o usuário venceu
print('-=' * 20)
print('VAMOS JOGAR PAR OU IMPAR')
while True:
print('-=' * 20)
usuario_valor = int(input('Digite um valor: ')) # Recebe o valor do usuário
computador = randint(0, 10) # O computador escolhe um número aleatório
soma = usuario_valor + computador # Calcula a soma entre o valor do usuário e do computador
usuario_pi = ' '
while usuario_pi not in 'PI':
usuario_pi = str(input('Par ou Ímpar? [P/I]: ')).strip().upper()[0] # Recebe a opção Par ou Ímpar
print(f'Você jogou {usuario_valor} e o computador {computador}. Total de {soma} ', end='')
print('DEU PAR' if soma % 2 ==0 else 'DEU [IMPAR')
if usuario_pi == 'P':
if soma % 2 == 0:
print('Você VENCEU!\nVamos jogar novamente...')
venceu += 1
else:
break
if usuario_pi == 'I':
if soma % 2 == 1:
print('Você VENCEU!\nVamos jogar novamente...')
venceu += 1
else:
break
print(f'GAME OVER! Você venceu {venceu} vezes.')
| 3.84375 | 4 |
ejemplos/worldcup/bpgoleslv.py | amherag/databook | 0 | 12771080 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
df = pd.read_csv('fifa-world-cup/WorldCupMatches.csv')
goles = map(sum,zip(df['Home Team Goals'], df['Away Team Goals']))
fig, ax = plt.subplots()
# the histogram of the data
ax.boxplot(list(goles),
vert=True, # vertical box alignment
patch_artist=True, # fill with color
labels=['Goles']) # will be used to label x-ticks
ax.set_ylim(-1, 15)
# add a 'best fit' line
ax.set_ylabel('Goles')
ax.set_title(r'Goles por partido en Copas del Mundo hasta 2014')
ax.yaxis.grid(True)
# Tweak spacing to prevent clipping of ylabel
fig.tight_layout()
plt.show()
| 3.125 | 3 |
neighbourhoodapp/migrations/0005_auto_20200326_1744.py | marknesh/neighbourhood-watch | 0 | 12771081 | # Generated by Django 3.0.4 on 2020-03-26 14:44
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('neighbourhoodapp', '0004_delete_signupforms'),
]
operations = [
migrations.RenameModel(
old_name='User',
new_name='Users',
),
migrations.AlterField(
model_name='business',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('photo', models.ImageField(upload_to='posted')),
('comment', models.CharField(max_length=300)),
('neighbourhood', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='neighbourhoodapp.Neighbourhood')),
],
),
]
| 1.609375 | 2 |
anchor_txt/__init__.py | vitiral/anchor | 1 | 12771082 | <reponame>vitiral/anchor<gh_stars>1-10
# anchor_txt: attributes in markdown
#
# Copyright (C) 2019 <NAME> <github.com/vitiral>
#
# The source code is Licensed under either of
#
# * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
#
# at your option.
#
# Unless you explicitly state otherwise, any contribution intentionally submitted
# for inclusion in the work by you, as defined in the Apache-2.0 license, shall
# be dual licensed as above, without any additional terms or conditions.
"""
anchor_txt: markdown with attributes
anchor_txt adds the ability to embed attributes in markdown files so that
external tools can more easily link them to eachother and code, as well as
perform other operations.
Use ``anchor_txt.Section.from_md_path`` to load a markdown file.
The syntax used is in the README.md
"""
from __future__ import print_function
from .section import Section
from .mdsplit import Header
from .mdsplit import ReferenceLink
from .mdsplit import Code
from .mdsplit import Text
def main(argv):
"""Main function for cmdline."""
import sys
import argparse
parser = argparse.ArgumentParser(
description=
'Process a markdown file into sections and attributes and print to stdout'
)
parser.add_argument('path', help='path to a markdown file')
parser.add_argument('--format',
help='format to output to on of [json, yaml]',
default='yaml')
args = parser.parse_args(argv)
root = Section.from_md_path(args.path).to_dict()
if args.format == 'yaml':
import yaml
print(yaml.safe_dump(root, indent=4))
elif args.format == 'json':
import json
print(json.dumps(root, indent=4))
else:
sys.stderr.write('Invalid --format={}\n'.format(args.format))
return 1
return 0
| 2.984375 | 3 |
ttt/t2t_trainer.py | wangcongcong123/ | 42 | 12771083 | ''''
this is a customize trainer for T5-like mode training,
in this class, the training loop is customized for more flexibility and control over
'''
import math
import os
import sys
import warnings
import tensorflow as tf
from tqdm import tqdm
from sklearn.metrics import accuracy_score, classification_report
import numpy as np
from keras import backend as K
from ttt.utils import add_filehandler_for_logger, get_existing_cks
from tensorboardX import SummaryWriter
# for translation evaluation from: https://github.com/mjpost/sacrebleu
# which is also used in the original T5 paper
import sacrebleu
from .utils import write_args_enhance, save_ck, dictionize_t2t_dataset, set_seed
class T2TTrainer():
def __init__(self, args, logger):
self.eval_on = args.eval_on
assert self.eval_on in ["acc",
"bleu"], "now t2t training only supports --eval_on acc, bleu, only works when --do_eval=True"
# self.best = -np.Inf
self.patience = args.patience
self.wait = 0
self.logger = logger
self.args = args
self.use_tb = self.args.__dict__.get('use_tb', False)
self._tb_writer = None
if self.use_tb:
self._tb_writer = SummaryWriter(log_dir=self.args.__dict__.get('output_folder', "runs"))
self.scheduler = args.scheduler
if "learning_rate" in self.args.__dict__:
self.lr_to_reach = args.learning_rate
else:
self.lr_to_reach = args.lr
self.args.best = np.Inf if self.args.eval_on == "loss" or self.args.eval_on == "perplexity" else - np.Inf
self.best = self.args.best
def train(self, model, strategy, tokenizer, inputs=None, train_dataset=None, eval_dataset=None, evaluate_fn=None, verbose=False):
if inputs is None:
assert train_dataset is not None, "you have to pass either inputs or train_dataset"
else:
warnings.warn(
"Passing `inputs` as a keyword argument is deprecated. Use train_dataset and eval_dataset instead.",
FutureWarning,
)
if isinstance(inputs, tuple):
inputs = dictionize_t2t_dataset(*inputs)
if inputs is not None:
x_train, y_train = inputs["x_train"], inputs["y_train"]
num_train_examples = len(inputs["y_train"]["target_input_ids"])
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
else:
if hasattr(train_dataset, "num_examples"):
num_train_examples = train_dataset.num_examples
else:
num_train_examples = tf.data.experimental.cardinality(train_dataset).numpy()
self.logger.info(f"set random seed for everything with {self.args.seed}")
set_seed(self.args.seed)
global_batch_size = self.args.per_device_train_batch_size * strategy.num_replicas_in_sync
train_dataset = train_dataset.shuffle(buffer_size=self.args.seed).batch(global_batch_size)
train_dist_dataset = strategy.experimental_distribute_dataset(train_dataset)
# THERE WILL BE exceptions when switching to distributed_dataset when running on tpus if
# val_dist_dataset = strategy.experimental_distribute_dataset(eval_dataset)
train_length = math.ceil(num_train_examples / global_batch_size)
self.steps_per_epoch = train_length
if inputs is not None:
if self.args.do_eval:
assert "x_eval" in inputs and "y_eval" in inputs, "do_eval=True, and no validation data is found"
x_val, y_val = inputs["x_eval"], inputs["y_eval"]
eval_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))
eval_dataset = eval_dataset.batch(self.args.eval_batch_size)
eval_steps = math.ceil(
len(inputs["y_eval"]["target_input_ids"]) / (self.args.eval_batch_size))
else:
if self.args.do_eval:
if hasattr(eval_dataset, "num_examples"):
eval_num_examples = eval_dataset.num_examples
else:
eval_num_examples = tf.data.experimental.cardinality(eval_dataset).numpy()
eval_steps = math.ceil(eval_num_examples / (self.args.eval_batch_size))
eval_dataset = eval_dataset.batch(self.args.eval_batch_size)
if verbose:
self.logger.info(model.summary())
# these are used for non-constant lr scheduler
if "num_train_epochs" in self.args.__dict__:
self.args.num_epochs_train = self.args.num_train_epochs
if "log_and_save_steps" in self.args.__dict__:
self.args.log_steps = self.args.log_and_save_steps
self.total_steps = self.steps_per_epoch * self.args.num_epochs_train
if "warmup_steps_or_ratio" in self.args.__dict__:
if self.args.warmup_steps_or_ratio <= 1 and self.args.warmup_steps_or_ratio > 0:
self.args.warmup_steps = int(self.total_steps * self.args.warmup_steps_or_ratio)
else:
self.args.warmup_steps = self.args.warmup_steps_or_ratio
else:
self.args.warmup_steps = int(self.total_steps * self.args.warmup_ratio)
self.warmup_steps = self.args.warmup_steps
write_args_enhance(self.args, logger=self.logger)
with strategy.scope():
optimizer = tf.keras.optimizers.Adam(lr=self.args.lr if self.scheduler.startswith("constant") else 0.0)
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE
)
def compute_loss(labels, predictions):
per_example_loss = loss_fn(labels, predictions)
return tf.nn.compute_average_loss(per_example_loss, global_batch_size=global_batch_size)
def train_step(x_train, y_train):
with tf.GradientTape() as tape:
# here some changes has been made (compared to before commit `a07c58e` ) to fix a bug reported here: https://github.com/wangcongcong123/ttt/issues/2
# The following describes how this bug is fixed
# the compute_loss function in transformers:TFT5ForConditionalGeneration has already taken care of the loss computation (already averaged!!!!) that failed
# when switching to TPU, hence we re-compute it here using the returned logits from the model ready for backprop instead of using the internally calculated loss
outputs = model(inputs=x_train["source_input_ids"], attention_mask=x_train["source_attention_mask"],
decoder_attention_mask=x_train["target_attention_mask"],
labels=y_train["target_input_ids"], training=True, return_dict=True)
logits = outputs.logits
loss = compute_loss(tf.reshape(y_train["target_input_ids"], (-1, y_train["target_input_ids"].shape[-1])),
tf.reshape(logits, (-1, logits.shape[-1])))
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
return loss
@tf.function
def distributed_train_step(x_train, y_train):
per_replica_losses = strategy.experimental_run_v2(train_step, args=(x_train, y_train,))
return strategy.reduce(tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None)
# evaluate
def evaluate(steps, tag="epoch"):
assert tag in ["epoch", "global_step"]
gts = []
preds = []
for x_eval, y_eval in tqdm(eval_dataset, total=eval_steps, desc="evaluating..."):
predictions = model.generate(input_ids=x_eval["source_input_ids"],
attention_mask=x_eval["source_attention_mask"],
max_length=self.args.max_tgt_length)
pred = [tokenizer.decode(ids) for ids in predictions]
gt = [tokenizer.decode(ids) for ids in y_eval["target_input_ids"]]
# labels (not -100 replaced since it is not used to calculate loss here)
preds.extend(pred)
gts.extend(gt)
if self.eval_on == "bleu":
# bleu = 0
bleu = sacrebleu.corpus_bleu(preds, [gts])
eval_score = bleu.score
else:
eval_score = accuracy_score(gts, preds)
self.logger.info(f"val_cls_report: {classification_report(gts, preds, digits=4)}")
if self.use_tb:
self._tb_writer.add_scalar(f"val_{self.eval_on}_{tag}", eval_score, steps)
self.logger.info("\n")
self.logger.info(f"*******eval at {tag} = {steps} on validation dataset*********")
self.logger.info(f"val_{self.eval_on}: {eval_score}")
if self.eval_on == "acc" or self.eval_on == "bleu":
if eval_score >= self.best:
self.wait = 0
self.best = eval_score
self.logger.info(
f"so far the best check point at {tag}={steps} based on eval_on {self.eval_on}")
# self.save_ck(model, steps, tag, best_ck=True)
save_ck(self.args, self.logger, model, tokenizer=tokenizer, steps=steps,
tag=tag, best_ck=False, from_tf=True)
else:
self.wait += 1
else:
raise ValueError("not support yet")
self.logger.info(f"best so far({self.eval_on}): {self.best}")
self.logger.info(f"early stop count: {self.wait}/{self.patience}")
# self.save_ck(model, steps, tag)
save_ck(self.args, self.logger, model, tokenizer=tokenizer, steps=steps,
tag=tag, best_ck=False, from_tf=True)
if self.wait >= self.patience:
self.logger.info("run out of patience, early stop")
if self.use_tb:
self._tb_writer.close()
sys.exit(0)
def update_lr(global_step):
# already tested on tpu, works fine
# global_step is dynamically passed here
if global_step <= self.warmup_steps:
if self.scheduler == "warmuplinear" or self.scheduler == "warmupcostant":
inc = self.lr_to_reach / self.warmup_steps
K.set_value(optimizer.learning_rate, K.eval(optimizer.lr) + inc)
else:
if self.scheduler == "warmuplinear" or self.scheduler == "constantlinear":
dec = self.lr_to_reach / (self.total_steps - self.warmup_steps)
K.set_value(optimizer.learning_rate, K.eval(optimizer.lr) - dec)
# for "constant" scheduler, nothing to do here
global_step = 0
early_exit = False
interval_loss = 0.0
interval_count = 0
for epoch in tqdm(range(self.args.num_epochs_train), desc="epochs"):
self.logger.info(f"start training at epoch = {epoch}")
self.logger.info(f"global train batch size = {global_batch_size}")
self.logger.info(f"using learning rate scheduler: {self.scheduler}")
self.logger.info(
f"num_train_examples: {num_train_examples}, total_steps: {self.total_steps}, steps_per_epoch: {self.steps_per_epoch}")
if self.scheduler != "constant":
self.logger.info(f"warmup_steps:{self.warmup_steps}")
pbar = tqdm(enumerate(train_dist_dataset), total=train_length)
for step, (x_train, y_train) in pbar:
# learning rate scheduler
update_lr(global_step)
loss = distributed_train_step(x_train, y_train)
interval_loss += loss.numpy()
interval_count += 1
global_step += 1
pbar.set_description(f"training - epoch {epoch + 1}/{self.args.num_epochs_train} iter {step}: train loss {loss.numpy():.5f}. lr {optimizer.lr.numpy():e}")
if self.args.log_steps != -1 and global_step % self.args.log_steps == 0:
if self.use_tb:
self._tb_writer.add_scalar("train_loss_global_step", interval_loss / interval_count,
global_step)
self._tb_writer.add_scalar("train_lr_global_step", optimizer.lr.numpy(), global_step)
if self.args.do_eval:
if evaluate_fn is not None and eval_dataset is not None:
eval_dict = evaluate_fn(self.args, self.logger, model, tokenizer, eval_dataset, steps=global_step, tag="global_step", eval_length=eval_steps)
if self._tb_writer:
if "eval_scores" in eval_dict:
for key, value in eval_dict["eval_scores"].items():
self._tb_writer.add_scalar(f"eval_{key}_global_step", value, global_step)
if "is_early_stop" in eval_dict and eval_dict["is_early_stop"]:
self.logger.info(f"run out of patience at global step = {global_step}, early stop")
if self._tb_writer:
self._tb_writer.close()
early_exit = True
break
else:
evaluate(global_step, tag="global_step")
self.logger.info(f"train loss at global_step {global_step}: {interval_loss / interval_count}")
interval_loss = 0.0
interval_count = 0
if early_exit:
break
train_loss = interval_loss / interval_count
interval_loss = 0.0
interval_count = 0
if self.args.log_steps == -1:
if self.args.do_eval:
if evaluate_fn is not None and eval_dataset is not None:
eval_dict = evaluate_fn(self.args, self.logger, model, tokenizer, eval_dataset, steps=epoch + 1, tag="epoch", eval_length=eval_steps)
if self._tb_writer:
if "eval_scores" in eval_dict:
for key, value in eval_dict["eval_scores"].items():
self._tb_writer.add_scalar(f"eval_{key}_epoch", value, epoch + 1)
if "is_early_stop" in eval_dict and eval_dict["is_early_stop"]:
self.logger.info(f"run out of patience at epoch = {epoch + 1}, early stop")
if self._tb_writer:
self._tb_writer.close()
break
else:
evaluate(epoch + 1, tag="epoch")
if self.use_tb:
self._tb_writer.add_scalar("train_loss_epoch", train_loss,
global_step)
self._tb_writer.add_scalar("train_lr_epoch", optimizer.lr.numpy(), global_step)
self.logger.info(f"train loss at end of epoch {epoch + 1}: {train_loss}")
if not self.args.do_eval:
# if do not do evaluate, the checkpoint at the end of epoch needs to be saved
# self.save_ck(model, epoch + 1, tag="epoch")
save_ck(self.args, self.logger, model, tokenizer=tokenizer, steps=epoch + 1,
tag="epoch", best_ck=False, from_tf=True)
if self.use_tb:
self._tb_writer.close()
| 2.515625 | 3 |
jp.atcoder/abc032/abc032_d/8538935.py | kagemeka/atcoder-submissions | 1 | 12771084 | <reponame>kagemeka/atcoder-submissions
# 2019-11-20 22:09:02(JST)
import sys
# import collections
# import math
# from string import ascii_lowercase, ascii_uppercase, digits
# from bisect import bisect_left as bi_l, bisect_right as bi_r
# import itertools
# from functools import reduce
# import operator as op
# import re
# import heapq
# import array
# from scipy.misc import comb # (default: exact=False)
# import numpy as np
def main():
n, W, *VW = [int(x) for x in sys.stdin.read().split()]
v, w = VW[0 : n * 2 : 2], VW[1 : n * 2 : 2]
dp = [[0 for _ in range(W + 1)] for _ in range(n + 1)]
for i in range(n):
for j in range(W + 1):
if w[i] > j:
dp[i + 1][j] = dp[i][j]
else:
dp[i + 1][j] = max(dp[i][j], dp[i][j - w[i]] + v[i])
print(dp[n][W])
if __name__ == "__main__":
main()
| 2.28125 | 2 |
src/utils/callbacks.py | Neelesh-Jaiswal/perceptron_implementation | 0 | 12771085 | import tensorflow as tf
import os
import numpy as np
import time
def get_timestamp(name):
timestamp = time.asctime().replace(' ', '_').replace(':', '')
unique_name = f'{name}_at_{timestamp}'
return unique_name
def get_callbacks(config, X_train):
logs = config['logs']
unique_dir_name = get_timestamp('tb_logs')
TENSORBOARD_ROOT_LOG_DIR = os.path.join(logs['logs_dir'], logs[TENSORBOARD_ROOT_LOG_DIR], unique_dir_name)
os.makedirs(TENSORBOARD_ROOT_LOG_DIR, exist_ok=True)
tensorboard_cb = tf.keras.callbacks.TensorBoard(log_dir=TENSORBOARD_ROOT_LOG_DIR)
file_writer = tf.summary.create_file_writer(log_dir=TENSORBOARD_ROOT_LOG_DIR)
with file_writer.as_default():
images = np.reshape(X_train[10:30], (-1, 28, 28, 1))
tf.summary.image('20 handwritten digit samples', images, max_outputs=25, step=0)
params = config['params']
early_stopping_cb = tf.keras.callbacks.EarlyStopping(patience=params['patience'],
restore_best_weights=params['restore_best_weights'])
artifacts = config['artifacts']
CKPT_dir = os.path.join(artifacts['artifacts_dir'], artifacts['CHECKPOINT_DIR'])
os.makedirs(CKPT_dir, exist_ok=True)
CKPT_path = os.path.join(CKPT_dir, 'model_ckpt.h5')
checkpoint_cb = tf.keras.callbacks.ModelCheckpoint(CKPT_path, save_best_only=True)
return [tensorboard_cb, early_stopping_cb, checkpoint_cb]
| 2.078125 | 2 |
Laplace_experiments/cifar/new_models/__init__.py | Sanaelotfi/Bayesian_model_comparison | 11 | 12771086 | <reponame>Sanaelotfi/Bayesian_model_comparison
from .fixup_resnet_cifar import *
from .resnet_cifar import *
from .cnn_models import * | 0.824219 | 1 |
pyringe/plugins/mod_base.py | Freezind/pyringe | 507 | 12771087 | #! /usr/bin/env python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides basic testing modes for the remote debugger."""
import abc
class DebuggingPlugin(object):
"""Superclass for all debugging plugins."""
__metaclass__ = abc.ABCMeta
def __init__(self, inferior, name):
self.name = name
self.position = None
self.inferior = inferior
super(DebuggingPlugin, self).__init__()
@abc.abstractproperty
def commands(self):
return []
| 2.1875 | 2 |
next/database_client/CacheStore/redis_data_structures/matrix-test2.py | sumeetsk/NEXT-1 | 0 | 12771088 | import redismatrix
import numpy
import time
import matplotlib.pyplot as plot
from numpy import *
from numpy.random import *
from redismatrix import *
from matplotlib.font_manager import FontProperties
def main():
file2 = open('T[i][j].txt', 'w')
file3 = open('test.txt', 'w')
n = 100 #
m = 10 #
startC = 0
deltaC = 0
totalC = 0
total = 0
averageR = 0
T = zeros((n, m))
Y = zeros((n, 1))
for i in range(n):
file3.write('\nCreating Matrix %s\n' % i)
print '\nCreating Matrix %s' % i
X = randn(1000, 1000) # Creating random matrix
startC = time.time()
A_i = Matrix(i, 'E', X) # Allocating new matrix 1000*1000 on Redis
deltaC = time.time() - startC # Calculating time for a matrix creation
totalC += deltaC # Calculating time taken for creation of n matrices
file3.write('Time: {}\n'.format(deltaC))
total = 0
for j in range(m):
k = random.random_integers(0,i) # Selecting random matrix
ell = random.random_integers(0,n-1) # Selecting random index
file3.write('\tMatrix %s, row %s\n' % (k, ell))
start = time.time()
A_k = Matrix(k,'E')
A_k = A_k[ell]
t_j = time.time() - start # Calculating time taken to retrieve row
file3.write('Row %s: %s\n' % (ell, A_k))
# print A_k
# print t_j
total += t_j # Calculating total time taken for retrieving all rows
T[i][j] = t_j
file2.write('%s\n' % T[i][j])
Y[i] = (1.0/m)*total # Calculating average time taken to retrieve m rows
averageR += Y[i][0] # Calculating total time taken to retrieve a row for n matrices
file3.write('Time retrieving row from matrix %s: %s\n' % (i, Y[i][0]))
savetxt('T (numpy_array).txt', T)
averageC = totalC/i # Calculating average time taken to create a matrix
averageR /=i # Calculating average time taken to retrieve a row for n matrices
file3.write('\nAverage time creating matrices: %s s\n' % averageC)
file3.write('\nAverage time retrieving rows: %s s\n' % averageR)
file2.close()
file3.close()
A_i.deleteAll() # Deleting all keys from redis
# Making a plot from results
X = loadtxt("T (numpy_array).txt")
X = X.reshape(n,m)
# list = []
file = open('max.txt', 'w')
# for i in range(1000):
# l = X[i]
# # print l
# list.append(max(l))
# print list[i]
# file.write('%s \t%s\n' % (i, list[i]))
# legend = []
# for i in range(1000):
# legend.append('Matrix {}'.format(i))
plot.plot(X, linewidth=2.0, marker='o')
plot.title('Time VS Matrices')
plot.xlabel('Matrices')
plot.ylabel('Time (seconds)')
plot.show()
if __name__ == "__main__": main() | 3.234375 | 3 |
tests/utils.py | FantasqueX/censys-python | 1 | 12771089 | <gh_stars>1-10
import unittest
import responses
from censys.common.base import CensysAPIBase
BASE_URL = "https://search.censys.io/api"
V1_URL = BASE_URL + "/v1"
V2_URL = BASE_URL + "/v2"
class CensysTestCase(unittest.TestCase):
api_id = "test-api-id"
api_secret = "test-api-secret"
api_key = "test-api-key"
cli_args = [
"--api-id",
api_id,
"--api-secret",
api_secret,
]
asm_cli_args = [
"--api-key",
api_key,
]
api: CensysAPIBase
def setUp(self):
self.responses = responses.RequestsMock()
self.responses.start()
self.addCleanup(self.responses.stop)
self.addCleanup(self.responses.reset)
def setUpApi(self, api: CensysAPIBase): # noqa: N802
self.api = api
self.base_url = self.api._api_url
| 2.65625 | 3 |
extra_files/helper.py | aldpdz/ssd_keras | 0 | 12771090 | <reponame>aldpdz/ssd_keras
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import os
from scipy import misc
from imageio import imwrite, imread
def save_images_to(path_from, path_to, list_img_names):
'''
Save images into a path
path_from: path where to load the images
path_to: path where to save the images
list_img_names: list of string with the name's images
'''
for name_img in list_img_names:
# Read image
img = imread(path_from + '/' + name_img)
imwrite(path_to + '/' + name_img, img)
def create_sub_dict(original_dict, list_keys):
'''
Creat a new dictionary from another one
original_dict: original dictionary
list_keys: list of string that containts the keys
return: sub dictionary
'''
new_dict = {}
for key in list_keys:
new_dict[key] = original_dict[key]
return new_dict
def load_images_labels(path, dict_json):
'''
Load images from a directory
path: path to the image directory
dict_json: json file
return: images, labels as numpy arrays
'''
images = []
labels = []
for image in os.listdir(path):
# Read image
img = misc.imread(path + '/' + image)
images.append(img)
# Get data from json file
b_boxes = get_ground_truth(dict_json[image])
# Normilize bounding boxes
b_boxes = normilize_boxes(b_boxes, img.shape[1], img.shape[0])
labels.append(b_boxes)
return np.array(images), np.array(labels)
def normilize_boxes(box_list, width, height):
'''
Convert boxes to float [0,1]
box_list: list of bounding boxes
width: original width
height: original height
'''
normilize_list = []
for box in box_list:
new_box = [box[0] / width, box[1] / height, box[2] / width, box[3] / height]
normilize_list.append(new_box)
return np.array(normilize_list)
def get_ground_truth(list_gt):
'''
Return ground truth bounding boxes
list_gt: dictionary that contains the boxes
return: boxes as numpy arrays
'''
ground_truth_list = []
for shape in list_gt:
value = shape['shape_attributes']
ground_truth = [value['x'], value['y'], value['width'], value['height']]
ground_truth_list.append(ground_truth)
return ground_truth_list
def clean_dict(json_file):
'''
Keep only useful information from the json file
return: clean json file as a dictionary
'''
new_dict = {}
for key, value in json_file.items():
new_dict[value['filename']] = value['regions']
return new_dict
def resize_images(list_image, width, height):
'''
Resize images from a numpy array
list_image: numpy array of images
width: new width
height: new height
return: numpy image
'''
return np.array([misc.imresize(img, size=(width, height)) for img in list_image])
def show_image_bb(img, bounding_boxs):
'''
Show image and its bounding boxes
img: image
bounding_box: list of size 4, x, y, width, height
'''
# Create figure and axes
fig, ax = plt.subplots(1, figsize=(12, 12))
# Display the image
ax.imshow(img)
for item in bounding_boxs:
# Create a Rectangle patch
if len(bounding_boxs[0]) > 4:
rect = patches.Rectangle((item[2], item[3]), item[4], item[5], linewidth=3, edgecolor='r', facecolor='none')
else:
rect = patches.Rectangle((item[0], item[1]), item[2], item[3], linewidth=3, edgecolor='r', facecolor='none')
# Add the patch to the Axes
ax.add_patch(rect)
plt.show()
def show_image_bb_2(img, bounding_boxs_pred = [], bounding_boxs_gr = [], confidence = False):
'''
Show image and its bounding boxes
img: image
bounding_boxs_pred: bounding boxes from prediction x, y, width, height
bounding_boxs_gr: ground true bounding boxes x, y, width, height, confidence
'''
# Create figure and axes
fig, ax = plt.subplots(1, figsize=(12, 12))
# Display the image
ax.imshow(img)
if len(bounding_boxs_pred) > 0:
for item in bounding_boxs_pred:
# Create a Rectangle patch
rect = patches.Rectangle((item[0], item[1]), item[2], item[3], linewidth=3, edgecolor='r', facecolor='none')
# Add the patch to the Axes
ax.add_patch(rect)
if confidence:
ax.text(item[0], item[1], round(item[4], 4), size='x-large', bbox=dict(facecolor='r', alpha=1))
if len(bounding_boxs_gr) > 0:
for item in bounding_boxs_gr:
# Create a Rectangle patch
rect = patches.Rectangle((item[0], item[1]), item[2], item[3], linewidth=3, edgecolor='b', facecolor='none')
# Add the patch to the Axes
ax.add_patch(rect)
plt.show()
def normilize_to_pixel(list_elements, width, height):
'''
Convert float value to pixel
list_elements: list of elements that contains the bounding boxes
width: new width
height: new height
'''
new_list = []
for item in list_elements:
normilize_list = []
for box in item:
x = np.ceil(box[0] * width)
y = np.ceil(box[1] * height)
w = np.ceil(box[2] * width)
h = np.ceil(box[3] * height)
new_box = [x, y, w, h]
normilize_list.append(new_box)
new_list.append(normilize_list)
return new_list
def get_batch(batch_size, features):
"""
Send batch
:param batch_size, tamaño del batch
:param features: lista de imagenes
:yield: batches de features
"""
for start_i in range(0, len(features), batch_size):
end_i = start_i + batch_size
yield features[start_i:end_i]
def clean_predictions(pred, id_class=15):
'''
Keep just prediction with id 15 (person)
pred: list of predictions
'''
new_pred = []
for p in pred:
box = []
for item in p:
if item[0] == id_class:
box.append(item)
new_pred.append(box)
return np.array(new_pred)
def adjust_predictions(pred):
'''
Ajust predictions
pred: list of predictions
'''
new_pred = []
for i in pred:
item = []
for box in i:
# Convert corners to width and height
b = [box[0], box[1], box[2], box[3], box[4] - box[2], box[5] - box[3]]
b = np.array(b)
item.append(b)
new_pred.append(item)
return np.array(new_pred)
def bb_intersection_over_union(boxA, boxB):
'''
Calculate Intersection over union
boxA: first bounding box
boxB: second bounding box
return: iou
'''
# determine the (x, y)-coordinates of the intersection rectangle
xA = max(boxA[0], boxB[0])
yA = max(boxA[1], boxB[1])
xB = min(boxA[2] + boxA[0], boxB[2] + boxB[0])
yB = min(boxA[3] + boxA[1], boxB[3] + boxB[1])
# compute the area of intersection rectangle
interArea = max(0, xB - xA) * max(0, yB - yA)
# compute the area of both the prediction and ground-truth
# rectangles
boxAArea = boxA[2] * boxA[3]
boxBArea = boxB[2] * boxB[3]
# compute the intersection over union by taking the intersection
# area and dividing it by the sum of prediction + ground truth areas - the interesection area
iou = interArea / float(boxAArea + boxBArea - interArea)
# return the intersection over union value
return iou
def get_coordinates(list_items):
'''
Get just the coordinates from the list
list_items: numpy that contains the predicted bounding boxes
return: list with just the coordinates of the bounding boxes
'''
new_list = []
for item in list_items:
new_item = []
for box in item:
new_item.append([box[2], box[3], box[4], box[5]])
new_list.append(new_item)
return new_list
def best_match(square, list_two):
'''
Finds the best match by itersection over union
square: item to compare with all items in list_two
list_two: list of bounding boxes
return: best iou
'''
max_iou = 0
for bb in list_two:
iou = bb_intersection_over_union(square, bb)
if iou > max_iou:
max_iou = iou
return max_iou
def cal_precision(to_eval, to_compare, iou):
'''
Calculate the precision
to_eval: cal precision for this item
to_compare: item to compare with
return: precision
'''
sum_iou = 0
number_detection = 0
# Iter over to_eval list
for index_pred in range(len(to_eval)):
# Number of detections on the item
number_detection += len(to_eval[index_pred])
# Iter each bounding box
for item_to_eval in to_eval[index_pred]:
best_iou = best_match(item_to_eval, to_compare[index_pred])
if iou != None:
if best_iou > iou:
best_iou = 1
else:
best_iou = 0
sum_iou += best_iou
if number_detection == 0:
return 0.0
return sum_iou / number_detection
def cal_performance(ground_t, pred, verborse=True, iou=None):
'''
Calculate presicion, recall and F1 score
ground_t: Bounding boxes of ground_t
pred: Bounding boxes of prediction
return: Presicion, Recall and F1 score
'''
presicion = cal_precision(pred, ground_t, iou)
recall = cal_precision(ground_t, pred, iou)
if (presicion + recall) > 0:
f1_score = (2 * presicion * recall) / (presicion + recall)
else:
f1_score = 0.0
if(verborse):
print('Number of images:', len(ground_t))
print('Presicion:', round(presicion, 4))
print('Recall:', round(recall, 4))
print('F1 score:', round(f1_score, 4))
return presicion, recall, f1_score
| 2.625 | 3 |
apps/home.py | nikhil-96/Hawkeye_Viz | 1 | 12771091 | from dash import html
import dash_bootstrap_components as dbc
import pandas as pd
import json
# Reading accidents, casualty and vehicles data from last 5 years
# Reading accidents, casualty and vehicles data from last 5 years
# NOTE(review): these module-level loads run at import time; none of the
# frames below are referenced in this page's layout — presumably shared
# with other app pages, confirm before removing.
dfa = pd.read_csv('data/dft-road-casualty-statistics-accident-last-5-years.csv', low_memory=False)
dfc = pd.read_csv('data/dft-road-casualty-statistics-casualty-last-5-years.csv', low_memory=False)
dfv = pd.read_csv('data/dft-road-casualty-statistics-vehicle-last-5-years.csv', low_memory=False)
# Reading Road safety Data guide
road_guide = pd.read_excel('data/Road-Safety-Open-Dataset-Data-Guide.xlsx')
# Loading UK districts geojson to draw choropleth map
# NOTE(review): the file handle from open() is never closed — consider a
# with-statement here.
uk_cities = json.load(open("data/uk_districts.geojson", "r"))
# Static Dash layout for the Home page: an intro container (title, blurb,
# dataset/GitHub link cards) followed by a footer navbar crediting the team.
# NOTE(review): '<NAME>' placeholders look like dataset anonymization of the
# original author names. `no_gutters` is a dash-bootstrap-components < 1.0
# argument — confirm the pinned dbc version before upgrading.
layout = html.Div([
    dbc.Container([
        dbc.Row([
            dbc.Col(html.H2("Welcome to the Hawkeye Visualization tool of Group 45", className="text-center")
                    , className="mb-5 mt-5")
        ]),
        dbc.Row([
            dbc.Col(html.H5(children='A dataset about the Road accidents in UK in last 5 years has been analysed in this web tool. The link for the dataset you can find below.'
                            )
                    , className="mb-4")
        ]),
        dbc.Row([
            dbc.Col(html.H5(children='The tool consists of two main pages: Home tab '
                                     'which is an Introduction page to the Group 45 visualization tool and '
                                     'Explore tab, which gives the oppurtunity to explore the dataset and '
                                     'find interesting patterns')
                    , className="mb-5")
        ]),
        # Two side-by-side link cards: source dataset and project repository.
        dbc.Row([
            dbc.Col(dbc.Card(children=[html.H3(children='Get the original datasets used in this project',
                                               className="text-center"),
                                       dbc.Button("UK-Road-Safety",
                                                  href="https://data.gov.uk/dataset/cb7ae6f0-4be6-4935-9277-47e5ce24a11f/road-safety-data",
                                                  color="primary",
                                                  target="_blank",
                                                  className="mt-3")
                                       ],
                             body=True, color="dark", outline=True)
                    , width=6, className="mb-4"),
            dbc.Col(dbc.Card(children=[html.H3(children='You can find the code for this project in',
                                               className="text-center"),
                                       dbc.Button("GitHub",
                                                  href="https://github.com/nikhil-96/Hawkeye_Viz",
                                                  color="primary",
                                                  target="_blank",
                                                  className="mt-3"),
                                       ],
                             body=True, color="dark", outline=True)
                    , width=6, className="mb-4")
        ], className="mb-5")
    ]),
    # Footer navbar with author credits.
    dbc.Navbar(
        dbc.Container(
            [
                html.A([
                    # Use row and col to control vertical alignment of logo / brand
                    dbc.Row(
                        [
                            dbc.Col(html.H6(children='Created By: '), className="mt-3", width=2),
                            dbc.Col(html.H6(children='<NAME>'), className="mt-3", width=2),
                            dbc.Col(html.H6(children='<NAME>'), className="mt-3", width=2),
                            dbc.Col(html.H6(children='<NAME>'), className="mt-3", width=2),
                            dbc.Col(html.H6(children='<NAME>'), className="mt-3", width=2)
                        ],
                        align="center",
                        no_gutters=True,
                    ),
                    dbc.Row(
                        [
                            dbc.Col(html.H6(children='Developed at Eindhoven University of Technology TU/e'), className="mt-3")
                        ],
                        align="center",
                        no_gutters=True,
                    )
                ])
            ]
        ),
        color="dark",
        dark=True,
        className="mb-4",
    )
])
| 3.0625 | 3 |
app/models/cart.py | ea137/mini-amazon-skeleton | 0 | 12771092 | from flask import current_app as app
from .cart_product import Cart_Product
class Cart:
    # One user's shopping cart, backed by the Cart and Saved_Cart tables.
    # Rows are keyed by (user id, product id, seller id) and carry a quantity.
    # All access goes through app.db; database errors are printed and
    # swallowed (methods then return None/False), which callers must tolerate.
    def __init__(self, id, pid, quantity):
        self.id = id
        self.pid = pid
        self.quantity = quantity
    @staticmethod
    def get(id):
        """Return user *id*'s cart joined with product/seller/price details,
        as a list of Cart_Product rows, or None on a database error."""
        try:
            rows = app.db.execute('''
SELECT p.id, c.seller_id, CONCAT(u.firstname, u.lastname), p.name, CAST((c.quantity * i.price) AS numeric(36, 2)), c.quantity
FROM Cart c, Products p, Inventory i, Users u
WHERE c.id = :id AND c.pid = p.id AND p.id = i.product_id AND c.seller_id = i.seller_id AND c.seller_id = u.id;
''',
                              id=id)
            print([Cart_Product(*row) for row in rows])
            #eturn [Product(*row) for row in rows]
            return [Cart_Product(*row) for row in rows] #if rows is not None else None
        except Exception as e:
            print(e)
            return None
    @staticmethod
    def get_all():
        """Return every row in the Cart table as Cart objects."""
        rows = app.db.execute('''
SELECT *
FROM Cart
''')
        return [Cart(*row) for row in rows]
    @staticmethod
    def checkIfProductInCart(id, uid, sid):
        """True when product *id* from seller *sid* is already in *uid*'s cart."""
        try:
            rows = app.db.execute('''
SELECT *
FROM Cart
WHERE id = :uid AND pid = :pid AND seller_id = :sid
''',
                                  uid = uid,
                                  pid = id,
                                  sid = sid
                                  )
            if len(rows) == 0:
                return False
            else:
                return True
        except Exception as e:
            print(e)
            return False
    @staticmethod
    def update_product(id, uid, sid, state):
        """Increment ("add") or decrement the quantity of a cart line;
        deletes the line when the quantity reaches zero.

        NOTE(review): if the row does not exist, rows[0][0] raises and the
        error is only printed — callers get no failure signal.
        """
        try:
            rows = app.db.execute(
            '''
            SELECT quantity
            FROM Cart
            WHERE id = :uid AND pid = :pid AND seller_id = :sid
            ''',
            uid = uid,
            pid = id,
            sid = sid
            )
            curr_quantity = int(rows[0][0])
            if state == "add":
                new_quantity = curr_quantity + 1
            else:
                new_quantity = curr_quantity - 1
            if new_quantity == 0:
                app.db.execute('''
DELETE FROM Cart WHERE pid = :pid AND id = :uid AND seller_id = :sid
''',
                                  uid = uid,
                                  pid = id,
                                  sid = sid
                                  )
            else:
                app.db.execute(
                '''
                UPDATE Cart
                SET quantity = :new_quantity
                WHERE id = :uid AND pid = :pid AND seller_id= :sid
                ''',
                new_quantity = new_quantity,
                uid = uid,
                pid = id,
                sid = sid
                )
        except Exception as e:
            print(e)
    @staticmethod
    def add_product(id, uid, sid):
        """Insert a new cart line with quantity 1.

        Column order in VALUES is (user id, product id, seller id, quantity).
        """
        #quantity = request.args.get("quantity")
        try:
            app.db.execute(
            '''
            INSERT INTO Cart VALUES(:uid, :id, :sid, 1)
            ''',
            id = id,
            uid = uid,
            sid = sid
            )
        except Exception as e:
            print(e)
    @staticmethod
    def remove_product(pid, uid, seller_id):
        """Delete one (user, product, seller) line from the cart."""
        try:
            app.db.execute(
            '''
            DELETE FROM Cart WHERE pid = :pid AND id = :uid AND seller_id = :sid
            ''',
            uid = uid,
            pid = pid,
            sid = seller_id
            )
        except Exception as e:
            print(e)
    @staticmethod
    def save_for_later(uid, pid, seller_id, quantity):
        """Move a cart line into Saved_Cart.

        NOTE(review): the quantity RETURNING'd by the DELETE is captured in
        *rows* but never used — the caller-supplied *quantity* is inserted
        instead. Confirm they are always equal.
        """
        try:
            rows = app.db.execute(
            '''
            DELETE FROM Cart c WHERE c.pid = :pid AND c.id = :uid AND c.seller_id = :sid
            RETURNING c.quantity
            ''',
            uid = uid,
            pid = pid,
            sid = seller_id
            )
            app.db.execute(
            '''
            INSERT INTO Saved_Cart VALUES(:id, :pid, :sid, :quantity)
            ''',
            id = uid,
            pid = pid,
            sid = seller_id,
            quantity = quantity
            )
        except Exception as e:
            print(e)
    @staticmethod
    def get_saved(uid):
        """Return *uid*'s saved-for-later items as Cart_Product rows,
        or None on a database error."""
        try:
            rows = app.db.execute('''
SELECT p.id, s.seller_id, CONCAT(u.firstname, u.lastname), p.name, CAST((s.quantity * i.price) AS numeric(36, 2)), s.quantity
FROM Saved_Cart s, Products p, Inventory i, Users u
WHERE s.id = :id AND s.pid = p.id AND p.id = i.product_id AND s.seller_id = i.seller_id AND s.seller_id = u.id;
''',
                              id=uid)
            print([Cart_Product(*row) for row in rows])
            #eturn [Product(*row) for row in rows]
            return [Cart_Product(*row) for row in rows] #if rows is not None else None
        except Exception as e:
            print(e)
            return None
    @staticmethod
    def get_total(uid):
        """Return the cart's total price (quantity * price summed) for *uid*."""
        try:
            rows = app.db.execute(
            '''
            SELECT CAST(SUM(c.quantity * i.price) as numeric(36,2)) AS total
            FROM Cart c, Inventory i
            WHERE c.pid = i.product_id AND c.id = :uid AND c.seller_id = i.seller_id
            ''',
            uid = uid
            )
            return rows[0][0]
        except Exception as e:
            print(e)
    @staticmethod
    def remove_from_saved(uid, pid, sid):
        """Delete one line from Saved_Cart."""
        try:
            app.db.execute(
            '''
            DELETE FROM Saved_Cart WHERE pid = :pid AND id = :uid AND seller_id = :sid
            ''',
            uid = uid,
            pid = pid,
            sid = sid
            )
        except Exception as e:
            print(e)
    @staticmethod
    def move_to_cart(uid, pid, sid, quantity):
        """Move a Saved_Cart line back into the Cart with *quantity*."""
        try:
            rows = app.db.execute(
            '''
            DELETE FROM Saved_Cart WHERE pid = :pid AND id = :uid AND seller_id = :sid
            RETURNING pid
            ''',
            uid = uid,
            pid = pid,
            sid = sid
            )
            pid = rows[0][0]
            app.db.execute(
            '''
            INSERT INTO Cart VALUES(:id, :pid, :sid, :quantity)
            ''',
            id = uid,
            pid = pid,
            sid = sid,
            quantity = quantity
            )
        except Exception as e:
            print(e)
| 2.90625 | 3 |
Day 17/main.py | anti-batman/100-Days-of-Code | 72 | 12771093 | <reponame>anti-batman/100-Days-of-Code
from question_model import Question
from data import question_data
from quiz_brain import QuizBrain
# Build one Question object per raw question/answer dict.
question_bank = [
    Question(entry["question"], entry["correct_answer"])
    for entry in question_data
]
quiz = QuizBrain(question_bank)
# Keep asking until the question bank is exhausted.
while quiz.still_has_questions():
    quiz.next_question()
print("Congrats! You have completed the quiz.")
print(f"You Final score is: {quiz.score}/{quiz.question_number}. ")
cloudberry-py/cloudberry/api/query.py | olliekrk/cloud-berry | 0 | 12771094 | <reponame>olliekrk/cloud-berry<filename>cloudberry-py/cloudberry/api/query.py
import requests
from .backend import CloudberryConfig, CloudberryApi
from .model import DataSeries
class Query(CloudberryApi):
    """Client for the Cloudberry ``/flux`` query endpoint."""

    def __init__(self, config: CloudberryConfig) -> None:
        super().__init__(config)
        # Base URL for all flux queries issued by this client.
        self.base_url = config.base_url() + '/flux'

    def query_series(self, raw_query: str) -> DataSeries:
        """POST a raw flux query and parse the JSON response into a DataSeries."""
        response = requests.post(self.base_url + '/querySeries', data=raw_query)
        return DataSeries.from_json(response.json())
| 2.359375 | 2 |
tools/timestamper/timestamper.py | NatLibFi/Finto-data | 14 | 12771095 | <gh_stars>10-100
#!/usr/bin/env python
# Utility to generate dct:created and dct:modified timestamps (xsd:date) for
# SKOS concepts.
# The idea is that for each concept, a hash (MD5) is calculated based on the
# triples where that concept is either the subject or object. Whenever the
# triples change, the hash will be different. A separate TSV data file of
# earlier hashes and timestamps is maintained.
# First run
# If the tool is run with an empty or nonexistent timestamp file, no
# timestamps are added but hashes are stored in the timestamp file for later
# reference. The idea is that without preexisting timestamp information, we
# cannot know anything about how long the concepts have existed and when
# they were last modified. However, the next time a concept changes, the
# hash will be different and thus a modified timestamp is added and
# thereafter maintained whenever the concept changes.
# When a timestamp file exists and a new concept is encountered, it will be
# given a modified timestamp. It will also be given a created timestamp,
# unless it already has one in the data. Existing created timestamps will
# not be touched.
# The timestamp to give can be passed on the command line as the last
# parameter. If not given, today's date will be used as the default.
# input is Turtle syntax on stdin
# output is Turtle syntax on stdout
from rdflib import Graph, Namespace, URIRef, Literal, RDF, XSD
import sys
import datetime
import hashlib
import os.path
# RDF namespaces used for concept typing and timestamp predicates.
SKOS = Namespace('http://www.w3.org/2004/02/skos/core#')
DCT = Namespace('http://purl.org/dc/terms/')
if len(sys.argv) != 2 and len(sys.argv) != 3:
    print >>sys.stderr, "Usage: %s <timestampfile> [date]" % sys.argv[0]
    sys.exit(1)
tsfile = sys.argv[1]
# Optional second argument overrides the timestamp; default is today.
if len(sys.argv) > 2:
    timestamp = sys.argv[2]
else:
    timestamp = datetime.date.today().isoformat()
# load existing timestamps
# Maps concept URIRef -> (hash, mtime, ctime); '-' marks a missing value.
# NOTE(review): the loop variable `hash` shadows the builtin of the same name.
old_timestamps = {}
if os.path.exists(tsfile):
    with open(tsfile, 'r') as f:
        for line in f:
            tsdata = line.strip().split()
            if len(tsdata) > 3:
                # new style timestamp file, both mtime and ctime
                uri, hash, mtime, ctime = tsdata
            else:
                # old style timestamp file, only mtime
                uri, hash, mtime = tsdata
                ctime = '-'
            old_timestamps[URIRef(uri)] = (hash, mtime, ctime)
# load SKOS file
g = Graph()
g.load(sys.stdin, format='turtle')
# calculate hashes and timestamps for concepts
def concept_hash(concept):
    """MD5 digest of the sorted N-Triples serialization of every triple in
    the global graph ``g`` that mentions *concept* as subject or object.

    Sorting makes the digest independent of serialization order, so it
    changes exactly when the concept's triples change.
    """
    cgraph = Graph()
    for triple in g.triples((concept, None, None)):
        cgraph.add(triple)
    for triple in g.triples((None, None, concept)):
        cgraph.add(triple)
    nt_lines = cgraph.serialize(destination=None, format='nt').splitlines(True)
    return hashlib.md5(''.join(sorted(nt_lines))).hexdigest()
# Compare each concept's current hash against the stored one and decide
# its (hash, mtime, ctime) tuple for this run.
new_timestamps = {}
for concept in g.subjects(RDF.type, SKOS.Concept):
    hash = concept_hash(concept)
    if concept in old_timestamps:
        old_hash, old_mtime, old_ctime = old_timestamps[concept]
        if old_hash == hash:
            # hash is the same, maintain old timestamp
            new_timestamps[concept] = (old_hash, old_mtime, old_ctime)
            continue
        else:
            # hash has changed, update timestamp
            new_timestamps[concept] = (hash, timestamp, old_ctime)
    else:
        if len(old_timestamps) == 0:
            # first run: we don't know anything about history - no timestamps
            new_timestamps[concept] = (hash, '-', '-')
        else:
            # the concept is new, update timestamps
            # check whether the concept already has a created timestamp
            graph_ctime = g.value(concept, DCT.created, None)
            if graph_ctime is not None:
                # there is already a created timestamp in the graph
                # don't try to override it here
                ctime = '-'
            else:
                # no created timestamp in the graph
                # we will set the current timestamp as the ctime
                ctime = timestamp
            new_timestamps[concept] = (hash, timestamp, ctime)
# Add the new timestamps to the graph
for concept, cdata in new_timestamps.items():
    hash, mtime, ctime = cdata
    if mtime != '-':
        g.add((concept, DCT.modified, Literal(mtime, datatype=XSD.date)))
    if ctime != '-':
        g.add((concept, DCT.created, Literal(ctime, datatype=XSD.date)))
# Copy old timestamps that were not in the vocabulary, just in case
# so we don't lose data if the vocabulary is temporarily missing concepts
for concept in old_timestamps:
    if concept not in new_timestamps:
        new_timestamps[concept] = old_timestamps[concept]
# store the new timestamps in the timestamp data file
with open(tsfile, 'w') as f:
    for concept, cdata in sorted(new_timestamps.items()):
        hash, mtime, ctime = cdata
        print >>f, "\t".join((concept, hash, mtime, ctime))
# Emit the augmented vocabulary as Turtle on stdout.
g.serialize(destination=sys.stdout, format='turtle')
| 2.640625 | 3 |
certificate_mailer/__init__.py | GauravPatel89/EPAi2_capstone | 0 | 12771096 | from .core import *
from .helpers import *
from .mailer_utils import * | 1.039063 | 1 |
webtable_recognition/evaluator.py | jonashering/webtable-recognition | 0 | 12771097 | <gh_stars>0
from sklearn.metrics import classification_report
def evaluation_report(Y_true, Y_pred, classes=None):
    """Print sklearn's per-class precision/recall/F1 report to stdout."""
    report = classification_report(Y_true, Y_pred, target_names=classes)
    print(report)
| 2.078125 | 2 |
examples/volumetric/mesh2volume.py | danielhrisca/vedo | 0 | 12771098 | <filename>examples/volumetric/mesh2volume.py
"""Left: build a volume (grey) from a mesh where the
foreground voxels are 1 and the background voxels are 0.
Right: the Volume is isosurfaced.
"""
from vedo import *
# Load the bunny mesh, normalize its size, and render it as a wireframe.
s = load(datadir+"bunny.obj").normalize().wireframe()
# Voxelize the mesh (inside=1, outside=0), semi-transparent blue volume.
v = mesh2Volume(s, spacing=(0.02, 0.02, 0.02)).alpha([0,0.5]).c('blue')
iso = v.isosurface().color("b")
# Renderer 0: volume + slightly enlarged mesh; renderer 1: the isosurface.
show(v, s.scale(1.05), __doc__, at=0, N=2)
show(iso, at=1, interactive=1)
| 2.96875 | 3 |
chapter01/common.py | Alchemy2011/spider | 0 | 12771099 | <gh_stars>0
# -*- coding: utf-8 -*-
import urllib2
import urlparse
def download1(url):
    """Simple downloader: fetch *url* and return the body (Python 2,
    urllib2). Raises on any HTTP/URL error."""
    return urllib2.urlopen(url).read()
def download2(url):
    """Download function that catches errors; returns None on failure."""
    print 'Downloading:', url
    try:
        html = urllib2.urlopen(url).read()
    except urllib2.URLError as e:
        print 'Download error:', e.reason
        html = None
    return html
def download3(url, num_retries=2):
    """Download function that also retries 5XX errors (server-side faults),
    recursing with one fewer retry each time."""
    print 'Downloading:', url
    try:
        html = urllib2.urlopen(url).read()
    except urllib2.URLError as e:
        print 'Download error:', e.reason
        html = None
        if num_retries > 0:
            if hasattr(e, 'code') and 500 <= e.code < 600:
                # retry 5XX HTTP errors
                html = download3(url, num_retries-1)
    return html
def download4(url, user_agent='<PASSWORD>', num_retries=2):
    """Download function that includes user agent support.

    NOTE(review): the default user agent looks anonymized by the dataset
    ('<PASSWORD>'); judging by the comment below it was originally 'wswp'.
    """
    # wswp: web scraping with python
    print 'Downloading:', url
    headers = {'User-agent': user_agent}
    request = urllib2.Request(url, headers=headers)
    try:
        html = urllib2.urlopen(request).read()
    except urllib2.URLError as e:
        print 'Download error:', e.reason
        html = None
        if num_retries > 0:
            if hasattr(e, 'code') and 500 <= e.code < 600:
                # retry 5XX HTTP errors
                html = download4(url, user_agent, num_retries-1)
    return html
def download5(url, user_agent='<PASSWORD>', proxy=None, num_retries=2):
    """Download function with support for proxies (plus user agent and
    5XX retries, as in download4)."""
    print 'Downloading:', url
    headers = {'User-agent': user_agent}
    request = urllib2.Request(url, headers=headers)
    opener = urllib2.build_opener()
    if proxy:
        # Proxy handler: first time seeing this, not fully understood —
        # read the urllib2 docs when there is time.
        proxy_params = {urlparse.urlparse(url).scheme: proxy}
        opener.add_handler(urllib2.ProxyHandler(proxy_params))
    try:
        html = opener.open(request).read()
    except urllib2.URLError as e:
        print 'Download error:', e.reason
        html = None
        if num_retries > 0:
            if hasattr(e, 'code') and 500 <= e.code < 600:
                # retry 5XX HTTP errors
                html = download5(url, user_agent, proxy, num_retries-1)
    return html
# 这一行的这种写法,很不错,收了,能保证不改变对外的接口。
download = download5
if __name__ == '__main__':
print download('http://127.0.0.1:8000/places/default')
| 3.390625 | 3 |
metrics/log_collectors/training_data_service_client/tail_to_tds.py | just4jc/FfDL | 0 | 12771100 | <gh_stars>0
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import sys
from log_collectors.training_data_service_client import extract_datetime as extract_datetime
from . import push_log_line
from . import connect
def collect_and_send(log_file: str, should_connect: bool=True):
    """Tail *log_file* and push each appended line to the Training Data Service.

    log_file: path of the log file to follow (blocks until it exists)
    should_connect: when True, open a TDS connection before tailing

    Runs forever: remembers the last read offset and re-reads any newly
    appended lines on every pass.
    """
    # Bug fix: tdClient was previously bound only inside the should_connect
    # branch, so push_log_line.push() below raised NameError whenever
    # should_connect was False or the connection attempt failed before the
    # assignment. Initialize it up front.
    tdClient = None
    if should_connect:
        try:
            print("Trying to connect to Training Data Service (log lines)")
            sys.stdout.flush()
            tdClient = connect.get_connection()
            if tdClient is not None:
                print("Have connection to Training Data Service (log lines)")
                sys.stdout.flush()
        except Exception as inst:
            print("Unexpected error when attempting to process evaluation metric record (log lines):",
                  sys.exc_info()[0])
            print(inst)
            sys.stdout.flush()
    log_line_index = 1
    # Block until the log file appears.
    while not os.path.exists(log_file):
        time.sleep(1)
    logfile_year = extract_datetime.get_log_created_year(log_file)
    # TODO: Keep file_pos stored in file, in case of this container's restart
    file_pos = 0
    # NOTE(review): no sleep between passes — this loop busy-polls the file.
    while True:
        with open(log_file, 'r') as em_stream:
            try:
                em_stream.seek(file_pos)
                for line in iter(em_stream):
                    log_line_index = push_log_line.push(tdClient, line, logfile_year, log_line_index)
            except Exception as inst:
                print("Unexpected error:", str(inst))
                sys.stdout.flush()
            file_pos = em_stream.tell()
Setup.py | prbpedro/simple_peewee_flask_webapi | 0 | 12771101 | import setuptools
# Use the README as the PyPI long description.
with open("README.md", "r") as fh:
    long_description = fh.read()
# Package metadata and dependency pins for the simple peewee/Flask web API.
setuptools.setup(
    name="simple_peewee_flask_webapi",
    version="1.0.0",
    author="<NAME>",
    author_email="<EMAIL>",
    description="Simple peewee Flask WEB-API",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/prbpedro/simple_peewee_flask_webapi",
    install_requires=[
        "Flask==1.1.1",
        "peewee==3.10.0"],
    packages=setuptools.find_packages(exclude=("tests",)),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
)
| 1.453125 | 1 |
backend/api/views/tips.py | hack4impact-uiuc/c2tc-fall-2018 | 7 | 12771102 | <reponame>hack4impact-uiuc/c2tc-fall-2018<gh_stars>1-10
from flask import Blueprint, request, jsonify
from api.models.Tips import Tips
from api.models.User import User
from api.core import (
create_response,
serialize_list,
logger,
authenticated_route,
necessary_post_params,
can_be_authenticated,
)
from datetime import datetime
import functools
from bson.objectid import ObjectId
import json
from geopy import distance
from api.constants import UPVOTE, DOWNVOTE
# Blueprint grouping all tip-related routes under the "tips" name.
tips = Blueprint("tips", __name__)
@tips.route("/tips", methods=["GET"])
def get_all_tips():
    """GET all tips; with ?lat=&long= query params, only tips within
    ~0.1 miles of that point are returned.

    NOTE(review): the comprehension variable `tips` shadows the blueprint
    of the same name (harmless here, but confusing).
    """
    latitude = request.args.get("lat")
    longitude = request.args.get("long")
    if latitude is None or longitude is None:
        response = [tips.to_mongo() for tips in Tips.objects]
    else:
        # Full O(n) scan with a geodesic distance per tip — fine for small
        # collections; consider a geo index for large ones.
        response = [
            tips.to_mongo()
            for tips in Tips.objects
            if distance.distance(
                (tips.latitude, tips.longitude), (latitude, longitude)
            ).miles
            <= 0.1
        ]
    response = {"tips": response}
    return create_response(data=response)
@tips.route("/tips/<id>", methods=["GET"])
def get_tip(id):
    """GET a single tip by its ObjectId string."""
    response = Tips.objects.get(id=id).to_mongo()
    return create_response(data=dict(response))
@tips.route("/user/tips", methods=["GET"])
@authenticated_route
def get_tips_by_user(user_db):
    """GET all tips posted by the authenticated user (one DB fetch per tip)."""
    posted_tips = (user_db.to_mongo())["posted_tips"]
    posted_tips_list = [
        Tips.objects.get(id=str(tips)).to_mongo() for tips in posted_tips
    ]
    return create_response(data={"tips": posted_tips_list})
@tips.route("/tips_category/<category>", methods=["GET"])
def get_tips_by_category(category):
    """GET all tips whose category matches *category* exactly."""
    response = [tips.to_mongo() for tips in Tips.objects if tips.category == category]
    response = {"tips": response}
    return create_response(data=response)
@tips.route("/tips_upvotes/<tips_id>", methods=["GET"])
def get_tip_upvotes(tips_id):
    """GET all User objects that have upvoted the given tip."""
    tip = Tips.objects.get(id=tips_id)
    tips_upvotes = (tip.to_mongo())["upvotes"]
    tips_upvotes_list = [
        User.objects.get(id=str(user)).to_mongo() for user in tips_upvotes
    ]
    response = {"users": tips_upvotes_list}
    return create_response(data=response)
@tips.route("/tips_downvotes/<tips_id>", methods=["GET"])
def get_tip_downvotes(tips_id):
    """
    GET function for retrieving all User objects that have downvoted a tip
    """
    tip = Tips.objects.get(id=tips_id)
    tips_downvotes = (tip.to_mongo())["downvotes"]
    tips_downvotes_list = [
        User.objects.get(id=str(user)).to_mongo() for user in tips_downvotes
    ]
    response = {"users": tips_downvotes_list}
    return create_response(data=response)
@tips.route("/tips/verified", methods=["GET"])
@can_be_authenticated
def get_verified_tips(user_db):
    """GET verified tips: all of them when unauthenticated, otherwise only
    the caller's own verified tips."""
    if user_db is None:
        response = [tip.to_mongo() for tip in Tips.objects if tip.status == "verified"]
    else:
        user_id = user_db.id
        response = [
            tip.to_mongo()
            for tip in Tips.objects
            if tip.status == "verified" and str(user_id) == str(tip.author)
        ]
    response = {"verified_tips": response}
    return create_response(data=response)
@tips.route("/tips/pending", methods=["GET"])
@can_be_authenticated
def get_pending_tips(user_db):
    """GET pending tips: all when unauthenticated, otherwise the caller's own
    (mirrors get_verified_tips)."""
    if user_db is None:
        response = [tip.to_mongo() for tip in Tips.objects if tip.status == "pending"]
    else:
        user_id = user_db.id
        response = [
            tip.to_mongo()
            for tip in Tips.objects
            if tip.status == "pending" and str(user_id) == str(tip.author)
        ]
    response = {"pending_tips": response}
    return create_response(data=response)
@tips.route("/tips/denied", methods=["GET"])
@can_be_authenticated
def get_denied_tips(user_db):
    """GET denied tips: all when unauthenticated, otherwise the caller's own
    (mirrors get_verified_tips)."""
    if user_db is None:
        response = [tip.to_mongo() for tip in Tips.objects if tip.status == "denied"]
    else:
        user_id = user_db.id
        response = [
            tip.to_mongo()
            for tip in Tips.objects
            if tip.status == "denied" and str(user_id) == str(tip.author)
        ]
    response = {"denied_tips": response}
    return create_response(data=response)
@tips.route("/tips", methods=["POST"])
@authenticated_route
@necessary_post_params("title", "content", "latitude", "longitude", "category")
def create_tip(db_user):
    """POST a new tip (status starts as "pending") and record it in the
    author's posted_tips list."""
    data = request.get_json()
    tips = Tips.objects.create(
        title=data["title"],
        content=data["content"],
        author=db_user.id,
        posted_time=datetime.now(),
        status="pending",
        latitude=data["latitude"],
        longitude=data["longitude"],
        category=data["category"],
    )
    tips.save()
    # Append the new tip's id to the author's posted_tips.
    posted_tips = (db_user.to_mongo())["posted_tips"]
    posted_tips.append(ObjectId(tips.id))
    db_user.update(posted_tips=posted_tips)
    return create_response(message="success!")
@tips.route("/tips/<tips_id>", methods=["PUT"])
def edit_tip(tips_id):
    """
    PUT function for editing Tips objects

    Only fields present in the JSON body are updated.
    NOTE(review): unlike create_tip, this route has no @authenticated_route
    guard — confirm that unauthenticated edits are intended.
    """
    data = request.get_json()
    tip = Tips.objects.get(id=tips_id)
    if "title" in data:
        tip.title = data["title"]
    if "content" in data:
        tip.content = data["content"]
    if "status" in data:
        tip.status = data["status"]
    if "latitude" in data:
        tip.latitude = data["latitude"]
    if "longitude" in data:
        tip.longitude = data["longitude"]
    if "category" in data:
        tip.category = data["category"]
    tip.save()
    return create_response(message="success!")
@tips.route("/tips/<id>/status", methods=["PUT"])
def update_status(id):
    """
    PUT function for changing the tip's status

    Accepts only the three known statuses: verified / pending / denied.
    """
    data = request.get_json()
    tip = Tips.objects.get(id=id)
    if (
        data["status"] != "verified"
        and data["status"] != "pending"
        and data["status"] != "denied"
    ):
        return create_response(message="Please enter a valid status")
    tip.update(status=data["status"])
    return create_response(message="success!")
@tips.route("/tips_votes", methods=["PUT"])
@authenticated_route
def change_vote(user_db):
    """
    PUT function for changing a user's upvote or downvote

    Toggle semantics: voting the same way again removes the vote; voting
    one way removes any vote the user had the other way.
    """
    data = request.get_json()
    data["user_id"] = user_db.id
    tip = Tips.objects.get(id=data["tips_id"])
    if data["vote_type"] == UPVOTE:
        if ObjectId(data["user_id"]) in tip.to_mongo()["upvotes"]:
            tip.update(pull__upvotes=ObjectId(data["user_id"]))
        else:
            tip_upvotes = tip.to_mongo()["upvotes"]
            tip_upvotes.append(ObjectId(data["user_id"]))
            tip.update(upvotes=tip_upvotes)
            if ObjectId(data["user_id"]) in tip.to_mongo()["downvotes"]:
                tip.update(pull__downvotes=ObjectId(data["user_id"]))
    if data["vote_type"] == DOWNVOTE:
        if ObjectId(data["user_id"]) in tip.to_mongo()["downvotes"]:
            tip.update(pull__downvotes=ObjectId(data["user_id"]))
        else:
            tip_downvotes = tip.to_mongo()["downvotes"]
            tip_downvotes.append(ObjectId(data["user_id"]))
            tip.update(downvotes=tip_downvotes)
            if ObjectId(data["user_id"]) in tip.to_mongo()["upvotes"]:
                tip.update(pull__upvotes=ObjectId(data["user_id"]))
    return create_response(message="success!")
@tips.route("/tips/<tips_id>", methods=["DELETE"])
def delete_tips_by_id(tips_id):
    """
    DELETE function for deleting a tips object by id

    Also removes the tip's id from the author's posted_tips list.
    NOTE(review): linear scan over all tips instead of Tips.objects.get —
    O(n) per delete.
    """
    for tips in Tips.objects:
        if tips_id == str(tips.id):
            user = User.objects.get(id=str(tips.author))
            posted_tips = (user.to_mongo())["posted_tips"]
            posted_tips.remove(ObjectId(tips.id))
            user.update(posted_tips=posted_tips)
            tips.delete()
            return create_response(message="success!")
    return create_response(message="Tip not found")
@tips.route("/tips", methods=["DELETE"])
def clear_tips():
    """
    DELETE method which wraps the clear tips function as
    an API endpoint.
    """
    try:
        count = delete_tips_collection()
        return create_response(
            status=200, message="Success! Deleted " + str(count) + " records."
        )
    except Exception as e:
        return create_response(
            status=500, message="Could not clear collection: " + repr(e)
        )
def delete_tips_collection():
    """
    Helper function to delete tips collection in db.

    Returns the number of deleted documents.
    """
    result = Tips.objects().delete()
    return result
| 2.578125 | 3 |
AutoDL_sample_code_submission/at_toolkit/interface/adl_classifier.py | dianjixz/AutoDL | 1,044 | 12771103 | <reponame>dianjixz/AutoDL
import numpy as np
class AdlClassifier(object):
    """Abstract base class for AutoDL classifiers.

    Subclasses implement init/fit/predict_proba; rebuild_prob_res maps a
    probability matrix predicted over a subset of labels back to the full
    label space of ``self.class_num`` classes.
    """

    def init(self, class_num: int, init_params: dict):
        """Prepare the classifier for *class_num* classes (abstract)."""
        self.class_num = class_num
        self.label_map = list()
        self.clf_name = None
        raise NotImplementedError

    def fit(self, train_examples_x: np.ndarray, train_examples_y: np.ndarray, fit_params: dict):
        """Train on the given examples (abstract)."""
        raise NotImplementedError

    def predict_proba(self, test_examples: np.ndarray, predict_prob_params: dict) -> np.ndarray:
        """Return per-class probabilities for *test_examples* (abstract)."""
        raise NotImplementedError

    def rebuild_prob_res(self, input_label_list, orig_prob_array):
        """Expand probabilities predicted for a label subset to all classes.

        Columns of *orig_prob_array* correspond, in order, to the class ids
        in *input_label_list*; classes absent from the subset are filled with
        each sample's row median (computed before filling, matching the
        original per-sample loop) as a neutral score.

        input_label_list: class ids present in the training subset
        orig_prob_array: (n_samples, len(input_label_list)) probabilities
        return: (n_samples, self.class_num) probability array
        """
        new_prob_array = np.zeros((orig_prob_array.shape[0], self.class_num))
        # Vectorized column scatter replaces the former per-class Python loop.
        new_prob_array[:, input_label_list] = orig_prob_array
        missing_classes = [c for c in range(self.class_num) if c not in input_label_list]
        if missing_classes:
            # Row medians of the partially-filled matrix (zeros included),
            # broadcast into every missing column of that row.
            row_medians = np.median(new_prob_array, axis=1)
            new_prob_array[:, missing_classes] = row_medians[:, np.newaxis]
        return new_prob_array
class AdlOfflineClassifier(AdlClassifier):
    # Marker base for classifiers trained in a single offline pass.
    def offline_fit(self, train_examples_x: np.ndarray, train_examples_y: np.ndarray, fit_params:dict):
        """Train once on the full dataset; must be implemented by subclasses."""
        raise NotImplementedError
class AdlOnlineClassifier(AdlClassifier):
    # Marker base for classifiers trained incrementally (online).
    def online_fit(self, train_examples_x: np.ndarray, train_examples_y: np.ndarray, fit_params:dict):
        """Update the model with a new batch of examples; must be implemented
        by subclasses."""
        raise NotImplementedError
| 2.46875 | 2 |
reportPupilEngagement.py | dhicks6345789/data-tools | 0 | 12771104 | #!/usr/bin/python
import os
import io
import sys
import json
import shutil
import pandas
import dataLib
import datetime
# PIL - the Python Image Library, used for bitmap image manipulation.
import PIL
import PIL.ImageFont
import PIL.ImageDraw
# ReportLab - used for PDF document generation.
import reportlab.lib.units
import reportlab.lib.utils
import reportlab.lib.colors
import reportlab.pdfgen.canvas
import reportlab.lib.pagesizes
import reportlab.graphics.renderPM
def intToConstrainedPercentage(theValue, theMin, theMax):
    """Clamp *theValue* into [theMin, theMax] and scale it to a 0..1 fraction."""
    clamped = min(max(theValue, theMin), theMax)
    return (clamped - theMin) / (theMax - theMin)
def roundDatetime(theDate):
    """Return *theDate* truncated to midnight of the same calendar day."""
    zeroed = {"hour": 0, "minute": 0, "second": 0, "microsecond": 0}
    return theDate.replace(**zeroed)
def dateToWorkingDaysAgo(theDate):
    """Count working days (Mon-Fri) from *theDate* up to (excluding) today.

    theDate: timestamp string in "%Y-%m-%dT%H:%M:%S.%fZ" form, or "Never".
    return: number of weekdays strictly before today, or "Never".
    """
    if theDate == "Never":
        return "Never"
    today = roundDatetime(datetime.datetime.now())
    cursor = roundDatetime(datetime.datetime.strptime(theDate, "%Y-%m-%dT%H:%M:%S.%fZ"))
    workingDays = 0
    oneDay = datetime.timedelta(days=1)
    while cursor < today:
        # isoweekday(): 6 = Saturday, 7 = Sunday — skip weekends.
        if cursor.isoweekday() < 6:
            workingDays += 1
        cursor += oneDay
    return workingDays
def parseDate(theDate):
    """Parse the activity-export timestamp format; "Never" passes through."""
    return ("Never" if theDate == "Never"
            else datetime.datetime.strptime(theDate, "%Y-%m-%dT%H:%M:%S.%fZ"))
# Load the config file (set by the system administrator).
config = dataLib.loadConfig(["dataFolder"])
# Make sure the output folder exists.
reportsRoot = config["dataFolder"] + os.sep + "Reports"
outputRoot = reportsRoot + os.sep + "Pupil Engagement"
historyRoot = outputRoot + os.sep + "History"
os.makedirs(historyRoot, exist_ok=True)
pupils = pandas.read_csv(config["dataFolder"] + os.sep + "pupils.csv", header=0)
activity = pandas.read_csv(config["dataFolder"] + os.sep + "Reports" + os.sep + "userActivity.csv", header=0)
# Column name -> x position in mm for the PDF; None means CSV-only column.
columnPos = {"Name":0,"Username":70,"Year":105,"Login":None,"Classroom":None,"Last Active(Working Days)":125}
columnNames = columnPos.keys()
report = pandas.DataFrame(columns=columnNames)
# Collect the distinct year groups present in the pupil roll.
yearGroups = {}
for pupilsIndex, pupilsValues in pupils.iterrows():
    yearGroups[dataLib.yearCohortToGroup(pupilsValues["YearGroup"])] = 1
reportIndex = 0
todaysDate = datetime.datetime.now()
todaysDateString = todaysDate.strftime("%d-%m-%Y")
reportTitle = "report-" + todaysDateString + ".csv"
mostRecentDate = datetime.datetime(2000, 1, 1)
print("Processing data by year group...")
for yearGroup in yearGroups.keys():
    print("Processing " + yearGroup + "...")
    for pupilsIndex, pupilsValues in pupils.iterrows():
        if dataLib.yearCohortToGroup(pupilsValues["YearGroup"]) == yearGroup:
            # Match each activity record to a pupil by either their current
            # or their old username.
            for activityIndex, activityValues in activity.iterrows():
                username = ""
                if activityValues["email"] == pupilsValues["Username"] + "@knightsbridgeschool.com":
                    username = pupilsValues["Username"]
                    altUsername = pupilsValues["OldUsername"]
                elif activityValues["email"] == pupilsValues["OldUsername"] + "@knightsbridgeschool.com":
                    username = pupilsValues["OldUsername"]
                    altUsername = pupilsValues["Username"]
                if not username == "":
                    # If this pupil already has a row under their other
                    # username that never logged in, overwrite that row
                    # instead of appending a duplicate.
                    indexToUse = reportIndex
                    usernameList = report["Username"].tolist()
                    if altUsername in usernameList:
                        altUsernameIndex = usernameList.index(altUsername)
                        if report.at[altUsernameIndex, "Login"] == "Never":
                            indexToUse = altUsernameIndex
                    else:
                        reportIndex = reportIndex + 1
                    report.at[indexToUse, "Name"] = pupilsValues["GivenName"] + " " + pupilsValues["FamilyName"]
                    report.at[indexToUse, "Username"] = username
                    report.at[indexToUse, "Year"] = dataLib.yearCohortToGroup(yearGroup)
                    report.at[indexToUse, "Login"] = activityValues["accounts:last_login_time"]
                    report.at[indexToUse, "Classroom"] = activityValues["classroom:last_interaction_time"]
                    # Track the most recent timestamp across the whole dataset
                    # and pick the later of login vs Classroom activity.
                    lastLogin = parseDate(activityValues["accounts:last_login_time"])
                    if (not lastLogin == "Never") and lastLogin > mostRecentDate:
                        mostRecentDate = lastLogin
                    lastLoginDays = dateToWorkingDaysAgo(activityValues["accounts:last_login_time"])
                    lastClassroom = parseDate(activityValues["classroom:last_interaction_time"])
                    if (not lastClassroom == "Never") and lastClassroom > mostRecentDate:
                        mostRecentDate = lastClassroom
                    lastClassroomDays = dateToWorkingDaysAgo(activityValues["classroom:last_interaction_time"])
                    if lastLogin == "Never":
                        lastActive = lastClassroom
                        lastActiveDays = lastClassroomDays
                    elif lastClassroom == "Never":
                        lastActive = lastLogin
                        lastActiveDays = lastLoginDays
                    elif lastClassroom > lastLogin:
                        lastActive = lastClassroom
                        lastActiveDays = lastClassroomDays
                    else:
                        lastActive = lastLogin
                        lastActiveDays = lastLoginDays
                    if lastActive == "Never":
                        report.at[indexToUse, "Last Active(Working Days)"] = "Never"
                    else:
                        report.at[indexToUse, "Last Active(Working Days)"] = lastActive.strftime("%d/%m/%Y") + "(" + str(lastActiveDays) + ")"
# pdfCanvas.setFillColorRGB(colourValue,1-colourValue,0)
# Write out the CSV report.
report.to_csv(outputRoot + os.sep + reportTitle, index=False)
# Archive any older CSV reports into the History folder.
for item in os.listdir(outputRoot):
    if item.endswith(".csv") and not item == reportTitle:
        print("Moving historial report " + item + " to " + historyRoot)
        shutil.move(outputRoot + os.sep + item, historyRoot + os.sep + item)
# Get ready to write out a formatted PDF document per year group.
# We are printing on A4 paper - set the page size and borders, in mm.
pageWidth = 210
pageHeight = 297
lineHeight = 8
leftBorder = 10
topBorder = 10
# A mid-gray background to make following lines on the page a bit easier.
lineImage = PIL.Image.new("RGB", (pageWidth-(leftBorder*2), lineHeight), (200, 200, 200))
for yearGroup in yearGroups.keys():
    print("Generating report: " + yearGroup + ".pdf")
    lineNumber = 1
    pdfCanvas = reportlab.pdfgen.canvas.Canvas(outputRoot + os.sep + yearGroup + ".pdf")
    for reportIndex, reportValues in report.iterrows():
        # Draw the report name and column headers.
        if lineNumber == 1:
            pdfCanvas.drawString(leftBorder*reportlab.lib.units.mm, (pageHeight-topBorder)*reportlab.lib.units.mm, "Year: " + str(yearGroup) + ", Most recent date: " + roundDatetime(mostRecentDate).strftime("%d/%m/%Y"))
            for columnName in columnNames:
                if not columnPos[columnName] == None:
                    pdfCanvas.drawString((leftBorder+columnPos[columnName])*reportlab.lib.units.mm, ((pageHeight-lineHeight)-topBorder)*reportlab.lib.units.mm, columnName)
            lineNumber = 2
        if reportValues["Year"] == yearGroup:
            for columnName in columnNames:
                if not columnPos[columnName] == None:
                    # Shade every other row for readability.
                    if lineNumber % 2 == 0:
                        pdfCanvas.drawInlineImage(lineImage, leftBorder*reportlab.lib.units.mm, ((pageHeight-(lineHeight*(lineNumber+1))-(int(lineHeight/4)))-topBorder)*reportlab.lib.units.mm, (pageWidth-(leftBorder*2))*reportlab.lib.units.mm, lineHeight*reportlab.lib.units.mm)
                    pdfCanvas.setFillColorRGB(0,0,0)
                    columnValue = str(reportValues[columnName])
                    if columnName == "Year":
                        columnValue = columnValue.replace("Reception","Rec").replace("Year ","")
                    pdfCanvas.drawString((leftBorder+columnPos[columnName])*reportlab.lib.units.mm, ((pageHeight-(lineHeight*lineNumber))-topBorder)*reportlab.lib.units.mm, columnValue)
            lineNumber = lineNumber + 1
            # Start a fresh page after 35 printed rows.
            if lineNumber == 36:
                pdfCanvas.showPage()
                lineNumber = 1
    # Save the PDF document.
    pdfCanvas.save()
| 2.328125 | 2 |
rasalit/apps/nlucluster/app.py | omidforoqi/rasalit | 261 | 12771105 | <reponame>omidforoqi/rasalit<gh_stars>100-1000
import os
import pathlib
from io import StringIO
from pkg_resources import resource_filename
import streamlit as st
from whatlies.language import CountVectorLanguage
from whatlies.transformers import Pca, Umap
from whatlies import EmbeddingSet, Embedding
import sentencepiece as spm
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow.compat.v1 as tf # noqa: F811
tf.disable_v2_behavior()
# Download the Universal Sentence Encoder Lite module once at import time and
# fetch the path of its bundled SentencePiece model.
with tf.Session() as sess:
    module = hub.Module("https://tfhub.dev/google/universal-sentence-encoder-lite/1")
    spm_path = sess.run(module(signature="spm_path"))
# Tokenizer shared by all encoding calls below.
sp = spm.SentencePieceProcessor()
sp.Load(spm_path)
# Sparse placeholder feeding token ids into the encoder graph; `encodings`
# is the corresponding embedding output tensor.
input_placeholder = tf.sparse_placeholder(tf.int64, shape=[None, None])
encodings = module(
    inputs=dict(
        values=input_placeholder.values,
        indices=input_placeholder.indices,
        dense_shape=input_placeholder.dense_shape,
    )
)
def process_to_IDs_in_sparse_format(sp, sentences):
    """Tokenize *sentences* and convert the ids to TF sparse-tensor components.

    Args:
        sp: a SentencePiece-like tokenizer exposing ``EncodeAsIds(text) -> list[int]``.
        sentences: iterable of strings.

    Returns:
        ``(values, indices, dense_shape)`` suitable for feeding a
        ``tf.sparse_placeholder``: flat token ids, their [row, col] positions,
        and the dense (num_sentences, max_len) shape.
    """
    ids = [sp.EncodeAsIds(x) for x in sentences]
    # default=0 keeps an empty input from crashing max() on an empty sequence.
    max_len = max((len(x) for x in ids), default=0)
    dense_shape = (len(ids), max_len)
    values = [item for sublist in ids for item in sublist]
    indices = [[row, col] for row in range(len(ids)) for col in range(len(ids[row]))]
    return values, indices, dense_shape
def calculate_embeddings(messages, encodings):
    """Encode *messages* with the USE-lite graph and return their embeddings.

    Uses the module-level tokenizer ``sp`` and ``input_placeholder`` defined at
    import time; runs a fresh TF session per call.
    """
    values, indices, dense_shape = process_to_IDs_in_sparse_format(sp, messages)
    feed = {
        input_placeholder.values: values,
        input_placeholder.indices: indices,
        input_placeholder.dense_shape: dense_shape,
    }
    with tf.Session() as session:
        session.run([tf.global_variables_initializer(), tf.tables_initializer()])
        return session.run(encodings, feed_dict=feed)
st.sidebar.markdown("Made with love over at [Rasa](https://rasa.com/).")
uploaded = st.sidebar.file_uploader(
    "Upload a `.txt` file for clustering. Each utterance should appear on a new line."
)
# Fall back to the bundled demo data when nothing was uploaded.
if not uploaded:
    filepath = resource_filename("rasalit", os.path.join("data", "nlu.md"))
    txt = pathlib.Path(filepath).read_text()
    texts = list(set([t for t in txt.split("\n") if len(t) > 0]))
else:
    bytes_data = uploaded.read()
    stringio = StringIO(bytes_data.decode("utf-8"))
    string_data = stringio.read()
    # Strip markdown list markers and drop blank/header lines.
    texts = [
        t.replace(" - ", "")
        for t in string_data.split("\n")
        if len(t) > 0 and t[0] != "#"
    ]
method = st.sidebar.selectbox(
    "Select Embedding Method", ["Lite Sentence Encoding", "CountVector SVD"]
)
# Extra hyperparameters only apply to the CountVector + SVD embedding.
if method == "CountVector SVD":
    n_svd = st.sidebar.slider(
        "Number of SVD components", min_value=2, max_value=100, step=1
    )
    min_ngram, max_ngram = st.sidebar.slider(
        "Range of ngrams", min_value=1, max_value=5, step=1, value=(2, 3)
    )
reduction_method = st.sidebar.selectbox("Reduction Method", ("Umap", "Pca"))
if reduction_method == "Umap":
    n_neighbors = st.sidebar.slider(
        "Number of UMAP neighbors", min_value=1, max_value=100, value=15, step=1
    )
    min_dist = st.sidebar.slider(
        "Minimum Distance for UMAP",
        min_value=0.01,
        max_value=0.99,
        value=0.8,
        step=0.01,
    )
    reduction = Umap(2, n_neighbors=n_neighbors, min_dist=min_dist)
else:
    reduction = Pca(2)
st.markdown("# Simple Text Clustering")
st.markdown(
    "Let's say you've gotten a lot of feedback from clients on different channels. You might like to be able to distill main topics and get an overview. It might even inspire some intents that will be used in a virtual assistant!"
)
st.markdown(
    "This tool will help you discover them. This app will attempt to cluster whatever text you give it. The chart will try to clump text together and you can explore underlying patterns."
)
# Build the embedding set with whichever backend the user selected.
if method == "CountVector SVD":
    lang = CountVectorLanguage(n_svd, ngram_range=(min_ngram, max_ngram))
    embset = lang[texts]
if method == "Lite Sentence Encoding":
    embset = EmbeddingSet(
        *[
            Embedding(t, v)
            for t, v in zip(texts, calculate_embeddings(texts, encodings=encodings))
        ]
    )
# 2-D projection of the embeddings, rendered as an interactive scatter plot.
p = (
    embset.transform(reduction)
    .plot_interactive(annot=False)
    .properties(width=500, height=500, title="")
)
st.write(p)
st.markdown(
    "While the tool helps you in discovering clusters, it doesn't do labelling (yet). We do offer a [jupyter notebook](https://github.com/RasaHQ/rasalit/tree/master/notebooks/bulk-labelling) that might help out though."
)
| 2.171875 | 2 |
main/views.py | lgb2002/club | 1 | 12771106 | from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.contrib.auth.forms import UserCreationForm
from django.shortcuts import render, get_object_or_404
from django.contrib.auth.decorators import login_required
from main.forms import SignupForm
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from main.models import ClubList
from django.views.decorators.csrf import csrf_exempt
# Create your views here.
'''
def login(request):
return render(request, 'main/login.html')
'''
def home(request, tag=None):
    """Render the landing page with clubs ordered by member count (desc)."""
    ordered_clubs = ClubList.objects.order_by('-ClubMemberSum')
    context = {'clublists': ordered_clubs}
    return render(request, "index.html", context)
def signup(request):
    """Register a new user.

    POST: validate the submitted ``SignupForm``; on success create the user
    and redirect to the ``signup_ok`` page, otherwise re-render the form with
    its validation errors.
    GET (or any other method): render an empty form.
    """
    if request.method == "POST":
        signupform = SignupForm(request.POST)
        if signupform.is_valid():
            user = signupform.save(commit=False)
            user.save()
            return HttpResponseRedirect(
                reverse("signup_ok")
            )
    else:
        # Using `else` (not `elif method == "GET"`) so unexpected methods
        # cannot fall through with `signupform` unbound (NameError).
        signupform = SignupForm()
    return render(request, "registration/signup.html", {
        'signupform': signupform
    })
@login_required
@csrf_exempt
def join(request):
    """Join a club: atomically increment its member count, then render the index.

    POST parameters:
        clubname: the ``ClubName`` of the club being joined.
    """
    clublist = ClubList.objects.order_by('-ClubMemberSum')
    if request.method == "POST":
        # F() performs the increment in the database, avoiding the
        # read-modify-write race of get() followed by update().
        from django.db.models import F
        join_club = request.POST.get('clubname')
        ClubList.objects.filter(ClubName=join_club).update(
            ClubMemberSum=F('ClubMemberSum') + 1
        )
    return render(request, "index.html", {
        'clublists': clublist
    })
couplet_composer/util/cache.py | anttikivi/ode-composer | 0 | 12771107 | # Copyright (c) 2020 <NAME>
# Licensed under the MIT License
"""A module that contains caching helpers.
"""
from functools import update_wrapper
__all__ = ["cached"]
def cached(func):
    """Decorator that caches results of a function by its call arguments.

    The cache key is the positional arguments plus the keyword arguments
    sorted by name, so ``f(a=1, b=2)`` and ``f(b=2, a=1)`` share one entry.
    All arguments must be hashable. The cache is unbounded.
    """
    cache = {}

    def wrapper(*args, **kwargs):
        # Sort kwargs so the key is insensitive to keyword order.
        key = args + tuple(sorted(kwargs.items()))
        try:
            return cache[key]
        except KeyError:
            result = func(*args, **kwargs)
            cache[key] = result
            return result

    return update_wrapper(wrapper, func)
| 3.015625 | 3 |
replay_buffer.py | DrArryYao/PVE-MCC_for_unsignalized_intersection | 12 | 12771108 | from collections import deque
import random
import rank_based
class ReplayBuffer(object):
    """Experience replay buffer with two storage modes.

    When ``rand_s`` is True, experiences go into a bounded ``deque`` and
    batches are drawn uniformly at random. Otherwise storage and (rank-based
    prioritized) sampling are delegated to ``rank_based.Experience``.
    """

    def __init__(self, buffer_size, batch_size=32, learn_start=2000, steps=100000, rand_s=False):
        self.buffer_size = buffer_size
        self.num_experiences = 0  # total experiences ever added (never decreases)
        self.buffer = deque()     # used only in uniform-sampling mode
        self.rand_s = rand_s
        conf = {'size': self.buffer_size,
                'learn_start': learn_start,
                'partition_num': 32,
                'steps': steps,
                'batch_size': batch_size}
        self.replay_memory = rank_based.Experience(conf)

    def getBatch(self, batch_size):
        """Sample a batch.

        Returns ``(batch, weights, experience_ids)``; the last two are None
        in uniform-sampling mode.
        """
        if self.rand_s:
            return random.sample(self.buffer, batch_size), None, None
        batch, w, e_id = self.replay_memory.sample(self.num_experiences)
        self.e_id = e_id
        self.w_id = w
        return batch, self.w_id, self.e_id

    def size(self):
        """Return the configured capacity of the buffer."""
        return self.buffer_size

    def add(self, state, action, reward, next_state, done):
        """Store one transition tuple ``(state, action, reward, next_state, done)``."""
        new_experience = (state, action, reward, next_state, done)
        self.num_experiences += 1
        if self.rand_s:
            # Evict the oldest entry only once the deque is actually full,
            # so the buffer really holds up to `buffer_size` transitions.
            if len(self.buffer) >= self.buffer_size:
                self.buffer.popleft()
            self.buffer.append(new_experience)
        else:
            self.replay_memory.store(new_experience)

    def count(self):
        """Return the total number of experiences added so far."""
        return self.num_experiences

    def rebalance(self):
        """Rebalance the rank-based priority structure (prioritized mode only)."""
        self.replay_memory.rebalance()

    def update_priority(self, indices, delta):
        """Update priorities of sampled experiences (prioritized mode only)."""
        self.replay_memory.update_priority(indices, delta)
| 2.65625 | 3 |
gameconst.py | pestunov/game | 0 | 12771109 | <filename>gameconst.py
# Window size in pixels -- presumably the playfield resolution; TODO confirm.
WIDTH = 512
HEIGHT = 336
# Target frame rate, frames per second.
FPS = 50
# Presumably pixels per brick cell and sub-steps per update cycle -- confirm
# against the code that consumes these constants.
PIX_FOR_BRIX = 16
STEPS_FOR_CYCLE = 16
N_STEP_FOR_TEST = 5
# Colors as RGB tuples; BKGR (background) is currently the same as BLACK.
BLACK = (0, 0, 0)
BKGR = (0, 0, 0)
| 1.296875 | 1 |
gottesdienstplan/auth.py | wulmer/churchtools-automation | 0 | 12771110 | import json
import os
from google.oauth2 import service_account
from googleapiclient.discovery import build
# OAuth scopes needed: full Sheets and Drive access.
SCOPES = [
    "https://www.googleapis.com/auth/spreadsheets",
    "https://www.googleapis.com/auth/drive",
]
# Service-account credentials come from the GOOGLE_SERVICE_ACCOUNT environment
# variable (raw JSON); raises KeyError if it is unset.
credentials = service_account.Credentials.from_service_account_info(
    json.loads(os.environ["GOOGLE_SERVICE_ACCOUNT"]), scopes=SCOPES
)
# Shared Sheets API v4 client used by the rest of the package.
spreadsheet_service = build("sheets", "v4", credentials=credentials)
| 2.390625 | 2 |
udp-client.py | BrandonSoto/Socket-Types | 0 | 12771111 | #!/usr/bin/env python
import socket
import time
# NOTE: this script uses Python 2 print statements.
server_name = 'localhost'
server_port = 4242
# UDP (datagram) socket; no connection is established.
client_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Send 25 numbered messages, echoing each server reply, at 2 messages/second.
for i in range(0, 25):
    message = 'message ' + str(i)
    client_socket.sendto(message, (server_name, server_port))
    receved_message, server_address = client_socket.recvfrom(2048)
    print 'Received: ', receved_message
    time.sleep(.5)
print 'Closed', client_socket.getsockname()
client_socket.close()
| 3.109375 | 3 |
Python/Algorithms/Sieve Algorithms/Sieve of Eratosthenes.py | m-payal/AlgorithmsAndDataStructure | 195 | 12771112 | <reponame>m-payal/AlgorithmsAndDataStructure<gh_stars>100-1000
"""
Sieve of Eratosthenes :
Generate all the primes less than any integer nn
"""
from math import sqrt
def get_primes(n):
    """Return all primes <= n using the Sieve of Eratosthenes.

    Runs in O(n log log n) time and O(n) space. Returns an empty list for
    n < 2 (including negative n, which would otherwise crash sqrt()).
    """
    if n < 2:
        return []
    m = n + 1
    is_prime = [True] * m
    for i in range(2, int(sqrt(n) + 1)):
        if is_prime[i]:
            # i is prime: mark every multiple from i*i upward as composite.
            for j in range(i * i, m, i):
                is_prime[j] = False
    return [i for i in range(2, m) if is_prime[i]]
# Demo: primes up to 25.
print(get_primes(25))
| 3.484375 | 3 |
tests/conftest.py | thusser/spexxy | 4 | 12771113 | import pytest
import numpy as np
from spexxy.grid import GridAxis, ValuesGrid
@pytest.fixture()
def number_grid():
    """Build a small 2-D ValuesGrid fixture over integer axes x and y.

    The value at (x, y) is taken from row y, column x of a fixed table.
    """
    table = np.array([
        [1, 2, 3, 4, 5],
        [3, 4, 5, 6, 7],
        [2, 3, 4, 5, 6],
        [4, 5, 6, 7, 8]
    ])
    n_rows, n_cols = table.shape
    x_axis = GridAxis(name='x', values=list(range(n_cols)))
    y_axis = GridAxis(name='y', values=list(range(n_rows)))
    # Map every (x, y) coordinate pair onto its table entry.
    values = {(x, y): table[y, x]
              for x in x_axis.values
              for y in y_axis.values}
    return ValuesGrid([x_axis, y_axis], values)
| 2.421875 | 2 |
src/database/biz/Accounts/Accounts.py | ytitweb/GitHub.Uploader.Pi3.Https.201802230700 | 0 | 12771114 | <reponame>ytitweb/GitHub.Uploader.Pi3.Https.201802230700
from database.Database import Database as Db
class Accounts:
    """Thin data-access wrapper around the 'Accounts' table of an injected DB."""
    def __init__(self, db):
        #self.__db = Database().Accounts
        #self.__db = Db()._Database__Initializers[self.__class__.__name__].Db
        self.__db = db # dataset.connect('sqlite:///' + .../GitHub.Accounts.sqlite3); caller: DatabaseMeta.__init__()
    @property
    def Db(self): return self.__db  # expose the underlying connection read-only
    # --------------------
    # UserRegister.py
    # --------------------
    def Regist(self, record):
        """Insert a new account record into the Accounts table."""
        self.__db['Accounts'].insert(record)
    def Delete(self): pass  # not implemented yet
    #def GetUserForBasic(self): pass
    #def GetUserForToken(self): pass
    #def GetUserForTwoFactor(self): pass
    # --------------------
    # Uploader.py
    # --------------------
    def GetAccount(self, username=None):
        """Return one account: the first row, or the row matching *username*."""
        # NOTE(review): `.next()` is Python-2 iterator style -- confirm the
        # find() result type supports it on the runtime in use.
        if None is username: return self.__db['Accounts'].find().next()
        else: return self.__db['Accounts'].find_one(Username=username)
    def GetAccounts(self):
        """Yield every account row."""
        for account in self.__db['Accounts'].find(): yield account
| 2.71875 | 3 |
sys_simulator/general/actions_discretizations.py | lbaiao/sys-simulator-2 | 1 | 12771115 | <reponame>lbaiao/sys-simulator-2
import numpy as np
def db_five(min_value: float, max_value: float):
    """Five discrete dB levels: the floor plus 4 evenly spaced values
    between (max_value - 40) and (max_value - 10)."""
    ceiling = max_value - 10
    ladder = np.linspace(ceiling - 30, ceiling, 4)
    return [min_value] + list(ladder)
def db_six(min_value: float, max_value: float):
    """Six discrete dB levels: a floor plus 5 evenly spaced values
    between (max_value - 50) and (max_value - 10).

    The floor is min_value when it lies below max_value - 40, else -90.
    """
    ceiling = max_value - 10
    ladder = np.linspace(ceiling - 40, ceiling, 5)
    floor = min_value if min_value < max_value - 40 else -90
    return [floor] + list(ladder)
def db_ten(min_value: float, max_value: float):
    """Ten discrete dB levels: a floor, 7 values evenly spaced over
    [max_value - 60, max_value - 10], then max_value/2 and max_value.

    The floor is min_value when it lies below max_value - 40, else -90.
    """
    top_pair = [max_value / 2, max_value]
    mid_ladder = np.linspace(max_value - 60, max_value - 10, 7)
    floor = min_value if min_value < max_value - 40 else -90
    return [floor, *mid_ladder, *top_pair]
def db_20():
    """Twenty fixed dB levels: -90, then 10 coarse steps over [-60, -20]
    and 9 fine steps over [-14, 0]."""
    coarse = np.linspace(-60, -20, 10)
    fine = np.linspace(-14, 0, 9)
    return [-90, *coarse, *fine]
def db_30():
    """Thirty fixed dB levels over three evenly spaced bands:
    [-90, -64], [-60, -20] and [-14, 0], 10 values each."""
    low_band = np.linspace(-90, -64, 10)
    mid_band = np.linspace(-60, -20, 10)
    high_band = np.linspace(-14, 0, 10)
    return [*low_band, *mid_band, *high_band]
| 2.828125 | 3 |
CybORG/CybORG/Agents/SimpleAgents/B_line.py | rafvasq/cage-challenge-1 | 18 | 12771116 | from CybORG.Agents import BaseAgent
from CybORG.Shared import Results
from CybORG.Shared.Actions import PrivilegeEscalate, ExploitRemoteService, DiscoverRemoteSystems, Impact, \
DiscoverNetworkServices, Sleep
class B_lineAgent(BaseAgent):
    """Scripted red agent that walks a fixed 15-step attack sequence
    (User0 subnet scan -> exploit/escalate through Enterprise hosts ->
    compromise and Impact Op_Server0).

    ``self.action`` indexes the current step; on a failed action the agent
    falls back to the step given by ``self.jumps``. Successful actions are
    memoised in ``self.action_history`` so repeated visits reuse them.
    """
    def __init__(self):
        self.action = 0                 # index of the current scripted step (0..14)
        self.target_ip_address = None
        self.last_subnet = None
        self.last_ip_address = None
        self.action_history = {}        # step index -> previously built Action
        # Fallback step for each step index when the last action failed.
        self.jumps = [0,1,2,2,2,2,5,5,5,5,9,9,9,12,13]

    def train(self, results: Results):
        """allows an agent to learn a policy"""
        pass

    def get_action(self, observation, action_space):
        # print(self.action)
        """gets an action from the agent that should be performed based on the agent's internal state and provided observation and action space"""
        session = 0
        while True:
            # Advance on success, otherwise jump back per the fallback table.
            if observation['success'] == True:
                self.action += 1 if self.action < 14 else 0
            else:
                self.action = self.jumps[self.action]

            if self.action in self.action_history:
                action = self.action_history[self.action]

            # Discover Remote Systems
            elif self.action == 0:
                self.last_subnet = observation['User0']['Interface'][0]['Subnet']
                action = DiscoverRemoteSystems(session=session, agent='Red', subnet=self.last_subnet)
            # Discover Network Services- new IP address found
            elif self.action == 1:
                self.last_ip_address = [value for key, value in observation.items() if key != 'success'][1]['Interface'][0]['IP Address']
                action =DiscoverNetworkServices(session=session, agent='Red', ip_address=self.last_ip_address)

            # Exploit User1
            elif self.action == 2:
                action = ExploitRemoteService(session=session, agent='Red', ip_address=self.last_ip_address)

            # Privilege escalation on User1
            elif self.action == 3:
                hostname = [value for key, value in observation.items() if key != 'success' and 'System info' in value][0]['System info']['Hostname']
                action = PrivilegeEscalate(agent='Red', hostname=hostname, session=session)

            # Discover Network Services- new IP address found
            elif self.action == 4:
                self.last_ip_address = observation['Enterprise1']['Interface'][0]['IP Address']
                action = DiscoverNetworkServices(session=session, agent='Red', ip_address=self.last_ip_address)

            # Exploit- Enterprise1
            elif self.action == 5:
                self.target_ip_address = [value for key, value in observation.items() if key != 'success'][0]['Interface'][0]['IP Address']
                action = ExploitRemoteService(session=session, agent='Red', ip_address=self.target_ip_address)

            # Privilege escalation on Enterprise1
            elif self.action == 6:
                hostname = [value for key, value in observation.items() if key != 'success' and 'System info' in value][0]['System info']['Hostname']
                action = PrivilegeEscalate(agent='Red', hostname=hostname, session=session)

            # Scanning the new subnet found.
            elif self.action == 7:
                self.last_subnet = observation['Enterprise1']['Interface'][0]['Subnet']
                action = DiscoverRemoteSystems(subnet=self.last_subnet, agent='Red', session=session)

            # Discover Network Services- Enterprise2
            elif self.action == 8:
                self.target_ip_address = [value for key, value in observation.items() if key != 'success'][2]['Interface'][0]['IP Address']
                action = DiscoverNetworkServices(session=session, agent='Red', ip_address=self.target_ip_address)

            # Exploit- Enterprise2
            elif self.action == 9:
                self.target_ip_address = [value for key, value in observation.items() if key != 'success'][0]['Interface'][0]['IP Address']
                action = ExploitRemoteService(session=session, agent='Red', ip_address=self.target_ip_address)

            # Privilege escalation on Enterprise2
            elif self.action == 10:
                hostname = [value for key, value in observation.items() if key != 'success' and 'System info' in value][0]['System info']['Hostname']
                action = PrivilegeEscalate(agent='Red', hostname=hostname, session=session)

            # Discover Network Services- Op_Server0
            elif self.action == 11:
                action = DiscoverNetworkServices(session=session, agent='Red', ip_address=observation['Op_Server0']['Interface'][0]['IP Address'])

            # Exploit- Op_Server0
            elif self.action == 12:
                info = [value for key, value in observation.items() if key != 'success']
                if len(info) > 0:
                    action = ExploitRemoteService(agent='Red', session=session, ip_address=info[0]['Interface'][0]['IP Address'])
                else:
                    # Nothing usable observed: restart the sequence from step 0.
                    self.action = 0
                    continue
            # Privilege escalation on Op_Server0
            elif self.action == 13:
                action = PrivilegeEscalate(agent='Red', hostname='Op_Server0', session=session)

            # Impact on Op_server0
            elif self.action == 14:
                action = Impact(agent='Red', session=session, hostname='Op_Server0')

            if self.action not in self.action_history:
                self.action_history[self.action] = action
            return action

    def end_episode(self):
        # Reset all per-episode state so the script restarts from step 0.
        self.action = 0
        self.target_ip_address = None
        self.last_subnet = None
        self.last_ip_address = None
        self.action_history = {}

    def set_initial_values(self, action_space, observation):
        pass
| 2.65625 | 3 |
mailer/__main__.py | snoonetIRC/anope-mailer-daemon | 0 | 12771117 | import time
from .daemon import Daemon
def main():
    """Run the mailer daemon loop: tick, process queued e-mail files, sleep.

    Loops forever; per-file failures are routed to ``daemon.on_error`` so one
    bad file does not stop the loop.
    """
    daemon = Daemon()
    while True:
        daemon.tick()
        files = daemon.get_email_files()
        for file in files:
            try:
                daemon.handle_email(file)
            except Exception:
                # Best-effort: report and keep processing remaining files.
                daemon.on_error()
        time.sleep(5)
if __name__ == '__main__':
    main()
| 2.65625 | 3 |
tools/fake_robot.py | BHAY-3DiTex/Pyluos | 16 | 12771118 | from __future__ import division
import json
from time import time
from random import randint, choice
from threading import Timer
from tornado.ioloop import IOLoop
from tornado.web import Application
from tornado.websocket import WebSocketHandler
class RepeatedTimer(object):
    """Invoke *function(\*args, \*\*kwargs)* every *interval* seconds.

    Scheduling starts immediately on construction and continues (each firing
    re-arms the next one-shot ``threading.Timer``) until ``stop()`` is called.
    """

    def __init__(self, interval, function, *args, **kwargs):
        self._timer = None
        self.interval = interval
        self.function = function
        self.args = args
        self.kwargs = kwargs
        self.is_running = False
        self.start()

    def _run(self):
        # Clear the flag first so start() can re-arm after the callback.
        self.is_running = False
        self.function(*self.args, **self.kwargs)
        self.start()

    def start(self):
        """Arm the next one-shot timer; no-op if one is already pending."""
        if self.is_running:
            return
        self.is_running = True
        self._timer = Timer(self.interval, self._run)
        self._timer.start()

    def stop(self):
        """Cancel the pending timer and stop rescheduling."""
        self._timer.cancel()
        self.is_running = False
class FakeRobot(WebSocketHandler):
    """WebSocket handler that simulates a Luos robot: it periodically pushes a
    JSON state message with randomised sensor values to the connected client."""
    period = 1 / 10   # publish period in seconds (10 Hz)
    verbose = False

    def open(self):
        if self.verbose:
            print('WebSocket connection open.')
        self.set_nodelay(True)
        # Publish the fake state at `period` intervals for this connection.
        self.rt = RepeatedTimer(self.period, self.proxy_pub)

    def on_message(self, message):
        if self.verbose:
            print('{}: Received {}'.format(time(), message))
        self.handle_command(json.loads(message))
        if (message == '{detection:}'):
            self.ioloop.add_callback(self.pub_routing_table)

    def on_close(self):
        if self.verbose:
            # NOTE(review): `close_reason` is not a standard tornado attribute
            # -- confirm it is set elsewhere or this print would raise.
            print('WebSocket closed {}.'.format(self.close_reason))
        self.rt.stop()

    def proxy_pub(self):
        # Hop from the timer thread onto the IOLoop before writing.
        self.ioloop.add_callback(self.pub_state)

    def pub_routing_table(self):
        # NOTE(review): this routing table is built but never sent -- a
        # write_message(json.dumps(state)) appears to be missing; confirm intent.
        state = {'routing_table': [{'uuid': [4456498, 1347571976, 540555569], 'port_table': [65535, 2], 'services': [{'type': 'Gate', 'id': 1, 'alias': 'gate'}]}, {'uuid': [3932192, 1194612503, 540554032], 'port_table': [3, 1], 'services': [{'type': 'Angle', 'id': 2, 'alias': 'potentiometer_m'}]}, {'uuid': [2949157, 1194612501, 540554032], 'port_table': [65535, 2], 'services': [{'type': 'Gate', 'id': 3, 'alias': 'gate1'}]}]}

    def pub_state(self):
        # Fake service list; sensor-like services get fresh random readings
        # on every publication.
        state = {
            'services': [
                {
                    'alias': 'my_gate',
                    'id': 1,
                    'type': 'Gate',
                },
                {
                    'alias': 'my_led',
                    'id': 2,
                    'type': 'Color',
                },
                {
                    'alias': 'my_servo',
                    'id': 3,
                    'type': 'Servo',
                },
                {
                    'alias': 'my_button',
                    'id': 4,
                    'type': 'State',
                    'state': choice((0, 1)),
                },
                {
                    'alias': 'my_potentiometer',
                    'id': 5,
                    'type': 'Angle',
                    'position': randint(0, 4096),
                },
                {
                    'alias': 'my_relay',
                    'id': 6,
                    'type': 'relay',
                },
                {
                    'alias': 'my_distance',
                    'id': 7,
                    'type': 'Distance',
                    'distance': randint(0, 2000),
                },
                {
                    'alias': 'my_dxl_1',
                    'id': 8,
                    'type': 'DynamixelMotor',
                    'position': randint(-180, 180),
                },
                {
                    'alias': 'my_dxl_2',
                    'id': 9,
                    'type': 'DynamixelMotor',
                    'position': randint(-180, 180),
                },
            ]
        }
        self.write_message(json.dumps(state))

    def handle_command(self, message):
        # Commands from the client are currently ignored.
        pass

    def check_origin(self, origin):
        # Accept cross-origin connections (test tool only).
        return True
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--port', type=int, default=9342)
    parser.add_argument('--verbose', action='store_true', default=False)
    args = parser.parse_args()

    # Configure the handler class and serve it on '/' with a dedicated IOLoop.
    loop = IOLoop()

    port = args.port
    FakeRobot.verbose = args.verbose
    FakeRobot.ioloop = loop

    app = Application([
        (r'/', FakeRobot)
    ])
    app.listen(port)

    url = 'ws://{}:{}'.format('127.0.0.1', port)
    if args.verbose:
        print('Fake robot serving on {}'.format(url))

    loop.start()
| 2.4375 | 2 |
src/vessel_NN_training.py | Binjie-Qin/SVS-net | 8 | 12771119 | ###################################################
#
# Script to:
# - Load the images and extract the patches
# - Define the neural network
# - define the training
#
##################################################
import numpy as np
import configparser
from keras.utils import multi_gpu_model
from keras.models import Model
from keras.layers import Input, concatenate, Conv2D, MaxPooling2D, UpSampling2D, Reshape, core, Dropout,Add,Convolution2D,merge,Conv3D, MaxPooling3D,Multiply
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras import backend as K
from keras.utils.vis_utils import plot_model as plot
from keras.optimizers import SGD
import h5py
import sys
sys.path.insert(0, './lib/')
#from help_functions import *
from keras.layers import BatchNormalization,SpatialDropout3D,Reshape,GlobalMaxPooling3D,GlobalAveragePooling2D
#function to obtain data for training/testing (validation)
#from extract_patches import get_data_training
from keras.layers.core import Dropout, Activation
from keras import backend as K
import tensorflow as tf
print(K.backend())
from data_feed import *
from keras import optimizers
#from pre_processing import my_PreProc
import math
import sys
sys.setrecursionlimit(4000)
#Define the neural network
def focal_loss(gamma=2,alpha=0.75):
    """Return a binary focal-loss function (summed, not averaged) for Keras.

    gamma down-weights easy examples; alpha weights the positive class.
    """
    def focal_loss_fixed(y_true,y_pred):
        # pt_1: predicted prob where the label is 1 (elsewhere 1, contributing 0 loss);
        # pt_0: predicted prob where the label is 0 (elsewhere 0).
        pt_1=tf.where(tf.equal(y_true,1),y_pred,tf.ones_like(y_pred))
        pt_0=tf.where(tf.equal(y_true,0),y_pred,tf.zeros_like(y_pred))
        return -K.sum(alpha*K.pow(1.-pt_1,gamma)*K.log(pt_1))-K.sum((1-alpha)*K.pow(pt_0,gamma)*K.log(1.-pt_0))
    return focal_loss_fixed
def block_2_conv(input,num_filter):
    """Residual 2-D block: two 3x3 Conv+BN layers with an identity skip.

    `input` must already have `num_filter` channels (channels_first) for the
    Add() to be shape-compatible.
    """
    conv1=Conv2D(num_filter,(3,3),strides=(1,1),padding='same',data_format='channels_first')(input)
    conv1_bn=BatchNormalization(axis=1)(conv1)
    conv1_relu=Activation('relu')(conv1_bn)
    conv2=Conv2D(num_filter,(3,3),strides=(1,1),padding='same',data_format='channels_first')(conv1_relu)
    conv2_bn=BatchNormalization(axis=1)(conv2)
    # Identity shortcut, then the final non-linearity.
    conv2_add=Add()([input,conv2_bn])
    conv2_relu=Activation('relu')(conv2_add)
    return conv2_relu
def block_2_conv3D(input,num_filter):
    """Residual 3-D block: two 3x3x3 Conv+BN layers with an identity skip.

    `input` must already have `num_filter` channels (channels_first).
    """
    conv1 = Conv3D(num_filter, (3, 3,3), strides=(1, 1,1), padding='same', data_format='channels_first')(input)
    conv1_bn = BatchNormalization(axis=1)(conv1)
    conv1_relu = Activation('relu')(conv1_bn)
    conv2 = Conv3D(num_filter, (3, 3,3), strides=(1, 1,1), padding='same', data_format='channels_first')(conv1_relu)
    conv2_bn = BatchNormalization(axis=1)(conv2)
    # Identity shortcut, then the final non-linearity.
    conv2_add = Add()([input, conv2_bn])
    conv2_relu = Activation('relu')(conv2_add)
    return conv2_relu
def attention_block(input,iter,depth):
    """Channel attention for a 5-D (N, depth, 4, iter, iter) tensor.

    Squeeze (global max pool) -> two 1x1x1 convs -> sigmoid gate, then the
    per-channel gate is tiled back to the input's spatial/temporal shape by
    repeated concatenation and applied multiplicatively.
    """
    global_pool=GlobalMaxPooling3D(data_format='channels_first')(input)
    global_pool1=Reshape((depth,1,1,1))(global_pool)
    conv_1x1=Conv3D(depth,(1,1,1),padding='same',data_format='channels_first')(global_pool1)
    relu_out=Activation('relu')(conv_1x1)
    conv_2x1=Conv3D(depth,(1,1,1),strides=(1,1,1),padding='same',data_format='channels_first')(relu_out)
    sigmoid_out=Activation('sigmoid')(conv_2x1)
    # Tile the (depth,1,1,1) gate to (depth,4,iter,iter) via concatenation.
    concat1=sigmoid_out
    #print("***********1")
    #print(concat1.shape)
    for i in range(4-1):
        concat1=concatenate([concat1,sigmoid_out],axis=2)
    concat2=concat1
    for j in range(iter-1):
        concat2=concatenate([concat2,concat1],axis=3)
    concat3=concat2
    for k in range(iter-1):
        concat3=concatenate([concat3,concat2],axis=4)
    #print("************2")
    #print(concat3.shape)
    out=Multiply()([input,concat3])
    return out
def saliency_map_attention_block(input,depth):
    """Spatial (saliency-map) attention with a residual connection.

    Two 1x1x1 convs produce a per-voxel sigmoid gate; the gated features are
    added back onto the input.
    """
    conv_1x1=Conv3D(depth,(1,1,1),padding='same',data_format='channels_first')(input)
    relu_out=Activation('relu')(conv_1x1)
    conv_2x1=Conv3D(depth,(1,1,1),padding='same',data_format='channels_first')(relu_out)
    sigmoid_out=Activation('sigmoid')(conv_2x1)
    out1=Multiply()([input,sigmoid_out])
    out=Add()([input,out1])
    return out
def channel_attnetion_block(low_input,high_input,depth,size):
    """Channel attention fusing a low-level and a high-level 2-D feature map.

    The concatenated features are squeezed (global average pool) into a
    per-channel sigmoid gate that is tiled to (depth, size, size), applied to
    `low_input`, and added onto `high_input`.
    (Name keeps the original "attnetion" spelling for caller compatibility.)
    """
    input=concatenate([low_input,high_input],axis=1)
    global_pool=GlobalAveragePooling2D(data_format='channels_first')(input)
    global_pool1 = Reshape((2*depth, 1, 1))(global_pool)
    conv_1x1 = Conv2D(depth, (1, 1), padding='same', data_format='channels_first')(global_pool1)
    relu_out = Activation('relu')(conv_1x1)
    conv_2x1 = Conv2D(depth, (1, 1), strides=(1, 1), padding='same', data_format='channels_first')(relu_out)
    sigmoid_out = Activation('sigmoid')(conv_2x1)
    # Tile the (depth,1,1) gate to (depth,size,size) via concatenation.
    concat1 = sigmoid_out
    for i in range(size-1):
        concat1=concatenate([concat1,sigmoid_out],axis=2)
    concat2=concat1
    for j in range(size-1):
        concat2=concatenate([concat2,concat1],axis=3)
    out1 = Multiply()([low_input, concat2])
    out2=Add()([out1,high_input])
    return out2
# F1 score: harmonic mean of precision and sensitivity DICE = 2*TP/(2*TP + FN + FP)
def DiceCoef(y_true, y_pred):
    """Dice coefficient: 2*|A∩B| / (|A| + |B|), smoothed by 1e-5 to avoid
    division by zero on empty masks."""
    truth = K.flatten(y_true)
    prediction = K.flatten(y_pred)
    overlap = K.sum(truth * prediction)
    denominator = K.sum(truth) + K.sum(prediction) + 0.00001
    return (2. * overlap) / denominator
def DiceCoefLoss(y_true, y_pred):
    # Negated Dice coefficient: minimizing this loss maximizes overlap.
    return -DiceCoef(y_true, y_pred)
def get_unet3D_new_4_fram_2(n_ch,frame,patch_height,patch_width):
    """Build the SVS-net model: a 3-D residual encoder over `frame` input
    frames with attention blocks, collapsed to 2-D skip connections feeding a
    2-D decoder that outputs a single-channel sigmoid segmentation map.

    Input shape (channels_first): (n_ch, frame, patch_height, patch_width).
    NOTE(review): the Reshape sizes hard-code 512x512 patches and frame=4.
    """
    inputs = Input(shape=(n_ch, frame,patch_height, patch_width))
    conv0 = Conv3D(8, (1, 1,1), padding='same')(inputs)
    conv1 = block_2_conv3D(conv0, 8)
    ## channel attention
    #out1=attention_block(conv1,512,8)
    ### feature output: collapse the temporal dimension for the 2-D skip connection
    conv1_3d_2d = Conv3D(8, (4, 1, 1), strides=(1, 1, 1), data_format='channels_first')(conv1)
    #conv1_3d_2d = Conv3D(8, (4, 1, 1), strides=(1, 1, 1), data_format='channels_first')(out1)
    conv1_trans_2d = Reshape((8, 512, 512))(conv1_3d_2d)
    conv1_1 = Conv3D(16, (2, 2,2), strides=(1,2,2),padding='same', data_format='channels_first')(conv1)
    conv1_1 = BatchNormalization(axis=1)(conv1_1)
    conv1_1 = Activation('relu')(conv1_1)
    conv2 = block_2_conv3D(conv1_1, 16)
    ## channel attention
    #out2 = attention_block(conv2, 256, 16)
    ### feature output: collapse the temporal dimension for the 2-D skip connection
    conv2_3d_2d = Conv3D(16, (4, 1, 1), strides=(1, 1, 1), data_format='channels_first')(conv2)
    #conv2_3d_2d = Conv3D(16, (4, 1, 1), strides=(1, 1, 1), data_format='channels_first')(out2)
    conv2_trans_2d = Reshape((16, 256, 256))(conv2_3d_2d)
    conv2_1 = Conv3D(32, (2, 2,2), strides=(1,2,2), padding='same',data_format='channels_first')(conv2)
    conv2_1 = BatchNormalization(axis=1)(conv2_1)
    conv2_1 = Activation('relu')(conv2_1)
    conv3 = block_2_conv3D(conv2_1, 32)
    ## channel attention
    #out3 = attention_block(conv3, 128, 32)
    ### feature output: collapse the temporal dimension for the 2-D skip connection
    conv3_3d_2d = Conv3D(32, (4, 1, 1), strides=(1, 1, 1), data_format='channels_first')(conv3)
    #conv3_3d_2d = Conv3D(32, (4, 1, 1), strides=(1, 1, 1), data_format='channels_first')(out3)
    conv3_trans_2d = Reshape((32, 128, 128))(conv3_3d_2d)
    conv3_1 = Conv3D(64, (2, 2,2), strides=(1,2,2),padding='same', data_format='channels_first')(conv3)
    conv3_1 = BatchNormalization(axis=1)(conv3_1)
    conv3_1 = Activation('relu')(conv3_1)
    conv4 = block_2_conv3D(conv3_1, 64)
    ##saliency_map attention followed by channel attention at this depth
    out4_1=saliency_map_attention_block(conv4,64)
    ## channel attention
    #out4 = attention_block(conv4, 64, 64)
    out4 = attention_block(out4_1, 64, 64)
    ### feature output: collapse the temporal dimension for the 2-D skip connection
    #conv4_3d_2d = Conv3D(64, (4, 1, 1), strides=(1, 1, 1), data_format='channels_first')(conv4)
    conv4_3d_2d = Conv3D(64, (4, 1, 1), strides=(1, 1, 1), data_format='channels_first')(out4)
    conv4_trans_2d = Reshape((64, 64, 64))(conv4_3d_2d)
    conv4_1 = Conv3D(128, (2, 2,2), strides=(1,2,2), padding='same',data_format='channels_first')(conv4)
    conv4_1 = BatchNormalization(axis=1)(conv4_1)
    conv4_1 = Activation('relu')(conv4_1)
    conv5 = block_2_conv3D(conv4_1, 128)
    ## channel attention
    out5 = attention_block(conv5, 32, 128)
    ### feature output: collapse the temporal dimension for the 2-D skip connection
    #conv5_3d_2d = Conv3D(128, (4, 1, 1), strides=(1, 1, 1), data_format='channels_first')(conv5)
    conv5_3d_2d = Conv3D(128, (4, 1, 1), strides=(1, 1, 1), data_format='channels_first')(out5)
    conv5_trans_2d = Reshape((128, 32, 32))(conv5_3d_2d)
    conv5_dropout = SpatialDropout3D(0.5,data_format='channels_first')(conv5)
    conv5_1 = Conv3D(256, (2, 2,2), strides=(1,2,2), padding='same',data_format='channels_first')(conv5_dropout)
    conv5_1 = BatchNormalization(axis=1)(conv5_1)
    conv5_1 = Activation('relu')(conv5_1)
    conv6 = block_2_conv3D(conv5_1, 256)
    ## channel attention
    out6 = attention_block(conv6, 16, 256)
    ### feature output: collapse the temporal dimension for the 2-D skip connection
    #conv6_3d_2d=Conv3D(256,(4,1,1),strides=(1,1,1),data_format='channels_first')(conv6)
    conv6_3d_2d = Conv3D(256, (4, 1, 1), strides=(1, 1, 1), data_format='channels_first')(out6)
    conv6_trans_2d=Reshape((256,16,16))(conv6_3d_2d)
    conv6_dropout = SpatialDropout3D(0.5,data_format='channels_first')(conv6)
    conv6_1 = Conv3D(512, (2, 2,2), strides=(1,2,2), padding='same',data_format='channels_first')(conv6_dropout)
    conv6_1 = BatchNormalization(axis=1)(conv6_1)
    conv6_1 = Activation('relu')(conv6_1)
    # Bottleneck: collapse the temporal dimension and switch to the 2-D decoder.
    conv3d_2d=Conv3D(512,(4,1,1),strides=(1,1,1),data_format='channels_first')(conv6_1)
    #print(conv3d_2d.shape)
    conv_trans_2d=Reshape((512,8,8))(conv3d_2d)
    # Decoder: upsample + conv + BN + ReLU, concatenate the matching 2-D skip,
    # then a residual 2-D block -- repeated back up to full resolution.
    up1 = UpSampling2D(size=(2, 2))(conv_trans_2d)
    up1_1 = Conv2D(256, (2, 2), strides=1, padding='same', data_format='channels_first')(up1)
    up1_1 = BatchNormalization(axis=1)(up1_1)
    up1_1 = Activation('relu')(up1_1)
    up1_2 = concatenate([ conv6_trans_2d , up1_1], axis=1)
    up1_3 = block_2_conv(up1_2, 512)
    up2 = UpSampling2D(size=(2, 2))(up1_3)
    up2_1 = Conv2D(128, (2, 2), strides=1, padding='same', data_format='channels_first')(up2)
    up2_1 = BatchNormalization(axis=1)(up2_1)
    up2_1 = Activation('relu')(up2_1)
    up2_2 = concatenate([conv5_trans_2d, up2_1], axis=1)
    up2_3 = block_2_conv(up2_2, 256)
    up3 = UpSampling2D(size=(2, 2))(up2_3)
    up3_1 = Conv2D(64, (2, 2), strides=1, padding='same', data_format='channels_first')(up3)
    up3_1 = BatchNormalization(axis=1)(up3_1)
    up3_1 = Activation('relu')(up3_1)
    up3_2 = concatenate([conv4_trans_2d, up3_1], axis=1)
    up3_3 = block_2_conv(up3_2, 128)
    up4 = UpSampling2D(size=(2, 2))(up3_3)
    up4_1 = Conv2D(32, (2, 2), strides=1, padding='same', data_format='channels_first')(up4)
    up4_1 = BatchNormalization(axis=1)(up4_1)
    up4_1 = Activation('relu')(up4_1)
    up4_2 = concatenate([conv3_trans_2d, up4_1], axis=1)
    up4_3 = block_2_conv(up4_2, 64)
    up5 = UpSampling2D(size=(2, 2))(up4_3)
    up5_1 = Conv2D(16, (2, 2), strides=1, padding='same', data_format='channels_first')(up5)
    up5_1 = BatchNormalization(axis=1)(up5_1)
    up5_1 = Activation('relu')(up5_1)
    up5_2 = concatenate([conv2_trans_2d, up5_1], axis=1)
    up5_3 = block_2_conv(up5_2, 32)
    up6 = UpSampling2D(size=(2, 2))(up5_3)
    up6_1 = Conv2D(8, (2, 2), strides=1, padding='same', data_format='channels_first')(up6)
    up6_1 = BatchNormalization(axis=1)(up6_1)
    up6_1 = Activation('relu')(up6_1)
    up6_2 = concatenate([conv1_trans_2d, up6_1], axis=1)
    up6_3 = block_2_conv(up6_2, 16)
    # Single-channel sigmoid output: per-pixel vessel probability.
    outputs = Conv2D(1, (1, 1), activation='sigmoid')(up6_3)
    model = Model(inputs=inputs, outputs=outputs)
    #model.compile(optimizer='sgd', loss=DiceCoefLoss, metrics=[DiceCoef])
    return model
def get_unet3D_new_4_fram_2_new(n_ch,frame,patch_height,patch_width):
    """Build a 3D-encoder / 2D-decoder U-Net with channel attention on skips.

    Encoder: six stages of 3D convolutions over a ``frame``-deep input.  After
    each stage a (4, 1, 1) Conv3D collapses the frame axis and a Reshape
    produces a 2D feature map kept as that resolution's skip connection.
    Decoder: six 2D upsampling stages; each fuses its skip map with the
    upsampled path via ``channel_attnetion_block`` (sic -- matches the
    helper's actual name) instead of plain concatenation.

    NOTE(review): the (4, 1, 1) frame-collapsing kernels and the hard-coded
    Reshape sizes assume frame == 4 and patch_height == patch_width == 512;
    other argument values will fail at graph-construction time -- confirm.

    :param n_ch: number of input channels (channels_first layout)
    :param frame: number of input frames (assumed 4, see note above)
    :param patch_height: input patch height (assumed 512, see note above)
    :param patch_width: input patch width (assumed 512, see note above)
    :return: an uncompiled Keras ``Model`` producing a single-channel
        sigmoid segmentation map
    """
    inputs = Input(shape=(n_ch, frame,patch_height, patch_width))
    # ----- Encoder stage 1 (8 channels, 512x512) -----
    conv0 = Conv3D(8, (1, 1,1), padding='same')(inputs)
    conv1 = block_2_conv3D(conv0, 8)
    ## channel attention
    #out1=attention_block(conv1,512,8)
    ### feature tap: collapse the frame axis into a 2D skip map
    conv1_3d_2d = Conv3D(8, (4, 1, 1), strides=(1, 1, 1), data_format='channels_first')(conv1)
    #conv1_3d_2d = Conv3D(8, (4, 1, 1), strides=(1, 1, 1), data_format='channels_first')(out1)
    conv1_trans_2d = Reshape((8, 512, 512))(conv1_3d_2d)
    # Spatial downsample only: stride (1, 2, 2) keeps the frame axis intact.
    conv1_1 = Conv3D(16, (2, 2,2), strides=(1,2,2),padding='same', data_format='channels_first')(conv1)
    conv1_1 = BatchNormalization(axis=1)(conv1_1)
    conv1_1 = Activation('relu')(conv1_1)
    # ----- Encoder stage 2 (16 channels, 256x256) -----
    conv2 = block_2_conv3D(conv1_1, 16)
    ## channel attention
    #out2 = attention_block(conv2, 256, 16)
    ### feature tap
    conv2_3d_2d = Conv3D(16, (4, 1, 1), strides=(1, 1, 1), data_format='channels_first')(conv2)
    #conv2_3d_2d = Conv3D(16, (4, 1, 1), strides=(1, 1, 1), data_format='channels_first')(out2)
    conv2_trans_2d = Reshape((16, 256, 256))(conv2_3d_2d)
    conv2_1 = Conv3D(32, (2, 2,2), strides=(1,2,2), padding='same',data_format='channels_first')(conv2)
    conv2_1 = BatchNormalization(axis=1)(conv2_1)
    conv2_1 = Activation('relu')(conv2_1)
    # ----- Encoder stage 3 (32 channels, 128x128) -----
    conv3 = block_2_conv3D(conv2_1, 32)
    ## channel attention
    #out3 = attention_block(conv3, 128, 32)
    ### feature tap
    conv3_3d_2d = Conv3D(32, (4, 1, 1), strides=(1, 1, 1), data_format='channels_first')(conv3)
    #conv3_3d_2d = Conv3D(32, (4, 1, 1), strides=(1, 1, 1), data_format='channels_first')(out3)
    conv3_trans_2d = Reshape((32, 128, 128))(conv3_3d_2d)
    conv3_1 = Conv3D(64, (2, 2,2), strides=(1,2,2),padding='same', data_format='channels_first')(conv3)
    conv3_1 = BatchNormalization(axis=1)(conv3_1)
    conv3_1 = Activation('relu')(conv3_1)
    # ----- Encoder stage 4 (64 channels, 64x64) -----
    conv4 = block_2_conv3D(conv3_1, 64)
    ##saliency_map
    #out4_1=saliency_map_attention_block(conv4,64)
    ## channel attention
    #out4 = attention_block(conv4, 64, 64)
    #out4 = attention_block(out4_1, 64, 64)
    ### feature tap
    #conv4_3d_2d = Conv3D(64, (4, 1, 1), strides=(1, 1, 1), data_format='channels_first')(conv4)
    conv4_3d_2d = Conv3D(64, (4, 1, 1), strides=(1, 1, 1), data_format='channels_first')(conv4)
    conv4_trans_2d = Reshape((64, 64, 64))(conv4_3d_2d)
    conv4_1 = Conv3D(128, (2, 2,2), strides=(1,2,2), padding='same',data_format='channels_first')(conv4)
    conv4_1 = BatchNormalization(axis=1)(conv4_1)
    conv4_1 = Activation('relu')(conv4_1)
    # ----- Encoder stage 5 (128 channels, 32x32); dropout before downsampling -----
    conv5 = block_2_conv3D(conv4_1, 128)
    ## channel attention
    #out5 = attention_block(conv5, 32, 128)
    ### feature tap
    #conv5_3d_2d = Conv3D(128, (4, 1, 1), strides=(1, 1, 1), data_format='channels_first')(conv5)
    conv5_3d_2d = Conv3D(128, (4, 1, 1), strides=(1, 1, 1), data_format='channels_first')(conv5)
    conv5_trans_2d = Reshape((128, 32, 32))(conv5_3d_2d)
    conv5_dropout = SpatialDropout3D(0.5,data_format='channels_first')(conv5)
    conv5_1 = Conv3D(256, (2, 2,2), strides=(1,2,2), padding='same',data_format='channels_first')(conv5_dropout)
    conv5_1 = BatchNormalization(axis=1)(conv5_1)
    conv5_1 = Activation('relu')(conv5_1)
    # ----- Encoder stage 6 (256 channels, 16x16); the only stage whose skip
    # passes through attention_block before the feature tap -----
    conv6 = block_2_conv3D(conv5_1, 256)
    ## channel attention
    out6 = attention_block(conv6, 16, 256)
    ### feature tap
    #conv6_3d_2d=Conv3D(256,(4,1,1),strides=(1,1,1),data_format='channels_first')(conv6)
    conv6_3d_2d = Conv3D(256, (4, 1, 1), strides=(1, 1, 1), data_format='channels_first')(out6)
    conv6_trans_2d=Reshape((256,16,16))(conv6_3d_2d)
    conv6_dropout = SpatialDropout3D(0.5,data_format='channels_first')(conv6)
    conv6_1 = Conv3D(512, (2, 2,2), strides=(1,2,2), padding='same',data_format='channels_first')(conv6_dropout)
    conv6_1 = BatchNormalization(axis=1)(conv6_1)
    conv6_1 = Activation('relu')(conv6_1)
    # ----- Bottleneck: collapse frame axis, switch to a 2D tensor (512, 8, 8) -----
    conv3d_2d=Conv3D(512,(4,1,1),strides=(1,1,1),data_format='channels_first')(conv6_1)
    #print(conv3d_2d.shape)
    conv_trans_2d=Reshape((512,8,8))(conv3d_2d)
    # ----- Decoder stage 1 (-> 256 channels, 16x16) -----
    up1 = UpSampling2D(size=(2, 2))(conv_trans_2d)
    up1_1 = Conv2D(256, (2, 2), strides=1, padding='same', data_format='channels_first')(up1)
    up1_1 = BatchNormalization(axis=1)(up1_1)
    up1_1 = Activation('relu')(up1_1)
    # Attention-weighted fusion replaces the plain concatenation below.
    up1_2=channel_attnetion_block(conv6_trans_2d,up1_1,256,16)
    #up1_2 = concatenate([ conv6_trans_2d , up1_1], axis=1)
    up1_3 = block_2_conv(up1_2, 256)
    # ----- Decoder stage 2 (-> 128 channels, 32x32) -----
    up2 = UpSampling2D(size=(2, 2))(up1_3)
    up2_1 = Conv2D(128, (2, 2), strides=1, padding='same', data_format='channels_first')(up2)
    up2_1 = BatchNormalization(axis=1)(up2_1)
    up2_1 = Activation('relu')(up2_1)
    up2_2=channel_attnetion_block(conv5_trans_2d,up2_1,128,32)
    #up2_2 = concatenate([conv5_trans_2d, up2_1], axis=1)
    up2_3 = block_2_conv(up2_2, 128)
    # ----- Decoder stage 3 (-> 64 channels, 64x64) -----
    up3 = UpSampling2D(size=(2, 2))(up2_3)
    up3_1 = Conv2D(64, (2, 2), strides=1, padding='same', data_format='channels_first')(up3)
    up3_1 = BatchNormalization(axis=1)(up3_1)
    up3_1 = Activation('relu')(up3_1)
    up3_2=channel_attnetion_block(conv4_trans_2d,up3_1,64,64)
    #up3_2 = concatenate([conv4_trans_2d, up3_1], axis=1)
    up3_3 = block_2_conv(up3_2, 64)
    # ----- Decoder stage 4 (-> 32 channels, 128x128) -----
    up4 = UpSampling2D(size=(2, 2))(up3_3)
    up4_1 = Conv2D(32, (2, 2), strides=1, padding='same', data_format='channels_first')(up4)
    up4_1 = BatchNormalization(axis=1)(up4_1)
    up4_1 = Activation('relu')(up4_1)
    up4_2=channel_attnetion_block(conv3_trans_2d,up4_1,32,128)
    #up4_2 = concatenate([conv3_trans_2d, up4_1], axis=1)
    up4_3 = block_2_conv(up4_2, 32)
    # ----- Decoder stage 5 (-> 16 channels, 256x256) -----
    up5 = UpSampling2D(size=(2, 2))(up4_3)
    up5_1 = Conv2D(16, (2, 2), strides=1, padding='same', data_format='channels_first')(up5)
    up5_1 = BatchNormalization(axis=1)(up5_1)
    up5_1 = Activation('relu')(up5_1)
    up5_2=channel_attnetion_block(conv2_trans_2d,up5_1,16,256)
    # up5_2 = concatenate([conv2_trans_2d, up5_1], axis=1)
    up5_3 = block_2_conv(up5_2, 16)
    # ----- Decoder stage 6 (-> 8 channels, 512x512) -----
    up6 = UpSampling2D(size=(2, 2))(up5_3)
    up6_1 = Conv2D(8, (2, 2), strides=1, padding='same', data_format='channels_first')(up6)
    up6_1 = BatchNormalization(axis=1)(up6_1)
    up6_1 = Activation('relu')(up6_1)
    up6_2=channel_attnetion_block(conv1_trans_2d,up6_1,8,512)
    #up6_2 = concatenate([conv1_trans_2d, up6_1], axis=1)
    up6_3 = block_2_conv(up6_2, 8)
    # Final 1x1 projection to a single-channel probability map.
    outputs = Conv2D(1, (1, 1), activation='sigmoid')(up6_3)
    model = Model(inputs=inputs, outputs=outputs)
    #model.compile(optimizer='sgd', loss=DiceCoefLoss, metrics=[DiceCoef])
    return model
#========= Load settings from Config file
config = configparser.RawConfigParser()
config.read('configuration.txt')
# path to the datasets
path_data = config.get('data paths', 'path_local')
# Experiment name (also used as the output directory name below)
name_experiment = config.get('experiment name', 'name')
# training settings
N_epochs = int(config.get('training settings', 'N_epochs'))
_batchSize = int(config.get('training settings', 'batch_size'))
# Fixed input geometry expected by the model (see the Reshape sizes in
# get_unet3D_new_4_fram_2_new): 1 channel, 4 frames, 512x512 patches.
n_ch=1
frame=4
patch_height=512
patch_width=512
model = get_unet3D_new_4_fram_2_new(n_ch, frame,patch_height, patch_width) #the U-net model
## data parallel
#parallel_model=multi_gpu_model(model,gpus=2)
parallel_model=model
sgd= optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
parallel_model.compile(optimizer=sgd, loss=DiceCoefLoss, metrics=[DiceCoef])
#parallel_model.compile(optimizer='sgd', loss=[focal_loss(gamma=2,alpha=0.25)], metrics=[DiceCoef])
print ("Check: final output of the network:")
print (parallel_model.output_shape)
#plot(model, to_file='./'+name_experiment+'/'+name_experiment + '_model.png') #check how the model looks like
# Persist the architecture so weights files can be reloaded later.
json_string = model.to_json()
open('./'+name_experiment+'/'+name_experiment +'_architecture.json', 'w').write(json_string)
# ----- Load the HDF5 training data and scale both images and masks to [0, 1]
# by dividing by the global maximum. -----
new_train_imgs_original = path_data + config.get('data paths', 'train_imgs_original')
new_train_imgs_groundTruth=path_data + config.get('data paths', 'train_groundTruth')
train_data_ori= h5py.File(new_train_imgs_original,'r')
train_data_gt=h5py.File(new_train_imgs_groundTruth,'r')
train_imgs_original= np.array(train_data_ori['image'])
train_groundTruth=np.array(train_data_gt['image'])
train_imgs = train_imgs_original/np.max(train_imgs_original)
train_masks = train_groundTruth/np.max(train_groundTruth)
#check masks are within 0-1
#assert(np.min(train_masks)==0 and np.max(train_masks)==1)
print("imgs max value:")
print(np.max(train_imgs))
print("imgs min value")
print(np.min(train_imgs))
print("label max value")
print(np.max(train_masks))
print("label min value")
print(np.min(train_masks))
print ("\ntrain images/masks shape:")
print (train_imgs.shape)
print ("train images range (min-max): " +str(np.min(train_imgs)) +' - '+str(np.max(train_imgs)))
print ("train masks are within 0-1\n")
#============ Training ==================================
checkpoint_test = ModelCheckpoint(filepath='./'+name_experiment+'/'+name_experiment +'_best_weights.h5', monitor='val_loss', save_best_only=True,save_weights_only=True) #save at each epoch if the validation decreased
checkpoint = ModelCheckpoint(filepath='./'+name_experiment+'/'+name_experiment + "bestTrainWeight" + ".h5", monitor='loss', save_best_only=True, save_weights_only=True)
def step_decay(epoch):
    # NOTE(review): lrate is reset to 0.01 on every call, so the schedule is
    # 0.01 everywhere except at epochs divisible by 200 (including epoch 0),
    # where it is 0.001 for that single epoch -- confirm this is intended.
    lrate = 0.01 #the initial learning rate (by default in keras)
    if epoch%200==0:
        lrate=lrate*0.1
    return lrate
lrate_drop = LearningRateScheduler(step_decay)
# NOTE(review): these three values are unused -- the generator below passes
# literals (_keepPctOriginal=0.5, _hflip=True, _vflip=True) instead.
keepPctOriginal = 0.5
hflip = True
vflip = True
iter_times=250
# Shuffle deterministically, then split 174 train / 45 validation samples.
num=train_imgs_original.shape[0]
np.random.seed(0)
index=list(np.random.permutation(num))
_X_train=train_imgs[index][0:174]
_Y_train=train_masks[index][0:174]
print(_X_train.shape)
print(_Y_train.shape)
_X_vali=train_imgs[index][174:219]
_Y_vali=train_masks[index][174:219]
print(_X_vali.shape)
print(_Y_vali.shape)
def ImgGenerator():
    # Infinite augmented training-batch generator (wraps train_generator).
    for image in train_generator(_X_train, _Y_train,_batchSize, iter_times, _keepPctOriginal=0.5,
                                 _intensity=INTENSITY_FACTOR, _hflip=True, _vflip=True):
        yield image
def valiGenerator():
    # Validation-batch generator (no augmentation).
    for image in validation_generator(_X_vali, _Y_vali,_batchSize):
        yield image
# NOTE(review): steps are derived from num-40 / 40 while the actual split
# above is 174 / 45 -- confirm these counts are intended.
stepsPerEpoch = math.ceil((num-40) / _batchSize)
validationSteps = math.ceil(40 / _batchSize)
history = parallel_model.fit_generator(ImgGenerator(), verbose=2, workers=1,
                                       validation_data=valiGenerator(),
                                       steps_per_epoch=stepsPerEpoch, epochs=N_epochs,
                                       validation_steps=validationSteps,
                                       callbacks=[lrate_drop,checkpoint,checkpoint_test])
model.summary()
#========== Save and test the last model ===================
model.save_weights('./'+name_experiment+'/'+name_experiment +'_last_weights.h5', overwrite=True)
| 2.359375 | 2 |
converters/ipynb.py | ekmixon/journal-cli | 11 | 12771120 | <gh_stars>10-100
from os import path, makedirs
import click
from config import config
from commands.util import generate_image_path, generate_post_path
# nbconvert template used when exporting a notebook to Markdown: it extends
# the stock markdown template but wraps cell text output and stream output in
# ```ipynb-output fenced code blocks so the rendered post can style them
# separately from ordinary code blocks.
IPYNB_TEMPLATE = '''
{%- extends 'markdown.tpl' -%}
{% block data_text scoped %}
```ipynb-output
{{ output.data['text/plain']}}
```
{% endblock data_text %}
{% block stream %}
```ipynb-output
{{ output.text}}
```
{% endblock stream %}
'''
class IpynbConverter:
    """Converts a Jupyter notebook (.ipynb) into a Markdown journal post.

    Embedded output images are extracted and written under the team image
    directory, while text/stream outputs are wrapped in ``ipynb-output``
    fenced blocks via :data:`IPYNB_TEMPLATE`.
    """

    def __init__(self, filepath):
        # The post slug is the notebook filename without its extension.
        self.filepath = filepath
        self.post_slug, _ = path.splitext(path.basename(self.filepath))

    def save_image(self, image_name, content):
        """Saves an image to the correct directory

        Arguments:
            image_name {str} -- The filename of the image
            content {bytes} -- The raw image bytes
        """
        image_path = generate_image_path(self.post_slug, image_name)
        # Create the post's image directory on first use.
        makedirs(path.dirname(image_path), exist_ok=True)
        click.secho('Saving image to {}'.format(image_path), fg='green')
        with open(image_path, 'wb') as image_output:
            image_output.write(content)

    def convert(self):
        """Converts a Jupyter notebook for use in Journal.

        Specifically, this function:
        - renders the notebook to Markdown with nbconvert using
          :data:`IPYNB_TEMPLATE`,
        - extracts embedded output images and saves them via
          :meth:`save_image` (the extractor template points their URLs at
          ``/images/team/<username>/<slug>/``),
        - writes the rendered Markdown to the posts directory and returns
          that path.
        """
        import nbformat
        from traitlets.config import Config
        from nbconvert import MarkdownExporter

        notebook = nbformat.read(self.filepath, as_version=4)
        # Determine the static folder path and configure the Config
        c = Config()
        c.ExtractOutputPreprocessor.output_filename_template = path.join(
            '/images', 'team', config['username'], self.post_slug,
            '{unique_key}_{cell_index}_{index}{extension}')
        exporter = MarkdownExporter(config=c, raw_template=IPYNB_TEMPLATE)
        post, images = exporter.from_notebook_node(notebook)
        for image_path, content in images['outputs'].items():
            image_name = path.basename(image_path)
            self.save_image(image_name, content)
        new_filename = '{}.md'.format(self.post_slug)
        post_path = generate_post_path(new_filename)
        click.secho('Saving post content to {}'.format(post_path), fg='green')
        with open(post_path, 'w') as output:
            output.write(post)
        return post_path
main_loglizer.py | zaihanLit/logbert | 0 | 12771121 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import os
from loglizer import InvariantsMiner, PCA, IsolationForest, OneClassSVM, LogClustering, LR
from loglizer import dataloader, preprocessing
def _report(model, x_train, y_train, x_test, y_test):
    """Print train and test precision/recall/F1 for an already-fitted model."""
    print('Train validation:')
    model.evaluate(x_train, y_train)
    print('Test validation:')
    model.evaluate(x_test, y_test)


def main():
    """Run the selected loglizer baselines on one dataset.

    Loads the train/test split from ``<output_dir>/<dataset_name>/``, extracts
    features once, then fits every baseline named in ``--baselines``
    (underscore-separated, e.g. ``pca_svm_lr``) and prints its train/test
    precision, recall and F1.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--output_dir", metavar="DIR", help="output directory")
    parser.add_argument("--dataset_name", help="which dataset to use")
    # required=True gives a clean CLI error instead of the AttributeError the
    # original raised on None.split('_') when the flag was omitted.
    parser.add_argument('--baselines', type=str, required=True,
                        help='options: im pca iforest svm logcluster lr')
    args = parser.parse_args()
    print("select baselines", args)
    selected_baselines = args.baselines.split('_')

    output_dir = os.path.expanduser(args.output_dir + args.dataset_name + "/")
    (x_train, y_train), (x_test, y_test) = dataloader.load_data(data_dir=output_dir)

    # Fit the feature extractor on train only, then apply it to test.
    feature_extractor = preprocessing.FeatureExtractor()
    x_train = feature_extractor.fit_transform(x_train)
    x_test = feature_extractor.transform(x_test)

    if 'im' in selected_baselines:
        print("="*20 + " Model: InvariantsMiner " + "="*20)
        epsilon = 0.5  # threshold for estimating invariant space
        model = InvariantsMiner(epsilon=epsilon)
        model.fit(x_train)  # unsupervised
        _report(model, x_train, y_train, x_test, y_test)

    if 'pca' in selected_baselines:
        print("="*20 + " Model: PCA " + "="*20)
        model = PCA(n_components=0.95, threshold=50, c_alpha=3.2905)
        model.fit(x_train)  # unsupervised
        _report(model, x_train, y_train, x_test, y_test)

    if 'iforest' in selected_baselines:
        print("="*20 + " Model: IsolationForest " + "="*20)
        model = IsolationForest(n_estimators=100, max_samples='auto',
                                contamination='auto', random_state=88)
        model.fit(x_train)  # unsupervised
        _report(model, x_train, y_train, x_test, y_test)

    if 'svm' in selected_baselines:
        print("="*20 + " Model: SVM " + "="*20)
        model = OneClassSVM()
        model.fit(x_train, y_train)
        _report(model, x_train, y_train, x_test, y_test)

    if 'logcluster' in selected_baselines:
        print("="*20 + " Model: LogClustering " + "="*20)
        max_dist = 0.3  # the threshold to stop the clustering process
        anomaly_threshold = 0.3  # the threshold for anomaly detection
        model = LogClustering(max_dist=max_dist, anomaly_threshold=anomaly_threshold)
        model.fit(x_train[y_train == 0, :])  # Use only normal samples for training
        _report(model, x_train, y_train, x_test, y_test)

    if 'lr' in selected_baselines:
        print("="*20 + " Model: LR " + "="*20)
        model = LR()
        model.fit(x_train, y_train)
        _report(model, x_train, y_train, x_test, y_test)


if __name__ == "__main__":
    main()
| 2.4375 | 2 |
clevr/helper.py | Clevrai/Clevr | 0 | 12771122 | <gh_stars>0
import base64
import os
import json
from clevr.library_helpers import Library_Functions
class Helper():
    """Static-style helpers used by the Clevr client (called on the class)."""

    def image_learner_object(directory_object):
        """Build a learner payload from labeled image directories.

        :param directory_object: mapping of label -> directory path.  Every
            image file (.jpg/.jpeg/.png, case-insensitive) in each directory
            is reduced/encoded and grouped under its label.
        :return: ``{'image': {label: [prepared_image, ...], ...}}``
        :raises Exception: if ``directory_object`` is not a dict.
        """
        if type(directory_object) != dict:
            raise Exception('Parameter directory_object must be a dictionary.')
        learner = {}
        valid_file_types = ('.jpg', '.png', '.jpeg')
        for label, directory in directory_object.items():
            for filename in os.listdir(directory):
                # BUG FIX: the original tested ``files.endswith(files)``,
                # which is always True, so non-image files (e.g. .DS_Store)
                # were also sent to the image pipeline.
                if filename.lower().endswith(valid_file_types):
                    prepared = Library_Functions.reduce_and_prepare_image(
                        directory + '/' + filename)
                    learner.setdefault(label, []).append(prepared)
        return {'image': learner}

    def sign_endpoint(signature=None):
        """Return the JSON verification payload for an endpoint signature.

        :raises Exception: if no signature is supplied.
        """
        if signature == None:
            raise Exception('You must pass your account signature to successfully verify your endpoint.')
        return json.dumps({'verified': True, 'signature': signature})
| 2.609375 | 3 |
src/p4rrot/known_types.py | gycsaba96/P4RROT | 0 | 12771123 | #
# Classes representing available types and their P4 equivalent.
#
from typing import Dict, List, Tuple
import ctypes
from functools import lru_cache
class KnownType:
    """
    Base class of available types.

    Each concrete type exposes four class-level callables (invoked on the
    class itself, never on instances): ``get_p4_type``, ``get_size``,
    ``to_p4_literal`` and ``cast_value``.
    """
    pass


class uint8_t(KnownType):
    """Unsigned 8-bit integer, mapped to P4 ``bit<8>``."""

    def get_p4_type() -> str:
        return 'bit<8>'

    def get_size() -> int:
        # Size in bytes.
        return 1

    def to_p4_literal(v):
        # Render the wrapped value as a decimal P4 literal.
        return str(uint8_t.cast_value(v))

    def cast_value(v):
        # Wrap any Python int into the unsigned 8-bit range.
        if type(v) == int:
            return ctypes.c_uint8(v).value
        raise Exception('Can not recognize value {}'.format(v))


class uint16_t(KnownType):
    """Unsigned 16-bit integer, mapped to P4 ``bit<16>``."""

    def get_p4_type() -> str:
        return 'bit<16>'

    def get_size() -> int:
        return 2

    def to_p4_literal(v):
        return str(uint16_t.cast_value(v))

    def cast_value(v):
        if type(v) == int:
            return ctypes.c_uint16(v).value
        raise Exception('Can not recognize value {}'.format(v))


class uint32_t(KnownType):
    """Unsigned 32-bit integer, mapped to P4 ``bit<32>``."""

    def get_p4_type() -> str:
        return 'bit<32>'

    def get_size() -> int:
        return 4

    def to_p4_literal(v):
        return str(uint32_t.cast_value(v))

    def cast_value(v):
        if type(v) == int:
            return ctypes.c_uint32(v).value
        raise Exception('Can not recognize value {}'.format(v))


class uint64_t(KnownType):
    """Unsigned 64-bit integer, mapped to P4 ``bit<64>``."""

    def get_p4_type() -> str:
        return 'bit<64>'

    def get_size() -> int:
        return 8

    def to_p4_literal(v):
        return str(uint64_t.cast_value(v))

    def cast_value(v):
        if type(v) == int:
            return ctypes.c_uint64(v).value
        raise Exception('Can not recognize value {}'.format(v))


class bool_t(KnownType):
    """Boolean stored in one byte, mapped to the P4 ``BOOL_T`` alias."""

    def get_p4_type() -> str:
        return 'BOOL_T'  # defined as bit<8>

    def get_size() -> int:
        return 1

    def to_p4_literal(v):
        return str(bool_t.cast_value(v))

    def cast_value(v):
        # NOTE: ``==`` (not ``is``) is deliberate so 1/0 are accepted too.
        if v == True:
            return 1
        if v == False:
            return 0
        raise Exception('Can not recognize value {}'.format(v))


@lru_cache(maxsize=None)
def padding_t(width: int):
    """
    Return a :class:`KnownType` representing ``width`` bytes of padding.

    Cached (like :func:`string_t`) so repeated calls with the same width
    return the same class object.

    :raises ValueError: if ``width`` is not positive.
    """
    if width <= 0:
        raise ValueError('width must be positive')

    class hidden_padding_t(KnownType):
        def get_p4_type() -> str:
            return 'bit<{}>'.format(width * 8)

        def get_size() -> int:
            return width

        def to_p4_literal(v):
            raise Exception('Padding variables can not be translated to literals.')

        def cast_value(v):
            raise Exception('Padding variables do not have values.')

    return hidden_padding_t


@lru_cache(maxsize=None)
def string_t(width: int):
    """
    Return a :class:`KnownType` for a fixed-width byte string of ``width``
    bytes.  Values are rendered as hex P4 literals; :func:`cast_value`
    keeps only the last ``width`` bytes of its input.

    :raises ValueError: if ``width`` is not positive.
    """
    if width <= 0:
        raise ValueError('width must be positive')

    class hidden_string_t(KnownType):
        def get_p4_type() -> str:
            return 'bit<{}>'.format(width * 8)

        def get_size() -> int:
            return width

        def to_p4_literal(v: bytes):
            return '0x' + v.hex()

        def cast_value(v: bytes):
            # Truncate to the trailing ``width`` bytes.
            return v[-width:]

    return hidden_string_t


def hdr_len(description: List[Tuple[str, KnownType]]):
    '''
    Returns the length of the described header in bytes. It does NOT consider alignments.
    '''
    return sum(field_type.get_size() for _, field_type in description)
| 2.921875 | 3 |
vidyo/__init__.py | parafoxia/vidyo | 0 | 12771124 | <filename>vidyo/__init__.py
__productname__ = "vidyo"
__version__ = "0.1.0"
__description__ = "A simple way to get information on YouTube videos."
__url__ = "https://github.com/parafoxia/vidyo"
__docs__ = "https://vidyo.readthedocs.io/en/latest/"
__author__ = "<NAME>"
__license__ = "BSD-3-Clause"
__bugtracker__ = "https://github.com/parafoxia/vidyo/issues"
from .errors import *
from .client import Client
| 1.570313 | 2 |
gdrive_sync/tasks_test.py | mitodl/ocw-studio | 2 | 12771125 | """Tests for gdrive_sync tasks"""
from datetime import datetime
import pytest
import pytz
from gdrive_sync import tasks
from gdrive_sync.conftest import LIST_FILE_RESPONSES, LIST_VIDEO_RESPONSES
from gdrive_sync.constants import (
DRIVE_API_FILES,
DRIVE_FILE_FIELDS,
DRIVE_FOLDER_FILES_FINAL,
DRIVE_FOLDER_VIDEOS_FINAL,
)
from gdrive_sync.factories import DriveApiQueryTrackerFactory, DriveFileFactory
from gdrive_sync.models import DriveFile
from gdrive_sync.tasks import (
create_resource_from_gdrive,
import_recent_files,
import_website_files,
transcode_drive_file_video,
)
from websites.factories import WebsiteFactory
# Every test in this module uses the Django database (factories below).
pytestmark = pytest.mark.django_db
@pytest.mark.parametrize("shared_id", [None, "testDrive"])
@pytest.mark.parametrize("drive_creds", [None, '{"key": "value"}'])
def test_stream_drive_file_to_s3(settings, mocker, shared_id, drive_creds):
    """File should be streamed only if required settings are present"""
    settings.DRIVE_SHARED_ID = shared_id
    settings.DRIVE_SERVICE_ACCOUNT_CREDS = drive_creds
    mock_stream = mocker.patch("gdrive_sync.tasks.api.stream_to_s3")
    drive_file = DriveFileFactory.create()
    tasks.stream_drive_file_to_s3.delay(drive_file.file_id)
    # Streaming requires BOTH the shared drive id and service-account creds.
    assert mock_stream.call_count == (1 if shared_id and drive_creds else 0)
@pytest.mark.parametrize("shared_id", [None, "testDrive"])
@pytest.mark.parametrize("drive_creds", [None, '{"key": "value"}'])
def test_create_gdrive_folders(settings, mocker, shared_id, drive_creds):
    """Folder should be created if settings are present"""
    settings.DRIVE_SHARED_ID = shared_id
    settings.DRIVE_SERVICE_ACCOUNT_CREDS = drive_creds
    mock_create_folder = mocker.patch("gdrive_sync.tasks.api.create_gdrive_folders")
    tasks.create_gdrive_folders.delay("test")
    # Folder creation requires both Drive settings, mirroring the task guard.
    assert mock_create_folder.call_count == (1 if shared_id and drive_creds else 0)
def test_transcode_drive_file_video(mocker):
    """transcode_drive_file_video should create Video object and call create_media_convert_job"""
    mock_transcode_call = mocker.patch("gdrive_sync.tasks.transcode_gdrive_video")
    drive_file = DriveFileFactory.create()
    transcode_drive_file_video.delay(drive_file.file_id)
    # The task should look the DriveFile up by id and hand it to the API call.
    mock_transcode_call.assert_called_once_with(drive_file)
# pylint:disable=too-many-arguments, too-many-locals
@pytest.mark.parametrize(
    "arg_last_dt",
    [None, datetime.strptime("2021-01-01", "%Y-%m-%d").replace(tzinfo=pytz.UTC)],
)
@pytest.mark.parametrize(
    "tracker_last_dt",
    [None, datetime.strptime("2021-02-02", "%Y-%m-%d").replace(tzinfo=pytz.UTC)],
)
@pytest.mark.parametrize(
    "parent_folder,parent_folder_in_ancestors",
    [(None, False), ("parent", True), ("parent", False)],
)
@pytest.mark.parametrize("same_checksum", [True, False])
def test_import_recent_files_videos(
    settings,
    mocker,
    mocked_celery,
    arg_last_dt,
    tracker_last_dt,
    parent_folder,
    parent_folder_in_ancestors,
    same_checksum,
):
    """import_recent_files should created expected video objects and call s3 tasks"""
    mocker.patch("gdrive_sync.tasks.is_gdrive_enabled", return_value=True)
    settings.DRIVE_SHARED_ID = "test_drive"
    settings.DRIVE_UPLOADS_PARENT_FOLDER_ID = parent_folder
    website = WebsiteFactory.create()
    # Pre-existing DriveFile matching the second video; when same_checksum is
    # True the task should treat it as unchanged and skip re-processing it.
    DriveFileFactory.create(
        file_id=LIST_VIDEO_RESPONSES[1]["files"][0]["id"],
        name=LIST_VIDEO_RESPONSES[1]["files"][0]["name"],
        checksum=(
            LIST_VIDEO_RESPONSES[1]["files"][0]["md5Checksum"]
            if same_checksum is True
            else "differentmd5"
        ),
    )
    # Fake Drive folder ancestry for each file: alternating matching /
    # non-matching website folders, each under a videos_final folder.
    parent_tree_responses = [
        [
            {
                "id": LIST_VIDEO_RESPONSES[0]["files"][0]["parents"][0],
                "name": website.short_id,
            },
            {"id": "abc123", "name": DRIVE_FOLDER_VIDEOS_FINAL},
        ],
        [
            {
                "id": LIST_VIDEO_RESPONSES[0]["files"][1]["parents"][0],
                "name": "no-matching-website",
            },
            {"id": "xyz987", "name": DRIVE_FOLDER_VIDEOS_FINAL},
        ],
        [
            {
                "id": LIST_VIDEO_RESPONSES[0]["files"][0]["parents"][0],
                "name": website.short_id,
            },
            {"id": "def456", "name": DRIVE_FOLDER_VIDEOS_FINAL},
        ],
        [
            {
                "id": LIST_VIDEO_RESPONSES[0]["files"][1]["parents"][0],
                "name": "no-matching-website",
            },
            {"id": "ghi789", "name": DRIVE_FOLDER_VIDEOS_FINAL},
        ],
    ]
    if parent_folder_in_ancestors:
        # Add the configured uploads parent folder to every ancestry chain.
        for response in parent_tree_responses:
            response.append(
                {
                    "id": "parent",
                    "name": "ancestor_exists",
                }
            )
    mocker.patch("gdrive_sync.api.get_parent_tree", side_effect=parent_tree_responses)
    mock_list_files = mocker.patch(
        "gdrive_sync.tasks.query_files",
        return_value=LIST_VIDEO_RESPONSES[0]["files"]
        + LIST_VIDEO_RESPONSES[1]["files"],
    )
    mock_upload_task = mocker.patch("gdrive_sync.tasks.stream_drive_file_to_s3.s")
    mock_transcode_task = mocker.patch(
        "gdrive_sync.tasks.transcode_drive_file_video.si"
    )
    mock_sync_content_task = mocker.patch("gdrive_sync.tasks.sync_website_content.si")
    tracker = DriveApiQueryTrackerFactory.create(
        api_call=DRIVE_API_FILES, last_dt=tracker_last_dt
    )
    # When chained tasks run, the mocked celery chain raises its replace
    # exception; otherwise the task returns normally.
    if parent_folder_in_ancestors or parent_folder is None:
        with pytest.raises(mocked_celery.replace_exception_class):
            import_recent_files.delay(last_dt=arg_last_dt)
    else:
        import_recent_files.delay(last_dt=arg_last_dt)
    # The explicit argument wins over the tracker's stored timestamp.
    last_dt = arg_last_dt or tracker_last_dt
    last_dt_str = last_dt.strftime("%Y-%m-%dT%H:%M:%S.%f") if last_dt else None
    base_query = "(not trashed and not mimeType = 'application/vnd.google-apps.folder')"
    expected_query = (
        f"{base_query} and (modifiedTime > '{last_dt_str}' or createdTime > '{last_dt_str}')"
        if last_dt
        else base_query
    )
    mock_list_files.assert_called_once_with(
        query=expected_query, fields=DRIVE_FILE_FIELDS
    )
    tracker.refresh_from_db()
    for i in range(2):
        if (i == 1 and same_checksum) or (
            parent_folder and not parent_folder_in_ancestors
        ):  # chained tasks should not be run (wrong folder, or same checksum & name)
            with pytest.raises(AssertionError):
                mock_upload_task.assert_any_call(
                    LIST_VIDEO_RESPONSES[i]["files"][0]["id"]
                )
            with pytest.raises(AssertionError):
                mock_transcode_task.assert_any_call(
                    LIST_VIDEO_RESPONSES[i]["files"][0]["id"]
                )
        else:  # chained tasks should be run
            mock_upload_task.assert_any_call(LIST_VIDEO_RESPONSES[i]["files"][0]["id"])
            # The tracker advances to the newest modifiedTime seen.
            assert (
                tracker.last_dt
                == datetime.strptime(
                    LIST_VIDEO_RESPONSES[0]["files"][0]["modifiedTime"],
                    "%Y-%m-%dT%H:%M:%S.%fZ",
                ).replace(tzinfo=pytz.utc)
            )
            mock_transcode_task.assert_any_call(
                LIST_VIDEO_RESPONSES[i]["files"][0]["id"]
            )
            mock_sync_content_task.assert_any_call(website.name)
        if (
            not parent_folder or parent_folder_in_ancestors
        ):  # DriveFile should be created
            assert DriveFile.objects.filter(
                file_id=LIST_VIDEO_RESPONSES[i]["files"][0]["id"]
            ).exists()
        # Files under non-matching website folders never get DriveFiles.
        assert (
            DriveFile.objects.filter(
                file_id=LIST_VIDEO_RESPONSES[i]["files"][1]["id"]
            ).exists()
            is False
        )
def test_import_recent_files_nonvideos(settings, mocker, mocked_celery):
    """
    import_recent_files should import non-video files
    """
    mocker.patch("gdrive_sync.tasks.is_gdrive_enabled", return_value=True)
    settings.DRIVE_SHARED_ID = "test_drive"
    settings.DRIVE_UPLOADS_PARENT_FOLDER_ID = "parent"
    website = WebsiteFactory.create()
    # Both files sit under the configured parent / the website's folder /
    # the files_final folder, so both are eligible for import.
    parent_tree_responses = [
        [
            {
                "id": "parent",
                "name": "ancestor_exists",
            },
            {
                "id": LIST_FILE_RESPONSES[0]["files"][i]["parents"][0],
                "name": website.short_id,
            },
            {"id": "abc123", "name": DRIVE_FOLDER_FILES_FINAL},
        ]
        for i in range(2)
    ]
    mocker.patch("gdrive_sync.api.get_parent_tree", side_effect=parent_tree_responses)
    mocker.patch(
        "gdrive_sync.tasks.query_files", return_value=LIST_FILE_RESPONSES[0]["files"]
    )
    mock_upload_task = mocker.patch("gdrive_sync.tasks.stream_drive_file_to_s3.s")
    mock_resource_task = mocker.patch(
        "gdrive_sync.tasks.create_resource_from_gdrive.si"
    )
    with pytest.raises(mocked_celery.replace_exception_class):
        import_recent_files.delay(
            last_dt=datetime.strptime("2021-01-01", "%Y-%m-%d").replace(
                tzinfo=pytz.UTC
            ),
        )
    # NOTE(review): the negative check indexes LIST_FILE_RESPONSES[1] while
    # the positive checks use LIST_VIDEO_RESPONSES even though query_files is
    # mocked with LIST_FILE_RESPONSES[0] -- confirm against conftest fixtures
    # whether those ids are intentionally shared.
    with pytest.raises(AssertionError):
        mock_upload_task.assert_any_call(LIST_FILE_RESPONSES[1]["files"][0]["id"])
    mock_upload_task.assert_any_call(
        LIST_VIDEO_RESPONSES[0]["files"][0]["id"],
        prefix=website.starter.config["root-url-path"],
    )
    mock_resource_task.assert_any_call(LIST_VIDEO_RESPONSES[0]["files"][0]["id"])
def test_create_resource_from_gdrive(mocker):
    """create_resource_from_gdrive should call create_gdrive_resource_content"""
    # Content-type lookup is stubbed so no S3 access is attempted.
    mocker.patch(
        "gdrive_sync.api.get_s3_content_type", return_value="application/ms-word"
    )
    mock_create_content = mocker.patch(
        "gdrive_sync.tasks.create_gdrive_resource_content"
    )
    drive_file = DriveFileFactory.create()
    create_resource_from_gdrive.delay(drive_file.file_id)
    mock_create_content.assert_called_once_with(drive_file)
def test_import_website_files(mocker, mocked_celery):
    """import_website_files should run process_file_result for each drive file and trigger tasks"""
    mocker.patch("gdrive_sync.tasks.is_gdrive_enabled", return_value=True)
    website = WebsiteFactory.create()
    drive_files = DriveFileFactory.create_batch(2, website=website)
    mock_process_file_result = mocker.patch(
        "gdrive_sync.tasks.process_file_result", side_effect=drive_files
    )
    mock_stream_task = mocker.patch("gdrive_sync.tasks.stream_drive_file_to_s3.s")
    mock_create_resource = mocker.patch(
        "gdrive_sync.tasks.create_resource_from_gdrive.si"
    )
    mock_sync_content = mocker.patch("gdrive_sync.tasks.sync_website_content.si")
    # Three query_files calls are expected: the website folder, then its
    # videos_final subfolder, then its files_final subfolder.
    mocker.patch(
        "gdrive_sync.tasks.query_files",
        side_effect=[
            [
                {
                    "id": "websiteFolderId",
                    "name": website.short_id,
                },
            ],
            [
                {
                    "id": "websiteVideoFinalFolderId",
                    "name": DRIVE_FOLDER_VIDEOS_FINAL,
                },
            ],
            [
                {
                    "id": "websiteFileFinalFolderId",
                    "name": DRIVE_FOLDER_FILES_FINAL,
                },
            ],
        ],
    )
    # The videos folder is empty; the files folder yields two results.
    mocker.patch(
        "gdrive_sync.tasks.walk_gdrive_folder",
        side_effect=[[], LIST_FILE_RESPONSES[0]["files"]],
    )
    with pytest.raises(mocked_celery.replace_exception_class):
        import_website_files.delay(website.short_id)
    assert mock_process_file_result.call_count == 2
    for drive_file in drive_files:
        mock_stream_task.assert_any_call(drive_file.file_id)
        mock_create_resource.assert_any_call(drive_file.file_id)
    mock_sync_content.assert_called_once_with(website.name)
def test_import_website_files_dupe_site_folders(mocker):
    """import_website_files should raise when more than one Drive folder matches the site"""
    mocker.patch("gdrive_sync.tasks.is_gdrive_enabled", return_value=True)
    website = WebsiteFactory.create()
    # Two Drive folders share the website's short_id -- ambiguous.
    mocker.patch(
        "gdrive_sync.tasks.query_files",
        return_value=[
            {
                "id": "websiteFolderId",
                "name": website.short_id,
            },
            {
                "id": "websiteFolderId2",
                "name": website.short_id,
            },
        ],
    )
    with pytest.raises(Exception) as exc:
        import_website_files.delay(website.short_id)
    # The task raises with lazy %-style args rather than a formatted message.
    assert exc.value.args == (
        "Expected 1 drive folder for %s but found %d",
        website.short_id,
        2,
    )
def test_import_website_files_missing_folder(mocker):
    """import_website_files should log an error for each missing Drive subfolder"""
    mocker.patch("gdrive_sync.tasks.is_gdrive_enabled", return_value=True)
    website = WebsiteFactory.create()
    mock_log = mocker.patch("gdrive_sync.tasks.log.error")
    # The website folder exists, but both subfolder queries come back empty.
    mocker.patch(
        "gdrive_sync.tasks.query_files",
        side_effect=[
            [
                {
                    "id": "websiteFolderId",
                    "name": website.short_id,
                },
            ],
            [],
            [],
        ],
    )
    import_website_files.delay(website.short_id)
    # One error log per missing subfolder; missing folders do not raise.
    for folder in [DRIVE_FOLDER_VIDEOS_FINAL, DRIVE_FOLDER_FILES_FINAL]:
        mock_log.assert_any_call(
            "Expected 1 drive folder for %s/%s but found %d",
            website.short_id,
            folder,
            0,
        )
| 2.109375 | 2 |
sparse/repos/CINPLA/exdir/setup.py | yuvipanda/mybinder.org-analytics | 1 | 12771126 | # -*- coding: utf-8 -*-
from setuptools import setup
import os
from setuptools import setup, find_packages
import versioneer
# Read the README once so it can be shipped as the package long description.
# (BUG FIX: long_description and install_requires were computed but never
# passed to setup(), so PyPI metadata silently lacked them.)
with open("README.md") as readme:
    long_description = readme.read()

# Runtime dependencies; currently empty but wired through so additions take
# effect without further setup() changes.
install_requires = []

setup(
    name="exdir",
    packages=find_packages(),
    include_package_data=True,
    version=versioneer.get_version(),
    cmdclass=versioneer.get_cmdclass(),
    long_description=long_description,
    long_description_content_type="text/markdown",
    install_requires=install_requires,
    data_files=[
        # like `jupyter nbextension install --sys-prefix`
        ("share/jupyter/nbextensions/exdir", [
            "exdir/static/index.js",
        ]),
        # like `jupyter nbextension enable --sys-prefix`
        ("etc/jupyter/nbconfig/notebook.d", [
            "jupyter-config/nbconfig/notebook.d/exdir.json"
        ]),
        # like `jupyter serverextension enable --sys-prefix`
        ("etc/jupyter/jupyter_notebook_config.d", [
            "jupyter-config/jupyter_notebook_config.d/exdir.json"
        ])
    ],
    zip_safe=False
)
| 1.132813 | 1 |
pytfa/optim/config.py | embt/pytfa | 30 | 12771127 | # -*- coding: utf-8 -*-
"""
.. module:: pytfa
:platform: Unix, Windows
:synopsis: Thermodynamics-based Flux Analysis
.. moduleauthor:: pyTFA team
Pre-tuned configurations for faster solving
"""
def dg_relax_config(model):
    """
    Apply a pre-tuned Gurobi parameter set for faster DeltaG relaxation.

    The values come from a ``grbtune`` sweep (6992 parameter sets) on a hard
    model instance where the baseline mean runtime was 142.09s; the best sets
    (mean runtime ~3.3s) combined NormAdjust=0, BranchDir=1, DegenMoves=0,
    Heuristics=0.001 and Cuts=3.  Presolve=2 and Method=0 are applied on top.

    :param model: a model whose ``solver`` is an optlang interface.  The
        parameters are only applied when the backend is Gurobi; any other
        solver is left untouched.
    :return: None -- the solver problem is configured in place.
    """
    if model.solver.interface.__name__ != 'optlang.gurobi_interface':
        return

    tuned_parameters = {
        'NormAdjust': 0,
        'BranchDir': 1,
        'DegenMoves': 0,
        'Heuristics': 0.001,
        'Cuts': 3,
        'Presolve': 2,
        'Method': 0,
    }
    gurobi_params = model.solver.problem.Params
    for parameter_name, parameter_value in tuned_parameters.items():
        setattr(gurobi_params, parameter_name, parameter_value)
| 2.09375 | 2 |
vnpy/trader/app/cmaStrategy/__init__.py | ghjan/vnpy | 4 | 12771128 | # encoding: UTF-8
from vnpy.trader.app.cmaStrategy.cmaEngine import CmaEngine
from vnpy.trader.app.cmaStrategy.uiCmaWidget import CmaEngineManager

# App registration metadata; the names follow vn.py's app-module
# convention, presumably read by the application loader — TODO confirm.
appName = 'CrossMarketArbitrage'
# Display name shown in the UI (Chinese for "cross-market arbitrage").
appDisplayName = u'跨市场套利'
# Engine class backing this app.
appEngine = CmaEngine
# Qt widget class used as the app's management panel.
appWidget = CmaEngineManager
# Icon file for the app's toolbar entry.
appIco = 'cma.ico'
data/external/repositories/113677/KaggleBillionWordImputation-master/scripts/count_syntactic_ngrams.py | Keesiu/meta-kaggle | 0 | 12771129 | <filename>data/external/repositories/113677/KaggleBillionWordImputation-master/scripts/count_syntactic_ngrams.py<gh_stars>0
#!/usr/bin/env python
'''
Count frequency of each syntactic n-gram and save to Pickle file.
The n-gram frequencies are held in an in-memory dict, so this script
can only be used if the vocab is small (e.g. POS tags).
'''
import sys, argparse
from collections import defaultdict
from util import ngram_frequencies
from nltk.tree import Tree
def opts():
    """Build the command-line argument parser for this script."""
    arg_parser = argparse.ArgumentParser(description=__doc__)
    arg_parser.add_argument(
        '--lexical',
        action='store_true',
        help='Count lexical as well as non-lexical productions',
    )
    return arg_parser
if __name__ == "__main__":
    args = opts().parse_args()
    # In-memory production counts; per the module docstring this only
    # works for a small vocabulary (e.g. POS tags).
    counts = defaultdict(lambda: 0)
    for line in sys.stdin:
        # Each input line is a bracketed parse tree.
        t = Tree.fromstring(line)
        for p in t.productions():
            # Non-lexical productions only, unless --lexical was given.
            if args.lexical or p.is_nonlexical():
                counts[str(p)] += 1
    # Emit tab-separated "production<TAB>count" pairs.
    # NOTE: iteritems/print-statement — this file is Python 2.
    for ngram, freq in counts.iteritems():
        print '%s\t%s' % (ngram, freq)
test/__init__.py | KnowledgeCaptureAndDiscovery/sosen | 4 | 12771130 | <reponame>KnowledgeCaptureAndDiscovery/sosen
import unittest
# Re-export every test case so they are all collected when this
# package is imported or run directly.
from .test_data_to_graph import *
from .end_to_end import *
if __name__ == "__main__":
    unittest.main()
cogs/mine.py | dashr9230/Discord-Bot | 1 | 12771131 |
from discord.ext import commands
from PIL import Image
import discord, datetime, re
import PIL, shutil, os, random
class MineBase:
    """Minesweeper board state rendered onto a per-member PNG image."""

    # Relative offsets of the 8 neighbouring cells.
    _NEIGHBOUR_OFFSETS = (
        (1, 0), (0, 1), (-1, 0), (0, -1),
        (1, 1), (-1, -1), (-1, 1), (1, -1),
    )

    def __init__(self, member):
        """Create a fresh board image for *member* in the cache directory."""
        # grid holds the true cell values (9 = mine, 0-8 = neighbour count);
        # grid_show holds what is currently revealed (10 = hidden tile).
        # The grids are 12x12 but only the 11x11 top-left corner is played,
        # which keeps the x+1/y+1 neighbour lookups in bounds.
        self.grid = [[0 for x in range(12)] for y in range(12)]
        self.grid_show = [[0 for x in range(12)] for y in range(12)]
        self.file = f"cache/mine_{member.id}.png"
        self.tile_size = 32
        shutil.copy("assets/mine/board.png", self.file)
        self.image = Image.open(self.file)

    def __del__(self):
        """Best-effort cleanup of the image handle and the cached file."""
        self.image.close()
        try:
            os.remove(self.file)
        except OSError:
            # The file may already be gone; only swallow filesystem errors
            # instead of hiding every exception with a bare except.
            pass

    def generate_grid(self):
        """Place mines randomly and compute each cell's neighbour count."""
        for x in range(11):
            for y in range(11):
                self.grid_show[x][y] = 10  # everything starts hidden
                # ~1 in 7 chance of a mine in each cell
                self.grid[x][y] = 9 if random.randint(0, 6) == 0 else 0
        for x in range(11):
            for y in range(11):
                if self.grid[x][y] == 9:
                    continue
                # Count adjacent mines. Negative indices wrap to the unused
                # 12th row/column, which is always 0, so no bounds checks
                # are needed. Counts (max 8) never collide with the mine
                # marker (9), so writing in place is safe.
                self.grid[x][y] = sum(
                    1 for dx, dy in self._NEIGHBOUR_OFFSETS
                    if self.grid[x + dx][y + dy] == 9
                )

    def modify_board(self):
        """Paste the currently revealed tiles onto the board image and save."""
        tiles = Image.open("assets/mine/tiles.png")
        if not tiles:
            return
        w = self.tile_size
        for x in range(11):
            for y in range(11):
                # The tile sheet is a horizontal strip; crop box is
                # (left, top, right, bottom).
                tile = tiles.crop((self.grid_show[x][y] * w, 0,
                                   self.grid_show[x][y] * w + w, w))
                # 24px margin around the playing field on the board image.
                self.image.paste(tile, (24 + x * self.tile_size,
                                        24 + y * self.tile_size))
        self.image.save(self.file)

    def translate_move(self, move_code):
        """Split a move like "b7" into its digit and letter parts.

        Returns a (digits, letters) tuple; either element is None when the
        input contains no characters of that kind.
        """
        n = ''.join(c for c in move_code if c.isdigit()) or None
        l = ''.join(c for c in move_code if c.isalpha()) or None
        return n, l
class Mine(commands.Cog):
    # Discord cog exposing the minesweeper command.

    def __init__(self, bot):
        # NOTE(review): `bot` is accepted but never stored; the parameter is
        # kept for the cog/extension calling convention.
        print("I'M loaded")

    @commands.command()
    async def mine(self, context, *, move: str = ""):
        # Build a fresh board, render it, and post the image to the channel.
        # TODO: `move` is accepted but not yet applied to the board.
        board = MineBase(context.author)
        board.generate_grid()
        board.modify_board()
        with open(board.file, "rb") as fp:
            await context.send(file=discord.File(fp=fp))
        # Drop the board explicitly so MineBase.__del__ removes the cached
        # image file right away. (Removed: commented-out debug sends and a
        # superseded text-board prototype that was left as a dead string
        # literal.)
        del board
def setup(bot):
    # Entry point used by discord.py's extension loader
    # (bot.load_extension) to register this cog.
    bot.add_cog(Mine(bot))
src/pyshowdown/message.py | ScottehMax/pyshowdown | 0 | 12771132 | <gh_stars>0
import json
from typing import Optional, List, Dict
from pyshowdown.user import User
class Message:
    """Parsed representation of a single Pokémon Showdown protocol message."""

    def __init__(self, room: str, message_str: str):
        """Initialize a Message object.

        Args:
            room (str): The room the message was sent to.
            message_str (str): The raw message.
        """
        self.room = room
        self.message_str = message_str
        # optional attributes
        self.username: Optional[str] = None
        self.avatar: Optional[str] = None
        self.users: Optional[Dict[str, User]] = None
        self.parse_message()

    def parse_message(self) -> None:
        """Parse the raw message and store its fields as attributes.

        The protocol is pipe-delimited: ``|TYPE|ARG1|ARG2|...``. For
        free-text payloads (chat, pm, popup, challstr, ...) the remaining
        fields are re-joined with ``|`` because the text itself may
        contain pipes.
        """
        info = self.message_str.split("|")
        if len(info) > 1:
            self.type = info[1]
        else:
            self.type = None

        # Room initialization
        if self.type == "init":
            self.roomtype = info[2]
        elif self.type == "deinit":
            pass
        elif self.type == "title":
            self.title = info[2]
        elif self.type == "users":
            users = info[2].split(",")
            self.usercount = int(users.pop(0))
            self.users = {}
            for u in users:
                rank = u[0]
                u = u[1:]
                if "@" in u:
                    # maxsplit=1: a user's status message may itself
                    # contain "@"; only the first one separates the name.
                    name, status = u.split("@", 1)
                else:
                    name, status = u, ""
                if status and status[0] == "!":
                    # a leading "!" on the status marks the user as away
                    away = True
                    status = status[1:]
                else:
                    away = False
                user_obj = User(name, rank, status, away)
                self.users[user_obj.id] = user_obj
        # Room messages
        elif self.type == "html":
            self.html = info[2]
        elif self.type == "uhtml":
            self.name = info[2]
            self.html = info[3]
        elif self.type == "uhtmlchange":
            self.name = info[2]
            self.html = info[3]
        elif self.type in ["j", "J", "join"]:
            self.type = "join"
            self.rank = info[2][0]
            self.user = info[2][1:]
        elif self.type in ["l", "L", "leave"]:
            self.type = "leave"
            self.rank = info[2][0]
            self.user = info[2][1:]
        elif self.type in ["n", "N", "name"]:
            self.type = "name"
            self.user = info[3] if False else info[2]
            self.oldid = info[3]
        elif self.type in ["c", "chat"]:
            self.type = "chat"
            self.timestamp = None
            self.user = info[2]
            # join the remainder: chat text may contain "|"
            self.message = "|".join(info[3:])
        elif self.type == ":":
            self.type = "timestamp"
            self.timestamp = int(info[2])
        elif self.type == "c:":
            self.type = "chat"
            self.timestamp = int(info[2])
            self.user = info[3]
            self.message = "|".join(info[4:])
        elif self.type == "battle":
            self.roomid = info[2]
            self.user1 = info[3]
            self.user2 = info[4]
        # Global messages
        elif self.type == "popup":
            # popups use "||" for line breaks, so keep the whole remainder
            self.message = "|".join(info[2:])
        elif self.type == "pm":
            self.sender = info[2]
            self.receiver = info[3]
            self.message = "|".join(info[4:])
        elif self.type == "usercount":
            self.usercount = int(info[2])
        elif self.type == "nametaken":
            self.username = info[2]
            self.message = info[3]
        elif self.type == "challstr":
            self.challstr = "|".join(info[2:])
        elif self.type == "updateuser":
            self.user = info[2]
            self.named = True if info[3] == "1" else False
            self.avatar = info[4]
            self.settings = json.loads(info[5])
        elif self.type == "formats":
            self.formats = parse_formats("|".join(info[2:]))
        elif self.type == "updatesearch":
            self.json = json.loads("|".join(info[2:]))
        elif self.type == "updatechallenges":
            self.json = json.loads("|".join(info[2:]))
        elif self.type == "queryresponse":
            self.query_type = info[2]
            json_str = "|".join(info[3:])
            self.json_data = json.loads(json_str)
            if self.query_type == "savereplay":
                # override the relevant room, since queryresponse messages
                # are sent as global messages
                self.room = self.json_data["id"]
                self.password = self.json_data.get("password", "")
        elif self.type == "raw":
            self.data = info[2]
        # Battle messages
        elif self.type == "win":
            self.winner = info[2]
        elif self.type == "player":
            self.player = info[2] if len(info) > 2 else None
            self.username = info[3] if len(info) > 3 else None
            self.avatar = info[4] if len(info) > 4 else None
            self.rating = int(info[5]) if len(info) > 5 and info[5] else None
        else:
            # TODO: Handle other message types
            pass

    def __str__(self) -> str:
        """Return a string representation of the message.

        Returns:
            str: A string representation of the message.
        """
        return f"<Message: {self.message_str}>"

    def __repr__(self) -> str:
        """Return a representation of the message.

        Returns:
            str: A representation of the message.
        """
        return self.__str__()
# Type aliases: a section maps each format name to its list of rule labels.
section = Dict[str, List[str]]
formats = Dict[str, section]


def parse_formats(format_str: str) -> formats:
    """Parse a ``|formats|`` payload into ``{section: {format: [rules]}}``.

    Args:
        format_str (str): The format message.

    Returns:
        formats: A dictionary of sections, formats, and some additional
            info about the formats.
    """
    # Bit flags carried by each format's hex suffix, in display order.
    rule_flags = (
        (1, "Requires Team"),
        (2, "Available for Search"),
        (4, "Available for Challenge"),
        (8, "Available for Tournaments"),
        (16, "Level 50"),
    )
    parsed: formats = {}
    expecting_section_name = False
    current_section = ""
    for token in format_str.split("|"):
        if not token:
            continue
        if expecting_section_name:
            # previous token announced a new section; this one names it
            current_section = token
            parsed[current_section] = {}
            expecting_section_name = False
        elif token[0] == ",":
            if token[1:] == "LL":
                # local-server marker, carries no format data
                continue
            expecting_section_name = True
            int(token[1:])  # section column index; validated but unused
        else:
            # a format entry: "Name,hexflags"
            fmt_name, flags_hex = token.split(",")
            flags = int(flags_hex, 16)
            parsed[current_section][fmt_name] = [
                label for bit, label in rule_flags if flags & bit
            ]
    return parsed
| 2.90625 | 3 |
tests/test_kubernetes.py | riksu-raksu/k-lga | 0 | 12771133 | <reponame>riksu-raksu/k-lga
import base64
import os
import tempfile
from pathlib import Path
import pytest
from kubernetes.client.rest import ApiException
from kolga.libs.kubernetes import Kubernetes
from kolga.libs.project import Project
from kolga.utils.general import get_deploy_name
from kolga.utils.models import BasicAuthUser
# Test configuration, overridable from the environment so the suite can
# target different deployment tracks / namespaces in CI.
DEFAULT_TRACK = os.environ.get("DEFAULT_TRACK", "stable")
K8S_NAMESPACE = os.environ.get("K8S_NAMESPACE", "testing")
@pytest.mark.parametrize(  # type: ignore
    "value, expected", [("400", True), ("500", False), ("300", False), ("200", False)]
)
def test__is_client_error(value: str, expected: bool) -> None:
    # Per the parametrized cases, only 4xx status codes count as client errors.
    assert Kubernetes._is_client_error(value) == expected
def test_labels_to_string() -> None:
    """The label dict is rendered as comma-separated key=value pairs."""
    labels = {"app": "testapp", "release": "testrelease", "lizard": "-1"}
    rendered = Kubernetes.labels_to_string(labels=labels)
    assert rendered == "app=testapp,release=testrelease,lizard=-1"
@pytest.mark.parametrize(  # type: ignore
    "error, raises_exception",
    [(ApiException(), True), (ApiException(status="403"), False)],
)
def test__handle_api_error(error: ApiException, raises_exception: bool) -> None:
    """Per the cases above: 403 is swallowed, other errors are re-raised."""
    if not raises_exception:
        Kubernetes._handle_api_error(error)
        return
    with pytest.raises(ApiException):
        Kubernetes._handle_api_error(error)
def test__encode_secret() -> None:
    """Every secret value is base64-encoded field by field."""
    plain = {"password": "<PASSWORD>", "username": "user"}
    encoded = Kubernetes._encode_secret(plain)
    assert encoded == {"password": "<PASSWORD>==", "username": "dXNlcg=="}
def test__b64_encode_file() -> None:
    """A file's bytes are base64-encoded by the helper."""
    content = "test:$apr1$35522gYe$r3E.NGo0m0bbOXppHr3g0."
    expected = "<KEY>"
    with tempfile.NamedTemporaryFile() as tmp:
        tmp.write(content.encode(encoding="UTF-8"))
        tmp.seek(0)
        assert Kubernetes._b64_encode_file(path=Path(tmp.name)) == expected
def test__create_basic_auth_data(kubernetes: Kubernetes) -> None:
    """Each user becomes a ``username:hash`` line in the base64 auth blob."""
    users = [
        BasicAuthUser(username="test", password="<PASSWORD>"),
        BasicAuthUser(username="user", password="<PASSWORD>"),
    ]
    secret = kubernetes._create_basic_auth_data(basic_auth_users=users)
    decoded = base64.b64decode(secret["auth"]).decode("UTF-8")
    # the blob ends with a trailing newline, hence the [:-1]
    lines = decoded.split("\n")[:-1]
    for expected_user, line in zip(users, lines):
        username, password = line.split(":")
        assert password
        assert username == expected_user.username
# =====================================================
# KUBERNETES CLUSTER REQUIRED FROM THIS POINT FORWARD
# =====================================================
def test_create_client(kubernetes: Kubernetes) -> None:
    # The `kubernetes` fixture performs the client creation; reaching this
    # assert means construction against the cluster succeeded.
    assert kubernetes
def test_create_namespace_env(kubernetes: Kubernetes, test_namespace: str) -> None:
    # The `test_namespace` fixture creates the namespace taken from the
    # K8S_NAMESPACE environment default.
    assert test_namespace == K8S_NAMESPACE
def test_create_namespace_named(kubernetes: Kubernetes) -> None:
    """An explicitly named namespace is created, then cleaned up again."""
    name = "testing-2"
    assert kubernetes.create_namespace(name) == name
    kubernetes.delete_namespace(name)
def test_create_secret_stable(kubernetes: Kubernetes, test_namespace: str) -> None:
    """A secret created on the default track can be read back."""
    track = DEFAULT_TRACK
    project = Project(track=track)
    kubernetes.create_secret(
        data={"test_secret": "1234"},
        namespace=K8S_NAMESPACE,
        track=track,
        project=project,
        secret_name=project.secret_name,
    )
    # raises if the secret did not end up in the cluster
    kubernetes.get(resource="secret", namespace=K8S_NAMESPACE, name=project.secret_name)
def test_create_secret_qa(kubernetes: Kubernetes, test_namespace: str) -> None:
    """A secret created on the qa track can be read back."""
    track = "qa"
    project = Project(track=track)
    secret_data = {"test_secret": "1234"}
    kubernetes.create_secret(
        data=secret_data,
        namespace=K8S_NAMESPACE,
        track=track,
        project=project,
        secret_name=project.secret_name,
    )
    # raises if the secret did not end up in the cluster
    kubernetes.get(resource="secret", namespace=K8S_NAMESPACE, name=project.secret_name)
def test_delete_namespace(kubernetes: Kubernetes, test_namespace: str) -> None:
    # Delete the fixture-created namespace ...
    kubernetes.delete(
        resource="namespace", name=test_namespace, namespace=K8S_NAMESPACE
    )
    # ... after which fetching it must fail.
    with pytest.raises(Exception):
        kubernetes.get(resource="namespace", name=test_namespace)
def test_delete_all(kubernetes: Kubernetes, test_namespace: str) -> None:
    """delete_all removes everything carrying the release label."""
    track = DEFAULT_TRACK
    release_name = get_deploy_name(track=track)
    project = Project(track=track)
    kubernetes.create_secret(
        data={"test": "test"},
        namespace=test_namespace,
        track=track,
        project=project,
        secret_name=project.secret_name,
    )
    kubernetes.delete_all(namespace=test_namespace, labels={"release": release_name})
    with pytest.raises(Exception):
        kubernetes.get(resource="secret", name=test_namespace)
| 2.09375 | 2 |
tempus/timetracking/models.py | rixx/tempus | 3 | 12771134 | <reponame>rixx/tempus
import datetime
import random
import os
import yaml
import pytz
from django.conf import settings
from django.contrib.auth.models import User
from django.db import models
from django.template.defaultfilters import slugify
class BaseTimetracking(models.Model):
    """Abstract base providing a name and an auto-generated unique slug."""

    name = models.CharField(max_length=200)
    slug = models.SlugField(max_length=40)
    slug_filter = {}

    class Meta:
        abstract = True

    def __str__(self):
        return self.name

    def save(self, *args, **kwargs):
        # Generate the slug only on first save so it stays stable afterwards.
        if not self.pk:
            self.slug = self.slugify()
        super().save(*args, **kwargs)

    def slugify(self, slug_field='slug'):
        """Return a slug unique per owner, appending -1, -2, ... on clashes."""
        max_length = self.__class__._meta.get_field(slug_field).max_length
        full_slug = slugify(self.name)
        candidate = full_slug[:max_length]
        # Uniqueness is scoped per owner when the model has one.
        uniqueness_filter = {'owner': self.owner} if hasattr(self, 'owner') else {}
        attempt = 0
        while self.__class__.objects.filter(slug=candidate, **uniqueness_filter).exists():
            attempt += 1
            suffix = '-{}'.format(attempt)
            # truncate the base so base + suffix still fits in the field
            candidate = '{}{}'.format(full_slug[:max_length - len(suffix)], suffix)
        return candidate
class Category(BaseTimetracking):
    owner = models.ForeignKey(User)
    # FontAwesome icon id, chosen automatically on first save.
    icon = models.CharField(max_length=60)
    class Meta:
        unique_together = (('owner', 'name'), )
    def save(self, *args, **kwargs):
        # Pick an icon only on creation so it stays stable afterwards.
        if not self.pk:
            self.icon = get_fontawesome_icon(self.name, default=None)
        super().save(*args, **kwargs)
class Tag(BaseTimetracking):
    # Free-form per-user label; name/slug behaviour is inherited.
    owner = models.ForeignKey(User)
class Project(BaseTimetracking):
    """A project belongs to a category and accumulates time entries."""

    category = models.ForeignKey(Category)
    tags = models.ManyToManyField(Tag, blank=True)

    class Meta:
        unique_together = (('slug', 'category'),)

    def __str__(self):
        return '{} (Category {})'.format(self.name, self.category)

    def total_time(self):
        """Sum of all entry durations, truncated to whole seconds."""
        if self.entry_set.count() == 0:
            return datetime.timedelta(0)
        durations = [entry.duration() for entry in self.entry_set.all()]
        durations = [d for d in durations if isinstance(d, datetime.timedelta)]
        total = sum(durations, datetime.timedelta())
        return total - datetime.timedelta(microseconds=total.microseconds)

    def delta_to_last_edit(self):
        """Time since the most recent entry ended, truncated to whole seconds."""
        if self.entry_set.count() == 0:
            return datetime.timedelta(0)
        newest = self.entry_set.latest('end_time')
        elapsed = datetime.datetime.now(pytz.timezone('Europe/Berlin')) - newest.end_time
        return elapsed - datetime.timedelta(microseconds=elapsed.microseconds)
class Entry(models.Model):
    """A single tracked time span belonging to a project."""

    project = models.ForeignKey(Project)
    start_time = models.DateTimeField()
    end_time = models.DateTimeField()

    def __str__(self):
        elapsed = datetime.datetime.now(pytz.timezone('Europe/Berlin')) - self.end_time
        whole_seconds = elapsed - datetime.timedelta(microseconds=elapsed.microseconds)
        return "Entry of Project {}, {} ago.".format(self.project.name, whole_seconds)

    def duration(self):
        """Length of the entry, truncated to whole seconds."""
        span = self.end_time - self.start_time
        return span - datetime.timedelta(microseconds=span.microseconds)
def get_fontawesome_icon(name, default=None):
    """Find a FontAwesome icon id matching *name*.

    Lookup order:
      1. exact match on the icon name or one of its aliases,
      2. a random icon whose category filter contains the name,
      3. a random icon whose name or alias partially matches,
      4. *default*, or a completely random icon when no default was given.

    :param name: human-readable name, matched case-insensitively
    :param default: icon id to fall back on; ``None`` means "pick randomly"
    :return: a FontAwesome icon id string
    """
    def has_list(obj, key):
        # True when obj[key] exists and is a list (the YAML is irregular).
        # (renamed from `name` to avoid shadowing the outer parameter)
        return key in obj.keys() and isinstance(obj[key], list)

    # NOTE: the icon catalogue is re-read on every call; cache it if this
    # ever becomes a hot spot.
    with open(os.path.join(settings.PROJECT_PATH, 'timetracking/icons.yml'), 'r') as stream:
        # safe_load: the file is trusted but plain data, and PyYAML >= 6
        # raises TypeError for yaml.load() without an explicit Loader.
        icons = yaml.safe_load(stream)['icons']

    name = name.lower()
    category_match = []
    name_partial_match = []
    for icon in icons:
        icon_name = icon['name'].lower()
        # first off, return direct matches
        if icon_name == name:
            return icon['id']
        if has_list(icon, 'aliases') and name in icon['aliases']:
            return icon['id']
        # next, collect category matches to choose randomly from
        if has_list(icon, 'filter') and name in icon['filter']:
            category_match.append(icon['id'])
        # lastly, collect partial matches to choose randomly from
        if name in icon_name or icon_name in name:
            name_partial_match.append(icon['id'])
        if has_list(icon, 'aliases'):
            name_partial_match += [alias for alias in icon['aliases']
                                   if (name in alias or alias in name)]

    if category_match:
        return random.choice(category_match)
    if name_partial_match:
        return random.choice(name_partial_match)
    if default is None:
        # explicit None test (was a falsy check) so an empty-string default
        # is honoured instead of triggering a random pick
        return random.choice(icons)['id']
    return default
| 1.976563 | 2 |
setup.py | Jimdo/nagios2trac | 0 | 12771135 | #!/usr/bin/env python
from setuptools import Command, setup
import sys
class PyPandoc(Command):
    """setup.py command that regenerates the reST docs from Markdown."""

    description = 'Generates the documentation in reStructuredText format.'
    user_options = []

    def initialize_options(self):
        # no options to initialize for this command
        pass

    def finalize_options(self):
        # nothing to validate
        pass

    def convert(self, infile, outfile):
        """Convert a single Markdown file to reStructuredText."""
        import pypandoc
        with open(outfile, 'w+') as target:
            target.write(pypandoc.convert(infile, 'rst'))

    def run(self):
        for markdown_file, rst_file in (
            ('README.md', 'rst/README.rst'),
            ('CHANGELOG.md', 'rst/CHANGELOG.rst'),
        ):
            self.convert(markdown_file, rst_file)
setup(name='nagios2trac',
      version='0.5.1',
      description='Let Nagios Create or Comment on Trac Tickets',
      # README/CHANGELOG are pre-converted to reST by the PyPandoc command
      # above (`python setup.py doc`); those files must exist.
      long_description=open('rst/README.rst').read() + '\n\n' +
                       open('rst/CHANGELOG.rst').read(),
      author='<NAME>',
      author_email='<EMAIL>',
      url='https://github.com/Jimdo/nagios2trac',
      license='Apache',
      scripts=['nagios2trac.py'],
      # expose the doc-generation command as `python setup.py doc`
      cmdclass={'doc': PyPandoc},
      include_package_data=True,
      )
| 2.015625 | 2 |
synthqc/errors.py | jcreinhold/synthqc | 0 | 12771136 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
synthqc.errors
This module holds project defined errors
Author: <NAME> (<EMAIL>)
Created on: Nov 15, 2018
"""
class SynthQCError(Exception):
    """Base class for all synthqc-specific exceptions."""
| 1.273438 | 1 |
test/test_wordle_core.py | tanayseven/wordle-solver-bot | 1 | 12771137 | import pytest
from core import WordsRepository
@pytest.mark.parametrize(
    "input_words,forget_letters,remaining_words",
    [
        ("urger,ribat,anorn,stram,sofar", "ugo", "ribat,stram")
    ]
)
def test_forget_letters(input_words, forget_letters, remaining_words):
    """Words containing any forgotten (grey) letter are dropped."""
    repository = WordsRepository(tuple(input_words.split(',')))
    after_forget = repository.forget(forget_letters)
    assert set(after_forget.remaining_words) == set(remaining_words.split(','))
@pytest.mark.parametrize(
    "input_words",
    [
        "urger,ribat,anorn,stram,sofar"
    ]
)
def test_forget_letters_given_no_letters(input_words):
    """Forgetting the empty string keeps every word."""
    words = tuple(input_words.split(','))
    repository = WordsRepository(words)
    assert set(repository.forget("").remaining_words) == set(words)
@pytest.mark.parametrize(
    "input_words,remember_positions,remaining_words",
    [
        ("uhuru,morro,frory,fjord", ([3], [], [0], [2]), "fjord,frory")
    ]
)
def test_remember_letter(input_words, remember_positions, remaining_words):
    """Remembering green letters at positions narrows the candidate set."""
    words = tuple(input_words.split(','))
    repository = WordsRepository(words)
    for guess, positions in zip(words, remember_positions):
        repository = repository.remember_at(current_word=guess, at_positions=positions)
    assert set(repository.remaining_words) == set(remaining_words.split(","))
@pytest.mark.parametrize(
    "input_words,not_at_position,remaining_words",
    [
        ("saury,kebab,tunic,hepar,quiff", ([2], [], [3], [], []), "quiff")
    ]
)
def test_remember_letter_better(input_words, not_at_position, remaining_words):
    """Remembering yellow letters (present, wrong spot) narrows the set."""
    words = tuple(input_words.split(','))
    repository = WordsRepository(words)
    for guess, positions in zip(words, not_at_position):
        repository = repository.remember_not_at(guess, positions)
    assert set(repository.remaining_words) == set(remaining_words.split(","))
# fmt: off
@pytest.mark.parametrize(
    "input_words,not_at_positions,at_positions,to_forget,remaining_words",
    [
        ("leese,tunic,benab", ([], []), ([1], []), ("e", "i"), "leese,benab")
    ]
)
# fmt: on
def test_already_remembered_letters_cant_be_forgotten(
    input_words,
    not_at_positions,
    at_positions,
    to_forget,
    remaining_words,
):
    input_words = tuple(input_words.split(","))
    word_solver = WordsRepository(input_words)
    remaining_words = tuple(remaining_words.split(","))
    new_word_solver = word_solver
    for word, not_at, at, forget in zip(input_words, not_at_positions, at_positions, to_forget):
        # NOTE(review): this restarts from the untouched `word_solver` each
        # iteration instead of chaining `new_word_solver` — looks like a bug,
        # but the expected data matches the current behaviour; confirm intent.
        new_word_solver = word_solver.remember_not_at(word, not_at)
        new_word_solver = new_word_solver.remember_at(word, at)
        new_word_solver = new_word_solver.forget(grey_letters=forget)
    assert set(remaining_words) == set(new_word_solver.remaining_words)
@pytest.mark.parametrize(
    "input_words,forget_words,remaining_words",
    [
        ("saury,kebab,tunic,hepar,quiff", "kebab,hepar,quiff", "saury,tunic")
    ]
)
def test_forget_word(input_words, forget_words, remaining_words):
    """Forgetting whole words removes exactly those words."""
    repository = WordsRepository(tuple(input_words.split(",")))
    for word in tuple(forget_words.split(",")):
        repository = repository.forget_word(word)
    assert set(repository.remaining_words) == set(tuple(remaining_words.split(",")))
| 2.734375 | 3 |
Model/Models.py | BluePinetree/KaKR_3rd_month-Car_class_classification | 0 | 12771138 | import os
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from keras.applications.resnet50 import ResNet50
from keras.applications.inception_v3 import InceptionV3
from keras.applications.xception import Xception
# from efficientnet.keras import EfficientNetB3
from keras_preprocessing.image import ImageDataGenerator
from keras.models import Model
from keras.layers import Conv2D, Dense, Flatten, GlobalAveragePooling2D, GlobalMaxPool2D, Dropout, MaxPooling2D
def build_model(model_name=None, include_top=False, input_shape=(256, 256, 3),
                fine_tuning=True, layer_to_freeze=None,
                load_pretrained: str = None, summary=False):
    """Build a 196-class car classifier on top of an ImageNet backbone.

    :param model_name: one of 'resnet50', 'inception_v3', 'xception'
    :param include_top: keep the backbone's own classifier head
    :param input_shape: input image shape (H, W, C)
    :param fine_tuning: freeze backbone layers before `layer_to_freeze`
    :param layer_to_freeze: name of the first layer left trainable
                            (required when fine_tuning is True)
    :param load_pretrained: optional path of a weights file to load
    :param summary: print the model summary when True
    :return: a keras Model ending in a 196-way softmax
    :raises ValueError: unknown model name, or missing freeze layer
    """
    pmodel_name = model_name.strip().lower()
    print(pmodel_name)
    if pmodel_name == 'resnet50':
        base_model = ResNet50(include_top=include_top, input_shape=input_shape)
    elif pmodel_name == 'inception_v3':
        base_model = InceptionV3(include_top=include_top, input_shape=input_shape)
    elif pmodel_name == 'xception':
        base_model = Xception(include_top=include_top, input_shape=input_shape)
    # elif pmodel_name == 'efficient_net':
    #     base_model = EfficientNetB3(include_top=include_top, input_shape=input_shape)
    else:
        raise ValueError('Unknown model_name: {!r}'.format(model_name))

    if fine_tuning:
        if layer_to_freeze is None:
            # plain raise instead of `assert`, which disappears under -O
            raise ValueError("You must define layer's name to freeze.")
        # Freeze every layer up to (but not including) `layer_to_freeze`;
        # layers from that one onward stay trainable. The previous
        # condition (`not layer.name == fr_layer_name`) was inverted and
        # made every layer trainable from the first name mismatch.
        set_trainable = False
        for layer in base_model.layers:
            if layer.name == layer_to_freeze:
                set_trainable = True
            layer.trainable = set_trainable

    # Replace the head: 1x1 conv bottleneck -> pooling -> FC -> softmax(196).
    last_1dconv_1 = Conv2D(1024, 1, activation='relu', kernel_initializer='he_normal')(base_model.output)
    last_pool_1 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(last_1dconv_1)
    global_avg_pool = GlobalAveragePooling2D()(last_pool_1)
    last_Dense_1 = Dense(512, activation='relu', kernel_initializer='he_normal')(global_avg_pool)
    dropout_1 = Dropout(rate=0.5)(last_Dense_1)
    last_Dense_2 = Dense(196, activation='softmax')(dropout_1)

    model = Model(base_model.input, last_Dense_2)

    if summary:
        model.summary()

    # load pretrained weights
    if load_pretrained:
        model.load_weights(load_pretrained)

    return model
| 2.578125 | 3 |
Python/API Sellsy/__version__.py | Cantin-L/Professionelle | 1 | 12771139 | __title__ = 'Sellsy'
__description__ = 'https://api.sellsy.fr/documentation/methodes'
__version__ = '0.0.4'
__author__ = '<NAME>.'
print("API de ", __title__ ," lien de la documentation : ", __description__ ," c'est la version ", __version__ ," développer par ", __author__)
| 1.179688 | 1 |
readme/forms.py | audax/pypo | 25 | 12771140 | <filename>readme/forms.py
from crispy_forms import layout
from django import forms
from django.conf import settings
from django.utils.encoding import force_text
from haystack.forms import FacetedSearchForm
import six
from taggit.forms import TagWidget
from .models import Item, UserProfile
from crispy_forms.helper import FormHelper
def edit_string_for_tags(tags):
    """Render tag objects as a comma-separated, alphabetically sorted string."""
    names = sorted(tag.name for tag in tags)
    return ', '.join(names)
def parse_tags(tagstring):
    """Parse a comma-separated tag string into a sorted list of unique tags.

    Empty segments (e.g. from a trailing comma or "a,,b") are discarded so
    they cannot become empty tags.
    """
    if not tagstring:
        return []
    tagstring = force_text(tagstring)
    words = {word.strip() for word in tagstring.split(',')}
    words.discard('')  # drop empty segments instead of creating "" tags
    return sorted(words)
class QuotelessTagWidget(TagWidget):
    # Renders the tags as a plain comma-separated string using
    # edit_string_for_tags above (sorted, no quoting).
    def render(self, name, value, attrs=None):
        # `value` may already be a string (form redisplay) or a
        # tagged-item queryset coming from the model (hence select_related).
        if value is not None and not isinstance(value, six.string_types):
            value = edit_string_for_tags([o.tag for o in value.select_related("tag")])
        return super(QuotelessTagWidget, self).render(name, value, attrs)
class TagField(forms.CharField):
    """Form field whose cleaned value is a sorted list of tag names."""

    widget = TagWidget

    def clean(self, value):
        cleaned = super(TagField, self).clean(value)
        try:
            return parse_tags(cleaned)
        except ValueError:
            raise forms.ValidationError("Please provide a comma-separated list of tags.")
class CreateItemForm(forms.ModelForm):
    """Form for creating an Item from a URL plus optional tags."""

    tags = TagField(required=False)

    def __init__(self, *args, **kwargs):
        # crispy-forms rendering configuration
        helper = FormHelper()
        helper.form_id = 'create-item-form'
        helper.form_method = 'post'
        helper.form_class = 'form-horizontal'
        helper.label_class = 'col-lg-2'
        helper.field_class = 'col-lg-8'
        helper.help_text_inline = True
        helper.error_text_inline = True
        helper.html5_required = True
        helper.add_input(layout.Submit('submit', 'Submit'))
        self.helper = helper
        super(CreateItemForm, self).__init__(*args, **kwargs)

    def clean(self):
        cleaned_data = self.cleaned_data
        if cleaned_data['tags']:
            # truncate each tag to the model field's maximum length
            cleaned_data['tags'] = [tag[:99] for tag in cleaned_data['tags']]
        return cleaned_data

    class Meta:
        model = Item
        fields = ('url', 'tags',)
        widgets = {
            'tags': QuotelessTagWidget()
        }
class UpdateItemForm(CreateItemForm):
    # Same helper/validation as CreateItemForm, but the URL is fixed:
    # only the tags remain editable.
    class Meta:
        model = Item
        fields = ('tags',)
        widgets = {
            'tags': QuotelessTagWidget()
        }
class UserProfileForm(forms.ModelForm):
    """Settings form for a user's profile (theme, paging, tag filters)."""

    theme = forms.ChoiceField(
        label='Theme',
        help_text='Choose a color theme',
        choices=settings.PYPO_THEMES)
    items_per_page = forms.IntegerField(
        label='Page size',
        help_text='Select how many items should be shown on a page',
        min_value=1,
        max_value=100)
    excluded_tags = TagField(
        label='Excluded tags',
        help_text='Items with these tags will not be shown',
        required=False)

    def __init__(self, *args, **kwargs):
        # crispy-forms rendering configuration
        helper = FormHelper()
        helper.form_id = 'user-profile-form'
        helper.form_class = 'form-horizontal'
        helper.label_class = 'col-lg-2'
        helper.field_class = 'col-lg-4'
        helper.layout = layout.Layout(
            'theme',
            'new_window',
            'items_per_page',
            'excluded_tags',
            'show_excluded',
            layout.Div(
                layout.Div(
                    layout.Submit('Save', value='Save', css_class='btn-default'),
                    css_class='col-lg-offset-2 col-lg-4'
                ),
                css_class='form-group',
            )
        )
        helper.help_text_inline = True
        helper.error_text_inline = True
        helper.html5_required = True
        self.helper = helper
        super(UserProfileForm, self).__init__(*args, **kwargs)

    class Meta:
        model = UserProfile
        fields = ['theme', 'new_window', 'items_per_page', 'excluded_tags', 'show_excluded']
class SearchForm(FacetedSearchForm):
    # Haystack faceted search form with crispy-forms layout; the two sort
    # buttons submit `sort=oldest` / `sort=newest` alongside the query.
    sort = forms.Select()
    def __init__(self, *args, **kwargs):
        h = FormHelper()
        h.form_id = 'item-search-form'
        h.form_method = 'GET'
        h.form_class = 'form-horizontal'
        h.label_class = 'col-lg-2'
        h.field_class = 'col-lg-8'
        # layout: the query field plus a button row; the sort buttons are
        # raw HTML because crispy has no submit-button-with-value helper.
        h.layout = layout.Layout(
            'q',
            layout.Div(
                layout.Div(
                    layout.Button('Search', value='Search', css_class='btn-primary'),
                    layout.Div(
                        layout.HTML('<button id="id_oldest" class="btn btn-default" name="sort"'
                                    ' value="oldest" type="submit">'
                                    '<i class="fa fa-sort-asc"></i> Oldest first'
                                    '</button>'),
                        layout.HTML('<button id="id_newest" class="btn btn-default" name="sort"'
                                    ' value="newest" type="submit">'
                                    '<i class="fa fa-sort-desc"></i> Newest first'
                                    '</button>'),
                        css_class="pull-right"),
                    css_class='col-lg-offset-2 col-lg-8'
                ),
                css_class='form-group',
            )
        )
        h.help_text_inline = True
        h.error_text_inline = True
        h.html5_required = True
        self.helper = h
        super(SearchForm, self).__init__(*args, **kwargs)
| 2.28125 | 2 |
language-learning/python/tricks/equal_vs_is.py | imteekay/programming-language-research | 24 | 12771141 | a = [1, 2, 3]
b = a
print(a == b)
# => True
print(a is b)
# => True
c = list(a)
# == evaluates to true if the objects referred by the variables are equal
print(a == c)
# => True
# is evaluates to true if both variables point to the same object
print(a is c)
# => false
| 3.953125 | 4 |
dnnv/nn/transformers/simplifiers/convert_batch_norm.py | samysweb/dnnv | 5 | 12771142 | import numpy as np
from copy import copy
from .base import Simplifier
from ... import operations
from ...analyzers import SplitAnalysis
class ConvertBatchNorm(Simplifier):
    """Folds BatchNormalization nodes into an adjacent affine operation.

    BN(x) = scale * (x - mean) / sqrt(var + eps) + bias is affine, so it
    can be absorbed into a preceding Conv/Gemm, or materialized as a
    Conv/Gemm when it sits directly on the network input. Split inputs
    (feeding multiple consumers) are left untouched.
    """

    ANALYSES = {"is_split": SplitAnalysis}

    def visit_BatchNormalization(self, operation: operations.BatchNormalization):
        input_op = operation.x
        if (
            isinstance(input_op, operations.Conv)
            and not self.analysis["is_split"][input_op]
        ):
            # Fold into the conv: with a = scale/std, the composed op is
            # w' = a * w (per output channel) and b' = a * b + (bias - a*mean).
            std = np.sqrt(operation.variance + operation.epsilon)
            a = operation.scale / std
            b = operation.bias - operation.scale * operation.mean / std
            weights = input_op.w
            # broadcast a over the output-channel axis (axis 0) of the weights
            a_w = a[:, None, None, None]
            weights = a_w * weights
            bias = input_op.b
            if bias is None:
                bias = np.zeros(weights.shape[0], dtype=weights.dtype)
            bias = a * bias + b
            new_operation = copy(input_op)
            new_operation.w = weights
            new_operation.b = bias
            return new_operation
        elif (
            isinstance(input_op, operations.Gemm)
            and not self.analysis["is_split"][input_op]
        ):
            # Append an affine Gemm: y = x @ diag(a) + b
            std = np.sqrt(operation.variance + operation.epsilon)
            a = operation.scale / std
            b = operation.bias - operation.mean * a
            return operations.Gemm(input_op, np.diag(a), b)
        elif isinstance(input_op, operations.Input):
            input_shape = input_op.shape
            input_dtype = input_op.dtype
            if len(input_shape) == 2:
                # flat input: express BN as a Gemm
                std = np.sqrt(operation.variance + operation.epsilon)
                a = operation.scale / std
                b = operation.bias - operation.mean * a
                return operations.Gemm(input_op, np.diag(a), b)
            elif len(input_shape) == 4:
                # image input: express BN as a 1x1 depth-identity Conv
                c = operation.mean.shape[0]
                std = np.sqrt(operation.variance + operation.epsilon)
                k = np.zeros(
                    (c, c, 1, 1), dtype=input_dtype
                )  # identity kernel, shape (outC, inC, kH, kW)
                for i in range(c):
                    k[i, i, 0, 0] = 1
                W = k * operation.scale / std
                b = operation.bias - operation.scale * operation.mean / std
                op = operations.Conv(input_op, W, b)
                return op
        # TODO : in what other scenarios can BatchNorm be converted?
        return operation
| 2.453125 | 2 |
research/cv/NFNet/src/models/NFNet/classifier.py | mindspore-ai/models | 77 | 12771143 | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Classifier head and layer factory
Hacked together by / Copyright 2020 <NAME>
"""
from mindspore import nn
from mindspore import ops
def create_pool(pool_type='avg'):
    """Return a global-pooling reduction op for 'avg' or 'max' pooling."""
    assert pool_type in ["avg", "max"]
    # Dispatch table instead of an if/elif chain; both ops reduce without
    # keeping the reduced dimensions.
    reducers = {
        'avg': ops.ReduceMean,
        'max': ops.ReduceMax,
    }
    return reducers[pool_type](keep_dims=False)
class ClassifierHead(nn.Cell):
    """Classifier head with configurable global pooling and optional dropout.

    Pools the spatial dimensions (axes 2 and 3) of an NCHW feature map,
    applies dropout when ``drop_rate > 0``, then projects to ``num_classes``
    with a dense layer.
    """
    def __init__(self, in_chs, num_classes, pool_type='avg', drop_rate=0.):
        super(ClassifierHead, self).__init__()
        self.drop_rate = drop_rate
        self.global_pool = create_pool(pool_type)
        # Identity stands in for dropout when no dropout is requested.
        if self.drop_rate > 0.:
            self.drop_out = nn.Dropout(keep_prob=1 - self.drop_rate)
        else:
            self.drop_out = ops.Identity()
        self.fc = nn.Dense(in_chs, num_classes)

    def construct(self, x):
        pooled = self.global_pool(x, (2, 3))
        dropped = self.drop_out(pooled)
        return self.fc(dropped)
| 2.25 | 2 |
config.py | gokul-uf/tf-BEGAN | 0 | 12771144 | import tensorflow as tf
class Config:
    """Hyper-parameters and paths for the BEGAN training run (used as a
    plain namespace; attributes are read directly, never instantiated)."""
    batch_size = 16
    n = 13  # TODO: change
    embedding_dim = 64  # size of the latent vector z
    z_low_limit = -1    # lower bound when sampling z uniformly
    z_high_limit = 1    # upper bound when sampling z uniformly
    img_width = 64
    img_height = 64
    num_channel = 3     # RGB images
    lambda_k = 0.001    # step size for updating the control variable k
    gamma = 0.5         # presumably the BEGAN diversity ratio -- confirm
    k_0 = 0             # initial value of the control variable k
    lr = 5e-5           # optimizer learning rate
    sample_location = "../samples"  # directory generated samples are written to
    num_samples = 25    # samples generated per sampling step
    num_rows = 5  # for the tiling in sampling
    num_cols = 5
    num_epoch = 1000
    sample_epoch = 5  # generate samples every 5 epochs
    dataset_size = 202599  # CelebA dataset size
    data_location = "../data/img_align_celeba/"
| 2.21875 | 2 |
SIDRE/core.py | joshwalawender/SIDRE | 1 | 12771145 | <reponame>joshwalawender/SIDRE<filename>SIDRE/core.py<gh_stars>1-10
import os
import sys
import re
import tempfile
import logging
from glob import glob
from datetime import datetime as dt
if sys.version_info.major == 2:
import subprocess32 as subprocess
elif sys.version_info.major == 3:
import subprocess
import astropy.units as u
import astropy.coordinates as c
from astropy.time import Time
from astropy.io import fits
from astropy import wcs
from astropy.table import Table, Column
import ccdproc
import sep
import warnings
from astropy.utils.exceptions import AstropyWarning
warnings.simplefilter('ignore', category=AstropyWarning)
from .config import get_config
from .calibration import *
from .utils import *
def preprocess_header(file):
    """Normalize known FITS header quirks in *file*, writing in place.

    Migrates the deprecated RADECSYS keyword to RADESYSa and forces the
    BUNIT keyword to exist with the lower-case 'adu' spelling.  The file
    is only flushed when something actually changed.
    """
    print('Fixing header')
    with fits.open(file, 'update', verify=True) as hdul:
        header = hdul[0].header
        dirty = False
        # Convert deprecated RADECSYS to RADESYSa (only when absent).
        if 'RADESYSa' not in header.keys() and 'RADECSYS' in header.keys():
            header.set('RADESYSa', header['RADECSYS'])
            dirty = True
        # Ensure BUNIT exists; normalize upper-case 'ADU' to 'adu'.
        if 'BUNIT' not in header.keys():
            header.set('BUNIT', 'adu')
            dirty = True
        elif header['BUNIT'] == 'ADU':
            header.set('BUNIT', 'adu')
            dirty = True
        if dirty:
            hdul.flush()
class ScienceImage(object):
    '''A single science frame plus the state of its reduction pipeline.

    Wraps a FITS file in a `ccdproc.CCDData` (``self.ccd``) and carries the
    logger, configuration, pointing information, and calibration keywords
    (gain, exposure time, read noise) used by the reduction methods below.
    '''
    def __init__(self, file, preprocess_header=preprocess_header,
                 logfile=None, verbose=False, unit='adu'):
        self.log = None
        assert os.path.exists(file)
        self.file = file
        self.filename = os.path.basename(file)
        # Do splitext twice to handle .fits.fz extensions
        self.fileroot = os.path.splitext(os.path.splitext(self.filename)[0])[0]
        # Initialize metadata for analysis
        start_DR = dt.utcnow()
        self.metadata = fits.Header()
        self.metadata.set('DRSTART', value=start_DR.isoformat(),
                          comment='UT time of start of analysis')
        # Preprocess header if needed (pass preprocess_header=None to skip)
        if preprocess_header:
            preprocess_header(self.file)
        # Read FITS file; retry with an explicit unit when BUNIT is missing
        # (fits_ccddata_reader raises ValueError in that case).
        try:
            self.ccd = ccdproc.fits_ccddata_reader(file, verify=True)
        except ValueError:
            self.ccd = ccdproc.fits_ccddata_reader(file, verify=True, unit=unit)
        self.config = get_config()
        self.datekw = self.config.get('DATE-OBS', 'DATE-OBS')
        self.datefmt = self.config.get('DATEFMT', '%Y-%m-%dT%H:%M:%S')
        self.obstime = None
        # get_date() also populates self.obstime as an astropy Time
        self.date = self.get_date()
        self.add_logger(verbose=verbose)
        # NOTE(review): the `logfile` argument is accepted but never
        # forwarded to add_logger() -- confirm whether that is intended.
        self.log.info('Opened File: {}'.format(self.filename))
        self.header_pointing = None
        self.wcs_pointing = None
        self.altaz = None
        # Build the observatory AltAz frame from config; fall back to None
        # when any of the site values are missing or malformed.
        try:
            lat = c.Latitude(self.config.get('Latitude') * u.degree)
            lon = c.Longitude(self.config.get('Longitude') * u.degree)
            height = self.config.get('Elevation') * u.meter
            self.loc = c.EarthLocation(lon, lat, height)
            self.altazframe = c.AltAz(location=self.loc, obstime=self.obstime,
                      temperature=self.config.get('Temperature')*u.Celsius,
                      pressure=self.config.get('Pressure')/1000.*u.bar)
#             self.moon = c.get_moon(self.obstime, location=self.loc)
        # NOTE(review): bare `except:` also swallows KeyboardInterrupt and
        # SystemExit; consider narrowing to `except Exception:`.
        except:
            self.loc = None
            self.altazframe = None
        self.moon = None
        self.back = None       # sep.Background model (set by subtract_background)
        self.UCAC4 = None      # catalog table (set by get_UCAC4)
        self.extracted = None  # detected sources (set by extract)
        self.assoc = None      # catalog-matched sources (set by associate)
        # Gain: prefer the header keyword, fall back to the config value.
        self.gain = ccdproc.Keyword('GAIN', unit=u.electron / u.adu)
        try:
            self.gain.value_from(self.ccd.header)
        except KeyError:
            self.gain.value = self.config.get('Gain')
        self.exptime = ccdproc.Keyword(self.config.get('EXPTIME', 'EXPTIME'),
                                       unit=u.second)
        self.exptime.value_from(self.ccd.header)
        # Read noise: prefer the header keyword, fall back to config 'RN'.
        self.readnoise = ccdproc.Keyword('RDNOISE', unit=u.electron)
        try:
            self.readnoise.value_from(self.ccd.header)
        except KeyError:
            self.readnoise.value = self.config.get('RN')
def __del__(self):
if self.log:
self.log.info('Done.')
    def add_logger(self, logfile=None, verbose=False):
        '''Create a logger object to use with this image.  The logger object
        will be available as self.log

        Parameters
        ----------
        logfile : file to write log to
        verbose : Defaults to False. If verbose is true, it sets the logging
            level to DEBUG (otherwise level is INFO).
        '''
        # One logger per file; logging treats dots as hierarchy separators,
        # so they are replaced to keep each image's logger independent.
        self.log = logging.getLogger(self.filename.replace('.', '_'))
        # Only configure handlers once, even if add_logger is called again.
        if len(self.log.handlers) == 0:
            self.log.setLevel(logging.DEBUG)
            LogFormat = logging.Formatter('%(asctime)s %(levelname)8s: %(message)s',
                                          datefmt='%Y%m%d %H:%M:%S')
            ## Log to a file (removed first so each run starts fresh)
            if logfile:
                self.logfile = logfile
                self.logfilename = os.path.split(self.logfile)[1]
                if os.path.exists(logfile): os.remove(logfile)
                LogFileHandler = logging.FileHandler(logfile)
                LogFileHandler.setLevel(logging.DEBUG)
                LogFileHandler.setFormatter(LogFormat)
                self.log.addHandler(LogFileHandler)
            ## Log to console; DEBUG when verbose, INFO otherwise
            LogConsoleHandler = logging.StreamHandler(stream=sys.stdout)
            if verbose:
                LogConsoleHandler.setLevel(logging.DEBUG)
            else:
                LogConsoleHandler.setLevel(logging.INFO)
            LogConsoleHandler.setFormatter(LogFormat)
            self.log.addHandler(LogConsoleHandler)
def get_date(self):
'''
Return string with the UT date of the image in YYYYMMDDUT format.
Also populates the `obstime` property with an `astropy.time.Time`
instance.
'''
dto = dt.strptime(self.ccd.header[self.datekw], self.datefmt)
self.obstime = Time(dto)
self.date = dto.strftime('%Y%m%dUT')
return self.date
    def bias_correct(self):
        '''Subtract the nightly master bias from the image, if one exists.

        Records the master file's FILENAME/CHECKSUM/DATASUM in the analysis
        metadata for provenance.  When no master bias is found the image is
        left untouched.
        '''
        master_bias = get_master(self.date, type='Bias')
        if master_bias:
            self.log.info('Subtracting master bias')
            self.ccd = ccdproc.subtract_bias(self.ccd, master_bias,
                                             add_keyword=None)
            self.metadata.set('BIASFILE', value=master_bias.header['FILENAME'],
                              comment='Filename of master bias file')
            self.metadata.set('BIASCSUM', value=master_bias.header['CHECKSUM'],
                              comment='CHECKSUM of master bias file')
            self.metadata.set('BIASDSUM', value=master_bias.header['DATASUM'],
                              comment='DATASUM of master bias file')
    def gain_correct(self):
        '''Convert the image from adu to electrons using the gain value.

        Wrapper around the `ccdproc.gain_correct` function; the gain comes
        from the GAIN header keyword or the configured fallback (see
        __init__).
        '''
        self.log.info('Gain correcting image')
        self.ccd = ccdproc.gain_correct(self.ccd, self.gain.value)
    def dark_correct(self):
        '''Subtract the nightly master dark from the image, if one exists.

        Wrapper around the `ccdproc.subtract_dark` function.  The master
        dark is stored normalized to 1 second and scaled to this image's
        exposure time.  Records the master file's FILENAME/CHECKSUM/DATASUM
        in the analysis metadata for provenance.
        '''
        master_dark = get_master(self.date, type='Dark')
        if master_dark:
            self.log.info('Dark correcting image')
            self.ccd = ccdproc.subtract_dark(self.ccd, master_dark,
                                             data_exposure=self.exptime.value,
                                             dark_exposure=1.0*u.second,
                                             scale=True,
                                             add_keyword=None)
            self.metadata.set('DARKFILE', value=master_dark.header['FILENAME'],
                              comment='Filename of master dark file')
            self.metadata.set('DARKCSUM', value=master_dark.header['CHECKSUM'],
                              comment='CHECKSUM of master dark file')
            self.metadata.set('DARKDSUM', value=master_dark.header['DATASUM'],
                              comment='DATASUM of master dark file')
    def shutter_correct(self):
        '''Apply the shutter illumination map to the image, if one exists.

        Wrapper around the `ccdproc.apply_shutter_map` function.  Records
        the map file's FILENAME/CHECKSUM/DATASUM in the analysis metadata
        for provenance.
        '''
        shutter_map = get_master_shutter_map(self.date)
        if shutter_map:
            self.log.info('Applying shutter map')
            self.ccd = ccdproc.apply_shutter_map(self.ccd, shutter_map)
            self.metadata.set('SHUTFILE', value=shutter_map.header['FILENAME'],
                              comment='Filename of master shutter correction file')
            self.metadata.set('SHUTCSUM', value=shutter_map.header['CHECKSUM'],
                              comment='CHECKSUM of master shutter correction file')
            self.metadata.set('SHUTDSUM', value=shutter_map.header['DATASUM'],
                              comment='DATASUM of master shutter correction file')
def flat_correct(self):
'''
Wrapper around `ccdproc.flat_correct` function.
'''
master_flat = get_master(self.date, type='Flat')
if master_flat:
self.log.info('Flat fielding image')
self.ccd = ccdproc.flat_correct(im, master_flat, add_keyword=None)
self.metadata.set('FLATFILE', value=master_flat.header['FILENAME'],
comment='Filename of master flat file')
self.metadata.set('FLATCSUM', value=master_flat.header['CHECKSUM'],
comment='CHECKSUM of master flat file')
self.metadata.set('FLATDSUM', value=master_flat.header['DATASUM'],
comment='DATASUM of master flat file')
    def get_header_pointing(self):
        '''
        Read the pointing coordinate from the header RA and DEC keywords
        and populate the `header_pointing` property with an
        `astropy.coordinates.SkyCoord` instance.  Returns the SkyCoord, or
        None when the configured CoordinateFormat is not recognized.
        '''
        self.log.info('Reading pointing from header')
        RAkwd = self.config.get('RA', 'RA')
        DECkwd = self.config.get('DEC', 'DEC')
        equinox = self.config.get('Equinox', 2000.0)
        # A string Equinox config value names a header keyword to read.
        if type(equinox) is str:
            equinox = self.ccd.header[equinox]
        coord_frame = self.config.get('CoordinateFrame', 'Fk5')
        coord_format = self.config.get('CoordinateFormat', 'HMSDMS')
        RA = self.ccd.header[RAkwd]
        DEC = self.ccd.header[DECkwd]
        if coord_format == 'HMSDMS':
            # NOTE(review): unlike the branches below, this one passes no
            # equinox/obstime to SkyCoord -- confirm that is intentional.
            self.header_pointing = c.SkyCoord(RA, DEC, frame=coord_frame,
                                              unit=(u.hourangle, u.deg))
        elif coord_format == 'decimal degrees':
            self.header_pointing = c.SkyCoord(float(RA), float(DEC),
                                              frame=coord_frame,
                                              equinox=equinox,
                                              obstime=self.obstime,
                                              unit=(u.deg, u.deg))
        elif coord_format == 'decimal hours degrees':
            self.header_pointing = c.SkyCoord(float(RA), float(DEC),
                                              frame=coord_frame,
                                              equinox=equinox,
                                              obstime=self.obstime,
                                              unit=(u.hourangle, u.deg))
        else:
            self.header_pointing = None
            self.log.warning('  CoordinateFormat not understood.')
        # Also derive the AltAz pointing when a site location is configured.
        if self.loc and self.header_pointing:
            self.header_altaz = self.header_pointing.transform_to(self.altazframe)
        if self.header_pointing:
            self.log.info('  Header pointing: {}'.format(
                          self.header_pointing.to_string('hmsdms')))
        else:
            self.log.warning('  Failed to parse pointing from header')
        return self.header_pointing
    def solve_astrometry(self, downsample=2, SIPorder=4):
        '''
        Use a local install of astrometry.net to solve the image
        WCS and populate the `ccd.wcs` and `ccd.header` with the
        updated WCS info.

        Parameters
        ----------
        downsample : passed to solve-field as -z (image downsampling)
        SIPorder : passed to solve-field as -t (SIP distortion order)

        Returns the new `astropy.wcs.WCS` on success, or None when
        solve-field fails or yields a non-celestial solution.
        '''
        # NOTE(review): extend() mutates the list object returned by the
        # config; if the config caches it, repeated calls accumulate
        # duplicate -z/-t arguments -- consider copying first.
        solvefield_args = self.config.get('SolveFieldArgs', [])
        solvefield_args.extend(['-z', '{:d}'.format(downsample)])
        solvefield_args.extend(['-t', '{:d}'.format(SIPorder)])
        # Create temporary directory and write the image there so
        # solve-field's by-products land beside it.
        tdir = tempfile.mkdtemp()
        tfile = os.path.join(tdir, self.filename)
        ccdproc.fits_ccddata_writer(self.ccd, tfile)
        # run astrometry.net on the temporary fits file (-p: no plots)
        cmd = ['solve-field', '-p']
        cmd.extend(solvefield_args)
        if not self.header_pointing:
            self.get_header_pointing()
        self.log.info('Calculating astrometric solution')
        # Seed the solver with the header pointing (-3 RA / -4 Dec) when known.
        if self.header_pointing:
            cmd.extend(['-3', '{:.4f}'.format(self.header_pointing.ra.degree)])
            cmd.extend(['-4', '{:.4f}'.format(self.header_pointing.dec.degree)])
        self.log.info('  Calling: {}'.format(' '.join(cmd)))
        cmd.append(tfile)
        # Capture solver output to a file, then replay it at DEBUG level.
        with open(os.path.join(tdir, 'output.txt'), 'w') as output:
            fail = subprocess.call(cmd, stdout=output, stderr=output)
        with open(os.path.join(tdir, 'output.txt'), 'r') as output:
            lines = output.readlines()
        for line in lines:
            self.log.debug('  {}'.format(line.strip('\n')))
        rootname = os.path.splitext(self.filename)[0]
        solved_file = os.path.join(tdir, '{}.solved'.format(rootname))
        wcs_file = os.path.join(tdir, '{}.wcs'.format(rootname))
        new_wcs = None
        if not bool(fail):
            new_wcs = wcs.WCS(wcs_file)
            if new_wcs.is_celestial:
                self.ccd.wcs = new_wcs
                self.ccd.header.add_history('New WCS solved by astrometry.net')
                # Copy every solved WCS keyword (with comments) into the header.
                new_header = new_wcs.to_header(relax=True)
                for key in new_header.keys():
                    self.ccd.header.set(key, new_header[key],
                                        new_header.comments[key])
                self.get_wcs_pointing()
                self.log.info('  Pointing center: {} ({})'.format(
                              self.wcs_pointing.to_string('hmsdms', precision=1),
                              self.wcs_pointing.to_string('decimal', precision=4)))
            else:
                new_wcs = None
        else:
            self.log.warning('solve-field failed')
        # Cleanup temporary directory
        tfiles = glob(os.path.join(tdir, '*'))
        for tf in tfiles:
            os.remove(tf)
        os.rmdir(tdir)
        return new_wcs
    def get_wcs_pointing(self):
        '''
        Populate the `wcs_pointing` property with an
        `astropy.coordinates.SkyCoord` instance based on the information
        in the `ccd.wcs` property (the sky position of the image center).
        Returns the SkyCoord, or None when no WCS is available.
        '''
        equinox = self.config.get('Equinox', 2000.0)
        if equinox in self.ccd.header.keys():
            equinox = self.ccd.header[equinox]
        # NOTE(review): `equinox` is computed above but the SkyCoord below
        # hard-codes 'J2000' -- confirm which is intended.
        if not self.ccd.wcs:
            self.wcs_pointing = None
        else:
            coord_frame = self.config.get('CoordinateFrame', 'Fk5')
            # NOTE(review): numpy shape order is (rows, cols) = (ny, nx);
            # the (nx, ny) naming only matters for non-square detectors --
            # verify axis order before relying on it.
            nx, ny = self.ccd.data.shape
            r, d = self.ccd.wcs.all_pix2world([nx/2.], [ny/2.], 1)
            self.wcs_pointing = c.SkyCoord(r[0], d[0], frame=coord_frame,
                                           unit=(u.deg, u.deg),
                                           equinox='J2000',
                                           obstime=self.obstime)
        # Also derive the AltAz pointing when a site location is configured.
        if self.loc and self.wcs_pointing:
            self.wcs_altaz = self.wcs_pointing.transform_to(self.altazframe)
        return self.wcs_pointing
def calculate_pointing_error(self):
'''
Assming that the `header_pointing` extracted from the RA and DEC
FITS header keywords represents the intended pointing of the
image and that the `wcs_pointing` property represents the actual
pointing, calculate the pointing error magnitude.
'''
if not self.header_pointing:
self.get_header_pointing()
if not self.wcs_pointing:
self.get_wcs_pointing()
if self.header_pointing and self.wcs_pointing:
sep = self.header_pointing.separation(self.wcs_pointing)
self.log.info('Pointing error = {:.1f}'.format(sep.to(u.arcmin)))
return sep
def get_UCAC4(self):
'''
Use `astroquery` to get the UCAC4 catalog for this image from the
Vizier service.
'''
self.log.info('Get UCAC4 stars in field of view')
if not self.header_pointing:
self.get_header_pointing()
if not self.wcs_pointing:
self.get_wcs_pointing()
if self.wcs_pointing:
pointing = self.wcs_pointing
elif self.header_pointing:
pointing = self.wcs_pointing
else:
return None
from astroquery.vizier import Vizier
from astropy.coordinates import Angle
v = Vizier(columns=['_RAJ2000', '_DEJ2000','imag'],
column_filters={"imag":">0"})
v.ROW_LIMIT = 1e5
fp = self.ccd.wcs.calc_footprint(axes=self.ccd.data.shape)
dra = fp[:,0].max() - fp[:,0].min()
ddec = fp[:,1].max() - fp[:,1].min()
radius = np.sqrt((dra*np.cos(fp[:,1].mean()*np.pi/180.))**2 + ddec**2)/2.
catalog = v.query_region(self.header_pointing, catalog='I/322A',
radius=Angle(radius, "deg") )[0]
if self.ccd.wcs:
x, y = self.ccd.wcs.all_world2pix(catalog['_RAJ2000'],
catalog['_DEJ2000'], 1)
w = (x > 0) & (x < 4096) & (y > 0) & (y < 4096)
catalog = catalog[w]
self.log.info(' Found {:d} catalog stars'.format(len(catalog)))
self.UCAC4 = catalog
return catalog
    def subtract_background(self):
        '''
        Use `sep.Background` to generate a background model and then
        subtract it from the image.  The model is kept in `self.back` for
        later inspection, and the mesh/filter parameters used are recorded
        in the image HISTORY for provenance.
        '''
        self.log.info('Subtracting Background')
        # Background parameters: bw/bh = mesh size, fw/fh = filter size,
        # fthresh = filter threshold; all overridable via the config.
        sbc = self.config.get('Background', {})
        bkg = sep.Background(self.ccd.data, mask=self.ccd.mask,
                             bw=sbc.get('bw', 64),
                             bh=sbc.get('bh', 64),
                             fw=sbc.get('fw', 3),
                             fh=sbc.get('fh', 3),
                             fthresh=sbc.get('fthresh', 0))
        self.back = bkg
        self.ccd.data -= bkg.back()
        self.ccd.header.add_history('Background subtracted using SEP')
        self.ccd.header.add_history('  bw={:d},bh={:d},fw={:d},fh={:d}'.format(
                                    sbc.get('bw', 64), sbc.get('bh', 64),
                                    sbc.get('fw', 3), sbc.get('fh', 3) ))
        self.ccd.header.add_history('  fthresh={:f}'.format(sbc.get('fthresh',0)))
def extract(self):
'''
Wrapper around the `sep.extract` function.
'''
self.log.info('Extracting sources')
extract_config = self.config.get('Extract', {})
thresh = extract_config.get('thresh', 5)
minarea = extract_config.get('minarea', 5)
objects = sep.extract(self.ccd.data, err=self.ccd.uncertainty.array,
mask=self.ccd.mask,
thresh=float(thresh), minarea=minarea)
self.log.info(' Found {:d} sources'.format(len(objects)))
self.extracted = Table(objects)
next = len(self.extracted)
self.extracted.add_column(Column(data=[None]*next, name='RA', dtype=np.float))
self.extracted.add_column(Column(data=[None]*next, name='Dec', dtype=np.float))
self.extracted.add_column(Column(data=[None]*next, name='mag', dtype=np.float))
ny, nx = self.ccd.shape
r = np.sqrt((self.extracted['x']-nx/2.)**2 + (self.extracted['y']-ny/2.)**2)
self.extracted.add_column(Column(data=r.data, name='r', dtype=np.float))
return self.extracted
def associate(self, input, magkey='imag'):
'''
Associate entries from the input stellar catalog (e.g. UCAC4) with
the results from the `extract` method using a simple nearest neighbor
algorithm.
'''
if '_RAJ2000' in input.keys():
rakey = '_RAJ2000'
elif 'RA' in input.keys():
rakey = 'RA'
else:
rakey = None
if '_DEJ2000' in input.keys():
deckey = '_DEJ2000'
elif 'DEC' in input.keys():
deckey = 'DEC'
else:
deckey = None
if not rakey or not deckey:
self.log.warning('Could not parse input catalog table')
return None
assocconf = self.config.get('Assoc')
assoc_r = assocconf.get('radius', 3) # pixels
ny, nx = self.ccd.shape
for i,cstar in enumerate(input):
x, y = self.ccd.wcs.all_world2pix(cstar[rakey], cstar[deckey], 1)
dist = np.sqrt( (self.extracted['x']-x)**2 + (self.extracted['y']-y)**2 )
ncandidates = len(dist[dist < assoc_r])
if ncandidates > 0:
id = dist.argmin()
self.extracted[id]['RA'] = cstar[rakey]
self.extracted[id]['Dec'] = cstar[deckey]
self.extracted[id]['mag'] = cstar[magkey]
self.assoc = self.extracted[~np.isnan(self.extracted['RA'])]
self.log.info(' Associated {:d} extracted sources with catalog'.format(
len(self.assoc)))
return input
def test_astrometry(self, nradii=10, min_best_match=0.10, min_regional_match=0.50):
'''
'''
assert self.assoc
pass_astrometry = True
ny, nx = self.ccd.shape
radius = np.sqrt((nx/2.)**2 + (ny/2.)**2)
radii = np.linspace(0, radius, nradii+1)
frac = np.zeros(nradii)
for i in range(nradii):
n_assoc = len(self.assoc[(self.assoc['r'] > radii[i])\
& (self.assoc['r'] <= radii[i+1])])
n_detected = len(self.extracted[(self.extracted['r'] > radii[i])\
& (self.extracted['r'] <= radii[i+1])])
frac[i] = float(n_assoc)/float(n_detected)
AQM = frac/max(frac)
self.log.info('Astrometery verificaton metrics:')
best_match = max(frac)
self.log.info(' Best match = {:.1f} %'.format(best_match*100.))
if best_match < min_best_match:
pass_astrometry = False
self.log.warning('Astrometry test failed.')
self.log.warning('Best region absolute success rate < {:.1f} %'.format(min_best_match*100.))
for i,val in enumerate(AQM):
self.log.info(' AQM ({:d}) = {:.2f}'.format(i, val))
if min(AQM) < min_regional_match:
pass_astrometry = False
self.log.warning('Astrometry test failed.')
self.log.warning('Worst region relative success rate < {:.1f} %'.format(min_regional_match*100.))
return pass_astrometry
def calculate_zero_point(self):
'''
Estimate the photometric zero point of the image using the associated
catalog. Find the mean difference between instrumental magnitude and
catalog magnitude.
'''
if not self.assoc:
return None
instmag = -2.512*np.log10(self.assoc['flux'])
diffs = instmag - self.assoc['mag']
from scipy import stats
zp = np.mean(stats.sigmaclip(diffs, low=5.0, high=5.0)[0])
print(zp, np.std(diffs), np.std(diffs)/np.sqrt(len(diffs)))
    def render_jpeg(self, jpegfilename=None, binning=1, radius=6,
                    overplot_UCAC4=False, overplot_extracted=False,
                    overplot_assoc=False, overplot_pointing=False):
        '''
        Render a jpeg of the image with optional overlays.

        Parameters
        ----------
        jpegfilename : output path; defaults to "<fileroot>.jpg"
        binning : display downsampling factor (figure is shrunk by this)
        radius : marker circle radius in pixels
        overplot_UCAC4 : draw blue circles at UCAC4 catalog positions
        overplot_extracted : draw red circles at extracted-source positions
        overplot_assoc : draw green circles at catalog-matched positions
        overplot_pointing : draw center lines and a crosshair at the
            header pointing (requires a celestial WCS)
        '''
        self.log.info('Render JPEG of image to {}'.format(jpegfilename))
        if not jpegfilename:
            jpegfilename = '{}.jpg'.format(self.fileroot)
        import matplotlib as mpl
        from matplotlib import pyplot as plt
        # Display stretch: clip at the 0.5 and 99.0 percentiles.
        vmin = np.percentile(self.ccd.data, 0.5)
        vmax = np.percentile(self.ccd.data, 99.0)
        dpi=72
        # NOTE(review): numpy shape is (rows, cols) = (ny, nx); this (nx, ny)
        # naming only matters for non-square detectors -- verify axis order.
        nx, ny = self.ccd.data.shape
        sx = nx/dpi/binning
        sy = ny/dpi/binning
        fig = plt.figure(figsize=(sx, sy), dpi=dpi)
        ax = fig.gca()
        # Masked pixels are painted red via the palette's "bad" color.
        mdata = np.ma.MaskedArray(self.ccd.data, mask=self.ccd.mask)
        palette = plt.cm.gray
        palette.set_bad('r', 1.0)
        plt.imshow(mdata, cmap=palette, vmin=vmin, vmax=vmax)
        plt.xticks([])
        plt.yticks([])
        # NOTE(review): the loop variable `c` below shadows the module-level
        # astropy.coordinates alias `c` within this method (harmless here,
        # but worth renaming).
        if overplot_UCAC4:
            self.log.info('  Overlaying UCAC4 catalog stars')
            if not self.UCAC4:
                self.get_UCAC4()
            x, y = self.ccd.wcs.all_world2pix(self.UCAC4['_RAJ2000'],
                                              self.UCAC4['_DEJ2000'], 1)
            for xy in zip(x, y):
                c = plt.Circle(xy, radius=radius, edgecolor='b', facecolor='none')
                ax.add_artist(c)
        if overplot_extracted:
            self.log.info('  Overlaying extracted stars')
            x, y = self.ccd.wcs.all_world2pix(self.extracted['RA'],
                                              self.extracted['Dec'], 1)
            for xy in zip(x, y):
                c = plt.Circle(xy, radius=radius, edgecolor='r', facecolor='none')
                ax.add_artist(c)
        if overplot_assoc:
            self.log.info('  Overlaying associated stars')
            x, y = self.ccd.wcs.all_world2pix(self.assoc['RA'],
                                              self.assoc['Dec'], 1)
            for xy in zip(x, y):
                c = plt.Circle(xy, radius=radius, edgecolor='g', facecolor='none')
                ax.add_artist(c)
        if overplot_pointing:
            # Pointing overlay requires a celestial WCS; bail out otherwise.
            if not self.ccd.wcs.is_celestial:
                return None
            if not self.header_pointing:
                self.get_header_pointing()
            if self.header_pointing:
                x, y = self.ccd.wcs.all_world2pix(self.header_pointing.ra.degree,
                                                  self.header_pointing.dec.degree, 1)
                # Yellow center lines mark the image midpoint.
                plt.plot([0,nx], [ny/2,ny/2], 'y-', alpha=0.7)
                plt.plot([nx/2, nx/2], [0,ny], 'y-', alpha=0.7)
                # Draw crosshair on target
                ms = radius*6
                c = plt.Circle((x, y), radius=ms, edgecolor='g', alpha=0.7,
                               facecolor='none')
                ax.add_artist(c)
                plt.plot([x, x], [y+0.6*ms, y+1.4*ms], 'g', alpha=0.7)
                plt.plot([x, x], [y-0.6*ms, y-1.4*ms], 'g', alpha=0.7)
                plt.plot([x-0.6*ms, x-1.4*ms], [y, y], 'g', alpha=0.7)
                plt.plot([x+0.6*ms, x+1.4*ms], [y, y], 'g', alpha=0.7)
        plt.savefig(jpegfilename, dpi=dpi)
| 1.78125 | 2 |
app/create_newsletter.py | cheevahagadog/barker | 12 | 12771146 | <gh_stars>10-100
# -*- coding:utf-8 -*-
"""Fetches bookmark data and renders the email HTML file"""
from jinja2 import Environment, FileSystemLoader
import pandas as pd
import requests
from bs4 import BeautifulSoup as bs
from textblob import TextBlob
import datetime
from gensim.summarization import summarize
import config
from app.db_interface import DBInterface
class NewsletterTemplate(object):
    """Uses bookmark data to produce the newsletter HTML file."""
    def __init__(self):
        # Database access layer for the stored bookmarks.
        self.dbi = DBInterface()
        # Jinja2 environment rooted at the app's template directory.
        self.env = Environment(loader=FileSystemLoader('app/templates/'))
        self.template = self.env.get_template("skeleton.html")
        # Full bookmark table, loaded once at construction time.
        self.all_bookmarks_df = self.dbi.load_data_from_table('bookmarks')
def filter_and_select_links(self, use_recent=False, verbose=True) -> (bool, pd.DataFrame):
"""Pulling from all bookmarks, filters out folders the user doesn't want to show, and/or draws a random
number of links to fulfill the user's desired number of links"""
df = self.all_bookmarks_df
original_len = filtered_len = len(df)
if config.BOOKMARK_BAR_FOCUS_FOLDERS or config.OTHER_FOCUS_FOLDERS:
df = df[(df['bookmark_bar_parent'].isin(config.BOOKMARK_BAR_FOCUS_FOLDERS))
| (df['immediate_parent'].isin(config.OTHER_FOCUS_FOLDERS))]
filtered_len = len(df)
df = df[~df['url'].str.contains('|'.join(config.URL_STEMS_TO_EXCLUDE))]
url_filtered_len = len(df)
if url_filtered_len >= config.NUM_LINKS_TO_INCLUDE:
if use_recent:
df.sort_values(['date_added', 'date_modified'], ascending=[False, False], inplace=True)
df.reset_index(drop=True, inplace=True)
return True, df.ix[0:config.NUM_LINKS_TO_INCLUDE]
else:
return True, df.sample(config.NUM_LINKS_TO_INCLUDE)
else:
if verbose:
print("Of the {0} bookmarks found, {1} were removed by focus folders and {2} were removed by URL stems"
.format(original_len, original_len - filtered_len, original_len - url_filtered_len))
print("Unable to find {} bookmarks".format(config.NUM_LINKS_TO_INCLUDE))
return False, None
@staticmethod
def get_page_summary(title, url, verbose=False) -> str:
"""Request HTML of a bookmarked link and creates a summary to be used in the newsletter
Args:
title: str, bookmark title as saved in your bookmarks
url: str, link in bookmark
verbose: bool, prints out helpful debugging feedback, default= False
Returns:
Summary text of bookmarked link
"""
r = requests.get(url)
if verbose:
print('requesting web page data...')
if r.status_code == 200:
soup = bs(r.text, "html5lib")
for script in soup.find_all('script'):
script.extract()
for code in soup.find_all('code'):
code.extract()
paragraphs = soup.find_all('p')[0:10]
text = [paragraph.get_text() for paragraph in paragraphs]
text = list(filter(None, text))
blob = TextBlob(" ".join(text))
tokenized_text = [str(sentence) for sentence in blob.sentences]
if len(tokenized_text) == 1:
if verbose:
print("One sentence found for \t {}".format(title))
return " ".join(tokenized_text)
elif not tokenized_text:
if verbose:
print("No text found for \t {}".format(title))
return title
elif len(tokenized_text) <= 5:
if verbose:
print("LTE 5 sentences found for \t {}".format(title))
return " ".join(tokenized_text)
else:
try:
ntext = summarize(" ".join(tokenized_text), word_count=50)
if verbose:
print("summarizing text for \t {}...".format(title))
return ntext
except:
return " ".join(tokenized_text)[:150] + " ..."
else:
if verbose:
print("bad request for \t {}".format(title))
return title
def creating_link_info(self, df, verbose=True) -> dict:
"""Creates the bookmark data so it's ready for use in the HTML template.
Args:
df: pandas Dataframe of selected bookmarks
verbose: bool, default=True
Returns:
dictionary of bookmark title:
"""
bookmark_dict = df.set_index('name')['url'].to_dict()
if verbose:
print("Summarizing bookmark pages...")
bookmark_dict = {k: [self.get_page_summary(k, v), v] for k, v in bookmark_dict.items()}
return bookmark_dict
    @staticmethod
    def get_upcoming_meetup_calendar_events(params, verbose=True) -> (bool, list):
        """Fetch the user's upcoming Meetup calendar events.

        Args:
            params: query parameters for the Meetup API (state, key, page,
                order) -- see create_meetup_info.
            verbose: print the number of events found, default=True

        Returns:
            (True, list of event dicts with date/group_name/event_name/link)
            on HTTP 200, otherwise (False, []).
        """
        r = requests.get("http://api.meetup.com/self/calendar", params=params)
        if r.status_code == 200:
            meetups = []
            for i in r.json():
                # Keep only public events from groups not excluded in config.
                if i['group']['urlname'] not in config.DO_NOT_INCLUDE_GROUPS and i['visibility'] == 'public':
                    # API timestamps are milliseconds since the epoch;
                    # fromtimestamp() renders them in the local timezone.
                    time = int(i['time']) / 1000
                    time_obj = datetime.datetime.fromtimestamp(time)
                    # NOTE(review): %-d and %-I are glibc-only strftime
                    # extensions and raise on Windows -- confirm target OS.
                    meetups.append({"date": time_obj.strftime('%A %b %-d %-I%p:'),
                                    "group_name": i['group']['name'],
                                    "event_name": i['name'],
                                    "link": i['link']})
            if verbose:
                print("Found {} meetups to add to the newsletter".format(len(meetups)))
            return True, meetups
        else:
            print("Failed to connect to Meetup API with code: {}".format(r.status_code))
            return False, []
def create_meetup_info(self, verbose=True) -> (bool, list):
"""Creates meetup information ready for the HTML template.
Args:
verbose: bool, default=True
Returns:
list of meetup event data
"""
params = {"state": config.STATE, "key": config.MEETUP_API_KEY, "page": 10, "order": 'time'}
if verbose:
print("Getting upcoming meetup info...")
success, meetup_data = self.get_upcoming_meetup_calendar_events(params)
if success:
return True, meetup_data
else:
return False, []
def fill_html_template(self, bookmark_dict, meetup_data):
"""Render HTML template with bookmark links, personal and meetup data"""
html = self.template.render(bookmark_data=bookmark_dict, page_data=config.PERSONAL_DATA, meetup_data=meetup_data)
return html
    def main(self):
        """End-to-end newsletter build: select links, summarize, render,
        and write app/templates/newsletter.html.

        Returns True when the newsletter HTML was written, False when not
        enough bookmarks were available.
        """
        success, filtered_df = self.filter_and_select_links(use_recent=config.USE_MOST_RECENT_BOOKMARKS)
        if success:
            bookmark_dict = self.creating_link_info(df=filtered_df, verbose=config.VERBOSE)
            # NOTE(review): `success` is overwritten here, so a meetup-API
            # failure still renders the newsletter (with an empty meetup
            # section) and main() still returns True -- confirm intended.
            success, meetup_data = self.create_meetup_info()
            html = self.fill_html_template(bookmark_dict, meetup_data)
            with open('app/templates/newsletter.html', 'w') as f:
                f.write(html)
            if config.VERBOSE:
                print("newsletter html updated!")
            return True
        else:
            return False
if __name__ == '__main__':
    # Build and write the newsletter when run as a script.
    NewsletterTemplate().main()
| 2.78125 | 3 |
milkviz/__init__.py | Mr-Milk/milkviz | 0 | 12771147 | <reponame>Mr-Milk/milkviz<filename>milkviz/__init__.py
from ._bubble import bubble
from ._cell_map import point_map, polygon_map
from ._clustermap import anno_clustermap
from ._dot import dot
from ._dot_matrix import dot_heatmap
from ._graph import graph
from ._stacked_bar import stacked_bar
from ._upset import upset
from ._venn import venn
from .utils import mask_triu
| 0.96875 | 1 |
masar_cheques/overrides/overrides.py | karamakcsc/masar_cheques | 0 | 12771148 | import frappe
import erpnext
from frappe import throw, msgprint, _
from erpnext.accounts.doctype.payment_entry.payment_entry import PaymentEntry
def custom_add_bank_gl_entries(self, gl_entries):
    """Cheque-aware replacement for PaymentEntry.add_bank_gl_entries.

    When the mode of payment is "Cheque", one GL entry is appended per row
    of the `payment_cheques` child table (crediting the cheque's source
    account for Pay, debiting its target account for Receive, with the
    cheque number in the remarks).  For any other mode of payment the
    standard single bank-side entry is produced, matching ERPNext's stock
    behavior.  `self` is the PaymentEntry document.
    """
    if self.mode_of_payment == "Cheque":
        if self.payment_type in ("Pay", "Internal Transfer"):
            for d in self.get("payment_cheques"):
                gl_entries.append(
                    self.get_gl_dict({
                        "account": d.paid_from,
                        # NOTE(review): Pay uses the document-level
                        # currency while Receive below uses the per-cheque
                        # currency -- confirm this asymmetry is intended.
                        "account_currency": self.paid_from_account_currency,
                        "against": self.party if self.payment_type == "Pay" else self.paid_to,
                        # NOTE(review): credit == credit_in_account_currency
                        # with no exchange-rate conversion -- verify for
                        # foreign-currency cheques.
                        "credit_in_account_currency": d.cheque_amount,
                        "credit": d.cheque_amount,
                        "cost_center": self.cost_center,
                        "remarks": "Cheque No " + d.cheque_no
                    }, item=self)
                )
        if self.payment_type in ("Receive", "Internal Transfer"):
            for d in self.get("payment_cheques"):
                gl_entries.append(
                    self.get_gl_dict({
                        "account": d.paid_to,
                        "account_currency": d.paid_to_account_currency,
                        "against": self.party if self.payment_type == "Receive" else self.paid_from,
                        "debit_in_account_currency": d.cheque_amount,
                        "debit": d.cheque_amount,
                        "cost_center": self.cost_center,
                        "remarks": "Cheque No " + d.cheque_no
                    }, item=self)
                )
    else:
        # Non-cheque payments: single bank-side entry per direction, using
        # the document-level amounts (base_* fields are company currency).
        if self.payment_type in ("Pay", "Internal Transfer"):
            gl_entries.append(
                self.get_gl_dict({
                    "account": self.paid_from,
                    "account_currency": self.paid_from_account_currency,
                    "against": self.party if self.payment_type == "Pay" else self.paid_to,
                    "credit_in_account_currency": self.paid_amount,
                    "credit": self.base_paid_amount,
                    "cost_center": self.cost_center
                }, item=self)
            )
        if self.payment_type in ("Receive", "Internal Transfer"):
            gl_entries.append(
                self.get_gl_dict({
                    "account": self.paid_to,
                    "account_currency": self.paid_to_account_currency,
                    "against": self.party if self.payment_type=="Receive" else self.paid_from,
                    "debit_in_account_currency": self.received_amount,
                    "debit": self.base_received_amount,
                    "cost_center": self.cost_center
                }, item=self)
            )
def override_payment_entry():
    """Monkey-patch PaymentEntry so its bank GL entries are cheque-aware."""
    PaymentEntry.add_bank_gl_entries = custom_add_bank_gl_entries
def execute_override(self, method):
    """Frappe hook entry point that installs the PaymentEntry override.

    `self` (the document) and `method` (the event name) are supplied by
    Frappe's hook dispatcher and are intentionally unused here.
    """
    override_payment_entry()
| 2.015625 | 2 |
python-real-time-application/views/base_project_main_qt4.py | italogsfernandes/emg-moviments-classifier | 2 | 12771149 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'layouts/base_project_main.ui'
#
# Created by: PyQt4 UI code generator 4.12.1
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
# Compatibility shims emitted by pyuic4: on PyQt4 API v2 / Python 3,
# QString and UnicodeUTF8 no longer exist, so plain passthrough versions
# of the helpers are used instead.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        # QString is gone; strings are already unicode.
        return s

try:
    _encoding = QtGui.QApplication.UnicodeUTF8

    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        # Newer API: translate() takes no encoding argument.
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
    """pyuic4-generated layout for the application's main window.

    Generated from 'layouts/base_project_main.ui' (see file header): do not
    edit by hand -- regenerate from the .ui file instead, as manual changes
    are lost on the next generation.
    """

    def setupUi(self, MainWindow):
        """Create and arrange every widget of *MainWindow*.

        Builds six tabs (EMG, Features, Classification, manual control,
        processing placeholder, general settings), the menu/status bars and
        a dockable options panel, then delegates all user-visible strings to
        :meth:`retranslateUi`.
        """
        # --- main window and central tab container -----------------------
        MainWindow.setObjectName(_fromUtf8("MainWindow"))
        MainWindow.resize(792, 600)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
        MainWindow.setSizePolicy(sizePolicy)
        MainWindow.setMinimumSize(QtCore.QSize(0, 0))
        MainWindow.setMaximumSize(QtCore.QSize(16777215, 16777215))
        self.centralwidget = QtGui.QWidget(MainWindow)
        self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
        self.horizontalLayout = QtGui.QHBoxLayout(self.centralwidget)
        self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
        self.tabWidget = QtGui.QTabWidget(self.centralwidget)
        self.tabWidget.setObjectName(_fromUtf8("tabWidget"))
        # --- tab 0: EMG --------------------------------------------------
        self.tab_EMG = QtGui.QWidget()
        self.tab_EMG.setObjectName(_fromUtf8("tab_EMG"))
        self.horizontalLayout_2 = QtGui.QHBoxLayout(self.tab_EMG)
        self.horizontalLayout_2.setMargin(0)
        self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
        self.verticalLayoutGraphStatus = QtGui.QVBoxLayout()
        self.verticalLayoutGraphStatus.setObjectName(_fromUtf8("verticalLayoutGraphStatus"))
        self.verticalLayoutGraph = QtGui.QVBoxLayout()
        self.verticalLayoutGraph.setObjectName(_fromUtf8("verticalLayoutGraph"))
        # Placeholder label ("Here will be the chart"); presumably replaced
        # by a live plot widget at runtime -- confirm in the controller code.
        self.label_replace = QtGui.QLabel(self.tab_EMG)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.label_replace.sizePolicy().hasHeightForWidth())
        self.label_replace.setSizePolicy(sizePolicy)
        self.label_replace.setObjectName(_fromUtf8("label_replace"))
        self.verticalLayoutGraph.addWidget(self.label_replace)
        self.verticalLayoutGraphStatus.addLayout(self.verticalLayoutGraph)
        self.horizontalLayout_3 = QtGui.QHBoxLayout()
        self.horizontalLayout_3.setContentsMargins(-1, 0, -1, -1)
        self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
        self.lbl_status = QtGui.QLabel(self.tab_EMG)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.lbl_status.sizePolicy().hasHeightForWidth())
        self.lbl_status.setSizePolicy(sizePolicy)
        self.lbl_status.setObjectName(_fromUtf8("lbl_status"))
        self.horizontalLayout_3.addWidget(self.lbl_status)
        self.cb_chart_emg_on_off = QtGui.QCheckBox(self.tab_EMG)
        self.cb_chart_emg_on_off.setChecked(True)  # chart enabled by default
        self.cb_chart_emg_on_off.setObjectName(_fromUtf8("cb_chart_emg_on_off"))
        self.horizontalLayout_3.addWidget(self.cb_chart_emg_on_off)
        self.verticalLayoutGraphStatus.addLayout(self.horizontalLayout_3)
        self.horizontalLayout_2.addLayout(self.verticalLayoutGraphStatus)
        self.tabWidget.addTab(self.tab_EMG, _fromUtf8(""))
        # --- tab 1: Features ---------------------------------------------
        self.tab_features = QtGui.QWidget()
        self.tab_features.setObjectName(_fromUtf8("tab_features"))
        self.horizontalLayout_5 = QtGui.QHBoxLayout(self.tab_features)
        self.horizontalLayout_5.setMargin(0)
        self.horizontalLayout_5.setObjectName(_fromUtf8("horizontalLayout_5"))
        self.verticalLayoutGraphStatus_features = QtGui.QVBoxLayout()
        self.verticalLayoutGraphStatus_features.setObjectName(_fromUtf8("verticalLayoutGraphStatus_features"))
        self.verticalLayoutGraph_features = QtGui.QVBoxLayout()
        self.verticalLayoutGraph_features.setObjectName(_fromUtf8("verticalLayoutGraph_features"))
        self.horizontalLayout_features = QtGui.QHBoxLayout()
        self.horizontalLayout_features.setContentsMargins(-1, 0, -1, -1)
        self.horizontalLayout_features.setObjectName(_fromUtf8("horizontalLayout_features"))
        self.label_features = QtGui.QLabel(self.tab_features)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.label_features.sizePolicy().hasHeightForWidth())
        self.label_features.setSizePolicy(sizePolicy)
        self.label_features.setObjectName(_fromUtf8("label_features"))
        self.horizontalLayout_features.addWidget(self.label_features)
        # Feature selector combo; items are presumably populated at runtime
        # (none are added here).
        self.cb_features = QtGui.QComboBox(self.tab_features)
        self.cb_features.setEditable(False)
        self.cb_features.setObjectName(_fromUtf8("cb_features"))
        self.horizontalLayout_features.addWidget(self.cb_features)
        self.checkBox = QtGui.QCheckBox(self.tab_features)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.checkBox.sizePolicy().hasHeightForWidth())
        self.checkBox.setSizePolicy(sizePolicy)
        self.checkBox.setObjectName(_fromUtf8("checkBox"))
        self.horizontalLayout_features.addWidget(self.checkBox)
        self.verticalLayoutGraph_features.addLayout(self.horizontalLayout_features)
        self.label_replace_features = QtGui.QLabel(self.tab_features)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.label_replace_features.sizePolicy().hasHeightForWidth())
        self.label_replace_features.setSizePolicy(sizePolicy)
        self.label_replace_features.setObjectName(_fromUtf8("label_replace_features"))
        self.verticalLayoutGraph_features.addWidget(self.label_replace_features)
        self.verticalLayoutGraphStatus_features.addLayout(self.verticalLayoutGraph_features)
        self.horizontalLayout_status_features = QtGui.QHBoxLayout()
        self.horizontalLayout_status_features.setContentsMargins(-1, 0, -1, -1)
        self.horizontalLayout_status_features.setObjectName(_fromUtf8("horizontalLayout_status_features"))
        self.lbl_status_features = QtGui.QLabel(self.tab_features)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.lbl_status_features.sizePolicy().hasHeightForWidth())
        self.lbl_status_features.setSizePolicy(sizePolicy)
        self.lbl_status_features.setObjectName(_fromUtf8("lbl_status_features"))
        self.horizontalLayout_status_features.addWidget(self.lbl_status_features)
        self.cb_chart_features_on_off = QtGui.QCheckBox(self.tab_features)
        self.cb_chart_features_on_off.setChecked(True)
        self.cb_chart_features_on_off.setObjectName(_fromUtf8("cb_chart_features_on_off"))
        self.horizontalLayout_status_features.addWidget(self.cb_chart_features_on_off)
        self.verticalLayoutGraphStatus_features.addLayout(self.horizontalLayout_status_features)
        self.horizontalLayout_5.addLayout(self.verticalLayoutGraphStatus_features)
        self.tabWidget.addTab(self.tab_features, _fromUtf8(""))
        # --- tab 2: Classification ---------------------------------------
        self.tab_classification = QtGui.QWidget()
        self.tab_classification.setObjectName(_fromUtf8("tab_classification"))
        self.horizontalLayout_6 = QtGui.QHBoxLayout(self.tab_classification)
        self.horizontalLayout_6.setMargin(0)
        self.horizontalLayout_6.setObjectName(_fromUtf8("horizontalLayout_6"))
        self.graphicsView_classification = QtGui.QGraphicsView(self.tab_classification)
        self.graphicsView_classification.setObjectName(_fromUtf8("graphicsView_classification"))
        self.horizontalLayout_6.addWidget(self.graphicsView_classification)
        self.tabWidget.addTab(self.tab_classification, _fromUtf8(""))
        # --- tab 3: manual control (one 0..90 slider per finger; group
        # titles suggest the value is a servo angle in degrees) ------------
        self.tab_controle_manuel = QtGui.QWidget()
        self.tab_controle_manuel.setObjectName(_fromUtf8("tab_controle_manuel"))
        self.horizontalLayout_4 = QtGui.QHBoxLayout(self.tab_controle_manuel)
        self.horizontalLayout_4.setMargin(0)
        self.horizontalLayout_4.setObjectName(_fromUtf8("horizontalLayout_4"))
        self.verticalLayout_5 = QtGui.QVBoxLayout()
        self.verticalLayout_5.setObjectName(_fromUtf8("verticalLayout_5"))
        self.verticalLayout_6 = QtGui.QVBoxLayout()
        self.verticalLayout_6.setContentsMargins(-1, 0, -1, 0)
        self.verticalLayout_6.setObjectName(_fromUtf8("verticalLayout_6"))
        self.groupBox_f1 = QtGui.QGroupBox(self.tab_controle_manuel)
        self.groupBox_f1.setObjectName(_fromUtf8("groupBox_f1"))
        self.verticalLayout_11 = QtGui.QVBoxLayout(self.groupBox_f1)
        self.verticalLayout_11.setObjectName(_fromUtf8("verticalLayout_11"))
        self.h_slider_1 = QtGui.QSlider(self.groupBox_f1)
        self.h_slider_1.setMaximum(90)
        self.h_slider_1.setOrientation(QtCore.Qt.Horizontal)
        self.h_slider_1.setObjectName(_fromUtf8("h_slider_1"))
        self.verticalLayout_11.addWidget(self.h_slider_1)
        self.verticalLayout_6.addWidget(self.groupBox_f1)
        self.groupBox_f2 = QtGui.QGroupBox(self.tab_controle_manuel)
        self.groupBox_f2.setObjectName(_fromUtf8("groupBox_f2"))
        self.verticalLayout_8 = QtGui.QVBoxLayout(self.groupBox_f2)
        self.verticalLayout_8.setObjectName(_fromUtf8("verticalLayout_8"))
        self.h_slider_2 = QtGui.QSlider(self.groupBox_f2)
        self.h_slider_2.setMaximum(90)
        self.h_slider_2.setOrientation(QtCore.Qt.Horizontal)
        self.h_slider_2.setObjectName(_fromUtf8("h_slider_2"))
        self.verticalLayout_8.addWidget(self.h_slider_2)
        self.verticalLayout_6.addWidget(self.groupBox_f2)
        self.groupBox_f3 = QtGui.QGroupBox(self.tab_controle_manuel)
        self.groupBox_f3.setObjectName(_fromUtf8("groupBox_f3"))
        self.verticalLayout_9 = QtGui.QVBoxLayout(self.groupBox_f3)
        self.verticalLayout_9.setObjectName(_fromUtf8("verticalLayout_9"))
        self.h_slider_3 = QtGui.QSlider(self.groupBox_f3)
        self.h_slider_3.setMaximum(90)
        self.h_slider_3.setOrientation(QtCore.Qt.Horizontal)
        self.h_slider_3.setObjectName(_fromUtf8("h_slider_3"))
        self.verticalLayout_9.addWidget(self.h_slider_3)
        self.verticalLayout_6.addWidget(self.groupBox_f3)
        self.groupBox_f4 = QtGui.QGroupBox(self.tab_controle_manuel)
        self.groupBox_f4.setObjectName(_fromUtf8("groupBox_f4"))
        self.verticalLayout_10 = QtGui.QVBoxLayout(self.groupBox_f4)
        self.verticalLayout_10.setObjectName(_fromUtf8("verticalLayout_10"))
        self.h_slider_4 = QtGui.QSlider(self.groupBox_f4)
        self.h_slider_4.setMaximum(90)
        self.h_slider_4.setOrientation(QtCore.Qt.Horizontal)
        self.h_slider_4.setObjectName(_fromUtf8("h_slider_4"))
        self.verticalLayout_10.addWidget(self.h_slider_4)
        self.verticalLayout_6.addWidget(self.groupBox_f4)
        self.groupBox_f5 = QtGui.QGroupBox(self.tab_controle_manuel)
        self.groupBox_f5.setObjectName(_fromUtf8("groupBox_f5"))
        self.verticalLayout_4 = QtGui.QVBoxLayout(self.groupBox_f5)
        self.verticalLayout_4.setObjectName(_fromUtf8("verticalLayout_4"))
        self.h_slider_5 = QtGui.QSlider(self.groupBox_f5)
        self.h_slider_5.setMaximum(90)
        self.h_slider_5.setOrientation(QtCore.Qt.Horizontal)
        self.h_slider_5.setObjectName(_fromUtf8("h_slider_5"))
        self.verticalLayout_4.addWidget(self.h_slider_5)
        self.verticalLayout_6.addWidget(self.groupBox_f5)
        # "General" button row: laser toggle, open/close hand, send position.
        self.groupBox_4 = QtGui.QGroupBox(self.tab_controle_manuel)
        self.groupBox_4.setObjectName(_fromUtf8("groupBox_4"))
        self.horizontalLayout_10 = QtGui.QHBoxLayout(self.groupBox_4)
        self.horizontalLayout_10.setObjectName(_fromUtf8("horizontalLayout_10"))
        self.btn_laser = QtGui.QPushButton(self.groupBox_4)
        self.btn_laser.setObjectName(_fromUtf8("btn_laser"))
        self.horizontalLayout_10.addWidget(self.btn_laser)
        self.btn_reset_position = QtGui.QPushButton(self.groupBox_4)
        self.btn_reset_position.setObjectName(_fromUtf8("btn_reset_position"))
        self.horizontalLayout_10.addWidget(self.btn_reset_position)
        self.btn_close_position = QtGui.QPushButton(self.groupBox_4)
        self.btn_close_position.setObjectName(_fromUtf8("btn_close_position"))
        self.horizontalLayout_10.addWidget(self.btn_close_position)
        self.btn_send_position = QtGui.QPushButton(self.groupBox_4)
        self.btn_send_position.setObjectName(_fromUtf8("btn_send_position"))
        self.horizontalLayout_10.addWidget(self.btn_send_position)
        self.verticalLayout_6.addWidget(self.groupBox_4)
        # "Extra" preset-gesture buttons plus a final-movement combo box.
        self.groupBox_3 = QtGui.QGroupBox(self.tab_controle_manuel)
        self.groupBox_3.setObjectName(_fromUtf8("groupBox_3"))
        self.verticalLayout_14 = QtGui.QVBoxLayout(self.groupBox_3)
        self.verticalLayout_14.setContentsMargins(-1, -1, -1, 9)
        self.verticalLayout_14.setObjectName(_fromUtf8("verticalLayout_14"))
        self.horizontalLayout_14 = QtGui.QHBoxLayout()
        self.horizontalLayout_14.setContentsMargins(-1, 0, -1, -1)
        self.horizontalLayout_14.setObjectName(_fromUtf8("horizontalLayout_14"))
        self.pushButton_forhonnor = QtGui.QPushButton(self.groupBox_3)
        self.pushButton_forhonnor.setObjectName(_fromUtf8("pushButton_forhonnor"))
        self.horizontalLayout_14.addWidget(self.pushButton_forhonnor)
        self.pushButton_pointer = QtGui.QPushButton(self.groupBox_3)
        self.pushButton_pointer.setObjectName(_fromUtf8("pushButton_pointer"))
        self.horizontalLayout_14.addWidget(self.pushButton_pointer)
        self.pushButton_spiderman = QtGui.QPushButton(self.groupBox_3)
        self.pushButton_spiderman.setObjectName(_fromUtf8("pushButton_spiderman"))
        self.horizontalLayout_14.addWidget(self.pushButton_spiderman)
        self.pushButton_comein = QtGui.QPushButton(self.groupBox_3)
        self.pushButton_comein.setObjectName(_fromUtf8("pushButton_comein"))
        self.horizontalLayout_14.addWidget(self.pushButton_comein)
        self.verticalLayout_14.addLayout(self.horizontalLayout_14)
        self.horizontalLayout_16 = QtGui.QHBoxLayout()
        self.horizontalLayout_16.setContentsMargins(-1, -1, -1, 0)
        self.horizontalLayout_16.setObjectName(_fromUtf8("horizontalLayout_16"))
        self.pushButton_v = QtGui.QPushButton(self.groupBox_3)
        self.pushButton_v.setObjectName(_fromUtf8("pushButton_v"))
        self.horizontalLayout_16.addWidget(self.pushButton_v)
        self.pushButton_extra6 = QtGui.QPushButton(self.groupBox_3)
        self.pushButton_extra6.setObjectName(_fromUtf8("pushButton_extra6"))
        self.horizontalLayout_16.addWidget(self.pushButton_extra6)
        self.pushButton_extra7 = QtGui.QPushButton(self.groupBox_3)
        self.pushButton_extra7.setObjectName(_fromUtf8("pushButton_extra7"))
        self.horizontalLayout_16.addWidget(self.pushButton_extra7)
        self.comboBox_movimentfinal = QtGui.QComboBox(self.groupBox_3)
        self.comboBox_movimentfinal.setObjectName(_fromUtf8("comboBox_movimentfinal"))
        self.horizontalLayout_16.addWidget(self.comboBox_movimentfinal)
        self.verticalLayout_14.addLayout(self.horizontalLayout_16)
        self.verticalLayout_6.addWidget(self.groupBox_3)
        self.verticalLayout_5.addLayout(self.verticalLayout_6)
        self.horizontalLayout_4.addLayout(self.verticalLayout_5)
        self.tabWidget.addTab(self.tab_controle_manuel, _fromUtf8(""))
        # --- tab 4: "Processing" (empty placeholder page) -----------------
        self.tab_2 = QtGui.QWidget()
        self.tab_2.setObjectName(_fromUtf8("tab_2"))
        self.tabWidget.addTab(self.tab_2, _fromUtf8(""))
        # --- tab 5: general settings (serial-port pickers and Go button) --
        self.tab_settings = QtGui.QWidget()
        self.tab_settings.setObjectName(_fromUtf8("tab_settings"))
        self.verticalLayout_7 = QtGui.QVBoxLayout(self.tab_settings)
        self.verticalLayout_7.setMargin(0)
        self.verticalLayout_7.setObjectName(_fromUtf8("verticalLayout_7"))
        self.horizontalLayout_11 = QtGui.QHBoxLayout()
        self.horizontalLayout_11.setContentsMargins(0, -1, -1, -1)
        self.horizontalLayout_11.setObjectName(_fromUtf8("horizontalLayout_11"))
        self.groupBox_10 = QtGui.QGroupBox(self.tab_settings)
        self.groupBox_10.setObjectName(_fromUtf8("groupBox_10"))
        self.horizontalLayout_13 = QtGui.QHBoxLayout(self.groupBox_10)
        self.horizontalLayout_13.setObjectName(_fromUtf8("horizontalLayout_13"))
        self.cb_in_serial_port = QtGui.QComboBox(self.groupBox_10)
        self.cb_in_serial_port.setObjectName(_fromUtf8("cb_in_serial_port"))
        self.horizontalLayout_13.addWidget(self.cb_in_serial_port)
        self.horizontalLayout_11.addWidget(self.groupBox_10)
        self.groupBox_8 = QtGui.QGroupBox(self.tab_settings)
        self.groupBox_8.setObjectName(_fromUtf8("groupBox_8"))
        self.horizontalLayout_12 = QtGui.QHBoxLayout(self.groupBox_8)
        self.horizontalLayout_12.setObjectName(_fromUtf8("horizontalLayout_12"))
        self.cb_out_serial_port = QtGui.QComboBox(self.groupBox_8)
        self.cb_out_serial_port.setObjectName(_fromUtf8("cb_out_serial_port"))
        self.horizontalLayout_12.addWidget(self.cb_out_serial_port)
        self.horizontalLayout_11.addWidget(self.groupBox_8)
        self.verticalLayout_7.addLayout(self.horizontalLayout_11)
        spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
        self.verticalLayout_7.addItem(spacerItem)
        self.btn_go = QtGui.QPushButton(self.tab_settings)
        font = QtGui.QFont()
        font.setPointSize(29)  # oversized "Go!" button
        self.btn_go.setFont(font)
        self.btn_go.setObjectName(_fromUtf8("btn_go"))
        self.verticalLayout_7.addWidget(self.btn_go)
        spacerItem1 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
        self.verticalLayout_7.addItem(spacerItem1)
        self.pushButton = QtGui.QPushButton(self.tab_settings)
        self.pushButton.setObjectName(_fromUtf8("pushButton"))
        self.verticalLayout_7.addWidget(self.pushButton)
        self.tabWidget.addTab(self.tab_settings, _fromUtf8(""))
        self.horizontalLayout.addWidget(self.tabWidget)
        MainWindow.setCentralWidget(self.centralwidget)
        # --- menu bar and status bar -------------------------------------
        self.menubar = QtGui.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 792, 25))
        self.menubar.setObjectName(_fromUtf8("menubar"))
        self.menuArquivo = QtGui.QMenu(self.menubar)
        self.menuArquivo.setObjectName(_fromUtf8("menuArquivo"))
        self.menuHelp = QtGui.QMenu(self.menubar)
        self.menuHelp.setObjectName(_fromUtf8("menuHelp"))
        self.menuFunctions = QtGui.QMenu(self.menubar)
        self.menuFunctions.setObjectName(_fromUtf8("menuFunctions"))
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtGui.QStatusBar(MainWindow)
        self.statusbar.setObjectName(_fromUtf8("statusbar"))
        MainWindow.setStatusBar(self.statusbar)
        # --- dockable "Options" panel (training + status + channel picks) -
        self.dockWidget = QtGui.QDockWidget(MainWindow)
        self.dockWidget.setObjectName(_fromUtf8("dockWidget"))
        self.dockWidgetContents = QtGui.QWidget()
        self.dockWidgetContents.setObjectName(_fromUtf8("dockWidgetContents"))
        self.verticalLayout = QtGui.QVBoxLayout(self.dockWidgetContents)
        self.verticalLayout.setMargin(0)
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        self.verticalLayoutOptions = QtGui.QVBoxLayout()
        self.verticalLayoutOptions.setContentsMargins(-1, 0, -1, -1)
        self.verticalLayoutOptions.setObjectName(_fromUtf8("verticalLayoutOptions"))
        self.lbl_options = QtGui.QLabel(self.dockWidgetContents)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.lbl_options.sizePolicy().hasHeightForWidth())
        self.lbl_options.setSizePolicy(sizePolicy)
        self.lbl_options.setMaximumSize(QtCore.QSize(16777215, 50))
        font = QtGui.QFont()
        font.setBold(True)
        font.setItalic(False)
        font.setWeight(75)
        font.setStrikeOut(False)
        font.setKerning(True)
        self.lbl_options.setFont(font)
        self.lbl_options.setAlignment(QtCore.Qt.AlignCenter)
        self.lbl_options.setObjectName(_fromUtf8("lbl_options"))
        self.verticalLayoutOptions.addWidget(self.lbl_options)
        self.verticalLayoutThreshould = QtGui.QVBoxLayout()
        self.verticalLayoutThreshould.setObjectName(_fromUtf8("verticalLayoutThreshould"))
        # "Training" group: record raw EMG, generate/load training files.
        self.groupBox = QtGui.QGroupBox(self.dockWidgetContents)
        self.groupBox.setObjectName(_fromUtf8("groupBox"))
        self.verticalLayout_2 = QtGui.QVBoxLayout(self.groupBox)
        self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
        self.btn_record_raw_emg = QtGui.QPushButton(self.groupBox)
        self.btn_record_raw_emg.setObjectName(_fromUtf8("btn_record_raw_emg"))
        self.verticalLayout_2.addWidget(self.btn_record_raw_emg)
        self.label_file_name = QtGui.QLabel(self.groupBox)
        self.label_file_name.setObjectName(_fromUtf8("label_file_name"))
        self.verticalLayout_2.addWidget(self.label_file_name)
        self.horizontalLayout_7 = QtGui.QHBoxLayout()
        self.horizontalLayout_7.setContentsMargins(-1, 0, -1, -1)
        self.horizontalLayout_7.setObjectName(_fromUtf8("horizontalLayout_7"))
        self.lbl_output_name = QtGui.QLabel(self.groupBox)
        self.lbl_output_name.setObjectName(_fromUtf8("lbl_output_name"))
        self.horizontalLayout_7.addWidget(self.lbl_output_name)
        self.lbl_output_value = QtGui.QLabel(self.groupBox)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.lbl_output_value.sizePolicy().hasHeightForWidth())
        self.lbl_output_value.setSizePolicy(sizePolicy)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.lbl_output_value.setFont(font)
        self.lbl_output_value.setObjectName(_fromUtf8("lbl_output_value"))
        self.horizontalLayout_7.addWidget(self.lbl_output_value)
        self.verticalLayout_2.addLayout(self.horizontalLayout_7)
        self.btn_generate_training_file = QtGui.QPushButton(self.groupBox)
        self.btn_generate_training_file.setObjectName(_fromUtf8("btn_generate_training_file"))
        self.verticalLayout_2.addWidget(self.btn_generate_training_file)
        self.btn_load_training_file = QtGui.QPushButton(self.groupBox)
        self.btn_load_training_file.setObjectName(_fromUtf8("btn_load_training_file"))
        self.verticalLayout_2.addWidget(self.btn_load_training_file)
        self.label = QtGui.QLabel(self.groupBox)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.label.setFont(font)
        self.label.setObjectName(_fromUtf8("label"))
        self.verticalLayout_2.addWidget(self.label)
        self.verticalLayoutThreshould.addWidget(self.groupBox)
        # "Status" group: simulation switch, simple/threshold mode, slider.
        self.groupBox_2 = QtGui.QGroupBox(self.dockWidgetContents)
        self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
        self.verticalLayout_3 = QtGui.QVBoxLayout(self.groupBox_2)
        self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
        self.checkBox_simulation = QtGui.QCheckBox(self.groupBox_2)
        self.checkBox_simulation.setChecked(False)
        self.checkBox_simulation.setObjectName(_fromUtf8("checkBox_simulation"))
        self.verticalLayout_3.addWidget(self.checkBox_simulation)
        self.checkBox_simple_mode = QtGui.QCheckBox(self.groupBox_2)
        self.checkBox_simple_mode.setObjectName(_fromUtf8("checkBox_simple_mode"))
        self.verticalLayout_3.addWidget(self.checkBox_simple_mode)
        self.horizontalLayout_8 = QtGui.QHBoxLayout()
        self.horizontalLayout_8.setContentsMargins(0, 0, -1, -1)
        self.horizontalLayout_8.setObjectName(_fromUtf8("horizontalLayout_8"))
        # Threshold slider: range 0..250, starts at maximum; the companion
        # label shows "2,5" -- presumably value/100 is displayed, confirm in
        # the view/controller code.
        self.horizontalSlider_threshold = QtGui.QSlider(self.groupBox_2)
        self.horizontalSlider_threshold.setMaximum(250)
        self.horizontalSlider_threshold.setProperty("value", 250)
        self.horizontalSlider_threshold.setOrientation(QtCore.Qt.Horizontal)
        self.horizontalSlider_threshold.setObjectName(_fromUtf8("horizontalSlider_threshold"))
        self.horizontalLayout_8.addWidget(self.horizontalSlider_threshold)
        self.label_2 = QtGui.QLabel(self.groupBox_2)
        self.label_2.setObjectName(_fromUtf8("label_2"))
        self.horizontalLayout_8.addWidget(self.label_2)
        self.verticalLayout_3.addLayout(self.horizontalLayout_8)
        self.verticalLayoutThreshould.addWidget(self.groupBox_2)
        # Channel-selection checkboxes.
        self.verticalLayoutCheckBoxes = QtGui.QVBoxLayout()
        self.verticalLayoutCheckBoxes.setObjectName(_fromUtf8("verticalLayoutCheckBoxes"))
        self.label_channels = QtGui.QLabel(self.dockWidgetContents)
        self.label_channels.setObjectName(_fromUtf8("label_channels"))
        self.verticalLayoutCheckBoxes.addWidget(self.label_channels)
        self.cb_ch1 = QtGui.QCheckBox(self.dockWidgetContents)
        self.cb_ch1.setObjectName(_fromUtf8("cb_ch1"))
        self.verticalLayoutCheckBoxes.addWidget(self.cb_ch1)
        self.cb_ch3 = QtGui.QCheckBox(self.dockWidgetContents)
        self.cb_ch3.setObjectName(_fromUtf8("cb_ch3"))
        self.verticalLayoutCheckBoxes.addWidget(self.cb_ch3)
        self.cb_ch4 = QtGui.QCheckBox(self.dockWidgetContents)
        self.cb_ch4.setObjectName(_fromUtf8("cb_ch4"))
        self.verticalLayoutCheckBoxes.addWidget(self.cb_ch4)
        self.cb_ch2 = QtGui.QCheckBox(self.dockWidgetContents)
        self.cb_ch2.setObjectName(_fromUtf8("cb_ch2"))
        self.verticalLayoutCheckBoxes.addWidget(self.cb_ch2)
        self.verticalLayoutThreshould.addLayout(self.verticalLayoutCheckBoxes)
        self.verticalLayoutOptions.addLayout(self.verticalLayoutThreshould)
        self.verticalLayout.addLayout(self.verticalLayoutOptions)
        self.dockWidget.setWidget(self.dockWidgetContents)
        MainWindow.addDockWidget(QtCore.Qt.DockWidgetArea(1), self.dockWidget)
        # --- actions and menu population ----------------------------------
        self.actionSimples = QtGui.QAction(MainWindow)
        self.actionSimples.setObjectName(_fromUtf8("actionSimples"))
        self.actionIn_Plotter = QtGui.QAction(MainWindow)
        self.actionIn_Plotter.setObjectName(_fromUtf8("actionIn_Plotter"))
        self.actionThread = QtGui.QAction(MainWindow)
        self.actionThread.setObjectName(_fromUtf8("actionThread"))
        self.actionDesativado = QtGui.QAction(MainWindow)
        self.actionDesativado.setObjectName(_fromUtf8("actionDesativado"))
        self.actionStartAcquisition = QtGui.QAction(MainWindow)
        self.actionStartAcquisition.setCheckable(False)
        self.actionStartAcquisition.setObjectName(_fromUtf8("actionStartAcquisition"))
        self.actionStart_Recording = QtGui.QAction(MainWindow)
        self.actionStart_Recording.setObjectName(_fromUtf8("actionStart_Recording"))
        self.actionStop_Recording = QtGui.QAction(MainWindow)
        self.actionStop_Recording.setObjectName(_fromUtf8("actionStop_Recording"))
        self.actionAbout = QtGui.QAction(MainWindow)
        self.actionAbout.setObjectName(_fromUtf8("actionAbout"))
        self.actionFind_Serial_Port = QtGui.QAction(MainWindow)
        self.actionFind_Serial_Port.setObjectName(_fromUtf8("actionFind_Serial_Port"))
        self.menuArquivo.addAction(self.actionStart_Recording)
        self.menuArquivo.addAction(self.actionStop_Recording)
        self.menuHelp.addAction(self.actionAbout)
        self.menuFunctions.addAction(self.actionStartAcquisition)
        self.menuFunctions.addSeparator()
        self.menuFunctions.addAction(self.actionFind_Serial_Port)
        self.menubar.addAction(self.menuArquivo.menuAction())
        self.menubar.addAction(self.menuHelp.menuAction())
        self.menubar.addAction(self.menuFunctions.menuAction())
        # --- final wiring -------------------------------------------------
        self.retranslateUi(MainWindow)
        self.tabWidget.setCurrentIndex(3)  # start on the manual-control tab
        self.cb_features.setCurrentIndex(-1)  # no feature selected initially
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Assign every translatable, user-visible string of the window."""
        MainWindow.setWindowTitle(_translate("MainWindow", "Project Main", None))
        self.label_replace.setText(_translate("MainWindow", "Here will be the chart", None))
        self.lbl_status.setText(_translate("MainWindow", "Status:", None))
        self.cb_chart_emg_on_off.setText(_translate("MainWindow", "Chart On/Off", None))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_EMG), _translate("MainWindow", "EMG", None))
        self.label_features.setText(_translate("MainWindow", "Feature:", None))
        self.checkBox.setText(_translate("MainWindow", "Visible", None))
        self.label_replace_features.setText(_translate("MainWindow", "Here will be another the chart", None))
        self.lbl_status_features.setText(_translate("MainWindow", "Status:", None))
        self.cb_chart_features_on_off.setText(_translate("MainWindow", "Chart On/Off", None))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_features), _translate("MainWindow", "Features", None))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_classification), _translate("MainWindow", "Classification", None))
        self.groupBox_f1.setTitle(_translate("MainWindow", "Finger 1: 0º", None))
        self.groupBox_f2.setTitle(_translate("MainWindow", "Finger 2: 0º", None))
        self.groupBox_f3.setTitle(_translate("MainWindow", "Finger 3: 0º", None))
        self.groupBox_f4.setTitle(_translate("MainWindow", "Finger 4: 0º", None))
        self.groupBox_f5.setTitle(_translate("MainWindow", "Finger 5: 0º", None))
        self.groupBox_4.setTitle(_translate("MainWindow", "General", None))
        self.btn_laser.setText(_translate("MainWindow", "Laser", None))
        self.btn_reset_position.setText(_translate("MainWindow", "Open Hand", None))
        self.btn_close_position.setText(_translate("MainWindow", "Close Hand", None))
        self.btn_send_position.setText(_translate("MainWindow", "Send", None))
        self.groupBox_3.setTitle(_translate("MainWindow", "Extra", None))
        self.pushButton_forhonnor.setText(_translate("MainWindow", "For Honnor", None))
        self.pushButton_pointer.setText(_translate("MainWindow", "Pointer", None))
        self.pushButton_spiderman.setText(_translate("MainWindow", "Spider-Man", None))
        self.pushButton_comein.setText(_translate("MainWindow", "Come In", None))
        self.pushButton_v.setText(_translate("MainWindow", "V", None))
        self.pushButton_extra6.setText(_translate("MainWindow", "Extra 6", None))
        self.pushButton_extra7.setText(_translate("MainWindow", "Extra 7", None))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_controle_manuel), _translate("MainWindow", "Controle Manuel", None))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("MainWindow", "Processing", None))
        self.groupBox_10.setTitle(_translate("MainWindow", "Input Serial Port", None))
        self.groupBox_8.setTitle(_translate("MainWindow", "Output Serial Port", None))
        self.btn_go.setText(_translate("MainWindow", "Go!", None))
        self.pushButton.setText(_translate("MainWindow", "...", None))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_settings), _translate("MainWindow", "General Settings", None))
        self.menuArquivo.setTitle(_translate("MainWindow", "File", None))
        self.menuHelp.setTitle(_translate("MainWindow", "Help", None))
        self.menuFunctions.setTitle(_translate("MainWindow", "Options", None))
        self.lbl_options.setText(_translate("MainWindow", "Options", None))
        self.groupBox.setTitle(_translate("MainWindow", "Training", None))
        self.btn_record_raw_emg.setText(_translate("MainWindow", "Record raw EMG file", None))
        self.label_file_name.setText(_translate("MainWindow", "File: None", None))
        self.lbl_output_name.setText(_translate("MainWindow", "Output:", None))
        self.lbl_output_value.setText(_translate("MainWindow", "None", None))
        self.btn_generate_training_file.setText(_translate("MainWindow", "Generate Training File", None))
        self.btn_load_training_file.setText(_translate("MainWindow", "Load Training File", None))
        self.label.setText(_translate("MainWindow", "Not Trained", None))
        self.groupBox_2.setTitle(_translate("MainWindow", "Status", None))
        self.checkBox_simulation.setText(_translate("MainWindow", "Using Simulation", None))
        self.checkBox_simple_mode.setText(_translate("MainWindow", "Threshold", None))
        self.label_2.setText(_translate("MainWindow", "2,5", None))
        self.label_channels.setText(_translate("MainWindow", "Channels:", None))
        # NOTE(review): checkbox object names and channel labels are
        # misaligned (cb_ch3 shows "CH2", cb_ch4 shows "CH3", cb_ch2 shows
        # "CH4") -- generated from the .ui, so presumably intentional wiring;
        # confirm against the acquisition channel mapping before "fixing".
        self.cb_ch1.setText(_translate("MainWindow", "CH1", None))
        self.cb_ch3.setText(_translate("MainWindow", "CH2", None))
        self.cb_ch4.setText(_translate("MainWindow", "CH3", None))
        self.cb_ch2.setText(_translate("MainWindow", "CH4", None))
        self.actionSimples.setText(_translate("MainWindow", "Processamento", None))
        self.actionIn_Plotter.setText(_translate("MainWindow", "In Plotter", None))
        self.actionThread.setText(_translate("MainWindow", "Thread", None))
        self.actionDesativado.setText(_translate("MainWindow", "Desativado", None))
        self.actionStartAcquisition.setText(_translate("MainWindow", "Start Acquisition", None))
        self.actionStart_Recording.setText(_translate("MainWindow", "Start Recording", None))
        self.actionStop_Recording.setText(_translate("MainWindow", "Stop Recording", None))
        self.actionAbout.setText(_translate("MainWindow", "About", None))
        self.actionFind_Serial_Port.setText(_translate("MainWindow", "Find Serial Port", None))
| 1.625 | 2 |
config/printout_info.py | etfovac/rpi_cam | 0 | 12771150 | <reponame>etfovac/rpi_cam<filename>config/printout_info.py<gh_stars>0
import platform
import datetime
import psutil
import subprocess
import sys
import os
sys.path.append(os.path.abspath(".."))
import config.printout_format as cpf
# Cached platform identity (system, node, release, machine, ...), read by
# get_os()/lcd_msg_list() below.
plt_id = platform.uname()
degree = chr(223) # spec. char for ° for LCD (HD44780 character ROM code)
def get_ip():
    """Return ``(ip_address, network_id)`` for the first active interface.

    Preference order: ``wlan0`` (network_id is the SSID from ``iwgetid``),
    then ``eth0``, then the loopback ``lo``; falls back to
    ``("0.0.0.0", "all net if down")`` when nothing is up.
    """
    # "{} IP address".format(subprocess.getoutput("hostname -I"))
    # iwconfig wlan0 # info iwgetid
    # ethtool eth0 # ethtool -i eth0
    # ifconfig lo
    stats = psutil.net_if_stats()    # hoisted: queried once, not per branch
    addrs = psutil.net_if_addrs()
    # .get() guards: on a host missing one of these interfaces the original
    # indexing raised KeyError instead of falling through to the next branch.
    wlan0 = stats.get('wlan0')
    eth0 = stats.get('eth0')
    lo = stats.get('lo')
    if wlan0 is not None and wlan0.isup:
        ip_addr = addrs['wlan0'][0].address
        ip_net = subprocess.getoutput("iwgetid -r")  # SSID of joined network
    elif eth0 is not None and eth0.isup:
        ip_addr = addrs['eth0'][0].address
        ip_net = "LAN eth0"
    elif lo is not None and lo.isup:
        ip_addr = addrs['lo'][0].address #127.0.0.1
        ip_net = "Loopback"
    else:
        ip_addr = "0.0.0.0"
        ip_net = "all net if down"
    return ip_addr, ip_net #tuple
def get_wlan():
    """Return ``(carrier_frequency_GHz, channel)`` of the wlan0 link.

    Both values are 0 when the interface is down.
    """
    # iwlist wlan0 freq # iwlist wlan0 scan
    # iwlist wlan0 rate # iwlist rate
    # iwgetid --protocol wlan0 -r
    # iwgetid --scheme wlan0 -r
    if not psutil.net_if_stats()['wlan0'].isup:
        return 0, 0  # link down: nothing to report
    carrier_ghz = float(subprocess.getoutput("iwgetid --freq wlan0 -r")) / 1e9
    channel = int(subprocess.getoutput("iwgetid --channel wlan0 -r"))
    return carrier_ghz, channel  # tuple
def get_os():
    """Collect OS/platform identity and memory usage.

    Returns a tuple::

        (system, node, release, machine, virtual_mem_pct, swap_mem_pct)

    Bug fix: the original body was three bare expression statements whose
    values were discarded, so the function computed nothing and always
    returned ``None``; the gathered values are now returned.
    """
    return (plt_id.system, plt_id.node,
            plt_id.release, plt_id.machine,
            psutil.virtual_memory().percent, psutil.swap_memory().percent)
def lcd_msg_list(lcd_columns=16, lcd_rows=2):
    """Build the rotating LCD status messages plus the idle message.

    Returns ``(msg_list, msg_idle)`` where each element is formatted for an
    ``lcd_columns`` x ``lcd_rows`` display via config.printout_format.
    """
    cpu_load_line = "CPU {} %".format(psutil.cpu_percent())
    cpu_temp_line = "CPU {} {}C".format(
        psutil.sensors_temperatures()['cpu_thermal'][0].current, degree)
    msg_list = [
        cpf.lcd_ribbon(lcd_columns, lcd_rows),                       # opening ribbon
        cpf.msg_form(plt_id.system, plt_id.node, "OS {}"),           # OS + hostname
        cpf.msg_form(plt_id.release, plt_id.machine,
                     "rel {}", "chip {}"),                           # kernel + arch
        cpf.msg_form(get_ip(), "", "{} IP", "{} ID"),                # address + net id
        cpf.msg_form(get_wlan(), "",
                     "CFreq {} GHz", "WLAN Channel {}"),             # Wi-Fi link info
        cpf.msg_form(cpu_load_line, cpu_temp_line),                  # CPU load + temp
        cpf.msg_form(psutil.virtual_memory().percent,
                     psutil.swap_memory().percent,
                     "Virt MEM {} %", "Swap MEM {} %"),              # memory usage
        cpf.lcd_ribbon(lcd_columns, lcd_rows),                       # closing ribbon
    ]
    msg_idle = [cpf.lcd_info()]
    return msg_list, msg_idle
| 2.28125 | 2 |