content stringlengths 5 1.05M |
|---|
#!/usr/bin/python -OOOO
# vim: set fileencoding=utf8 shiftwidth=4 tabstop=4 textwidth=80 foldmethod=marker :
# Copyright (c) 2010, Kou Man Tong. All rights reserved.
# For licensing, see LICENSE file included in the package.
"""
Base codec functions for bson.
"""
import struct
import cStringIO
import calendar
from datetime import datetime
import warnings
from abc import ABCMeta, abstractmethod
# serialization optimizations
# Pre-compiled Struct objects so the hot encode/decode paths avoid
# re-parsing format strings on every call.  All formats are little-endian
# per the BSON wire format.
length_struct = struct.Struct('<i')    # int32: document/string lengths
int64_struct = struct.Struct('<q')     # signed 64-bit integer
uint64_struct = struct.Struct('<Q')    # unsigned 64-bit integer (non-standard tag 0x3F)
binary_struct = struct.Struct('<ib')   # binary header: length + subtype byte
double_struct = struct.Struct('<d')    # IEEE-754 double
boolean_struct = struct.Struct('<b')   # single-byte boolean
# Bound-method aliases: attribute lookups hoisted once at import time.
unpack_length = length_struct.unpack_from
unpack_binary_struct = binary_struct.unpack_from
# Error Classes
class MissingClassDefinition(ValueError):
    """Raised when decoding meets a class name that was never registered."""
    def __init__(self, class_name):
        message = "No class definition for class %s" % (class_name,)
        super(MissingClassDefinition, self).__init__(message)
#
# Warning Classes
class MissingTimezoneWarning(RuntimeWarning):
    """Issued when a naive datetime is encoded; UTC is assumed."""
    def __init__(self, *args):
        if not args:
            args = ("Input datetime object has no tzinfo, assuming UTC.",)
        super(MissingTimezoneWarning, self).__init__(*args)
#
# Traversal Step
class TraversalStep(object):
    # Lightweight record of one level of the document being encoded:
    # `parent` is the containing dict/array/object, `key` is the element
    # name (or integer index for arrays).  Exposed to generator_func
    # callbacks so they can see where in the tree they are.
    def __init__(self, parent, key):
        self.parent = parent
        self.key = key
#
# Custom Object Codec
class BSONCoding(object):
    # Abstract interface for user classes that want transparent BSON
    # (de)serialization.  Python 2 metaclass declaration syntax.
    __metaclass__ = ABCMeta

    @abstractmethod
    def bson_encode(self):
        # Return a dict of the instance state to serialize.
        pass

    @abstractmethod
    def bson_init(self, raw_values):
        # Restore instance state from the decoded dict `raw_values`.
        pass
# Registry of BSONCoding subclasses available to decode_object(), keyed by
# class name as stored in the "$$__CLASS_NAME__$$" field.
classes = {}

def import_class(cls):
    """Register `cls` for decoding if it is a BSONCoding subclass; no-op otherwise."""
    if not issubclass(cls, BSONCoding):
        return
    classes[cls.__name__] = cls

def import_classes(*args):
    """Register every class passed as an argument."""
    for cls in args:
        import_class(cls)

def import_classes_from_modules(*args):
    """Register every BSONCoding subclass found in the given modules.

    Bug fix: the original iterated module.__dict__ *keys* (strings), so
    hasattr(item, "__name__") was always False and nothing was ever
    registered.  Iterate the attribute *values* instead, and only pass
    actual classes to import_class() (issubclass raises TypeError for
    non-classes).
    """
    for module in args:
        for item in module.__dict__.values():
            if isinstance(item, type):
                import_class(item)
def encode_object(obj, traversal_stack, generator_func):
    """Encode a BSONCoding instance as a document, embedding its class name."""
    values = obj.bson_encode()
    values["$$__CLASS_NAME__$$"] = obj.__class__.__name__
    return encode_document(values, traversal_stack, obj, generator_func)

def encode_object_element(name, value, traversal_stack, generator_func):
    """Encode a named BSONCoding instance (stored as a 0x03 document element)."""
    return "\x03" + encode_cstring(name) + \
        encode_object(value, traversal_stack, generator_func=generator_func)

class _EmptyClass(object):
    # Shell instance used by decode_object() so the target class's __init__
    # is bypassed; state is restored through bson_init() instead.
    pass

def decode_object(raw_values):
    """Rebuild a registered BSONCoding instance from its decoded dict.

    Raises MissingClassDefinition if the embedded class name was never
    registered via import_class()/import_classes().

    Fixes: the Python-2-only `except KeyError, e` syntax (the bound
    exception was never used) and the dead `cls = None` initializer.
    """
    class_name = raw_values["$$__CLASS_NAME__$$"]
    try:
        cls = classes[class_name]
    except KeyError:
        raise MissingClassDefinition(class_name)
    retval = _EmptyClass()
    retval.__class__ = cls
    retval.bson_init(raw_values)
    return retval
#
# Codec Logic
def encode_string(value):
    # UTF-8 encode, then emit: int32 byte count (including NUL) + bytes + NUL.
    encoded = value.encode("utf8")
    size = len(encoded)
    return struct.pack("<i%dsb" % (size,), size + 1, encoded, 0)

def decode_string(data, base):
    # Read the int32 length prefix, then slice out the payload sans its NUL.
    size = struct.unpack_from("<i", data, base)[0]
    start = base + 4
    return (start + size, data[start:start + size - 1])

def encode_cstring(value):
    # NUL-terminated string; unicode input is UTF-8 encoded first.
    if isinstance(value, unicode):
        value = value.encode("utf8")
    return value + "\x00"

def decode_cstring(data, base):
    # Scan for the terminating NUL; returns (offset past NUL, raw name).
    # NOTE(msolomon) this decode adds a depressing amount of overhead and
    # seems incorrect. Nothing should expect more than a simple cstring.
    # name = data[base:end].decode('utf8')
    terminator = data.index('\x00', base)
    return (terminator + 1, data[base:terminator])
def encode_binary(value):
    # Header is int32 length + subtype byte (always generic subtype 0).
    return struct.pack("<ib", len(value), 0) + value

def decode_binary(data, base):
    # 5-byte header (length + subtype byte), then the payload itself.
    length, _subtype = struct.unpack_from("<ib", data, base)
    payload_start = base + 5
    return (payload_start + length, data[payload_start:payload_start + length])

def decode_double(data, base):
    # Little-endian IEEE-754 double, 8 bytes.
    return (base + 8, struct.unpack_from("<d", data, base)[0])
def encode_double_element(name, value):
    """Element tag 0x01: name + 8-byte little-endian double."""
    return "\x01" + encode_cstring(name) + double_struct.pack(value)

def decode_double_element(data, base):
    """Decode a 0x01 element; returns (next offset, name, float value)."""
    base, name = decode_cstring(data, base + 1)
    base, number = decode_double(data, base)
    return (base, name, number)

def encode_string_element(name, value):
    """Element tag 0x02: name + length-prefixed UTF-8 string."""
    return "\x02" + encode_cstring(name) + encode_string(value)

def decode_string_element(data, base):
    """Decode a 0x02 element; the payload is decoded back to unicode."""
    base, name = decode_cstring(data, base + 1)
    base, raw = decode_string(data, base)
    return (base, name, raw.decode("utf8"))
def encode_value(name, value, buf, traversal_stack, generator_func):
    """Append the BSON element encoding (name, value) to `buf`.

    Dispatches on the Python type of `value`.  Bug fix: bool must be tested
    *before* int, because isinstance(True, int) is True — the original chain
    therefore encoded booleans as int32 (tag 0x10) instead of the BSON
    boolean type (tag 0x08), so True/False round-tripped as 1/0.
    """
    if isinstance(value, str):
        # Raw byte strings become binary blobs (tag 0x05).
        buf.write(encode_binary_element(name, value))
    elif isinstance(value, unicode):
        buf.write(encode_string_element(name, value))
    elif isinstance(value, bool):
        # Checked before int: bool is a subclass of int.
        buf.write(encode_boolean_element(name, value))
    elif isinstance(value, int):
        if value < -0x80000000 or value > 0x7fffffff:
            buf.write(encode_int64_element(name, value))
        else:
            buf.write(encode_int32_element(name, value))
    elif isinstance(value, long):
        # Values beyond int64 use the non-standard uint64 tag 0x3F.
        if value <= 0x7fffffffffffffff:
            buf.write(encode_int64_element(name, value))
        else:
            buf.write(encode_uint64_element(name, value))
    elif value is None:
        buf.write(encode_none_element(name, value))
    elif isinstance(value, dict):
        buf.write(encode_document_element(name, value,
            traversal_stack, generator_func))
    elif isinstance(value, (list, tuple)):
        buf.write(encode_array_element(name, value,
            traversal_stack, generator_func))
    elif isinstance(value, float):
        buf.write(encode_double_element(name, value))
    elif isinstance(value, datetime):
        buf.write(encode_UTCdatetime_element(name, value))
    elif isinstance(value, BSONCoding):
        buf.write(encode_object_element(name, value, traversal_stack,
            generator_func))
    else:
        raise ValueError('value has bad type', type(value))
def encode_document(obj, traversal_stack,
        traversal_parent=None, generator_func=None):
    """Serialize dict `obj` as a BSON document: int32 total size + elements + NUL.

    `generator_func`, when given, yields the key order; otherwise the dict's
    natural key order is used.  `traversal_stack` records the path for it.
    """
    element_buf = cStringIO.StringIO()
    if generator_func is not None:
        names = generator_func(obj, traversal_stack)
    else:
        names = obj.iterkeys()
    for name in names:
        traversal_stack.append(TraversalStep(traversal_parent or obj, name))
        encode_value(name, obj[name], element_buf, traversal_stack,
            generator_func)
        traversal_stack.pop()
    payload = element_buf.getvalue()
    size = len(payload)
    # Total size counts the 4 length bytes and the trailing NUL.
    return struct.pack("<i%dsb" % (size,), size + 4 + 1, payload, 0)

def encode_array(array, traversal_stack,
        traversal_parent=None, generator_func=None):
    """Serialize a sequence as a BSON array (a document keyed "0", "1", ...)."""
    element_buf = cStringIO.StringIO()
    for index, item in enumerate(array):
        traversal_stack.append(TraversalStep(traversal_parent or array, index))
        encode_value(str(index), item, element_buf, traversal_stack,
            generator_func)
        traversal_stack.pop()
    payload = element_buf.getvalue()
    size = len(payload)
    return struct.pack("<i%dsb" % (size,), size + 4 + 1, payload, 0)
def decode_document(data, base):
    """Decode one document; returns (offset past document, dict or object).

    If the special "$$__CLASS_NAME__$$" key is present the dict is promoted
    to a registered BSONCoding instance via decode_object().
    """
    size = unpack_length(data, base)[0]
    end_point = base + size
    pos = base + 4
    retval = {}
    # Stop one byte early: the final byte is the document's NUL terminator.
    while pos < end_point - 1:
        pos, name, value = ELEMENT_DISPATCH[data[pos]](data, pos)
        retval[name] = value
    if "$$__CLASS_NAME__$$" in retval:
        retval = decode_object(retval)
    return (end_point, retval)

def encode_document_element(name, value, traversal_stack, generator_func):
    """Element tag 0x03: embedded document."""
    return "\x03" + encode_cstring(name) + \
        encode_document(value, traversal_stack, generator_func=generator_func)

def decode_document_element(data, base):
    """Decode a 0x03 element; returns (next offset, name, decoded document)."""
    base, name = decode_cstring(data, base + 1)
    base, value = decode_document(data, base)
    return (base, name, value)
def encode_array_element(name, value, traversal_stack, generator_func):
    """Element tag 0x04: embedded array."""
    return "\x04" + encode_cstring(name) + \
        encode_array(value, traversal_stack, generator_func=generator_func)

def _decode_array_document(data, base):
    # Arrays are stored as documents whose names are stringified indices;
    # the names are discarded and the values collected in stream order.
    size = unpack_length(data, base)[0]
    end_point = base + size
    pos = base + 4
    retval = []
    while pos < end_point - 1:
        pos, _name, value = ELEMENT_DISPATCH[data[pos]](data, pos)
        retval.append(value)
    return (end_point, retval)

def decode_array_element(data, base):
    """Decode a 0x04 element; returns (next offset, name, list of values)."""
    base, name = decode_cstring(data, base + 1)
    base, items = _decode_array_document(data, base)
    return (base, name, items)
def encode_binary_element(name, value):
    """Element tag 0x05: generic binary blob."""
    return "\x05" + encode_cstring(name) + encode_binary(value)

def decode_binary_element(data, base):
    """Decode a 0x05 element; returns (next offset, name, raw bytes)."""
    base, name = decode_cstring(data, base + 1)
    base, blob = decode_binary(data, base)
    return (base, name, blob)

def encode_boolean_element(name, value):
    """Element tag 0x08: single-byte boolean."""
    return "\x08" + encode_cstring(name) + boolean_struct.pack(value)

def decode_boolean_element(data, base):
    """Decode a 0x08 element; any non-zero byte becomes True."""
    base, name = decode_cstring(data, base + 1)
    flag = bool(boolean_struct.unpack_from(data, base)[0])
    return (base + 1, name, flag)
def encode_UTCdatetime_element(name, value):
    """Element tag 0x09: UTC datetime as int64 milliseconds since the epoch."""
    millis = int(round(calendar.timegm(value.utctimetuple()) * 1000 +
        (value.microsecond / 1000.0)))
    return "\x09" + encode_cstring(name) + int64_struct.pack(millis)

def decode_UTCdatetime_element(data, base):
    """Decode a 0x09 element into a naive (tzinfo-less) UTC datetime."""
    base, name = decode_cstring(data, base + 1)
    millis = int64_struct.unpack_from(data, base)[0]
    return (base + 8, name, datetime.utcfromtimestamp(millis / 1000.0))

def encode_none_element(name, value):
    """Element tag 0x0a: null.  `value` is ignored; only the name is stored."""
    return "\x0a" + encode_cstring(name)

def decode_none_element(data, base):
    """Decode a 0x0a element; the value is always None."""
    base, name = decode_cstring(data, base + 1)
    return (base, name, None)
def encode_int32_element(name, value):
    """Element tag 0x10: little-endian signed int32."""
    return "\x10" + encode_cstring(name) + length_struct.pack(value)

def decode_int32_element(data, base):
    """Decode a 0x10 element; returns (next offset, name, int)."""
    base, name = decode_cstring(data, base + 1)
    return (base + 4, name, unpack_length(data, base)[0])

def encode_int64_element(name, value):
    """Element tag 0x12: little-endian signed int64."""
    return "\x12" + encode_cstring(name) + int64_struct.pack(value)

def encode_uint64_element(name, value):
    """Element tag 0x3F: non-standard unsigned int64 extension."""
    return "\x3F" + encode_cstring(name) + uint64_struct.pack(value)

def decode_int64_element(data, base):
    """Decode a 0x12 element; returns (next offset, name, int)."""
    base, name = decode_cstring(data, base + 1)
    return (base + 8, name, int64_struct.unpack_from(data, base)[0])

def decode_uint64_element(data, base):
    """Decode a 0x3F element; returns (next offset, name, non-negative int)."""
    base, name = decode_cstring(data, base + 1)
    return (base + 8, name, uint64_struct.unpack_from(data, base)[0])
# Maps BSON element tag byte -> codec name suffix.  0x3F is a non-standard
# extension used by this codec for unsigned 64-bit integers.
ELEMENT_TYPES = {
    0x01 : "double",
    0x02 : "string",
    0x03 : "document",
    0x04 : "array",
    0x05 : "binary",
    0x08 : "boolean",
    0x09 : "UTCdatetime",
    0x0A : "none",
    0x10 : "int32",
    0x12 : "int64",
    0x3F : "uint64"
}
# optimize dispatch once all methods are known
# Keys are the raw tag *characters* as read from the byte stream (Python 2
# strings index to 1-char strings), values are the decode_* functions above.
ELEMENT_DISPATCH = dict([(chr(i), globals()["decode_" + name + "_element"])
    for i, name in ELEMENT_TYPES.iteritems()])
|
#!/usr/bin/python
# *****************************************************************************
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# ******************************************************************************
import argparse
from dlab.actions_lib import *
from dlab.meta_lib import *
import sys
# Command-line interface.  Every argument defaults to the empty string,
# which the main block below treats as "not provided".
parser = argparse.ArgumentParser()
parser.add_argument('--service_account_name', type=str, default='')
parser.add_argument('--role_name', type=str, default='')
parser.add_argument('--policy_path', type=str, default='')  # JSON file: permission list for the custom role
parser.add_argument('--roles_path', type=str, default='')   # JSON file: list of predefined role names
args = parser.parse_args()
if __name__ == "__main__":
    # Bug fix: `json` is used below but was never imported in this module —
    # it only worked if one of the `from dlab... import *` wildcards happened
    # to re-export it.  Import it explicitly.
    import json

    if args.service_account_name != '':
        if GCPMeta().get_service_account(args.service_account_name):
            print("REQUESTED SERVICE ACCOUNT {} ALREADY EXISTS".format(args.service_account_name))
        else:
            print("Creating Service account {}".format(args.service_account_name))
            GCPActions().create_service_account(args.service_account_name)
            # Create (or reuse) the custom role, then bind it to the new account.
            if GCPMeta().get_role(args.role_name):
                print("REQUESTED ROLE {} ALREADY EXISTS".format(args.role_name))
            else:
                if args.policy_path == '':
                    permissions = []
                else:
                    with open(args.policy_path, 'r') as f:
                        json_file = f.read()
                    permissions = json.loads(json_file)
                print("Creating Role {}".format(args.role_name))
                GCPActions().create_role(args.role_name, permissions)
            print("Assigning custom role to Service account.")
            GCPActions().set_role_to_service_account(args.service_account_name, args.role_name)
            # Optionally attach predefined GCP roles listed in a JSON file.
            if args.roles_path != '':
                print("Assigning predefined roles to Service account.")
                with open(args.roles_path, 'r') as f:
                    json_file = f.read()
                predefined_roles = json.loads(json_file)
                for role in predefined_roles:
                    GCPActions().set_role_to_service_account(args.service_account_name, role, 'predefined')
    else:
        parser.print_help()
        sys.exit(2)
|
from django.db import models
# Create your models here.
class Project(models.Model):
    '''Simple model for a project.'''
    project_name = models.CharField(max_length=100)
    description = models.CharField(max_length=100, blank=True, null=True)
    # Bug fix: auto_now and auto_now_add are mutually exclusive in Django;
    # for a creation timestamp only auto_now_add should be set.
    added_on = models.DateTimeField(auto_now_add=True)

    def __unicode__(self):
        return unicode(self.project_name)

    @models.permalink
    def get_absolute_url(self):
        return ('project-detail', [self.pk])
class Target(models.Model):
    '''Target model: the URL a task runs against; a project can have N URLs.'''
    # NOTE(review): despite the name, this is a FK to Project — confirm intent.
    name = models.ForeignKey(Project)
    url = models.URLField(max_length=500)
    # NOTE(review): `status` is a plain class attribute, not a model field;
    # it looks like an intended `choices` tuple for a missing CharField.
    status = (
        ('A', 'Active'),
        ('O', 'Offline'),
        ('D', 'Default'),
    )

    def __unicode__(self):
        return unicode(self.url)

    @models.permalink
    def get_absolute_url(self):
        return ('target-detail', [self.pk])
class Header(models.Model):
    '''HTTP header set for a task, e.g. the SOAPAction and content type
    used when posting the XML request.'''
    # NOTE(review): despite the name, this is a FK to Project — confirm intent.
    name = models.ForeignKey(Project)
    contenttype = models.CharField(max_length=50)   # e.g. text/xml
    charset = models.CharField(max_length=50)       # e.g. utf-8
    soapaction = models.CharField(max_length=100)   # SOAPAction header value

    def __unicode__(self):
        return unicode(self.soapaction)

    @models.permalink
    def get_absolute_url(self):
        return ('header-detail', [self.pk])
class Argument(models.Model):
    '''Argument model: a name/value pair taken from an XML result or passed
    into a request.'''
    # NOTE(review): despite the name, this is a FK to Project — confirm intent.
    name = models.ForeignKey(Project)
    argument = models.CharField(max_length=100)  # argument name
    value = models.CharField(max_length=200)     # argument value

    def __unicode__(self):
        return unicode(self.name)

    @models.permalink
    def get_absolute_url(self):
        return ('argument-detail', [self.pk])
class Task(models.Model):
    '''Task model, the operation itself. A task can have N steps.'''
    project_name = models.ForeignKey(Project)
    task_name = models.CharField(max_length=100)
    target = models.ForeignKey(Target)
    request = models.TextField(max_length=1000, verbose_name='XML request')
    # Self-referencing FK: an optional prerequisite task.
    requires = models.ForeignKey('Task', blank=True, null=True)
    threshold = models.FloatField()
    header = models.ForeignKey(Header)
    test = models.CharField(max_length=100, blank=True, null=True)
    arguments = models.ForeignKey(Argument, blank=True, null=True)
    steps = models.IntegerField()
    # Bug fix: auto_now and auto_now_add are mutually exclusive in Django;
    # keep the creation timestamp behaviour.
    added_on = models.DateTimeField(auto_now_add=True)

    def __unicode__(self):
        return unicode(self.task_name)

    @models.permalink
    def get_absolute_url(self):
        return ('task-detail', [self.pk])
class History(models.Model):
    '''History model: stores every task run, including the XML response.'''
    project_name = models.ForeignKey(Project)
    task_name = models.ForeignKey(Task)
    response = models.TextField()     # raw XML response body
    time = models.FloatField()        # elapsed time for the run
    status = models.BooleanField(default=False)
    # Bug fix: auto_now and auto_now_add are mutually exclusive in Django;
    # keep the creation timestamp behaviour.
    added_on = models.DateTimeField(auto_now_add=True)

    def __unicode__(self):
        return unicode(self.task_name)
|
from unittest import TestCase
from views.customer_functions import registerCustomer
def test_register_function():
    """Exercise registerCustomer with one well-formed customer record."""
    registerCustomer(
        "Ehis",
        "Edemakhiota",
        "edemaehiz@gmail.com",
        "EdemaEhi17.",
        "ehizman",
    )
class Test(TestCase):
    """Bug fix: the original invoked test_register_function() directly in the
    class body, so it executed once at import time and was never discovered
    or run as an actual test.  Wrapping it in a test method lets the
    unittest runner find and execute it."""

    def test_register(self):
        test_register_function()
|
"""
timeseries_qt_demo.py
----
Modification of demo
This demo demonstrates how to embed a matplotlib (mpl) plot
into a PyQt4 GUI application, including:
* Using the navigation toolbar
* Adding data to the plot
* Dynamically modifying the plot's properties
* Processing mpl events
* Saving the plot to a file from a menu
The main goal is to serve as a basis for developing rich PyQt GUI
applications featuring mpl plots (using the mpl OO API).
Eli Bendersky (eliben@gmail.com)
License: this code is in the public domain
Last modified: 19.01.2009
"""
# system stack
import sys, os, random
from PyQt4.QtCore import *
from PyQt4.QtGui import *
#science stack
import numpy as np
import json
#visual stack
import matplotlib
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
#user stack
# Relative User Stack
parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.sys.path.insert(1, parent_dir)
from io_utils.EcoFOCI_netCDF_read import EcoFOCI_netCDF
from calc.EPIC2Datetime import EPIC2Datetime
class AppForm(QMainWindow):
    """Main application window.

    Embeds a matplotlib canvas plus navigation toolbar in a PyQt4 window
    and plots one variable of an EcoFOCI netCDF timeseries file, selectable
    from a dropdown.  Python 2 / PyQt4 old-style signal API throughout.
    """
    # Default dataset shipped with the repository.
    example_path = parent_dir+'/example_data/example_timeseries_data.nc'

    def __init__(self, parent=None, active_file=example_path):
        QMainWindow.__init__(self, parent)
        self.setWindowTitle('Timeseries Demo: PyQt with matplotlib')
        self.create_menu()
        self.create_main_frame()
        self.textbox.setText(active_file)
        self.populate_dropdown()
        self.create_status_bar()
        self.on_draw()

    def save_plot(self):
        # Render the current figure to a user-chosen PNG file.
        file_choices = "PNG (*.png)|*.png"
        path = unicode(QFileDialog.getSaveFileName(self,
            'Save file', '',
            file_choices))
        if path:
            self.canvas.print_figure(path, dpi=self.dpi)
            self.statusBar().showMessage('Saved to %s' % path, 2000)

    def on_about(self):
        msg = """ A demo of using PyQt with matplotlib:
         * Use the matplotlib navigation bar
         * Add values to the text box and press Enter (or click "Draw")
         * Show or hide the grid
         * Drag the slider to modify the width of the bars
         * Save the plot to a file using the File menu
         * Click on a bar to receive an informative message
        """
        QMessageBox.about(self, "About the demo", msg.strip())

    def on_pick(self, event):
        # The event received here is of the type
        # matplotlib.backend_bases.PickEvent
        #
        # It carries lots of information, of which we're using
        # only a small amount here.
        #
        xdata = event.artist.get_xdata()
        ydata = event.artist.get_ydata()
        ind = event.ind
        msg = "You've clicked on a point with coords:\n {0}".format( tuple(zip(xdata[ind], ydata[ind])))
        QMessageBox.information(self, "Click!", msg)

    def on_draw(self):
        """ Redraws the figure
        """
        var1 = str(self.param_dropdown.currentText())
        self.load_netcdf()
        # Mask fill values before plotting; values > 1e34 are treated as
        # missing data (presumably the EPIC fill convention — confirm).
        ind = self.ncdata[var1][:,0,0,0] >1e34
        self.ncdata[var1][ind,0,0,0] = np.nan
        tdata = self.ncdata['time'][:]
        y = self.ncdata[var1][:,0,0,0]
        # clear the axes and redraw the plot anew
        #
        self.axes.clear()
        self.axes.grid(self.grid_cb.isChecked())
        if self.datapoints_cb.isChecked():
            self.axes.plot(
                tdata,y,
                marker='*',
                picker=True)
        else:
            self.axes.plot(
                tdata,y,
                picker=True)
        self.fig.suptitle(self.station_data, fontsize=12)
        self.canvas.draw()

    def on_save(self):
        """
        save to same location with .ed.nc ending
        """
        file_out = unicode(self.textbox.text()).replace('.nc','.ed.nc')
        # NOTE(review): save_netcdf is not defined in this class — confirm
        # it exists on a superclass or is added elsewhere.
        self.save_netcdf(file_out)

    def create_main_frame(self):
        # Build the central widget: canvas + toolbar + control row.
        self.main_frame = QWidget()
        # Create the mpl Figure and FigCanvas objects.
        # 5x4 inches, 100 dots-per-inch
        #
        self.dpi = 75
        self.fig = Figure((24.0, 15.0), dpi=self.dpi)
        self.canvas = FigureCanvas(self.fig)
        self.canvas.setParent(self.main_frame)
        # Since we have only one plot, we can use add_axes
        # instead of add_subplot, but then the subplot
        # configuration tool in the navigation toolbar wouldn't
        # work.
        #
        self.axes = self.fig.add_subplot(111)
        # Bind the 'pick' event for clicking on one of the bars
        #
        self.canvas.mpl_connect('pick_event', self.on_pick)
        # Create the navigation toolbar, tied to the canvas
        #
        self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame)
        # Other GUI controls
        #
        self.textbox = QLineEdit()
        self.textbox.setMinimumWidth(200)
        # NOTE(review): the signature string contains a stray space
        # ('editingFinished ()'); Qt normalizes signatures, so this likely
        # still connects — confirm.
        self.connect(self.textbox, SIGNAL('editingFinished ()'), self.on_draw)
        self.draw_button = QPushButton("&Draw")
        self.connect(self.draw_button, SIGNAL('clicked()'), self.on_draw)
        self.grid_cb = QCheckBox("Show &Grid")
        self.grid_cb.setChecked(False)
        self.connect(self.grid_cb, SIGNAL('stateChanged(int)'), self.on_draw)
        self.datapoints_cb = QCheckBox("Show &DataPoints")
        self.datapoints_cb.setChecked(False)
        self.connect(self.datapoints_cb, SIGNAL('stateChanged(int)'), self.on_draw)
        self.param_dropdown = QComboBox()
        # NOTE(review): QComboBox has no 'clicked()' signal; this connect
        # probably never fires — 'currentIndexChanged(int)' may be intended.
        self.connect(self.param_dropdown, SIGNAL('clicked()'), self.on_draw)
        #
        # Layout with box sizers
        #
        hbox = QHBoxLayout()
        for w in [ self.textbox, self.draw_button, self.grid_cb,
                self.param_dropdown, self.datapoints_cb]:
            hbox.addWidget(w)
            hbox.setAlignment(w, Qt.AlignVCenter)
        vbox = QVBoxLayout()
        vbox.addWidget(self.canvas)
        vbox.addWidget(self.mpl_toolbar)
        vbox.addLayout(hbox)
        self.main_frame.setLayout(vbox)
        self.setCentralWidget(self.main_frame)

    def populate_dropdown(self):
        # Fill the variable selector with plottable variables and stash the
        # station coordinate values for the status bar / figure title.
        self.load_netcdf()
        self.station_data = {}
        for k in self.vars_dic.keys():
            if k not in ['time','time2','lat','lon','depth','latitude','longitude']:
                self.param_dropdown.addItem(k)
            if k in ['lat','lon','depth','latitude','longitude']:
                self.station_data[k] =str(self.ncdata[k][0])

    def create_status_bar(self):
        # Show the station coordinates gathered by populate_dropdown().
        self.status_text = QLabel(json.dumps(self.station_data))
        self.statusBar().addWidget(self.status_text, 1)

    def create_menu(self):
        # File menu: save plot / quit.  Help menu: about box.
        self.file_menu = self.menuBar().addMenu("&File")
        load_file_action = self.create_action("&Save plot",
            shortcut="Ctrl+S", slot=self.save_plot,
            tip="Save the plot")
        quit_action = self.create_action("&Quit", slot=self.close,
            shortcut="Ctrl+Q", tip="Close the application")
        self.add_actions(self.file_menu,
            (load_file_action, None, quit_action))
        self.help_menu = self.menuBar().addMenu("&Help")
        about_action = self.create_action("&About",
            shortcut='F1', slot=self.on_about,
            tip='About the demo')
        self.add_actions(self.help_menu, (about_action,))

    def add_actions(self, target, actions):
        # Helper: append actions to a menu; None entries become separators.
        for action in actions:
            if action is None:
                target.addSeparator()
            else:
                target.addAction(action)

    def create_action( self, text, slot=None, shortcut=None,
            icon=None, tip=None, checkable=False,
            signal="triggered()"):
        # Factory for a QAction with optional icon/shortcut/tooltip/slot.
        action = QAction(text, self)
        if icon is not None:
            action.setIcon(QIcon(":/%s.png" % icon))
        if shortcut is not None:
            action.setShortcut(shortcut)
        if tip is not None:
            action.setToolTip(tip)
            action.setStatusTip(tip)
        if slot is not None:
            self.connect(action, SIGNAL(signal), slot)
        if checkable:
            action.setCheckable(True)
        return action

    def load_netcdf( self, file=parent_dir+'/example_data/example_timeseries_data.nc'):
        # NOTE(review): the `file` parameter is unused — the path is always
        # read from the textbox; confirm before removing it.
        df = EcoFOCI_netCDF(unicode(self.textbox.text()))
        self.vars_dic = df.get_vars()
        self.ncdata = df.ncreadfile_dic()
        df.close()
        #convert epic time
        #time2 wont exist if it isnt epic keyed time
        if 'time2' in self.vars_dic.keys():
            self.ncdata['time'] = EPIC2Datetime(self.ncdata['time'], self.ncdata['time2'])
def main():
    """Launch the Qt application, plotting the file named on the command
    line when given, otherwise the bundled example dataset."""
    app = QApplication(sys.argv)
    args = app.arguments()
    # Bug fix: the original wrapped AppForm(args[1]) in a bare `except:`,
    # silently swallowing *every* error (including genuine failures while
    # loading a user-supplied file) and falling back to the default data.
    # Only the "no argument given" case should use the default.
    if len(args) > 1:
        form = AppForm(active_file=args[1])
    else:
        form = AppForm()
    app.setStyle("plastique")
    form.show()
    app.exec_()

if __name__ == "__main__":
    main()
def ccw(A, B, C):
    """True when the triangle A -> B -> C turns counter-clockwise."""
    return (C[1] - A[1]) * (B[0] - A[0]) > (B[1] - A[1]) * (C[0] - A[0])

def is_intersection(A, B, C, D):
    """True when segment AB properly crosses segment CD."""
    return ccw(A, C, D) != ccw(B, C, D) and ccw(A, B, C) != ccw(A, B, D)

def compare_with_prev_position(prev_detection, detection, line, entry, exit):
    """Count a counting-line crossing between two detection boxes.

    Both detections are (xmin, ymin, xmax, ymax) boxes; the segment joining
    their centres is tested against `line` (a pair of points).  When it
    crosses, `entry` or `exit` is incremented depending on which side of
    the line the current centre lies.  Returns the (entry, exit) pair.
    """
    xmin_prev = int(prev_detection[0])
    ymin_prev = int(prev_detection[1])
    xmax_prev = int(prev_detection[2])
    ymax_prev = int(prev_detection[3])
    current_center = (
        int(detection[0] + (detection[2] - detection[0]) / 2),
        int(detection[1] + (detection[3] - detection[1]) / 2),
    )
    prev_center = (
        int(xmin_prev + (xmax_prev - xmin_prev) / 2),
        int(ymin_prev + (ymax_prev - ymin_prev) / 2),
    )
    # Signed area cross-product: sign tells which side of `line` the
    # current centre is on.
    side = (((current_center[0] - line[0][0]) * (line[1][1] - line[0][1]))
            - ((current_center[1] - line[0][1]) * (line[1][0] - line[0][0])))
    if is_intersection(current_center, prev_center, line[0], line[1]):
        if side < 0:
            entry += 1
        if side > 0:
            exit += 1
    return entry, exit
from datetime import datetime
import pytest
from articat.bq_artifact import BQArtifact
def test_versioned_not_supported():
    """Versioned BQ artifacts are explicitly unsupported."""
    expected = "Versioned BigQuery Artifact is not supported"
    with pytest.raises(NotImplementedError, match=expected):
        BQArtifact.versioned("foo", version="0.1.0")

def test_bq_client_is_lazy():
    """Accessing the client attribute must succeed without configuration."""
    assert BQArtifact.bq_client

def test_partitioned_must_be_date():
    """Day-resolution partitions are fine; datetime precision is rejected."""
    assert BQArtifact.partitioned("foo")
    expected = "Partition resolution for BQ artifact must be a day"
    with pytest.raises(ValueError, match=expected):
        BQArtifact.partitioned("foo", partition=datetime.utcnow())
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `hypothesis_ledger` package."""
import unittest
from collections import Counter, OrderedDict
from datetime import date
from decimal import Decimal
from pathlib import Path
from hypothesis_ledger.hypothesis_ledger import Ledger, Transaction
class TestLedger(unittest.TestCase):
    """Tests for `hypothesis_ledger` package.

    Fixture: a three-row transactions.csv located next to this module
    (john->mary 125.00 on 2015-01-16; john->supermarket 20.00 and
    mary->insurance 100.00 on 2015-01-17), loaded eagerly in setUp().
    """

    def setUp(self):
        """Set up test fixtures, if any."""
        self.transaction_path = Path(__file__).with_name('transactions.csv')
        self.ledger = Ledger(self.transaction_path, load_transactions=True)

    def tearDown(self):
        """Tear down test fixtures, if any."""

    def test_load_transaction_length(self):
        # All three fixture rows should be parsed.
        self.assertEqual(len(self.ledger.transactions), 3)

    def test_load_transaction_formatting(self):
        # Pins Transaction.__str__, including the '§' currency symbol.
        self.assertEqual(
            str(self.ledger.transactions[0]),
            '<Transaction 2015-01-16, john:-§125.00, mary:+§125.00 >',
        )

    def test_load_transaction_parse_types(self):
        # Every field must be parsed to its rich type, not left as str.
        for transaction in self.ledger.transactions:
            self.assertIsInstance(transaction, Transaction)
            self.assertIsInstance(transaction.date, date)
            self.assertIsInstance(transaction.source, str)
            self.assertIsInstance(transaction.destination, str)
            self.assertIsInstance(transaction.value, Decimal)
            self.assertIsInstance(transaction.currency_symbol, str)

    def test_load_transaction_parse_values(self):
        transactions = [
            Transaction(date=date(2015, 1, 16),
                source='john',
                destination='mary',
                value=Decimal('125.00')),
            Transaction(date=date(2015, 1, 17),
                source='john',
                destination='supermarket',
                value=Decimal('20.00')),
            Transaction(date=date(2015, 1, 17),
                source='mary',
                destination='insurance',
                value=Decimal('100.00')),
        ]
        self.assertEqual(self.ledger.transactions, transactions)

    def test_calculate_no_bounds(self):
        # With no date bound, all three transactions are applied.
        balances = Counter({'insurance': Decimal('100.00'),
            'mary': Decimal('25.00'),
            'supermarket': Decimal('20.00'),
            'john': Decimal('-145.00')})
        self.assertEqual(self.ledger.calculate_balances(), balances)

    def test_calculate_with_bounds(self):
        # Bounded at the 16th: only the first transaction counts.
        self.assertEqual(self.ledger.calculate_balances(date(2015, 1, 16)),
            Counter({'john': Decimal('-125.00'),
                'mary': Decimal('125.00')}))

    def test_calculate_early_bounds(self):
        # A bound before any transaction yields an empty Counter.
        self.assertEqual(self.ledger.calculate_balances(date.min),
            Counter())

    def test_balance_for_no_bounds(self):
        self.assertEqual(self.ledger.balance_for('john'), Decimal('-145.00'))

    def test_balance_for_with_bounds(self):
        self.assertEqual(self.ledger.balance_for('john', date(2015, 1, 16)),
            Decimal('-125.00'))

    def test_balance_for_missing_account(self):
        # Unknown accounts report a zero balance rather than raising.
        self.assertEqual(self.ledger.balance_for('bob'), 0)

    def test_balance_for_invalid_type(self):
        # A string date must be rejected, not silently compared.
        with self.assertRaises(TypeError):
            self.ledger.balance_for('john', '2015-01-16')

    def test_balance_for_by_day_no_bounds(self):
        self.assertEqual(
            self.ledger.balance_for_by_day('john'),
            OrderedDict([
                (date(2015, 1, 16), Decimal('-125.00')),
                (date(2015, 1, 17), Decimal('-145.00'))
            ])
        )

    def test_balance_for_by_day_with_bounds(self):
        self.assertEqual(
            self.ledger.balance_for_by_day('john', date(2015, 1, 16)),
            OrderedDict([
                (date(2015, 1, 16), Decimal('-125.00')),
            ])
        )

    def test_balance_for_by_day_missing_account(self):
        # Unknown accounts still yield one zero entry per transaction day.
        self.assertEqual(
            self.ledger.balance_for_by_day('bob'),
            OrderedDict([
                (date(2015, 1, 16), 0),
                (date(2015, 1, 17), 0),
            ])
        )

    def test_balance_by_day_no_bounds(self):
        day_balances = OrderedDict([
            (date(2015, 1, 16),
                Counter({'mary': Decimal('125.00'),
                    'john': Decimal('-125.00')})),
            (date(2015, 1, 17),
                Counter({'insurance': Decimal('100.00'),
                    'mary': Decimal('25.00'),
                    'supermarket': Decimal('20.00'),
                    'john': Decimal('-145.00')}))
        ])
        self.assertEqual(self.ledger.calculate_balances_by_day(), day_balances)

    def test_balance_by_day_with_bounds(self):
        day_balances = OrderedDict([
            (date(2015, 1, 16),
                Counter({'mary': Decimal('125.00'),
                    'john': Decimal('-125.00')})),
        ])
        self.assertEqual(
            self.ledger.calculate_balances_by_day(date(2015, 1, 16)),
            day_balances,
        )
|
import sys
import csv
class SimulationSettings:
    """Process-wide tunables for the FLEE agent-based refugee simulation.

    All settings are class attributes, so they act as global configuration;
    ReadFromCSV mutates them in place and returns the step count.
    """
    Softening = 10.0  # KM added to every link distance to eliminate needless distinction between very short routes.
    #TurnBackAllowed = True # feature disabled for now.
    AgentLogLevel = 0  # set to 1 for basic agent information.
    CampLogLevel = 0   # set to 1 to obtain average times for agents to reach camps at any time step (aggregate info).
    InitLogLevel = 0   # set to 1 for basic information on locations added and conflict zones assigned.
    TakeRefugeesFromPopulation = True

    sqrt_ten = 3.16227766017  # square root of ten (10^0.5).
    CampWeight = sqrt_ten           # attraction factor for camps.
    ConflictWeight = 1.0 / sqrt_ten # reduction factor for refugees entering conflict zones.
    MaxMoveSpeed = 360  # most number of km that we expect refugees to traverse per time step (30 km/h * 12 hours).
    MaxWalkSpeed = 35   # most number of km that we expect refugees to traverse per time step on foot (3.5 km/h * 10 hours).
    MaxCrossingSpeed = 20  # most number of km that we expect refugees to traverse per time step on boat/walk to cross river (2 km/h * 10 hours).
    StartOnFoot = True  # Agents walk on foot when they traverse their very first link.
    CapacityBuffer = 1.0

    # default move chances
    ConflictMoveChance = 1.0
    CampMoveChance = 0.001
    DefaultMoveChance = 0.3

    # Specific enhancements for the 2.0 ruleset.
    # This includes a movespeed of 420 and a walk speed of 42.
    AvoidShortStints = True  # Displaced people will not take a break unless they at least travelled for a full day's distance in the last two days.
    FlareConflictInputFile = ""
    AwarenessLevel = 1  # -1, no weighting at all, 0 = road only, 1 = location, 2 = neighbours, 3 = region.
    #NumProcs = 1 #This is not supported at the moment.
    UseV1Rules = False

    # Evaluated at class-creation time: flip the defaults to the v1 ruleset.
    if UseV1Rules == True:
        MaxMoveSpeed = 200
        StartOnFoot = False
        AvoidShortStints = False  # Displaced people will not take a break unless they at least travelled for a full day's distance in the last two days.
        CampWeight = 2.0          # attraction factor for camps.
        ConflictWeight = 0.25     # reduction factor for refugees entering conflict zones.

    def ReadFromCSV(csv_name):
        """Read simulation settings from a key,value CSV file.

        Mutates the SimulationSettings class attributes in place and returns
        the number of steps (-1 if the file does not specify one).  Unknown
        keys print an error and terminate the process, as before.

        Fixes: blank rows previously crashed with IndexError on row[0][0];
        row[0].lower() was recomputed for every elif branch.
        """
        number_of_steps = -1
        with open(csv_name, newline='') as csvfile:
            values = csv.reader(csvfile)
            for row in values:
                # Robustness: skip blank rows / empty first cells.
                if not row or not row[0]:
                    continue
                if row[0][0] == "#":
                    continue
                key = row[0].lower()  # hoisted: computed once per row
                if key == "agentloglevel":
                    SimulationSettings.AgentLogLevel = int(row[1])
                elif key == "camploglevel":
                    SimulationSettings.CampLogLevel = int(row[1])
                elif key == "initloglevel":
                    SimulationSettings.InitLogLevel = int(row[1])
                elif key == "minmovespeed":
                    # NOTE(review): MinMoveSpeed has no class-level default —
                    # it only exists after being read from CSV; confirm users.
                    SimulationSettings.MinMoveSpeed = float(row[1])
                elif key == "maxmovespeed":
                    SimulationSettings.MaxMoveSpeed = float(row[1])
                elif key == "numberofsteps":
                    number_of_steps = int(row[1])
                elif key == "campweight":
                    SimulationSettings.CampWeight = float(row[1])
                elif key == "conflictweight":
                    SimulationSettings.ConflictWeight = float(row[1])
                elif key == "conflictmovechance":
                    SimulationSettings.ConflictMoveChance = float(row[1])
                elif key == "campmovechance":
                    SimulationSettings.CampMoveChance = float(row[1])
                elif key == "defaultmovechance":
                    SimulationSettings.DefaultMoveChance = float(row[1])
                elif key == "awarenesslevel":
                    SimulationSettings.AwarenessLevel = int(row[1])
                elif key == "flareconflictinputfile":
                    SimulationSettings.FlareConflictInputFile = row[1]
                elif key == "usev1rules":
                    SimulationSettings.UseV1Rules = (row[1].lower() == "true")
                elif key == "startonfoot":
                    SimulationSettings.StartOnFoot = (row[1].lower() == "true")
                elif key == "avoidshortstints":
                    SimulationSettings.AvoidShortStints = (row[1].lower() == "true")
                else:
                    print("FLEE Initialization Error: unrecognized simulation parameter:", row[0])
                    sys.exit()
        return number_of_steps
|
__author__ = "Mario Figueiro Zemor"
__email__ = "mario.figueiro@ufgrs.br"
from classes.Database import *
import logging
logger = logging.getLogger(__name__);
class Estudante():
    """An ENEM student record plus static helpers for database access."""

    # Shared database cursor, fetched once at class-definition time.
    cursor = Database.getCursor()

    def __init__(self, tupla):
        """Build a student instance from one database result row."""
        self.inscricao = tupla[0]
        self.idade = tupla[1]
        self.sexo = Estudante.getSexo(tupla[2])
        self.cod_nacionalidade = Estudante.getNacionalidade(tupla[3])
        self.cod_municipio_resid = tupla[4]
        self.cod_municipio_nasc = tupla[5]
        self.cod_uf_resid = tupla[6]
        self.cod_uf_nasc = tupla[7]
        self.estado_civil = Estudante.getEstadoCivil(tupla[8])
        self.etnia = Estudante.getEtnia(tupla[9])
        self.cod_conclusao_EM = Estudante.getConclusaoEM(tupla[10])
        self.ano_conclusao_EM = tupla[11]
        self.tipo_escola_EM = tupla[12]
        self.tipo_ens_escola_EM = tupla[13]
        self.cod_escola = tupla[14]

    def toString(self):
        """Short human-readable description of the student."""
        return "Estudante[{},{},{}]".format(self.inscricao, self.idade, self.sexo)

    @staticmethod
    def getQuery(query):
        """Look up one of the prepared SQL statements by name."""
        sql = {
            'INSERT': """INSERT INTO estudante VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)""",
            'FIND_BY_ID': """SELECT * FROM estudante WHERE codinscricao = {} """,
            'GROUP_BY_SCHOOL': """SELECT codescola, COUNT(*) FROM estudante GROUP BY codescola ORDER BY codescola""",
        }
        return sql[query]

    @staticmethod
    def getEtnia(etnia):
        """Map a TP_COR_RACA code to its label."""
        labels = {
            '0': 'Nao Declarado',
            '1': 'Branca',
            '2': 'Preta',
            '3': 'Parda',
            '4': 'Amarela',
            '5': 'Indigina',
        }
        return labels[etnia]

    @staticmethod
    def getNacionalidade(nacionalidade):
        """Map a NACIONALIDADE code to its label."""
        labels = {
            '1': 'Brasileiro(a)',
            '2': 'Brasileiro(a) Naturalizado(a)',
            '3': 'Estrangeiro(a)',
            '4': 'Brasileiro(a) Nato(a), nascido(a) no exterior',
        }
        return labels[nacionalidade]

    @staticmethod
    def getEstadoCivil(estado_civil):
        """Map a TP_ESTADO_CIVIL code to its label."""
        labels = {
            '0': 'Solteiro(a)',
            '1': 'Casado(a)/Mora com um(a) companheiro(a)',
            '2': 'Divorciado(a)/Desquitado(a)/Separado(a)',
            '3': 'Viuvo(a)',
        }
        return labels[estado_civil]

    @staticmethod
    def getSexo(sexo):
        """Map a TP_SEXO code ('F'/'M') to its label."""
        labels = {'F': 'Feminino', 'M': 'Masculino'}
        return labels[sexo]

    @staticmethod
    def getConclusaoEM(cod):
        """Map an ST_CONCLUSAO code (int) to its label."""
        labels = {
            1: 'Ja conclui o Ensino Medio',
            2: 'Estou cursando e concluirei o Ensino Medio este Ano',
            3: 'Estou cursando e concluirei o Ensino Medio apos este ano',
            4: 'Nao conclui e nao estou cursando o Ensino Medio',
        }
        return labels[cod]

    @staticmethod
    def insert(values, cursor):
        """Insert one student row; logs and re-raises on failure."""
        try:
            logger.debug("Inserting values: {} into Student.".format(values))
            cursor.execute(Estudante.getQuery('INSERT'), values)
        except Exception as e:
            logger.error("Exception during insert values into Student: {}".format(e))
            raise

    @staticmethod
    def groupSchool():
        """Print the per-school student counts."""
        Estudante.cursor.execute(Estudante.getQuery('GROUP_BY_SCHOOL'))
        print(Estudante.cursor.fetchall())

    @staticmethod
    def findByCod(cod):
        """Fetch a student by registration code; returns an Estudante or None."""
        try:
            logger.debug("Find Studante by cod.")
            sql = Estudante.getQuery('FIND_BY_ID').format(cod)
            Estudante.cursor.execute(sql)
            row = Estudante.cursor.fetchone()
            return Estudante(row) if row else row
        except Exception as e:
            logger.error("class: (Estudante) Method: (findByCod) : Exception ({}).".format(e))
            raise

    @staticmethod
    def getValues(columns, dd):
        """Convert raw file columns into the INSERT value tuple (None on error)."""
        def _opt_int(field):
            # Empty cells become -1; everything else is parsed as int.
            raw = columns[dd[field]]
            return -1 if raw == '' else int(raw)
        try:
            logger.debug("Estudante: Getting values from file columns.")
            ret = (int(columns[dd['NU_INSCRICAO']]),
                   int(columns[dd['IDADE']]),
                   columns[dd['TP_SEXO']],
                   int(columns[dd['NACIONALIDADE']]),
                   int(columns[dd['COD_MUNICIPIO_RESIDENCIA']]),
                   _opt_int('COD_MUNICIPIO_NASCIMENTO'),
                   _opt_int('COD_UF_RESIDENCIA'),
                   _opt_int('COD_UF_NASCIMENTO'),
                   int(columns[dd['TP_ESTADO_CIVIL']]),
                   int(columns[dd['TP_COR_RACA']]),
                   int(columns[dd['ST_CONCLUSAO']]),
                   _opt_int('ANO_CONCLUIU'),
                   _opt_int('TP_ESCOLA'),
                   _opt_int('IN_TP_ENSINO'),
                   _opt_int('COD_ESCOLA'))
        except Exception as e:
            logger.error("Estudante: Exception during get values from columns: {}".format(e))
            ret = None
        return ret
|
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.detailed_ground_heat_transfer import GroundHeatTransferBasementAutoGrid
log = logging.getLogger(__name__)
class TestGroundHeatTransferBasementAutoGrid(unittest.TestCase):
    """Round-trip test for the GroundHeatTransferBasementAutoGrid IDF object."""

    def setUp(self):
        # mkstemp returns an OS-level file descriptor that the caller owns.
        self.fd, self.path = tempfile.mkstemp()

    def tearDown(self):
        # Close the descriptor from mkstemp before unlinking; the original
        # never closed it, leaking one file descriptor per test.
        os.close(self.fd)
        os.remove(self.path)

    def test_create_groundheattransferbasementautogrid(self):
        """Save an object to an IDF file and check all values survive reloading."""
        pyidf.validation_level = ValidationLevel.error
        obj = GroundHeatTransferBasementAutoGrid()
        # Real-valued fields to set, then verify after the save/load round trip.
        fields = {
            'clearance_distance_from_outside_of_wall_to_edge_': 0.0,
            'slabx_x_dimension_of_the_building_slab': 30.0,
            'slaby_y_dimension_of_the_building_slab': 30.0,
            'concagheight_height_of_the_foundation_wall_above_grade': 0.0,
            'slabdepth_thickness_of_the_floor_slab': 5.5,
            'basedepth_depth_of_the_basement_wall_below_grade': 0.0,
        }
        for name, value in fields.items():
            setattr(obj, name, value)
        idf = IDF()
        idf.add(obj)
        idf.save(self.path, check=False)
        with open(self.path, mode='r') as f:
            for line in f:
                log.debug(line.strip())
        idf2 = IDF(self.path)
        reloaded = idf2.groundheattransferbasementautogrids[0]
        for name, value in fields.items():
            self.assertAlmostEqual(getattr(reloaded, name), value)
from model import *
import json
import ui_helper as ui
import os
# Best-effort log file used by the log_* helpers below.
logfilename = 'warnings.log'

def read(file_name):
    """Load a gradebook: parse the JSON file and rebuild the Course hierarchy."""
    with open(file_name, 'r') as fp:
        return course_from_dict(json.load(fp))
def save(gb, file_name):
    """Serialize the course *gb* to pretty-printed, key-sorted JSON on disk."""
    serialized = json.dumps(course_to_dict(gb), indent=2, sort_keys=True)
    with open(file_name, mode='w', encoding='utf-8') as fp:
        fp.write(serialized)
def log_config_warnings(gb):
    """Append the course's configuration warnings (if any) to the log file.

    Logging is best-effort: an unwritable log file is ignored rather than
    interrupting the program.
    """
    warnings = '\n'.join(gb.config_warnings())
    if warnings:
        warnings += '\n'
    try:
        with open(logfilename,'a') as f:
            f.write(warnings)
    except OSError:
        # Was a bare "except: pass", which also hid programming errors
        # (e.g. NameError); only I/O problems should be ignored here.
        pass
def log_message(message):
    """Append one line to the log file; I/O failures are silently ignored."""
    try:
        with open(logfilename,'a') as f:
            f.write(message + '\n')
    except OSError:
        # Narrowed from a bare "except:" so real bugs are not hidden.
        pass
def delete_log_file():
    """Remove the log file if it exists; a missing file is not an error."""
    try:
        os.remove(logfilename)
    except OSError:
        pass
# transfer all data in the course object hierarchy to a dictionary
def course_to_dict(gb):
    """Flatten the course *gb* (categories, students, gradeables, questions,
    scores) into a plain dict suitable for JSON serialization.

    Each child object gets an integer 'id'; scores become
    {sid, gid, qid, value} records.  Temporary 'obj' keys link dict entries
    back to the live objects while scores are collected, and are deleted
    before returning.
    """
    course = {'name': gb.name, 'term': gb.term, 'schema_version': gb.schema_version, \
        'global_added_pct': gb.global_added_pct, 'letter_plus_minus_pct': gb.letter_plus_minus_pct, \
        'audible_warnings': gb.audible_warnings, 'gradeables':[], 'scores':[]}
    course['categories'] = [{'id':i, 'name': c.name, 'pct_of_grade': c.pct_of_grade, \
        'drop_low_n': c.drop_low_n, 'est_ct':c.est_ct, 'combine_pts':c.combine_pts, \
        'gradeable_pcts': c.gradeable_pcts, 'obj': c} \
        for i, c in enumerate(gb.categories)]
    course['students'] = [{'id':i, 'first': s.first, 'last': s.last, \
        'email': s.email, 'is_active': s.is_active,'notes': s.notes, 'obj': s} \
        for i, s in enumerate(gb.students)]
    # map each live Category object to its dict entry (for the 'cid' lookup below)
    cat_dict = {item['obj']: item for item in course['categories']}
    for i, g in enumerate(gb.gradeables):
        gd = {'id':i, 'cid': cat_dict[g.category]['id'], 'name': g.name, 'total_pts': g.total_pts, \
            'sub_pct': g.sub_pct, 'added_pts': g.added_pts, 'added_pct': g.added_pct, 'questions': []}
        for j, q in enumerate(g.questions):
            gd['questions'].append( {'id':j, 'gid': i, 'points': q.points} )
            # one score record per (student, question) pair
            for sd in course['students']:
                course['scores'].append( {'sid': sd['id'], 'gid': gd['id'], 'qid': j, \
                    'value': gb.get_score(sd['obj'],g,q).value } )
        course['gradeables'].append(gd)
    # strip the live-object back-references; they are not JSON-serializable
    for s in course['students']: del s['obj']
    for c in course['categories']: del c['obj']
    return course
# construct the course object hierarchy from a dictionary
def course_from_dict(course_dict):
    """Rebuild the Course object graph from the dict produced by
    course_to_dict, after applying any pending schema migrations.
    """
    upgrade(course_dict)
    # id-keyed dicts for reconstructing scores
    category_dict = {item['id'] : item for item in course_dict['categories']}
    gradeable_dict = {item['id'] : item for item in course_dict['gradeables']}
    student_dict = {item['id'] : item for item in course_dict['students']}
    question_dict = {(q['gid'],q['id']) : q for g in course_dict['gradeables'] \
        for q in g['questions'] }
    course_obj = Course(course_dict['name'], course_dict['term'], \
        course_dict['global_added_pct'], course_dict['letter_plus_minus_pct'], \
        course_dict['audible_warnings'])
    # rebuild each child object, remembering the live object on its dict entry
    for cd in course_dict['categories']:
        category = Category(course_obj, cd['name'], cd['pct_of_grade'], cd['drop_low_n'], \
            cd['est_ct'], cd['combine_pts'], cd['gradeable_pcts'])
        category_dict[cd['id']]['obj'] = category
        course_obj.categories.append(category)
    for sd in course_dict['students']:
        student = Student(course_obj, sd['first'], sd['last'], sd['email'], \
            sd['is_active'], sd['notes'])
        student_dict[sd['id']]['obj'] = student
        course_obj.students.append(student)
    for gd in course_dict['gradeables']:
        gradeable = Gradeable(course_obj, gd['name'], category_dict[gd['cid']]['obj'], \
            gd['total_pts'], gd['sub_pct'], gd['added_pts'], gd['added_pct'])
        gradeable_dict[gd['id']]['obj'] = gradeable
        course_obj.gradeables.append(gradeable)
        for qd in gd['questions']:
            question = Question(gradeable, qd['points'])
            question_dict[(qd['gid'],qd['id'])]['obj'] = question
            gradeable.questions.append(question)
    # reattach scores via the id -> live-object maps built above
    for sd in course_dict['scores']:
        student = student_dict[sd['sid']]['obj']
        gradeable = gradeable_dict[sd['gid']]['obj']
        question = question_dict[(sd['gid'],sd['qid'])]['obj']
        score = Score(student, gradeable, question, sd['value'])
        course_obj.scores[(student, gradeable, question)] = score
    return course_obj
def upgrade(course_dict):
    """Migrate *course_dict* in place to the current schema version.

    Runs migration<N>() one version at a time until the dict matches the
    module-level schema_version (not defined in this file -- presumably
    supplied via 'from model import *'; TODO confirm).
    """
    global schema_version
    cv = course_dict['schema_version'] if 'schema_version' in course_dict else 0
    while cv < schema_version:
        cv += 1
        # dispatch to migration1 .. migrationN by name
        globals()["migration"+str(cv)](course_dict)
        course_dict['schema_version'] = cv
        print("upgraded schema to version ", cv)
        ui.pause()
def migration1(course_dict):
    """Schema 1: stamp the schema_version field."""
    course_dict['schema_version'] = 1
def migration2(course_dict):
    """Schema 2: add an empty 'notes' field to every student."""
    for s in course_dict['students']: s['notes']=''
def migration3(course_dict):
    """Schema 3: add an estimated count to every category."""
    for c in course_dict['categories']: c['est_ct']=0
def migration4(course_dict):
    """Schema 4: add course-level grading adjustment fields."""
    course_dict['global_added_pct'] = 0.0
    course_dict['letter_plus_minus_pct'] = 1.0
    course_dict['schema_version'] = 4
def migration5(course_dict):
    """Schema 5: add the combine-points flag to every category."""
    for c in course_dict['categories']: c['combine_pts'] = 0
def migration6(course_dict):
    """Schema 6: add per-gradeable percentage lists to every category."""
    for c in course_dict['categories']: c['gradeable_pcts'] = []
def migration7(course_dict):
    """Schema 7: turn on audible warnings by default."""
    course_dict['audible_warnings'] = 1
|
import sys
from config import DEBUG_MODE, EASYGIF_VERSION

# The heavy imports (discord client, bot token) are deliberately deferred
# until after the informational flags, so -v/-h stay fast.
if "-v" in sys.argv or "--version" in sys.argv:
    print(f"EasyGif {EASYGIF_VERSION}")
    # sys.exit() instead of quit(): quit() is an interactive helper injected
    # by the site module and may not exist (e.g. under "python -S").
    sys.exit(0)
if "-h" in sys.argv or "--help" in sys.argv:
    print()
    print(" EASYGIF SERVER HELP CENTER")
    print()
    print(f"EasyGif {EASYGIF_VERSION}")
    print(f"DEBUG_MODE: {DEBUG_MODE}")
    print("""
The main server for EasyGif.
Args:
--clear-log Clears the 'easygif.log' file
-h, --help Shows the EasyGif Server Help Center and exits
-d, --debug Launches EasyGif Server in DEBUG_MODE (note: --debug enables a higher debug level)
-v, --version Shows the Server version and exits
""")
    sys.exit(0)
# NOTE(review): --clear-log and -d/--debug are documented above but not
# handled here -- presumably consumed elsewhere (config?); confirm.
from __protected import DISCORD_BOT_TOKEN
from easygif import client
from utils.log import log
log("Running the Discord Bot")
client.run(DISCORD_BOT_TOKEN)
|
#!/usr/bin/python
# frontend to meowcc
# the Scratch linker <3
# llvm-link a.ll b.ll -f | llvm-dis
import sys
import subprocess
import argparse

# Parse arguments in a pretty way.
parser = argparse.ArgumentParser(description='Frontend to meowcc')
parser.add_argument("--output", "-o", help="output file")
parser.add_argument("--username", help="Scratch username")
parser.add_argument("--project", "-pid", help="Project ID to upload to")
parser.add_argument("--compile", "-c", help="Skip linking step", action='store_true')
parser.add_argument("files", metavar='F', nargs="+")
args = parser.parse_args()
projectName = args.output or args.project

# First, compile any C sources into LLVM IR.
llvmFiles = []
for file in args.files:
    extension = file.split(".")[-1]
    stem = ".".join(file.split(".")[0:-1])
    if extension == 'll':
        # Already LLVM IR: nothing to do.
        llvmFiles.append(file)
    elif extension == 'c':
        # Compile to LLVM IR; a compile-only build outputs .o instead.
        if args.compile:
            subprocess.call(["clang", "-Oz", "-S", "-emit-llvm", file, "-o", stem + ".o"])
        else:
            subprocess.call(["clang", "-Oz", "-S", "-emit-llvm", file])
            # clang wrote the same basename with a .ll extension.
            llvmFiles.append(stem + ".ll")
    elif extension == 'o':
        # They're not actually object files, just ll files in disguise :)
        llvmFiles.append(file)
    else:
        # Was a Python 2 "print" statement (a SyntaxError on Python 3).
        print("Unknown file extension for file " + file)
if args.compile:  # just generate .o, nothing else
    sys.exit()

# Unfortunately, scratch-llvm only works with a single llvm file; link them here <3
# TODO: mitigate the shell=True security risk...
subprocess.call("llvm-link "+ (" ".join(llvmFiles)) + " -f | llvm-dis > " + projectName + ".lll", shell=True)
inputFile = projectName + ".lll"

# Finally, make the actual call.
meowcc = sys.argv[0][0:-len("scratchcc.py")] + "meowcc.js"
if args.output:
    subprocess.call("node " + meowcc + " "+inputFile+" > "+args.output, shell=True)
elif args.project:
    subprocess.call("node " + meowcc + " "+inputFile+" "+args.username+" "+args.project, shell=True)
else:
    # Bug fix: this branch referenced an undefined name "meow" (NameError).
    subprocess.call("node " + meowcc + " " + inputFile, shell=True)
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.contrib.sites.models import Site
class Setting(models.Model):
    """
    Stores values for ``mezzanine.conf`` that can be edited via the admin.
    """
    # Registered setting name.
    name = models.CharField(max_length=50)
    # String-serialized value (presumably parsed by mezzanine.conf -- confirm).
    value = models.CharField(max_length=2000)
    # Owning site; always set automatically in save(), hence not editable.
    site = models.ForeignKey(Site, editable=False)

    class Meta:
        verbose_name = _("Setting")
        verbose_name_plural = _("Settings")

    def save(self, *args, **kwargs):
        """
        Set the site to the current site.
        """
        self.site = Site.objects.get_current()
        super(Setting, self).save(*args, **kwargs)
|
def link(cmd, serv, nick, dest, msg):
    """IRC command handler: reply to *dest* with an lmgtfy link for *msg*.

    Spaces in the query become '+' so the text works as a URL query string;
    no further URL-escaping is performed.
    """
    q = msg.replace(" ","+")
    serv.send("PRIVMSG %s :%s: http://lmgtfy.com/?q=%s\n" % (dest, nick, q))
# NOTE(review): 'serv' is not defined at module level in this file; this
# registration presumably runs in a plugin-loader namespace that injects
# 'serv' -- confirm against the bot's plugin loading code.
serv.on_command.connect(link, "link")
|
from flask import request, jsonify, make_response, current_app
from flaskapp.database.models import User
from flaskapp.authorization import jwt_redis_blocklist, TOKEN_EXPIRES
from flaskapp.responses import *
from flask_restful import Resource
from flask_jwt_extended import create_access_token, get_jwt, current_user, jwt_required
from mongoengine import ValidationError, NotUniqueError, DoesNotExist
from datetime import datetime
import re
import logging
def password_policy(password):
    """
    Verify the strength of 'password'.

    Returns None when the password is strong enough, otherwise a jsonify-ed
    error payload (short passwords get a dedicated message).
    A password is considered strong if it has:
        8 characters or more,
        at least 1 digit, 1 symbol, 1 uppercase and 1 lowercase letter.
    """
    too_short = len(password) < 8
    # One required character class per pattern: digit, upper, lower, symbol.
    missing_class = any(
        re.search(pattern, password) is None
        for pattern in (r"\d", r"[A-Z]", r"[a-z]", r"\W")
    )
    if not (too_short or missing_class):
        return None
    if too_short:
        return jsonify(RESPONSE_PASSWORD_SHORT)
    return jsonify(RESPONSE_PASSWORD_WEAK)
# API for registration post method
# Username and password will be get from request body
class RegisterApi(Resource):
    """POST endpoint that registers a new user from a JSON body."""
    def post(self):
        """Create a user from {'username', 'password'}.

        Returns 400 with a specific payload when a field is missing, the
        password fails the policy, model validation fails, or the username
        is already taken; returns 200 with the new user's id on success.
        """
        body = request.get_json()
        if body is None:
            return make_response(jsonify(RESPONSE_USERNAME_EMPTY), 400)
        name = body.get('username')
        if name is None:
            return make_response(jsonify(RESPONSE_USERNAME_EMPTY), 400)
        password = body.get('password')
        if password is None:
            return make_response(jsonify(RESPONSE_PASSWORD_EMPTY), 400)
        password_errors = password_policy(password)
        if password_errors is not None:
            return make_response(password_errors, 400)
        try:
            user = User(name=name, password=password)
            user.hash_password()
            user.save()
            # NOTE(review): setLevel on every request mutates the global
            # logger configuration -- consider configuring once at startup.
            current_app.logger.setLevel(logging.INFO)
            current_app.logger.info('New user was registered with the following name: ' + str(name))
        except ValidationError as e:
            return make_response(jsonify(errorId="120", errorMessage=e.to_dict()), 400)
        except NotUniqueError as e:
            return make_response(jsonify(RESPONSE_USERNAME_TAKEN), 400)
        return make_response(jsonify(id=str(user.id)), 200)
# API for log in post method
# With existing password and username access token will be generated
class LoginApi(Resource):
    """POST endpoint that exchanges username/password for a JWT."""
    def post(self):
        """Authenticate and return {'token', 'expire'}.

        Unknown usernames and wrong passwords yield the same response,
        which avoids leaking which usernames exist.
        """
        body = request.get_json()
        if body is None:
            return make_response(jsonify(RESPONSE_USERNAME_EMPTY), 400)
        try:
            user = User.objects.get(name=body.get('username'))
            current_app.logger.setLevel(logging.INFO)
            current_app.logger.info('Login attempt was made with the following username: ' + str(body.get('username')))
        except DoesNotExist:
            return make_response(jsonify(RESPONSE_INVALID_USERNAME_OR_PASSWORD), 400)
        authorized = user.check_password(body.get('password'))
        if not authorized:
            return make_response(jsonify(RESPONSE_INVALID_USERNAME_OR_PASSWORD), 400)
        access_token = create_access_token(identity=user, expires_delta=TOKEN_EXPIRES)
        # Expiry as milliseconds since the epoch (what JS Date expects).
        expiration_date_milliseconds = int((datetime.now()+TOKEN_EXPIRES).timestamp()*1000)
        return make_response(jsonify(token=access_token, expire=expiration_date_milliseconds), 200)
# API for log out post method
# With correct JWT authentication user will be logged out
class LogoutApi(Resource):
    """POST endpoint that revokes the caller's JWT."""
    @jwt_required()
    def post(self):
        """Blocklist the token's jti in redis until the token would expire."""
        jti = get_jwt()["jti"]
        # "ex" gives the entry a TTL, so the blocklist cleans itself up.
        jwt_redis_blocklist.set(jti, "", ex=TOKEN_EXPIRES)
        return make_response(jsonify(message="successful logout"), 200)
# API for password changing via post method
# Username and new password in body required besides the JWT token
class PasswordChangeApi(Resource):
    """POST endpoint to change the caller's (or, for admins, any user's) password."""

    @jwt_required()
    def post(self):
        """Change a password.

        Body: {"password": <new password>, "username": <target, optional>}.
        Only admins may supply "username"; everyone else may only change
        their own password.  Returns 400 on missing/weak passwords or
        validation errors, 403 when a non-admin targets another user,
        200 on success.

        The original duplicated the change logic across the admin and
        non-admin branches with inconsistent error logging; the two paths
        are unified here (responses are unchanged).
        """
        body = request.get_json()
        if body is None:
            current_app.logger.setLevel(logging.ERROR)
            current_app.logger.error('Password was empty')
            return make_response(jsonify(RESPONSE_PASSWORD_EMPTY), 400)
        new_password = body.get('password')
        if new_password is None:
            current_app.logger.setLevel(logging.ERROR)
            current_app.logger.error('Password was empty')
            return make_response(jsonify(RESPONSE_PASSWORD_EMPTY), 400)
        password_errors = password_policy(new_password)
        if password_errors is not None:
            return make_response(password_errors, 400)
        # Only admins may change somebody else's password.
        if 'username' in body and not current_user.isAdmin:
            return make_response(jsonify(RESPONSE_FORBIDDEN), 403)
        try:
            if 'username' in body:
                # May raise DoesNotExist for unknown users (unhandled, as before).
                target_user = User.objects.get(name=body.get('username'))
            else:
                target_user = current_user
            target_user.change_password(new_password)
            current_app.logger.setLevel(logging.INFO)
            current_app.logger.info('Password changed successfully for the following user: ' + str(body.get('username')))
        except ValidationError as e:
            current_app.logger.setLevel(logging.ERROR)
            current_app.logger.error('Password was not changed')
            return make_response(jsonify(errorId="120", errorMessage=e.to_dict()), 400)
        return make_response(jsonify(message="password change successful"), 200)
|
from simple_wsgi import API

app = API()

# Simple routing like in Flask
@app.route("/")
def index_page(request, response):
    response.text = "Hello from the INDEX page."

# Simple routing like in Django
def home_page(request, response):
    response.text = "Hello from the HOME page."
app.add_route("/home", home_page)
app.add_route("/home/", home_page)

# Class Based Handlers with your methods (there only get)
@app.route("/test")
@app.route("/test/")
class TestResource:
    def get(self, request, response):
        response.text = "Test Page"

# Parameterized routing
@app.route("/hello/{name}")
def greeting(request, response, name):
    response.text = f"Hello, {name}."

# Parameterized routing ({age:d} converts the path segment to int).
# Renamed from "greeting": the original redefined the same module-level name,
# shadowing the handler above (routes still worked because the decorator
# registered them eagerly, but the duplicate name was a latent bug).
@app.route("/age/{age:d}")
def age_greeting(request, response, age):
    response.text = f"Your age are {age}."

# Support for templates with jinja templating language
@app.route("/template")
@app.route("/template/")
def template_handler(request, response):
    response.body = app.template(
        "index.html",
        context={"title": "WSGI Framework", "body": "Testing template with jinja."}
    ).encode()

# Custom exception handler
def custom_exception_handler(request, response, exception_cls):
    response.text = "Oops! Something went wrong."
app.add_exception_handler(custom_exception_handler)

@app.route("/except")
@app.route("/except/")
def exception_throwing_handler(request, response):
    raise AssertionError("Testing exeption.")
|
"""
Project OCELoT: Open, Competitive Evaluation Leaderboard of Translations
"""
from difflib import SequenceMatcher
from django.contrib import messages
from django.core.paginator import Paginator
from django.http import Http404
from django.shortcuts import HttpResponseRedirect
from django.shortcuts import render
from leaderboard.models import Submission
from leaderboard.views import _get_team_data
SEGMENTS_PER_PAGE = 100
def _annotate_texts_with_span_diffs(text1, text2, char_based=False):
    """
    Returns pair of texts annotated with HTML tags highlighting word-level differences.
    Both texts must be non empty.
    For example,
      'a b c d e' and 'a B c e f'
    will become:
      'a <span class="diff diff-sub">b</span> c <span class="diff diff-del">d</span> e',
      'a <span class="diff diff-sub">B</span> c e <span class="diff diff-ins">f</span>'
    """
    # Empty or identical inputs need no annotation.
    if not text1 or not text2 or text1 == text2:
        return (text1, text2)

    tokens_a = list(text1) if char_based else text1.split()
    tokens_b = list(text2) if char_based else text2.split()
    sep = '' if char_based else ' '

    def _wrap(tokens, css_class):
        # One highlighted chunk: sub (replace), ins (insert) or del (delete).
        return (
            '<span class="diff diff-' + css_class + '">'
            + sep.join(tokens)
            + '</span>'
        )

    chunks_a = []
    chunks_b = []
    # pylint: disable=invalid-name
    for tag, i1, i2, j1, j2 in SequenceMatcher(None, tokens_a, tokens_b).get_opcodes():
        if tag == 'equal':
            chunks_a.append(sep.join(tokens_a[i1:i2]))
            chunks_b.append(sep.join(tokens_b[j1:j2]))
        elif tag == 'replace':
            chunks_a.append(_wrap(tokens_a[i1:i2], 'sub'))
            chunks_b.append(_wrap(tokens_b[j1:j2], 'sub'))
        elif tag == 'insert':
            chunks_b.append(_wrap(tokens_b[j1:j2], 'ins'))
        elif tag == 'delete':
            chunks_a.append(_wrap(tokens_a[i1:i2], 'del'))
    # Reproduce the original's exact output: a leading separator followed by
    # sep-joined chunks, then strip() -- identical even in char_based mode
    # where leading/trailing whitespace tokens get stripped.
    annotated_a = (sep + sep.join(chunks_a)).strip()
    annotated_b = (sep + sep.join(chunks_b)).strip()
    return (annotated_a, annotated_b)
def submission(request, sub_id=None):
    """Shows submission output.

    Renders one paginated page of (source, hypothesis) segment pairs for
    submission *sub_id*, plus the list of other submissions it can be
    compared with.  Anonymous submissions are only shown to their owners.
    """
    try:
        sub = Submission.objects.get(id=sub_id)
    except Submission.DoesNotExist:
        raise Http404('Submission #{0} does not exist'.format(sub_id))
    (
        ocelot_team_name,
        ocelot_team_email,
        ocelot_team_token,
    ) = _get_team_data(request)
    # Submission must be public unless it's yours
    if sub.is_anonymous() and not sub.is_yours(ocelot_team_token):
        _msg = 'Submission #{0} is not public.'.format(sub_id)
        messages.warning(request, _msg)
        return HttpResponseRedirect('/')
    # A list of submissions which the current submission can be compared with
    _subs = Submission.objects.filter(
        test_set=sub.test_set,
        score__gte=0,  # Ignore invalid submissions
    ).exclude(id=sub_id)
    compare_with = [
        (sub.id, str(sub))
        for sub in _subs
        # Exclude anonymous submissions that are not yours
        # (the comprehension's 'sub' is scoped to it; the outer 'sub' is intact)
        if not sub.is_anonymous() or sub.is_yours(ocelot_team_token)
    ]
    # Paginate the aligned (source, hypothesis) segment pairs
    data = list(zip(sub.get_src_text(), sub.get_hyp_text()))
    paginator = Paginator(data, SEGMENTS_PER_PAGE)
    page_num = request.GET.get('page', 1)
    page_data = paginator.get_page(page_num)
    context = {
        'page': page_data,
        'page_size': SEGMENTS_PER_PAGE,
        'submission_id': sub.id,
        'submission': str(sub),
        'compare_with': compare_with,
        'ocelot_team_name': ocelot_team_name,
        'ocelot_team_email': ocelot_team_email,
        'ocelot_team_token': ocelot_team_token,
    }
    return render(request, 'comparison/submission.html', context=context)
def compare_submissions(request, sub_a_id=None, sub_b_id=None):
    """Renders vertical or horizontal comparison between two submissions.

    Both submissions must exist, belong to the same test set, and be
    visible to the requesting team (public, or owned by it); otherwise the
    user is redirected to '/' with a warning.  Hypothesis segments are
    paired up and annotated with diff markup before pagination.
    """
    try:
        sub_a = Submission.objects.get(id=sub_a_id)
        sub_b = Submission.objects.get(id=sub_b_id)
    except Submission.DoesNotExist:
        raise Http404(
            'Submission #{0} or #{1} does not exist'.format(
                sub_a_id, sub_b_id
            )
        )
    # Submissions from different test sets cannot be compared
    if sub_a.test_set != sub_b.test_set:
        _msg = (
            'Submissions #{0} and #{1} cannot be compared,'.format(
                sub_a_id, sub_b_id
            )
            + ' because they do not belong to the same test set.'
        )
        messages.warning(request, _msg)
        return HttpResponseRedirect('/')
    (
        ocelot_team_name,
        ocelot_team_email,
        ocelot_team_token,
    ) = _get_team_data(request)
    # Submissions that are not public cannot be compared
    if (
        sub_a.is_anonymous() and not sub_a.is_yours(ocelot_team_token)
    ) or (sub_b.is_anonymous() and not sub_b.is_yours(ocelot_team_token)):
        _msg = (
            'Submissions #{0} and #{1} cannot be compared.'.format(
                sub_a_id, sub_b_id
            )
            + ' Both submission outputs must be public.'
        )
        messages.warning(request, _msg)
        return HttpResponseRedirect('/')
    text1 = sub_a.get_hyp_text()
    text2 = sub_b.get_hyp_text()
    data = []
    # TODO: Annotate with span diffs only the current page
    for sent1, sent2 in zip(text1, text2):
        data.append(_annotate_texts_with_span_diffs(sent1, sent2))
    # Paginate
    paginator = Paginator(data, SEGMENTS_PER_PAGE)
    page_num = request.GET.get('page', 1)
    page_data = paginator.get_page(page_num)
    context = {
        'page': page_data,
        'page_size': SEGMENTS_PER_PAGE,
        'submission_a': str(sub_a),
        'submission_b': str(sub_b),
        'ocelot_team_name': ocelot_team_name,
        'ocelot_team_email': ocelot_team_email,
        'ocelot_team_token': ocelot_team_token,
        # 'comparison_options': [(0, '...'), (1, 'A>B'), (2, 'A<B'), (3, 'A=B')],
    }
    template = 'comparison/compare_submissions.html'
    return render(request, template, context=context)
|
import xlrd
import numpy
from scipy import stats
from sklearn import datasets
from sklearn import linear_model
argc = 2 # number of input workbooks read by _DataRead.
# Absolute paths of the source spreadsheets (Chengdu standardized housing
# data, 2018.1 and 2018.2).
url = ['D:\\Code\\城房指数新编制\\1 指数编制2018-2020.7excel表\\2018-2020.7excel表\\33 成都标准数据2018.1.xls',
       'D:\\Code\\城房指数新编制\\1 指数编制2018-2020.7excel表\\2018-2020.7excel表\\33 成都标准数据2018.2.xls']
def getSB(data):
    """Build the "standard dwelling" from transposed spreadsheet data.

    *data* is the transposed excel content (2-D array, no header row).
    Returns a dict holding the modal value of each categorical column and
    the mean of the area/price columns (6, 9, 10).
    """
    def _mode(column):
        # scipy < 1.11 returns 1-element arrays from stats.mode(); newer
        # versions return scalars (keepdims now defaults to False).  The
        # original "stats.mode(x)[0][0]" crashes on modern scipy; support both.
        m = stats.mode(column)[0]
        return m[0] if numpy.ndim(m) else m
    SB = {
        "pro_id": _mode(data[1]),
        "unit_onsale": _mode(data[2]),
        "unit_duration": _mode(data[5]),
        "pro_area": numpy.mean(data[6]),
        "pro_floor": _mode(data[7]),
        "unit_floor": _mode(data[8]),
        "unit_area": numpy.mean(data[9]),
        "unit_price": numpy.mean(data[10]),
        "pro_dis": _mode(data[11]),
        "pro_block": _mode(data[12]),
        "block_ehn": _mode(data[13]),
        "block_edf": _mode(data[14]),
        "block_edn": _mode(data[15]),
        "block_enf": _mode(data[16]),
        "block_exn": _mode(data[17]),
        "block_exf": _mode(data[18]),
        "block_exb": _mode(data[19]),
        "block_ebf": _mode(data[20]),
        "block_edb": _mode(data[21]),
        "block_sdf": _mode(data[22]),
        "block_sdn": _mode(data[23]),
        "block_snf": _mode(data[24]),
        "block_sxn": _mode(data[25]),
        "block_sxf": _mode(data[26]),
        "block_sxb": _mode(data[27]),
        "block_sbf": _mode(data[28]),
        "block_sdb": _mode(data[29]),
        "block_rnf": _mode(data[30]),
        "block_rxf": _mode(data[31]),
        "subm_floor": _mode(data[32]),
        "subm_area": _mode(data[33]),
        "subm_putong": _mode(data[34])}
    return SB
def get_ratio(data, data_lastmonth, data_lastyear):
    """Compute price indices from three transposed spreadsheets (current
    period, previous month, same month last year; 2-D arrays, no header).

    Returns {"year_on_year": ..., "chain": ...}: the mean of column 10
    (unit price) divided by last year's / last month's mean respectively.
    """
    current_price = numpy.mean(data[10])
    year_on_year = current_price / numpy.mean(data_lastyear[10])
    chain = current_price / numpy.mean(data_lastmonth[10])
    return {"year_on_year": year_on_year, "chain": chain}
def linearRegression(data):
    """Fit a hedonic OLS model on transposed spreadsheet data.

    *data* is the transposed excel content (2-D array, no header row);
    the dependent variable is column 10 (unit price).  Project ids
    (column 1) occurring in more than 10% of the rows get a 0/1 dummy
    variable.  Returns a dict of attribute name -> fitted coefficient.
    """
    reg = linear_model.LinearRegression()
    num = len(data[0])  # number of observations
    switch = num / 10   # a project id needs > num/10 rows to earn a dummy

    # Group row indices by project id, preserving first-appearance order
    # (dicts are insertion-ordered).  This replaces the original O(n*k)
    # linear scan per row with a single O(n) pass.
    rows_by_project = {}
    for row_idx, project_id in enumerate(data[1]):
        rows_by_project.setdefault(project_id, []).append(row_idx)

    # Build 0/1 dummy columns for the sufficiently frequent project ids.
    dummy = []
    name_dummy = []
    for project_id, row_indices in rows_by_project.items():
        if len(row_indices) > switch:
            name_dummy.append(project_id)
            column = [0] * num
            for row_idx in row_indices:
                column[row_idx] = 1
            dummy.append(column)
    no = len(dummy)

    # Assemble the design matrix: columns 2, 5..9, 11..34, then the dummies
    # (this ordering must match the coefficient mapping below).
    dataset = []
    for i in range(0, num):
        sample = [data[2][i]]
        sample += [data[j][i] for j in range(5, 10)]
        sample += [data[j][i] for j in range(11, 35)]
        for j in range(0, no):
            sample.append(dummy[j][i])
        dataset.append(sample)

    # Fit: unit price (column 10) is the dependent variable.
    reg.fit(X=dataset, y=data[10])

    # Map coefficients back to attribute names, in design-matrix order.
    result = {
        "intercept": reg.intercept_,
        "unit_onsale": reg.coef_[0],
        "unit_duration": reg.coef_[1],
        "pro_area": reg.coef_[2],
        "pro_floor": reg.coef_[3],
        "unit_floor": reg.coef_[4],
        "unit_area": reg.coef_[5],
        "pro_dis": reg.coef_[6],
        "pro_block": reg.coef_[7],
        "block_ehn": reg.coef_[8],
        "block_edf": reg.coef_[9],
        "block_edn": reg.coef_[10],
        "block_enf": reg.coef_[11],
        "block_exn": reg.coef_[12],
        "block_exf": reg.coef_[13],
        "block_exb": reg.coef_[14],
        "block_ebf": reg.coef_[15],
        "block_edb": reg.coef_[16],
        "block_sdf": reg.coef_[17],
        "block_sdn": reg.coef_[18],
        "block_snf": reg.coef_[19],
        "block_sxn": reg.coef_[20],
        "block_sxf": reg.coef_[21],
        "block_sxb": reg.coef_[22],
        "block_sbf": reg.coef_[23],
        "block_sdb": reg.coef_[24],
        "block_rnf": reg.coef_[25],
        "block_rxf": reg.coef_[26],
        "subm_floor": reg.coef_[27],
        "subm_area": reg.coef_[28],
        "subm_putong": reg.coef_[29]}
    for i in range(0, no):
        result["dummy_pro_id" + str(name_dummy[i])] = reg.coef_[30 + i]
    return result
def _DataRead(argc, urls):
    """Read the first *argc* workbooks in *urls* and return their columns.

    Column j of the result is the concatenation (header row dropped) of
    column j across all workbooks; the column count is taken from the
    first workbook.  Downstream code indexes columns 0..34, so the result
    always has 35 column slots.
    """
    data = [[] for _ in range(0, 35)]
    ncols = None
    for i in range(0, argc):
        # Open each workbook exactly once (the original opened the first
        # workbook twice: once for ncols, once again inside the loop).
        worksheet = xlrd.open_workbook(urls[i]).sheet_by_index(0)
        if ncols is None:
            ncols = worksheet.ncols
        for j in range(0, ncols):
            data[j] += worksheet.col_values(j)[1:]
    return data
|
from pathlib import Path
from setuptools import setup, find_packages

# Long description shown on PyPI and the bundled license text.
readme = Path('README.md').read_text(encoding='utf-8')
# Renamed from `license` to avoid shadowing the `license` builtin.
license_text = Path('LICENSE').read_text(encoding='utf-8')

setup(
    name='scbasset',
    version='0.1',
    description='model scATAC with sequence-based CNN.',
    long_description=readme,
    author='Han Yuan, David Kelley',
    author_email='yuanh@calicolabs.com, drk@calicolabs.com',
    url='https://github.com/calico/scbasset',
    license=license_text,
    packages=find_packages(exclude=('tests', 'docs')),
    # One requirement per line in requirements.txt.
    install_requires=[
        line.strip() for line in
        Path('requirements.txt').read_text('utf-8').splitlines()
    ],
)
import os
import argparse
from glob import glob
from functools import partial
from dataclasses import dataclass
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
import cv2
from PIL import Image
import wandb
import jax
import jax.numpy as jnp
import flax
from flax import linen as nn
import torch
import torchvision.transforms as transforms
import tensorflow as tf
import tensorflow_datasets as tfds
class GANDataset(torch.utils.data.Dataset):
    """Unpaired two-domain image dataset for GAN training.

    Eagerly loads every ``./data/trainA/*.jpg`` and ``./data/trainB/*.jpg``
    image, resized to 32x32x3 float32 in [0, 1].  ``__getitem__`` returns an
    (img_a, img_b) pair as HWC float32 arrays normalized to [-1, 1].
    """

    def __init__(self, args):
        self.args = args
        self.paths_a = glob('./data/trainA/*.jpg')
        self.paths_b = glob('./data/trainB/*.jpg')
        self.imgs_a = self._load_images(self.paths_a)
        self.imgs_b = self._load_images(self.paths_b)
        # Only normalization is active; the augmentation transforms were
        # deliberately disabled in the original code.
        self.transforms = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ])

    @staticmethod
    def _load_images(paths):
        """Load every image in `paths`, scale to [0, 1], resize to (32, 32, 3)."""
        imgs = np.zeros((len(paths), 32, 32, 3), dtype=np.float32)
        for i, path in tqdm(enumerate(paths)):
            img = np.asarray(Image.open(path)) / 255.0
            imgs[i] = cv2.resize(img, dsize=(
                32, 32), interpolation=cv2.INTER_CUBIC).reshape(32, 32, 3)
        return imgs

    def __len__(self):
        # Length is defined by domain A (one epoch = one pass over trainA).
        return len(self.paths_a)

    def __getitem__(self, index):
        """Return an unpaired (a, b) sample as HWC float32 arrays in [-1, 1]."""
        img_a = self.imgs_a[index]
        # BUG FIX: the two domains may contain different numbers of images;
        # indexing imgs_b with the raw index raised IndexError when trainB
        # had fewer files than trainA.  Wrap the index instead.
        img_b = self.imgs_b[index % len(self.imgs_b)]
        img_a = self.transforms(img_a).numpy().transpose(1, 2, 0)
        img_b = self.transforms(img_b).numpy().transpose(1, 2, 0)
        return img_a, img_b
def shard(xs):
    """Reshape every leaf of `xs` so its leading axis splits across devices.

    A leaf of shape (N, ...) becomes (device_count, N // device_count, ...),
    as expected by `jax.pmap`.
    """
    n_devices = jax.device_count()

    def _split(leaf):
        return leaf.reshape((n_devices, -1) + leaf.shape[1:])

    return jax.tree_util.tree_map(_split, xs)
class Generator(nn.Module):
    """DCGAN-style generator: (N, 1, 1, 100) latent -> (N, 32, 32, 3) in [-1, 1]."""
    training: bool

    @nn.compact
    def __call__(self, z):
        # (features, strides, padding) for each upsampling stage; every
        # stage is ConvTranspose -> BatchNorm -> ReLU, in this exact order
        # so flax auto-naming matches the original module.
        stages = (
            (64 * 8, (1, 1), 'VALID'),
            (64 * 4, (2, 2), 'SAME'),
            (64 * 2, (2, 2), 'SAME'),
            (64, (2, 2), 'SAME'),
        )
        x = z
        for features, strides, padding in stages:
            x = nn.ConvTranspose(features=features, kernel_size=(4, 4),
                                 strides=strides, padding=padding,
                                 use_bias=False)(x)
            x = nn.BatchNorm(use_running_average=not self.training,
                             momentum=0.9)(x)
            x = nn.relu(x)
        # Final projection to RGB, squashed to [-1, 1].
        x = nn.ConvTranspose(features=3, kernel_size=(4, 4), strides=(1, 1),
                             padding='SAME', use_bias=False)(x)
        return jnp.tanh(x)
class Discriminator(nn.Module):
    """DCGAN-style discriminator: image batch -> per-example realness logits."""
    training: bool

    @nn.compact
    def __call__(self, x):
        # First downsampling stage deliberately has no batch norm
        # (standard DCGAN choice for the discriminator input layer).
        x = nn.Conv(features=64, kernel_size=(4, 4), strides=(2, 2),
                    padding='SAME', use_bias=False)(x)
        x = nn.leaky_relu(x, negative_slope=0.2)
        # Three Conv -> BatchNorm -> LeakyReLU stages, widths doubling;
        # creation order matches the original so parameter names line up.
        for features in (64 * 2, 64 * 4, 64 * 8):
            x = nn.Conv(features=features, kernel_size=(4, 4),
                        strides=(2, 2), padding='SAME', use_bias=False)(x)
            x = nn.BatchNorm(use_running_average=not self.training,
                             momentum=0.9)(x)
            x = nn.leaky_relu(x, negative_slope=0.2)
        # 1x1 conv down to a single logit map, flattened per example.
        x = nn.Conv(features=1, kernel_size=(1, 1), strides=(4, 4),
                    padding='VALID', use_bias=False)(x)
        return jnp.reshape(x, [x.shape[0], -1])
@jax.vmap
def bce_logits_loss(logit, label):
    """Per-element numerically stable sigmoid cross-entropy from logits.

    Equivalent to -[label*log(sigmoid(logit)) + (1-label)*log(1-sigmoid(logit))]
    written in the standard overflow-safe form.
    """
    hinge = jnp.maximum(logit, 0)
    stable_log_term = jnp.log(1 + jnp.exp(-jnp.abs(logit)))
    return hinge - logit * label + stable_log_term
def loss_g(params_g, params_d, batch, rng, variables_g, variables_d):
    """Non-saturating generator loss: BCE of fake logits against 'real' labels.

    Returns (scalar loss, (updated generator vars, updated discriminator vars));
    both networks run in training mode so their batch stats are mutated.
    """
    latent = jax.random.normal(rng, shape=(batch.shape[0], 1, 1, 100))
    fake_batch, variables_g = Generator(training=True).apply(
        {'params': params_g, 'batch_stats': variables_g['batch_stats']},
        latent, mutable=['batch_stats'])
    fake_logits, variables_d = Discriminator(training=True).apply(
        {'params': params_d, 'batch_stats': variables_d['batch_stats']},
        fake_batch, mutable=['batch_stats'])
    # The generator wants its fakes classified as real (label = 1).
    target = jnp.ones((batch.shape[0],), dtype=jnp.int32)
    loss = jnp.mean(bce_logits_loss(fake_logits, target))
    return loss, (variables_g, variables_d)
def loss_d(params_d, params_g, batch, rng, variables_g, variables_d):
    """Discriminator loss: BCE over real images (label 1) plus fakes (label 0).

    Returns (scalar loss, (updated generator vars, updated discriminator vars)).
    """
    latent = jax.random.normal(rng, shape=(batch.shape[0], 1, 1, 100))
    fake_batch, variables_g = Generator(training=True).apply(
        {'params': params_g, 'batch_stats': variables_g['batch_stats']},
        latent, mutable=['batch_stats'])
    real_logits, variables_d = Discriminator(training=True).apply(
        {'params': params_d, 'batch_stats': variables_d['batch_stats']},
        batch, mutable=['batch_stats'])
    fake_logits, variables_d = Discriminator(training=True).apply(
        {'params': params_d, 'batch_stats': variables_d['batch_stats']},
        fake_batch, mutable=['batch_stats'])
    # Reals should score 1, fakes 0.
    batch_size = batch.shape[0]
    real_term = bce_logits_loss(
        real_logits, jnp.ones((batch_size,), dtype=jnp.int32))
    fake_term = bce_logits_loss(
        fake_logits, jnp.zeros((batch_size,), dtype=jnp.int32))
    return jnp.mean(real_term + fake_term), (variables_g, variables_d)
@partial(jax.pmap, axis_name='batch')
def train_step(rng, variables_g, variables_d, optimizer_g, optimizer_d, batch):
    """One pmapped GAN step: generator update followed by discriminator update.

    Runs per device; losses and gradients are averaged across devices with
    `pmean` so every replica applies the same update.
    NOTE(review): this uses the legacy `flax.optim` optimizer API
    (`.target` / `.apply_gradient`), removed in modern flax.
    """
    rng, rng_g, rng_d = jax.random.split(rng, 3)
    # Generator update (gradient w.r.t. generator params only).
    (g_loss, (variables_g, variables_d)), grad_g = jax.value_and_grad(loss_g, has_aux=True)(
        optimizer_g.target, optimizer_d.target, batch, rng_g, variables_g, variables_d)
    g_loss = jax.lax.pmean(g_loss, axis_name='batch')
    grad_g = jax.lax.pmean(grad_g, axis_name='batch')
    optimizer_g = optimizer_g.apply_gradient(grad_g)
    # Discriminator update, using the just-updated generator target.
    (d_loss, (variables_g, variables_d)), grad_d = jax.value_and_grad(loss_d, has_aux=True)(
        optimizer_d.target, optimizer_g.target, batch, rng_d, variables_g, variables_d)
    d_loss = jax.lax.pmean(d_loss, axis_name='batch')
    grad_d = jax.lax.pmean(grad_d, axis_name='batch')
    optimizer_d = optimizer_d.apply_gradient(grad_d)
    return rng, variables_g, variables_d, optimizer_g, optimizer_d, d_loss, g_loss
def main(args):
    """Train the DCGAN on the selfie dataset, logging metrics/samples to wandb.

    :param args: namespace with `batch_size` and `num_workers`.
    """
    wandb.init(project='flax-dcgan-selfie')
    dataset = GANDataset({})
    train_dataloader = torch.utils.data.DataLoader(
        dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)
    # Independent init RNGs for the two networks.
    rng = jax.random.PRNGKey(42)
    rng, rng_g, rng_d = jax.random.split(rng, 3)
    init_batch_g = jnp.ones((1, 1, 1, 100), jnp.float32)
    variables_g = Generator(training=True).init(rng_g, init_batch_g)
    init_batch_d = jnp.ones((1, 32, 32, 3), jnp.float32)
    variables_d = Discriminator(training=True).init(rng_d, init_batch_d)
    optimizer_g = flax.optim.Adam(
        learning_rate=1e-4, beta1=0.5, beta2=0.9).create(variables_g["params"])
    optimizer_g = flax.jax_utils.replicate(optimizer_g)
    optimizer_d = flax.optim.Adam(
        learning_rate=1e-4, beta1=0.5, beta2=0.9).create(variables_d["params"])
    optimizer_d = flax.jax_utils.replicate(optimizer_d)
    # Replicate state and give each device its own RNG for pmap.
    variables_g = flax.jax_utils.replicate(variables_g)
    variables_d = flax.jax_utils.replicate(variables_d)
    rngs = jax.random.split(rng, num=jax.local_device_count())
    global_step = 0
    for epoch in range(100):
        for i, (img_a, img_b) in tqdm(enumerate(train_dataloader)):
            img_a = shard(img_a.numpy())
            img_b = shard(img_b.numpy())
            rngs, variables_g, variables_d, optimizer_g, optimizer_d, d_loss, g_loss = train_step(
                rngs, variables_g, variables_d, optimizer_g, optimizer_d, img_a)
            # BUG FIX: `wandb.log(to_log)` previously ran on *every* step,
            # re-sending the stale metrics dict built on the last
            # multiple-of-10 step. Log only when the dict is fresh.
            if global_step % 10 == 0:
                to_log = {'g_loss': float(jnp.mean(g_loss)),
                          'd_loss': float(jnp.mean(d_loss))}
                # Every 100 steps also log one generated sample
                # (100 % 10 == 0, so this nests safely under the %10 guard).
                if global_step % 100 == 0:
                    rng, rng_sample = jax.random.split(rng)
                    z = jax.random.normal(rng_sample, shape=(1, 1, 1, 100))
                    temp_params_g = flax.jax_utils.unreplicate(
                        optimizer_g.target)
                    temp_variables_g = flax.jax_utils.unreplicate(variables_g)
                    samples = Generator(training=False).apply(
                        {'params': temp_params_g, 'batch_stats': temp_variables_g['batch_stats']}, z, mutable=False)
                    # Map tanh output [-1, 1] back to [0, 1] for display.
                    img = jnp.reshape((samples + 1) / 2, [32, 32, 3])
                    to_log['img'] = wandb.Image(np.array(img))
                wandb.log(to_log)
            global_step += 1
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # --debug attaches the ptvsd remote debugger before training starts.
    parser.add_argument('--debug', default=False, action="store_true")
    parser.add_argument('--batch_size', default=8, type=int)
    parser.add_argument('--num_workers', default=0, type=int)
    args = parser.parse_args()
    if args.debug:
        import ptvsd
        ptvsd.enable_attach(address=('localhost', 5678),
                            redirect_output=True)
        # Block until the debugger attaches, then drop into a breakpoint.
        ptvsd.wait_for_attach()
        breakpoint()
    main(args)
# @dataclass
# class Data:
#     batch_size: int
#     num_workers: int
# main(Data(256, 4))
|
from .models import Followers
def add_first_follower(username, follower):
    """Create the followers record for `username` with `follower` as its first
    entry; thin wrapper around `Followers.add_one`, returns its result."""
    return Followers.add_one(username, follower)
def update_followers(username, follower):
    """Add `follower` to the existing followers record for `username`;
    thin wrapper around `Followers.update_one`, returns its result."""
    return Followers.update_one(username, follower)
|
"""Provides the Predictor related classes."""
import contextlib
import json

from .base import Base
class Predictors(Base):
    """Client for the `predictors` API resource."""

    _base_path_ = 'predictors'
    _auto_methods_ = ('get', 'post')

    def create_collection(self, collection_name, dataset_id,
                          runs, event_files, descriptions=None):
        """ Create new predictor collection
        :param str collection_name: Force upload with unique timestamped name.
        :param int dataset_id: Dataset id.
        :param list(list((int)) runs: List of run ids corresponding to files
        :param list(str) event_files: TSV files with new predictor columns.
            Required columns: onset, duration,
            any number of columns with values for new Predictors.
        :param list(dict) descriptions: optional list of descriptions
            for each columns
        :return: JSON response
        """
        # BUG FIX: the file handles were opened but never closed, leaking
        # file descriptors on every call; ExitStack closes them all even
        # if the POST raises.
        with contextlib.ExitStack() as stack:
            files = tuple(
                ('event_files', stack.enter_context(open(f, 'rb')))
                for f in event_files)
            # Serialize each run-id list as a comma-separated string.
            runs = [",".join(str(r) for r in s) for s in runs]
            descriptions = json.dumps(descriptions)
            return self.post('collection', dataset_id=dataset_id, files=files,
                             runs=runs, collection_name=collection_name,
                             descriptions=descriptions)

    def get_collection(self, collection_id):
        """Fetch a previously created predictor collection by id."""
        return self.get(f'collection/{collection_id}')
class PredictorEvents(Base):
    """Read-only client for the `predictor-events` API resource."""
    _base_path_ = 'predictor-events'
    _auto_methods_ = ('get', )
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated migration: adds the yearly work-load field to Mitglied.
    dependencies = [
        ('arbeitsplan', '0008_auto_20141208_1906'),
    ]
    operations = [
        migrations.AddField(
            model_name='mitglied',
            name='arbeitslast',
            # Default 10 hours/year; byte-string help/verbose texts indicate
            # a Python 2 / legacy Django code base.
            field=models.IntegerField(default=10, help_text=b'Wieviele Stunden pro Jahr muss dieses Mitglied arbeiten?', verbose_name=b'Arbeitslast (h/Jahr)'),
            preserve_default=True,
        ),
    ]
|
from .proscript import Word, Proscript, Segment |
from .e7_activity import E7Activity
from dataclasses import dataclass
@dataclass
class E11Modification(E7Activity):
    """
    Scope note:
    This class comprises instances of E7 Activity that create, alter or change instances of E24 Physical Human-Made Thing.
    This class includes the production of an item from raw materials, and other so far undocumented objects, and the preventive treatment or restoration of an object for conservation.
    Since the distinction between modification and production is not always clear, modification is regarded as the more generally applicable concept. This implies that some items may be consumed or destroyed in an instance of E11 Modification, and that others may be produced as a result of it. An event should also be documented using an instance of E81 Transformation if it results in the destruction of one or more objects and the simultaneous production of others using parts or material from the originals. In this case, the new items have separate identities.
    If the instance of E29 Design or Procedure utilized for the modification prescribes the use of specific materials, they should be documented using property P68 foresees use of (use foreseen by): E57 Material of E29 Design or Procedure, rather than via P126 employed (was employed in): E57 Material.
    Examples:
    - the construction of the SS Great Britain (E12)(Gregor, 1971)
    - the impregnation of the Vasa warship in Stockholm for preservation after 1956(Håfors, 2010)
    - the transformation of the Enola Gay into a museum exhibit by the National Air and Space Museum in Washington DC between 1993 and 1995 (E12, E81) (Yakel, 2000)
    - the last renewal of the gold coating of the Toshogu shrine in Nikko, Japan(Cali and Dougil, 2012)
    In First Order Logic:
    E11(x) ⊃ E7(x)
    """
    # CIDOC-CRM URI identifying this class. Deliberately un-annotated so
    # the @dataclass decorator treats it as a class attribute, not a field.
    TYPE_URI = "http://erlangen-crm.org/current/E11_Modification"
|
#
# Copyright (c) 2022 Airbyte, Inc., all rights reserved.
#
import pytest
from source_linkedin_ads.utils import get_parent_stream_values
@pytest.mark.parametrize(
    "record, key_value_map, output_slice",
    [
        # Single mapping: expose the record's `id` under the new key.
        ({"id": 123, "ref": "abc"}, {"acc_id": "id"}, {"acc_id": 123}),
        # Multiple mappings are all applied.
        ({"id": 123, "ref": "abc"}, {"acc_id": "id", "ref_id": "ref"}, {"acc_id": 123, "ref_id": "abc"}),
    ],
)
def test_get_parent_stream_values(record, key_value_map, output_slice):
    """get_parent_stream_values should project `record` through `key_value_map`."""
    assert get_parent_stream_values(record, key_value_map) == output_slice
|
# Print an unbounded sequence of increasing integers, one per line,
# starting at 0. Runs until interrupted.
counter = 0
while True:
    print(counter)
    counter += 1
|
import os
import numpy
from abaqusGui import *
import testDB
import gui_defaults
reload(gui_defaults)
from desicos.logger import error
from desicos.abaqus.constants import TMP_DIR
# Note: The above form of the import statement is used for the prototype
# application to allow the module to be reloaded while the application is
# still running. In a non-prototype application you would use the form:
# from myDB import MyDB
#TODO ploads or cutouts lists to conecylDB in order to read the default values
# for
#TODO implement second dialog box that will confirm OK or BACK to the main DB
#TODO implement progress bar along the creation of all models
#TODO find a way to save the study object and all the cc objects to
# allow a following post-processing step
###########################################################################
# Class definition
###########################################################################
# Names of every GUI input mirrored between the dialog and the kernel
# commands; each entry `<name>` corresponds to a `<name>Kw` AFX keyword
# attribute on TestForm (see get_params_from_gui / read_params_from_gui).
params = ['rbot', 'H', 'alphadeg','betadeg','omegadeg',
        'betadegs','omegadegs','la','plyt',
        'numel_r', 'elem_type',
        'separate_load_steps', 'displ_controlled',
        'axial_displ', 'axial_load', 'axial_step', 'pload_step',
        'pressure_load', 'pressure_step',
        #'Nxxtop', 'Nxxtop_vec',
        'artificial_damping1', 'artificial_damping2',
        'damping_factor1', 'minInc1', 'initialInc1', 'maxInc1', 'maxNumInc1',
        'damping_factor2', 'minInc2', 'initialInc2', 'maxInc2', 'maxNumInc2',
        'bc_fix_bottom_uR', 'bc_fix_bottom_v', 'bc_bottom_clamped',
        'bc_fix_bottom_side_uR', 'bc_fix_bottom_side_v', 'bc_fix_bottom_side_u3',
        'bc_fix_top_uR', 'bc_fix_top_v', 'bc_top_clamped',
        'bc_fix_top_side_uR', 'bc_fix_top_side_v', 'bc_fix_top_side_u3',
        'resin_add_BIR', 'resin_add_BOR',
        'resin_add_TIR', 'resin_add_TOR',
        'resin_E', 'resin_nu', 'resin_numel',
        'resin_bot_h', 'resin_bir_w1', 'resin_bir_w2', 'resin_bor_w1', 'resin_bor_w2',
        'resin_top_h', 'resin_tir_w1', 'resin_tir_w2', 'resin_tor_w1', 'resin_tor_w2',
        'use_DLR_bc',
        'use_job_stopper',
        'laminate','stack',
        'allowables', 'timeInterval', 'stress_output',
        'pl_num', 'cb_num', 'd_num', 'ax_num', 'lbmi_num', 'cut_num',
        'pl_table', 'cb_table', 'd_table', 'ax_table', 'lbmi_table', 'cut_table',
        'ppi_enabled', 'ppi_extra_height', 'ppi_table',
        'ffi_nominal_vf', 'ffi_E_matrix', 'ffi_nu_matrix', 'ffi_scalings',
        'std_name',
        'allowablesKey','laminapropKey','ccKey',
        'last_loaded',
        'post_put_in_Excel', 'post_open_Excel', 'post_outpath',
        'ncpus', 'imp_ms', 'imp_ms_theta_z_format',
        'imp_t_theta_z_format', 'imp_thick',
        'imp_ms_stretch_H', 'imp_t_stretch_H',
        'imp_ms_scalings', 'imp_t_scalings',
        'imp_r_TOL', 'imp_ms_ncp', 'imp_t_ncp',
        'imp_ms_power_parameter', 'imp_t_power_parameter',
        'imp_ms_rotatedeg', 'imp_t_rotatedeg',
        'imp_num_sets']
class TestForm(AFXForm):
    """Main plug-in form for the DESICOS cone/cylinder study GUI.

    Registers one AFX keyword per entry of the module-level ``params`` list
    (attribute ``<param>Kw``) and issues the ``gui_commands`` kernel
    commands. NOTE(review): Abaqus-GUI Python 2 code (uses ``<>``,
    ``unicode`` and ``iteritems``).
    """
    def __init__(self, owner):
        """Create the form and register every GUI keyword on its command."""
        # Construct the base class.
        #
        self.owner = owner
        AFXForm.__init__(self, owner)
        # Command
        #
        TRUE_FALSE = 1
        # Message id routing the "delete output folder" confirmation dialog.
        self.ID_DEL_OUT_FOLDER = 999
        FXMAPFUNC(self, SEL_COMMAND, self.ID_DEL_OUT_FOLDER,
                  self.onCmdDelOutFolder)
        # Kernel commands: study creation plus the two imperfection
        # application commands; `dummy` holds GUI-only keywords that are
        # never sent with create_study.
        self.cmd = AFXGuiCommand(self, 'create_study', 'gui_commands')
        self.apply_imp_ms = AFXGuiCommand(self, 'apply_imp_ms', 'gui_commands')
        self.apply_imp_t = AFXGuiCommand(self, 'apply_imp_t', 'gui_commands')
        #
        self.dummy = AFXGuiCommand(self, 'dummy', 'gui_commands')
        #
        # Geometry and laminate inputs.
        self.std_nameKw = AFXStringKeyword(self.cmd, 'std_name', TRUE)
        self.rbotKw = AFXFloatKeyword(self.cmd, 'rbot', TRUE)
        self.HKw = AFXFloatKeyword(self.cmd, 'H', TRUE)
        self.alphadegKw = AFXFloatKeyword(self.cmd, 'alphadeg', TRUE)
        self.laKw = AFXIntKeyword( self.cmd, 'la', TRUE)
        self.betadegKw = AFXFloatKeyword(self.cmd, 'betadeg', TRUE)
        self.omegadegKw = AFXFloatKeyword(self.cmd, 'omegadeg', TRUE)
        self.laminateKw = AFXTableKeyword(self.cmd, 'laminate', TRUE, 0, -1, AFXTABLE_TYPE_STRING)
        self.stackKw = AFXTableKeyword(self.dummy, 'stack', FALSE, 0, -1, AFXTABLE_TYPE_FLOAT)
        self.betadegsKw = AFXTableKeyword(self.cmd, 'betadegs', TRUE, 0, -1, AFXTABLE_TYPE_FLOAT)
        self.omegadegsKw = AFXTableKeyword(self.cmd, 'omegadegs', TRUE, 0, -1, AFXTABLE_TYPE_FLOAT)
        self.plytKw = AFXFloatKeyword(self.dummy, 'plyt', FALSE)
        # Mesh and load-step controls.
        self.elem_typeKw = AFXStringKeyword(self.cmd, 'elem_type', TRUE)
        self.numel_rKw = AFXIntKeyword(self.cmd, 'numel_r', TRUE)
        self.separate_load_stepsKw = AFXBoolKeyword(self.cmd, 'separate_load_steps', TRUE_FALSE, TRUE)
        self.displ_controlledKw = AFXBoolKeyword(self.cmd, 'displ_controlled', TRUE_FALSE, TRUE)
        self.axial_displKw = AFXFloatKeyword(self.cmd, 'axial_displ', TRUE)
        self.axial_loadKw = AFXFloatKeyword(self.cmd, 'axial_load', TRUE)
        self.axial_stepKw = AFXIntKeyword(self.cmd, 'axial_step', TRUE)
        self.pressure_loadKw = AFXFloatKeyword(self.cmd, 'pressure_load', TRUE)
        self.pressure_stepKw = AFXIntKeyword(self.cmd, 'pressure_step', TRUE)
        self.pload_stepKw = AFXIntKeyword(self.cmd, 'pload_step', TRUE)
        # Artificial damping / increment controls for the two steps.
        self.artificial_damping1Kw = AFXBoolKeyword(self.cmd, 'artificial_damping1', TRUE_FALSE, TRUE)
        self.artificial_damping2Kw = AFXBoolKeyword(self.cmd, 'artificial_damping2', TRUE_FALSE, TRUE)
        self.damping_factor1Kw = AFXFloatKeyword(self.cmd, 'damping_factor1', TRUE)
        self.damping_factor2Kw = AFXFloatKeyword(self.cmd, 'damping_factor2', TRUE)
        self.minInc1Kw = AFXFloatKeyword(self.cmd, 'minInc1', TRUE)
        self.minInc2Kw = AFXFloatKeyword(self.cmd, 'minInc2', TRUE)
        self.initialInc1Kw = AFXFloatKeyword(self.cmd, 'initialInc1', TRUE)
        self.initialInc2Kw = AFXFloatKeyword(self.cmd, 'initialInc2', TRUE)
        self.maxInc1Kw = AFXFloatKeyword(self.cmd, 'maxInc1', TRUE)
        self.maxInc2Kw = AFXFloatKeyword(self.cmd, 'maxInc2', TRUE)
        self.maxNumInc1Kw = AFXFloatKeyword(self.cmd, 'maxNumInc1', TRUE)
        self.maxNumInc2Kw = AFXFloatKeyword(self.cmd, 'maxNumInc2', TRUE)
        self.ncpusKw = AFXIntKeyword(self.cmd, 'ncpus', TRUE)
        self.use_job_stopperKw = AFXBoolKeyword( self.dummy, 'use_job_stopper', TRUE_FALSE, TRUE)
        # Boundary conditions (bottom/top edge and side constraints).
        self.bc_fix_bottom_uRKw = AFXBoolKeyword(self.cmd, 'bc_fix_bottom_uR', TRUE_FALSE, TRUE)
        self.bc_fix_bottom_vKw = AFXBoolKeyword(self.cmd, 'bc_fix_bottom_v', TRUE_FALSE, TRUE)
        self.bc_bottom_clampedKw = AFXBoolKeyword(self.cmd, 'bc_bottom_clamped', TRUE_FALSE, TRUE)
        self.bc_fix_bottom_side_uRKw = AFXBoolKeyword(self.cmd, 'bc_fix_bottom_side_uR', TRUE_FALSE, TRUE)
        self.bc_fix_bottom_side_vKw = AFXBoolKeyword(self.cmd, 'bc_fix_bottom_side_v', TRUE_FALSE, TRUE)
        self.bc_fix_bottom_side_u3Kw = AFXBoolKeyword(self.cmd, 'bc_fix_bottom_side_u3', TRUE_FALSE, TRUE)
        self.bc_fix_top_uRKw = AFXBoolKeyword(self.cmd, 'bc_fix_top_uR', TRUE_FALSE, TRUE)
        self.bc_fix_top_vKw = AFXBoolKeyword(self.cmd, 'bc_fix_top_v', TRUE_FALSE, TRUE)
        self.bc_top_clampedKw = AFXBoolKeyword(self.cmd, 'bc_top_clamped', TRUE_FALSE, TRUE)
        self.bc_fix_top_side_uRKw = AFXBoolKeyword(self.cmd, 'bc_fix_top_side_uR', TRUE_FALSE, TRUE)
        self.bc_fix_top_side_vKw = AFXBoolKeyword(self.cmd, 'bc_fix_top_side_v', TRUE_FALSE, TRUE)
        self.bc_fix_top_side_u3Kw = AFXBoolKeyword(self.cmd, 'bc_fix_top_side_u3', TRUE_FALSE, TRUE)
        # resin rings
        self.resin_EKw = AFXFloatKeyword(self.cmd, 'resin_E', TRUE)
        self.resin_nuKw = AFXFloatKeyword(self.cmd, 'resin_nu', TRUE)
        self.resin_numelKw = AFXIntKeyword(self.cmd, 'resin_numel', TRUE)
        self.resin_add_BIRKw = AFXBoolKeyword(self.cmd, 'resin_add_BIR', TRUE_FALSE, TRUE)
        self.resin_add_BORKw = AFXBoolKeyword(self.cmd, 'resin_add_BOR', TRUE_FALSE, TRUE)
        self.resin_bot_hKw = AFXFloatKeyword(self.cmd, 'resin_bot_h', TRUE)
        self.resin_bir_w1Kw = AFXFloatKeyword(self.cmd, 'resin_bir_w1', TRUE)
        self.resin_bir_w2Kw = AFXFloatKeyword(self.cmd, 'resin_bir_w2', TRUE)
        self.resin_bor_w1Kw = AFXFloatKeyword(self.cmd, 'resin_bor_w1', TRUE)
        self.resin_bor_w2Kw = AFXFloatKeyword(self.cmd, 'resin_bor_w2', TRUE)
        self.resin_add_TIRKw = AFXBoolKeyword(self.cmd, 'resin_add_TIR', TRUE_FALSE, TRUE)
        self.resin_add_TORKw = AFXBoolKeyword(self.cmd, 'resin_add_TOR', TRUE_FALSE, TRUE)
        self.resin_top_hKw = AFXFloatKeyword(self.cmd, 'resin_top_h', TRUE)
        self.resin_tir_w1Kw = AFXFloatKeyword(self.cmd, 'resin_tir_w1', TRUE)
        self.resin_tir_w2Kw = AFXFloatKeyword(self.cmd, 'resin_tir_w2', TRUE)
        self.resin_tor_w1Kw = AFXFloatKeyword(self.cmd, 'resin_tor_w1', TRUE)
        self.resin_tor_w2Kw = AFXFloatKeyword(self.cmd, 'resin_tor_w2', TRUE)
        self.use_DLR_bcKw = AFXBoolKeyword(self.cmd, 'use_DLR_bc', TRUE_FALSE, TRUE)
        # Material data and output controls.
        self.laminapropKw = AFXTupleKeyword(self.dummy, 'laminaprop',FALSE)
        self.allowablesKw = AFXTupleKeyword(self.cmd, 'allowables', TRUE)
        self.timeIntervalKw = AFXFloatKeyword(self.cmd, 'timeInterval', TRUE)
        self.stress_outputKw = AFXBoolKeyword(self.cmd,'stress_output', TRUE_FALSE, TRUE)
        # Imperfection counts and tables (perturbation loads, cutouts, ...).
        self.pl_numKw = AFXIntKeyword( self.cmd, 'pl_num', TRUE)
        self.cb_numKw = AFXIntKeyword( self.cmd, 'cb_num', TRUE)
        self.d_numKw = AFXIntKeyword( self.cmd, 'd_num', TRUE)
        self.ax_numKw = AFXIntKeyword( self.cmd, 'ax_num', TRUE)
        self.lbmi_numKw = AFXIntKeyword( self.cmd, 'lbmi_num', TRUE)
        self.cut_numKw = AFXIntKeyword( self.cmd, 'cut_num', TRUE)
        self.pl_tableKw = AFXTableKeyword(self.cmd, 'pl_table', TRUE, 0, -1, AFXTABLE_TYPE_FLOAT)
        self.cb_tableKw = AFXTableKeyword(self.cmd, 'cb_table', TRUE, 0, -1, AFXTABLE_TYPE_FLOAT)
        self.d_tableKw = AFXTableKeyword(self.cmd, 'd_table', TRUE, 0, -1, AFXTABLE_TYPE_FLOAT)
        self.ax_tableKw = AFXTableKeyword(self.cmd, 'ax_table', TRUE, 0, -1, AFXTABLE_TYPE_FLOAT)
        self.lbmi_tableKw = AFXTableKeyword(self.cmd, 'lbmi_table', TRUE, 0, -1, AFXTABLE_TYPE_FLOAT)
        self.cut_tableKw = AFXTableKeyword(self.cmd, 'cut_table', TRUE, 0, -1, AFXTABLE_TYPE_FLOAT)
        self.ppi_enabledKw = AFXBoolKeyword(self.cmd, 'ppi_enabled', TRUE_FALSE, TRUE)
        self.ppi_extra_heightKw = AFXFloatKeyword(self.cmd, 'ppi_extra_height', TRUE)
        # Use AFXTABLE_TYPE_STRING to avoid default=0
        self.ppi_tableKw = AFXTableKeyword(self.cmd, 'ppi_table', TRUE, 0, -1, AFXTABLE_TYPE_STRING)
        self.ffi_nominal_vfKw = AFXFloatKeyword(self.cmd, 'ffi_nominal_vf', TRUE)
        self.ffi_E_matrixKw = AFXFloatKeyword(self.cmd, 'ffi_E_matrix', TRUE)
        self.ffi_nu_matrixKw = AFXFloatKeyword(self.cmd, 'ffi_nu_matrix', TRUE)
        self.ffi_scalingsKw = AFXTableKeyword(self.cmd, 'ffi_scalings', TRUE, 0, -1, AFXTABLE_TYPE_FLOAT)
        # GUI-only plotting and session bookkeeping keywords.
        self.plot_imp_modelKw = AFXStringKeyword(self.dummy, 'plot_imp_model', FALSE)
        self.plot_ply_indexKw = AFXIntKeyword(self.dummy, 'plot_ply_index', FALSE)
        self.plot_imp_typeKw = AFXStringKeyword(self.dummy, 'plot_imp_type', FALSE)
        #
        #
        self.laminapropKeyKw = AFXStringKeyword(self.dummy, 'laminapropKey', FALSE)
        self.allowablesKeyKw = AFXStringKeyword(self.dummy, 'allowablesKey', FALSE)
        self.new_laminaprop_nameKw = AFXStringKeyword(self.dummy, 'new_laminaprop_name', FALSE)
        self.new_allowables_nameKw = AFXStringKeyword(self.dummy, 'new_allowables_name', FALSE)
        self.ccKeyKw = AFXStringKeyword(self.dummy, 'ccKey',FALSE)
        self.new_cc_nameKw = AFXStringKeyword(self.dummy, 'new_cc_name', FALSE)
        self.last_loadedKw = AFXStringKeyword(self.dummy, 'last_loaded', FALSE)
        self.std_to_postKw = AFXStringKeyword(self.dummy, 'std_to_post', FALSE)
        self.model_to_postKw = AFXStringKeyword(self.dummy, 'model_to_post', FALSE)
        self.post_put_in_ExcelKw = AFXBoolKeyword(self.dummy, 'post_put_in_Excel', TRUE_FALSE, FALSE)
        self.post_open_ExcelKw = AFXBoolKeyword(self.dummy, 'post_open_Excel', TRUE_FALSE, FALSE)
        self.post_outpathKw = AFXStringKeyword(self.dummy, 'post_outpath', FALSE)
        # Keywords for the mid-surface imperfection command.
        self.imp_ms_std_nameKw = AFXStringKeyword(self.apply_imp_ms, 'std_name', TRUE)
        self.imp_msKw = AFXStringKeyword(self.apply_imp_ms, 'imp_ms', TRUE)
        self.imp_ms_stretch_HKw = AFXBoolKeyword(self.apply_imp_ms, 'imp_ms_stretch_H', TRUE_FALSE, TRUE)
        self.imp_ms_scalingsKw = AFXTableKeyword(self.apply_imp_ms, 'imp_ms_scalings', TRUE, 0, -1, AFXTABLE_TYPE_FLOAT)
        self.imp_r_TOLKw = AFXFloatKeyword(self.apply_imp_ms, 'imp_r_TOL', TRUE)
        self.imp_ms_ncpKw = AFXIntKeyword(self.apply_imp_ms, 'imp_ms_ncp', TRUE)
        self.imp_ms_power_parameterKw = AFXFloatKeyword(self.apply_imp_ms, 'imp_ms_power_parameter', TRUE)
        self.imp_ms_theta_z_formatKw = AFXBoolKeyword(self.apply_imp_ms, 'imp_ms_theta_z_format', TRUE_FALSE, TRUE)
        self.imp_ms_rotatedegKw = AFXFloatKeyword(self.apply_imp_ms, 'imp_ms_rotatedeg', TRUE)
        # Keywords for the thickness imperfection command.
        self.imp_t_std_nameKw = AFXStringKeyword(self.apply_imp_t, 'std_name', TRUE)
        self.imp_thickKw = AFXStringKeyword(self.apply_imp_t, 'imp_thick', TRUE)
        self.imp_num_setsKw = AFXIntKeyword(self.apply_imp_t, 'imp_num_sets', TRUE)
        self.imp_t_stretch_HKw = AFXBoolKeyword(self.apply_imp_t, 'imp_t_stretch_H', TRUE_FALSE, TRUE)
        self.imp_t_scalingsKw = AFXTableKeyword(self.apply_imp_t, 'imp_t_scalings', TRUE, 0, -1, AFXTABLE_TYPE_FLOAT)
        self.imp_t_ncpKw = AFXIntKeyword(self.apply_imp_t, 'imp_t_ncp', TRUE)
        self.imp_t_power_parameterKw = AFXFloatKeyword(self.apply_imp_t, 'imp_t_power_parameter', TRUE)
        self.imp_t_theta_z_formatKw = AFXBoolKeyword(self.apply_imp_t, 'imp_t_theta_z_format', TRUE_FALSE, TRUE)
        self.imp_t_rotatedegKw = AFXFloatKeyword(self.apply_imp_t, 'imp_t_rotatedeg', TRUE)
        self.loaded_study = False
        self.setDefault()
    def get_params_from_gui(self):
        """Return {param: current keyword value} for every name in ``params``."""
        params_from_gui = {}
        for param in params:
            paramKw = param + 'Kw'
            obj = getattr(self, paramKw)
            # Table/tuple keywords expose getValues(); scalars use getValue().
            if obj.__class__.__name__.find('TableKeyword')> -1 \
               or obj.__class__.__name__.find('TupleKeyword')> -1:
                value = obj.getValues()
            else:
                value = obj.getValue()
            params_from_gui[param] = value
        return params_from_gui
    def read_params_from_gui(self, params_from_gui = {}):
        """Push the values in ``params_from_gui`` into the matching keywords.

        NOTE(review): the mutable default ``{}`` is only iterated, never
        mutated, so it is harmless here — but ``None`` would be safer.
        """
        for param, value in params_from_gui.iteritems():
            paramKw = param + 'Kw'
            # Silently skip parameters with no corresponding keyword.
            if getattr(self, paramKw, 'NOTFOUND')== 'NOTFOUND':
                continue
            obj = getattr(self, paramKw)
            if obj.__class__.__name__.find('TableKeyword')> -1 \
               or obj.__class__.__name__.find('TupleKeyword')> -1:
                obj.setValues(value)
            else:
                obj.setValue(value)
        #TODO
        # compatibility session
        # laminate from laminapropKeys, plyt, stack
        #
        # Legacy sessions have no 'laminate' table: rebuild it from the old
        # (laminapropKey, plyt, stack) inputs.
        if params_from_gui.get('laminate', None) is None:
            laminapropKeys = [self.laminapropKeyKw.getValue()]
            plyts = [self.plytKw.getValue()]
            stack = [float(i) for i in self.stackKw.getValues().split(',')]
            tmp = numpy.empty((testDB.NUM_PLIES, 3), dtype='|S50')
            tmp.fill('')
            tmp[:len(laminapropKeys),0] = laminapropKeys
            tmp[:len(plyts),1] = plyts
            tmp[:len(stack),2] = stack
            laminate = ','.join([str(tuple(i))for i in tmp])
            self.laminateKw.setValues(laminate)
    def setDefault(self,update_values=True, input_dict=None):
        """Load defaults (or ``input_dict``) into the AFX keywords.

        :param update_values: when True, also copy each default into the
            keyword's current value.
        :param input_dict: mapping of param name -> value; when None the
            module-level ``gui_defaults.defaults`` are used.
        """
        using_defaults = False
        if input_dict is None:
            using_defaults = True
            input_dict = gui_defaults.defaults
        ignore_list = ['stack','laminate','ploads','laminaprop',
                       'allowables']
        for k, v in input_dict.iteritems():
            if k in ignore_list:
                continue
            # The numel_r default depends on the element order: S8* elements
            # take the parabolic count, everything else the linear one.
            if (k == 'numel_r_linear' or k == 'numel_r_parabolic')\
               and using_defaults:
                if gui_defaults.defaults['elem_type'].find('S8')> -1:
                    v2 = gui_defaults.defaults['numel_r_parabolic']
                else:
                    v2 = gui_defaults.defaults['numel_r_linear']
                getattr(self, 'numel_rKw').setDefaultValue(v2)
                if update_values:
                    getattr(self, 'numel_rKw').setValueToDefault()
            else:
                if using_defaults:
                    v2 = gui_defaults.defaults[k]
                else:
                    v2 = v
                # AFX keywords require str, not unicode (Python 2).
                if isinstance(v2, unicode):
                    v2 = str(v2)
                attrname = k + 'Kw'
                if getattr(self, attrname, 'NotFound')<> 'NotFound':
                    getattr(self, attrname).setDefaultValue(v2)
                    if update_values:
                        getattr(self, attrname).setValueToDefault()
    def getFirstDialog(self):
        """Create and return the main dialog, cd'ing the kernel into TMP_DIR."""
        # Note: The style below is used for the prototype application to
        # allow the dialog to be reloaded while the application is
        # still running. In a non-prototype application you would use:
        #
        # return MyDB(self)
        # Reload the dialog module so that any changes to the dialog
        # are updated.
        #
        path = TMP_DIR
        sendCommand('import os')
        sendCommand('if not os.path.isdir(r"{0}"):\n'.format(path) +
                    '    os.makedirs(r"{0}")'.format(path))
        sendCommand('os.chdir(r"{0}")'.format(path))
        self.just_created_study = False
        reload(testDB)
        return testDB.TestDB(self)
    def issueCommands(self):
        """Build the create_study command string and execute it in the kernel."""
        self.laKw.setValue(self.db.lasw.getCurrent())
        a = self.cmd.getCommandString()
        # Empty GUI fields come through as blanks; patch them to False so
        # the generated command string stays valid Python.
        a = a.replace(', ,',',False,')
        a = a.replace(', ,',',False,')
        a = a.replace(',)',',False)')
        a = a.replace('(,False','(False,False')
        b = ('import gui_commands\n'
             + 'reload(gui_commands)\n'
             + a)
        #if not os.path.isdir(r'C:\Temp'):
        #    os.makedirs(r'C:\Temp')
        #cmdpath = r'c:\Temp\cmd.py'
        #cmdfile = open(cmdpath,'w')
        #cmdfile.write(b + '\n')
        #cmdfile.close()
        # Write the command to a temp script and execfile it, so very long
        # command strings can be executed in the kernel.
        with open(TMP_DIR + os.sep + 'tmpGUIcmd.py', 'w') as f:
            f.write(b)
        try:
            sendCommand(r'execfile(r"{0}\tmpGUIcmd.py")'.format(TMP_DIR),
                        writeToReplay=False, writeToJournal=True)
        except Exception as e:
            msg = r'ERROR: For debugging purposes run: execfile(r"{0}\tmpGUIcmd.py")'.format(TMP_DIR)
            sendCommand(r"""print(r'{0}')""".format(msg))
            raise RuntimeError(str(e) + '\n' + msg)
        self.just_created_study = True
        self.loaded_study = True
        outpath = os.path.join(TMP_DIR, self.std_nameKw.getValue())
        os.chdir(outpath)
        self.deactivateIfNeeded()
        return TRUE
        # Since this is a prototype application, just write the command to
        # the Message Area so it can be visually verified. If you have
        # defined a "real" command, then you can comment out this method to
        # have the command issued to the kernel.
        #
        # In a non-prototype application you normally do not need to write
        # the issueCommands()method.
        #
        #cmds = self.getCommandString()
        #getAFXApp().getAFXMainWindow().writeToMessageArea('TEST ' + cmds)
        #self.deactivateIfNeeded()
        #return TRUE
    def onCmdDelOutFolder(self, form, sender, sel, ptr):
        """Clean a study's output folder after the user confirms the dialog."""
        if sender.getPressedButtonId() == AFXDialog.ID_CLICKED_YES:
            std_name = self.std_to_postKw.getValue()
            command = ('import gui_commands\n' +
                       'reload(gui_commands)\n' +
                       'gui_commands.clean_output_folder("{0}")\n'.format(
                       std_name))
            sendCommand(command)
            path = os.path.join(TMP_DIR, std_name, 'outputs')
            # Success only if nothing but .gaps files remains afterwards.
            ldir = [s for s in os.listdir(path) if not s.endswith('.gaps')]
            if len(ldir) == 0:
                text = 'Folder {0} has been cleaned!'.format(path)
            else:
                text = 'Some files in {0} cannot be removed!'.format(path)
            showAFXInformationDialog(self.db, text)
        else:
            pass
|
from django.contrib import admin
from .models import NeighbourHood,User,Business,Profile
@admin.register(NeighbourHood)
class NeighbourHood(admin.ModelAdmin):
    # NOTE(review): this admin class shadows the imported NeighbourHood model
    # at module level after definition; consider renaming to NeighbourHoodAdmin.
    search_fields =['name','location','photo','counts','created_by']
    # NOTE(review): ModelAdmin does not read an inner Meta class; this is inert.
    class Meta:
        model = NeighbourHood
# @admin.register(User)
# class User(admin.ModelAdmin):
# list_display =('user',)
@admin.register(Profile)
class Profile(admin.ModelAdmin):
    # NOTE(review): shadows the imported Profile model; consider ProfileAdmin.
    search_fields=['user','profile_photo','email']
    # NOTE(review): ModelAdmin does not read an inner Meta class; this is inert.
    class Meta:
        model = Profile
@admin.register(Business)
class Business(admin.ModelAdmin):
    # NOTE(review): shadows the imported Business model; consider BusinessAdmin.
    search_fields = ['name','neighbourhood','description','business_photo','created_by']
    # NOTE(review): ModelAdmin does not read an inner Meta class; this is inert.
    class Meta:
        model = Business
# @admin.register(Account)
# class Account(admin.ModelAdmin):
# list_display=['email','username',]
# class Meta:
# model = Account
|
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for provide recommendation based on analysis results"""
from typing import Union, Text
from ml_eda.proto import analysis_entity_pb2
from ml_eda.reporting import template
from ml_eda.reporting import formatting
# Thresholds
MISSING_THRESHOLD = 0.1  # warn when more than 10% of an attribute's values are missing
CARDINALITY_THRESHOLD = 100  # warn when a categorical attribute has more than 100 distinct values
CORRELATION_COEFFICIENT_THRESHOLD = 0.3  # warn when |Pearson r| exceeds this
P_VALUE_THRESHOLD = 0.05  # statistical-test significance cut-off
# Short aliases for the protobuf message types used throughout this module.
Analysis = analysis_entity_pb2.Analysis
ScalarMetric = analysis_entity_pb2.ScalarMetric
def check_missing(
    attribute_name: Text,
    analysis: Analysis
) -> Union[None, Text]:
  """Check whether % of missing exceed threshold

  Args:
    attribute_name: (string),
    analysis: (analysis_entity_pb2.Analysis), analysis that contain the result
    of number of missing values

  Returns:
    Union[None, string]
  """
  # Collect the scalar metrics into a lookup table; like the loop it
  # replaces, a repeated metric name keeps the last value seen.
  metric_values = {item.name: item.value for item in analysis.smetrics}
  total = metric_values.get(ScalarMetric.TOTAL_COUNT, 0)
  missing = metric_values.get(ScalarMetric.MISSING, 0)

  if total == 0:
    raise ValueError('The dataset is empty')

  missing_rate = missing / total
  if missing_rate > MISSING_THRESHOLD:
    return template.HIGH_MISSING.format(
        name=attribute_name,
        value=missing_rate)
  return None
def check_cardinality(
    attribute_name: Text,
    analysis: Analysis
) -> Union[None, Text]:
  """Check whether the cardinality exceeds the predefined threshold

  Args:
    attribute_name: (string),
    analysis: (analysis_entity_pb2.Analysis), analysis that contain the result
    of cardinality

  Returns:
    Union[None, string]
  """
  cardinality = 0
  # The last CARDINALITY metric wins, matching the original scan order.
  for metric in analysis.smetrics:
    if metric.name == ScalarMetric.CARDINALITY:
      cardinality = metric.value

  if cardinality <= CARDINALITY_THRESHOLD:
    return None
  return template.HIGH_CARDINALITY.format(
      name=attribute_name,
      value=cardinality)
def check_pearson_correlation(analysis: Analysis) -> Union[None, Text]:
  """Check whether the correlation coefficients exceed the predefined threshold

  Args:
    analysis: (analysis_entity_pb2.Analysis), analysis that contain the result
    of pearson correlation

  Returns:
    Union[None, string]
  """
  feature_names = [attribute.name for attribute in analysis.features]
  coefficient = 0
  # Keep the last CORRELATION_COEFFICIENT seen, as the original loop did.
  for metric in analysis.smetrics:
    if metric.name == ScalarMetric.CORRELATION_COEFFICIENT:
      coefficient = metric.value

  if abs(coefficient) <= CORRELATION_COEFFICIENT_THRESHOLD:
    return None
  return template.HIGH_CORRELATION.format(
      name_one=feature_names[0],
      name_two=feature_names[1],
      metric='correlation coefficient',
      value="{0:.2f}".format(coefficient))
def check_p_value(analysis: Analysis) -> Union[None, Text]:
  """Check whether the p-value of statistical tests
  exceed the predefined threshold

  Args:
    analysis: (analysis_entity_pb2.Analysis), analysis that contain the result
    of statistical test

  Returns:
    Union[None, string]
  """
  feature_names = [attribute.name for attribute in analysis.features]
  # The test's p-value is carried by the first (and only) scalar metric.
  p_value = analysis.smetrics[0].value

  if p_value >= P_VALUE_THRESHOLD:
    return None
  return template.LOW_P_VALUE.format(
      name_one=feature_names[0],
      name_two=feature_names[1],
      metric='p-value',
      value=formatting.numeric_formatting(p_value))
|
import numpy as np
import gym
from gym import spaces
import random
class TwoThinning(gym.Env):
    """Gym environment for the two-thinning balls-into-bins process.

    Each step a candidate bin is drawn uniformly at random; the agent either
    ACCEPTs it or REJECTs it (in which case the ball lands in a fresh uniform
    bin).  The episode ends after ``m`` balls; the reward is the negative
    maximum bin load, so less-loaded configurations score higher.
    """

    REJECT = 0
    ACCEPT = 1

    def __init__(self, n=8, m=15):
        """n: number of bins; m: total number of balls per episode."""
        super(TwoThinning, self).__init__()
        self.n = n
        self.m = m
        self.action_space = spaces.Discrete(2)
        self.observation_space = spaces.Dict({
            "load_configuration": spaces.Box(
                low=np.array([0] * n), high=np.array([m] * n), dtype=np.float64),
            "location": spaces.Discrete(n),
        })

    def get_random_bin(self):
        # Uniform bin index in [0, n).
        return random.randrange(self.n)

    def evaluate(self):
        # Terminal reward: negative of the heaviest bin's load.
        return -np.max(self.load_configuration)

    def _observe(self, location):
        # Copy the loads so callers cannot mutate internal state.
        return {"load_configuration": self.load_configuration.copy(),
                "location": location}

    def reset(self):
        self.load_configuration = np.zeros(self.n, dtype=np.float64)
        self.currently_chosen = self.get_random_bin()
        return self._observe(self.currently_chosen)

    def step(self, action):
        if action == self.ACCEPT:
            self.load_configuration[self.currently_chosen] += 1
        elif action == self.REJECT:
            # Rejected: the ball lands in an independently drawn bin.
            self.load_configuration[self.get_random_bin()] += 1
        if np.sum(self.load_configuration) == self.m:
            # Episode over; location is irrelevant, reported as 0.
            return self._observe(0), self.evaluate(), True, {}
        self.currently_chosen = self.get_random_bin()
        return self._observe(self.currently_chosen), 0, False, {}
|
from django.urls import path
from apps.job_submissions import views
# URL routes for the job-submissions app: a list endpoint and a detail
# endpoint keyed by primary key.
urlpatterns = [
    # GET/POST collection view.
    path("job-submission/", views.JobSubmissionListView.as_view()),
    # Single-object view addressed by integer primary key.
    path("job-submission/<int:pk>/", views.JobSubmissionView.as_view()),
]
|
import factory
from .. import models
class PublisherFactory(factory.django.DjangoModelFactory):
    """factory_boy factory that builds ``Publisher`` model instances for tests."""
    class Meta:
        model = models.Publisher
|
#from stack import Stack
from . import *
# The interpreter's Int type object; all Int stack values carry this tag.
TInt = Type("Int")

#
#   Integer handling
#

# Constructors
def op_int(c: AF_Continuation) -> None:
    """Pop the top stack value, coerce it to int, range-check it, and push
    it back tagged as TInt.

    The range asserts bound values to 12 decimal digits; values outside
    (-999999999999, 999999999999) raise AssertionError.
    """
    #print("\nop_int c.stack.contents = %s." % c.stack.contents())
    i = int(c.stack.pop().value)
    assert i < 999999999999, "int overflow > 999999999999"
    assert i > -999999999999, "int underflow < -999999999999"
    c.stack.push(StackObject(i,TInt))

# Int dictionary
# Register 'int' as a constructor for Int from either an existing Int or a
# bare Atom.
Type.register_ctor('Int', Operation('int',op_int), [TInt])
Type.register_ctor('Int', Operation('int',op_int), [TAtom])
# Operations
def op_plus(c: AF_Continuation) -> None:
    """Pop two Ints and push their sum, re-validated through op_int."""
    addend = c.stack.pop().value
    augend = c.stack.pop().value
    total = addend + augend
    # Guarantee output is valid and not overflow.
    assert int(total) - augend == addend, "python math error"
    c.stack.push(StackObject(total, TInt))
    # We're cheating here cause, for now, op_int is supposed to take a TAtom!
    op_int(c)
Type.add_op(Operation('+', op_plus, sig=TypeSignature([TInt, TInt], [TInt])), "Int")
def op_minus(c: AF_Continuation) -> None:
    """Pop subtrahend then minuend; push their difference via op_int."""
    subtrahend = c.stack.pop().value
    minuend = c.stack.pop().value
    difference = minuend - subtrahend
    # Guarantee output is valid and not overflow.
    assert int(difference) + subtrahend == minuend, "python math error"
    c.stack.push(StackObject(difference, TInt))
    # We're cheating here cause, for now, op_int is supposed to take a TAtom!
    op_int(c)
Type.add_op(Operation('-', op_minus, sig=TypeSignature([TInt, TInt], [TInt])), "Int")
def op_multiply(c: AF_Continuation) -> None:
    """Pop two Ints and push their product, re-validated through op_int.

    Fix: the sanity check previously used true division (``/``), which
    produces a float; for products larger than 2**53 the float comparison
    can spuriously fail on mathematically correct results.  Floor division
    is exact here because ``result`` is an exact multiple of ``op1``.
    """
    op1 = c.stack.pop().value
    op2 = c.stack.pop().value
    result = op2 * op1
    # Guarantee output is valid and not overflow.
    if op1 != 0:  # Protect against divide by zero error on check.
        assert result // op1 == op2, "python math error"
    c.stack.push(StackObject(result, TInt))
    op_int(c)  # We're cheating here cause, for now, op_int is supposed to take a TAtom!
Type.add_op(Operation('*', op_multiply, sig=TypeSignature([TInt, TInt], [TInt])), "Int")
def op_divide(c: AF_Continuation) -> None:
    """Pop divisor then dividend; push truncated quotient, then remainder.

    Note: truncation uses int(a/b) (round toward zero), and the remainder
    is derived from that quotient so quotient*divisor + remainder == dividend.
    """
    assert c.stack.tos().value != 0, "int division by zero error."
    divisor = c.stack.pop().value
    dividend = c.stack.pop().value
    quotient = int(dividend / divisor)
    remainder = dividend - (quotient * divisor)
    c.stack.push(StackObject(quotient, TInt))
    c.stack.push(StackObject(remainder, TInt))
Type.add_op(Operation('/', op_divide, sig=TypeSignature([TInt, TInt], [TInt, TInt])), "Int")
|
"""Standard text for ROSS Report."""
__all__ = ["ReportTemplateText"]
class ReportTemplateText:
    """Create text instances to ROSS REPORT.

    Each attribute holds a dictionary related to a single Report section.
    The arguments are collected and set to the HTML file according to the
    content organization.

    This revision fixes several user-facing typos in the rendered report
    text ("undemped" -> "undamped", "wieght" -> "weight", "stabiliy" ->
    "stability", "presets" -> "presents", etc.); the structure and keys of
    every section dictionary are unchanged.

    Parameters
    ----------
    report : rp.Report object
        An instance from Report class. The texts collects some arguments from the
        report object to customize the information for each rotor model and its
        respective analyses.
    """

    def __init__(self, report):
        # Each section is a plain dict of named text fragments; f-strings pull
        # rotor-specific values (tag, deflected-shape speed) from `report`.
        self.introduction = dict(
            intro=(
                """
                ROSS - Rotordynamics Open-Source Software is a library written in Python
                for rotordynamic analyses. It's developed by Petrobrás and Federal
                University of Rio de Janeiro.
                It allows the construction of rotor models and their
                numerical simulation. Shaft elements, as a default, are modeled with the
                Timoshenko beam theory, which considers shear and rotary inertia
                effects, and discretized by means of the Finite Element Method. Disks
                are assumed to be rigid bodies, thus their strain energy is not taken
                into account. And bearings/seals are included as linear
                stiffness/damping coefficients.
                ROSS carries out several different analyses which include:
                1. Static analysis;
                2. Modal analysis - Natural frequencies and mode shapes determination;
                3. Damped and undamped critical speed analysis;
                4. Unbalance response analysis;
                5. Time domain linear analysis;
                6. Stability analysis of the rotor.
                """
            )
        )
        self.static_analysis = dict(
            intro=(
                """
                The static analysis calculates the shaft deformation, shearing forces
                and bending moments for the rotor, given its self weight (shaft and
                other couplings).
                Figure XXXX shows the free-body diagram representation, where Fd stands
                for Disk weight and Fb stands for bearing reaction forces.
                Figure XXXX shows the shaft static deformation (gyroscopic effect not
                included - speed = 0 RPM).
                Figures XXXX and XXXX show the diagrams for shearing force and
                bending moment respectively.
                """
            )
        )
        self.undamped_critical_speed_map = dict(
            intro=(
                """
                The Critical Speed Map determines approximated values of the natural
                frequencies as function of the bearings stiffness coefficients.
                The intersections of these bearings curves with the natural frequencies
                curves define the undamped critical speeds. The horizontal dashed lines
                represent the rotor operation speeds.
                """
            ),
            min_clearance=(
                """
                Figure XXXX shows the Undamped Critical Speed Map under the minimum
                clearance of the bearings.
                """
            ),
            rated_clearance=(
                """
                Figure XXXX shows the Undamped Critical Speed Map under the rated
                clearance of the bearings.
                """
            ),
            max_clearance=(
                """
                Figure XXXX shows the Undamped Critical Speed Map under the maximum
                clearance of the bearings.
                """
            ),
        )
        self.damped_critical_speed_map = dict(
            intro=(
                """
                The Damped Critical Speed Map, also called Campbell Diagram determines
                approximated values of the damped natural frequencies as function of
                the rotor speed. The intersections of each harmonic curve with the
                natural frequencies scatter curves define the critical speeds.
                Furthermore, the damping level of each mode, measured by the logarithmic
                decrement, is presented with a color scale.
                """
            ),
            min_clearance=(
                """
                Figure XXXX shows the Campbell diagram under the minimum clearance of
                the bearings.
                """
            ),
            rated_clearance=(
                """
                Figure XXXX shows the Campbell diagram under the rated clearance of the
                bearings.
                """
            ),
            max_clearance=(
                """
                Figure XXXX shows the Campbell diagram under the maximum clearance of
                the bearings.
                """
            ),
        )
        self.mode_shapes = dict(
            intro=(
                """The mode shapes are calculated through the rotor modal analysis. The
                results present the 2d shapes with respective natural frequencies, whirl
                direction and the log dec. The modal analysis is performed to the rotor
                operation speed.
                """
            ),
            min_clearance=(
                f"""
                Figure XXXX shows first two mode shape of {report.tag} rotor, for the
                minimum clearance.
                """
            ),
            rated_clearance=(
                f"""
                Figure XXXX shows first two mode shape of {report.tag} rotor, for the
                rated clearance.
                """
            ),
            max_clearance=(
                f"""
                Figure XXXX shows first two mode shape of {report.tag} rotor, for the
                maximum clearance.
                """
            ),
        )
        self.unbalance_response = dict(
            intro=(
                """The Unbalance Response Analysis represents the rotor synchronous
                excitation due to rotor unbalance. The results present a diagram with
                plots of amplitude and phase versus frequency and a polar plot of
                amplitude versus phase. The setup of unbalance positions, weights and
                phases is defined according to API 684 SP6.8.2.7 and SP6.8.2.8, which is
                based on the machine type, bearings and couplings positions and the
                mode shapes configurations. The Amplification Factors,
                Separation Margins and Scale Factors are defined by
                API684 - SP6.8.2.1, SP6.8.2.10 and SP6.8.2.11 respectively.
                """
            ),
            min_clearance=(
                """
                The unbalance response diagram is shown in Figure XXXX and Table XXXX
                show a brief results summary under the minimum clearance of the
                bearings. The amplitude in all curves are calculated for the nodes and
                orientations for each probe selected in the analysis.
                """
            ),
            rated_clearance=(
                """
                The unbalance response diagram is shown in Figure XXXX and Table XXXX
                show a brief results summary under the rated clearance of the bearings.
                The amplitude in all curves are calculated for the nodes and
                orientations for each probe selected in the analysis.
                """
            ),
            max_clearance=(
                """
                The unbalance response diagram is shown in Figure XXXX and Table XXXX
                show a brief results summary under the maximum clearance of the
                bearings. The amplitude in all curves are calculated for the nodes and
                orientations for each probe selected in the analysis.
                """
            ),
        )
        self.deflected_shape = dict(
            intro=(
                """The deflected shape analysis presents results for the 3d shape rotor
                deformation due to the applied imbalance for a given speed, the 2d shape
                of the absolute value for the major axis and the bending moment diagram.
                """
            ),
            min_clearance=(
                f"""
                The plots of deflected shapes for speed {report.config.run_unbalance_response.plot_deflected_shape.speed}
                with minimum clearance are shown below.
                """
            ),
            rated_clearance=(
                f"""
                The plots of deflected shapes for speed {report.config.run_unbalance_response.plot_deflected_shape.speed}
                with rated clearance are shown below.
                """
            ),
            max_clearance=(
                f"""
                The plots of deflected shapes for speed {report.config.run_unbalance_response.plot_deflected_shape.speed}
                with maximum clearance are shown below.
                """
            ),
        )
        self.level_1_analysis = dict(
            intro=(
                """
                The Stability Level 1 Analysis determines the natural frequencies and
                the corresponding logarithmic decrements (log decs) of the damped
                rotor/support system using a complex value analysis.
                This analysis is performed with a varying amount of cross coupling
                introduced at the rotor mid-span for between bearing rotors or at the
                center of gravity of the stage or impeller for single overhung rotors.
                For double overhung rotors, the cross coupling shall be placed at each
                stage or impeller concurrently and shall reflect the ratio of the
                anticipated cross coupling (qa, calculated for each impeller or stage).
                The anticipated cross coupling, QA, present in the rotor is defined by
                the following procedures:
                For centrifugal compressors:
                INSERT FORMULA
                HP is the rated power per impeller, Nm/s (HP);
                Bc is 3;
                C is 9.55 (63);
                ρd is the discharge gas density per impeller, kg/m3 (lbm/ft3);
                ρs is the suction gas density per impeller, kg/m3 (lbm/ft3);
                Dc is the impeller diameter, mm (in.);
                Hc is the minimum of diffuser or impeller discharge width per impeller, mm (in.);
                Nr is the normal operating speed for calculation of aerodynamic excitation (rpm);
                qa is the cross coupling for each individual impeller, kN/mm (klbf/in).
                For axial flow rotors:
                INSERT FORMULA
                Bt is 1.5;
                Dt is the blade pitch diameter, mm (in.);
                Ht is the effective blade height, mm (in.).
                """
            ),
            min_clearance=(
                """
                The result of level I stability (plot of Applied Cross-coupled
                stiffness vs logarithmic decrement) is shown as Figure XXXX. The plot
                shows the relationship of the logarithmic decrement and Cross-coupled
                stiffness of rotor, Qa is anticipated cross coupling stiffness, Q0 is
                the amount of the applied cross coupling required to produce a zero
                logarithmic decrement (where the curve crosses the abscissa).
                Figure XXXX is a screening criteria relating the Critical Speed Ratio
                (CSR) and the average gas density. If the screening point is located
                on Region B, further stability analysis is required.
                """
            ),
            rated_clearance=(
                """
                The result of level I stability (plot of Applied Cross-coupled
                stiffness vs logarithmic decrement) is shown as Figure XXXX. The plot
                shows the relationship of the logarithmic decrement and Cross-coupled
                stiffness of rotor, Qa is anticipated cross coupling stiffness, Q0 is
                the amount of the applied cross coupling required to produce a zero
                logarithmic decrement (where the curve crosses the abscissa).
                Figure XXXX is a screening criteria relating the Critical Speed Ratio
                (CSR) and the average gas density. If the screening point is located
                on Region B, further stability analysis is required.
                """
            ),
            max_clearance=(
                """
                The result of level I stability (plot of Applied Cross-coupled
                stiffness vs logarithmic decrement) is shown as Figure XXXX. The plot
                shows the relationship of the logarithmic decrement and Cross-coupled
                stiffness of rotor, Qa is anticipated cross coupling stiffness, Q0 is
                the amount of the applied cross coupling required to produce a zero
                logarithmic decrement (where the curve crosses the abscissa).
                Figure XXXX is a screening criteria relating the Critical Speed Ratio
                (CSR) and the average gas density. If the screening point is located
                on Region B, further stability analysis is required.
                """
            ),
        )
        self.level_2_analysis = dict(
            intro=(
                """
                The Stability Level 2 analysis shall include the dynamic characteristics
                of all components that, somehow, affects the stability behaviour of the
                rotor machine. These dynamic effects replace the anticipated cross
                coupling, QA, calculated in the Stability Level 1 Analysis.
                """
            ),
            min_clearance=(
                """
                The Table XXXX below shows the log decrement for several rotor
                configurations, considering each component individually and the full
                rotor model. Each row present a configuration and the respective log
                decrement calculated for the maximum continuous speed.
                """
            ),
            rated_clearance=(
                """
                The Table XXXX below shows the log decrement for several rotor
                configurations, considering each component individually and the full
                rotor model. Each row present a configuration and the respective log
                decrement calculated for the maximum continuous speed.
                """
            ),
            max_clearance=(
                """
                The Table XXXX below shows the log decrement for several rotor
                configurations, considering each component individually and the full
                rotor model. Each row present a configuration and the respective log
                decrement calculated for the maximum continuous speed.
                """
            ),
        )
        self.conclusion = dict(
            Pass=(
                f"""
                According to the analysis, the rotor {report.tag} complies with the
                API 617-2014 Standard, the lateral vibration and stability analysis of
                the rotor are acceptable.
                """
            ),
            Not_Pass=(
                f"""
                According to the analysis, the rotor {report.tag} does not comply with
                the API 617-2014 Standard, the lateral vibration and / or stability
                analysis of the rotor are not acceptable. Further verifications are
                required.
                """
            ),
        )

    def __getitem__(self, option):
        """Return the value for a given option from the dictionary.

        Parameters
        ----------
        option : str
            A dictionary key corresponding to the text section.

        Raises
        ------
        KeyError
            Raises an error if the parameter doesn't belong to the dictionary.

        Returns
        -------
        Return the value for the given key.
        """
        # `in self.__dict__` is equivalent to the previous `.keys()` lookup.
        if option not in self.__dict__:
            raise KeyError("Option '{}' not found.".format(option))
        return self.__dict__[option]
|
import gensim
from gensim.scripts import glove2word2vec
from gensim.models import fasttext
import os
import shutil
from sys import platform
import pickle
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
# 计算行数,就是单词数
# 计算行数,就是单词数 -> count the lines = number of words in the vector file
def getFileLineNums(filename):
    """Return the number of lines in *filename*.

    Fix: the original opened the file without ever closing it (handle
    leak); a context manager guarantees the file is closed even if
    iteration raises.
    """
    with open(filename, 'r') as f:
        return sum(1 for _ in f)
# Linux或者Windows下打开词向量文件,在开始增加一行
# Linux或者Windows下打开词向量文件,在开始增加一行
# (Open the vector file on Linux or Windows and prepend one header line.)
def prepend_line(infile, outfile, line):
    """Write *line* to *outfile*, then append *infile*'s content (bulk copy)."""
    with open(infile, 'r') as src, open(outfile, 'w') as dst:
        dst.write(str(line) + "\n")
        # copyfileobj streams in buffered chunks - fast for large files.
        shutil.copyfileobj(src, dst)
def prepend_slow(infile, outfile, line):
    """Write *line* to *outfile*, then copy *infile* line by line."""
    with open(infile, 'r') as src, open(outfile, 'w') as dst:
        dst.write(line + "\n")
        for row in src:
            dst.write(row)
def load(filename, dims=300):
    """Convert a GloVe vector file to word2vec format and load it.

    GloVe files lack the "<count> <dims>" header word2vec expects, so one is
    prepended before loading.

    Changes from the original:
    - ``dims`` is now a parameter (default 300, so existing callers are
      unaffected) instead of a hard-coded constant.
    - the loaded model is returned (it was previously discarded).
    - the leftover debug ``print(model['word'])`` was removed; it raised
      KeyError whenever the literal token 'word' was absent from the vocab.

    Parameters
    ----------
    filename : str
        Path to the GloVe-format text file.
    dims : int, optional
        Dimensionality of the vectors (default 300).

    Returns
    -------
    gensim.models.KeyedVectors
    """
    num_lines = getFileLineNums(filename)
    gensim_file = 'glove_model.txt'
    gensim_first_line = "{} {}".format(num_lines, dims)
    # Prepends the line (fast copy on Linux, portable fallback elsewhere).
    if platform == "linux" or platform == "linux2":
        prepend_line(filename, gensim_file, gensim_first_line)
    else:
        prepend_slow(filename, gensim_file, gensim_first_line)
    return gensim.models.KeyedVectors.load_word2vec_format(gensim_file)
# glove_6B = os.path.join(path, 'glove.6B.300d.txt')
# glove_6B_out = os.path.join(path, 'glove.6B.txt')
# load(glove_6B)
# print(glove2word2vec.get_glove_info(glove_6B))
# glove2word2vec.glove2word2vec(glove_6B, glove_6B_out)
# glove_840B = os.path.join(path, 'glove.840B.300d.txt')
# glove_840B_out = os.path.join(path, 'glove.840B.txt')
# load(glove_840B)
# print(glove2word2vec.get_glove_info(glove_840B))
# glove2word2vec.glove2word2vec(glove_840B, glove_840B_out)
# Directory holding the pre-trained embedding binaries.
path = '../data/processed_data'
# google = os.path.join(path, 'GoogleNews-vectors-negative300.bin')
# model = gensim.models.KeyedVectors.load_word2vec_format(google, binary=True)
# glove6B = os.path.join(path, 'glove.840B.bin')
# model = gensim.models.KeyedVectors.load_word2vec_format(glove6B, binary=True)
# model.save_word2vec_format(os.path.join(path, 'glove.840B.bin'), binary=True)
# Load the Facebook fastText binary and dump its word->vector mapping to a
# pickle for downstream use.
fast = os.path.join(path, 'cc.en.300.bin')
model = fasttext.load_facebook_vectors(fast)
# NOTE(review): `model.vocab` exists in gensim < 4.0 only (gensim 4 renamed
# it to `key_to_index`) - confirm the pinned gensim version.
print(len(model.vocab))
word_weights = {}
for word in model.vocab:
    word_weights[word] = model[word]
with open(os.path.join(path, 'fastText.pkl'), 'wb') as file:
    pickle.dump(word_weights, file)
|
import configparser
import os
import sys
from .resources import *
from .tabquery_path import TabQueryPath
# Module-global path to the tabquery executable; populated by
# configure_tabquery_path() and read by the command-line builders below.
tab_cli_exe = ''
def configure_tabquery_path():
    """Setup the tabquery path from ini settings.

    Resolution order: the TABQUERY_CLI_PATH environment variable wins;
    otherwise the path is read from the tdvt ini file, keyed per platform.
    """
    global tab_cli_exe
    # NOTE(review): `logging` is not in this module's visible imports -
    # presumably it arrives via `from .resources import *`; confirm.
    if os.environ.get('TABQUERY_CLI_PATH'):
        tab_cli_exe = os.environ.get('TABQUERY_CLI_PATH')
        logging.info(
            "Tabquery path from TABQUERY_CLI_PATH environment variable is: {}"
            .format(tab_cli_exe)
        )
    else:
        logging.info("TABQUERY_CLI_PATH environment variable not set. Trying ini files.")
        config = configparser.ConfigParser()
        # Prefer a local config/tdvt ini over the packaged default.
        tdvt_cfg = get_ini_path_local_first('config/tdvt', 'tdvt')
        logging.debug("Reading tdvt ini file [{}]".format(tdvt_cfg))
        config.read(tdvt_cfg)
        # Pick the platform-specific executable entry.
        if sys.platform.startswith("darwin"):
            tab_cli_exe = config['DEFAULT']['TAB_CLI_EXE_MAC']
        elif sys.platform.startswith("linux"):
            tab_cli_exe = config['DEFAULT']['TAB_CLI_EXE_LINUX']
        else:
            tab_cli_exe = config['DEFAULT']['TAB_CLI_EXE_X64']
        logging.debug("Reading tdvt ini file tabquerycli path is [{}]".format(tab_cli_exe))
def get_max_process_level_of_parallelization(desired_threads):
    """Cap the worker count: tabquerytool on macOS is limited to one process."""
    capped_to_one = sys.platform.startswith("darwin") and 'tabquerytool' in tab_cli_exe
    return 1 if capped_to_one else desired_threads
def build_tabquery_command_line(work):
    """Build the tabquery command line, preferring a site extension if present.

    Tries to import ``extend_tabquery`` from the extensions directory and use
    its ``TabqueryCommandLineExtension``; falls back to the stock
    ``TabqueryCommandLine`` when the extension is missing or fails to load.

    Fixes over the original:
    - the bare ``except:`` (which also swallowed KeyboardInterrupt/SystemExit)
      is narrowed to ``except Exception``;
    - ``sys.path.pop(0)`` now runs in a ``finally`` block, so a failed import
      no longer leaves the extensions directory permanently on ``sys.path``.
    """
    sys.path.insert(0, get_extensions_dir())
    try:
        from extend_tabquery import TabqueryCommandLineExtension
        tb = TabqueryCommandLineExtension()
        logging.debug("Imported extension extend_tabquery")
    except Exception:
        # No (working) extension available - use the default implementation.
        tb = TabqueryCommandLine()
    finally:
        sys.path.pop(0)
    cmdline = tb.build_tabquery_command_line(work)
    return cmdline
def build_connectors_test_tabquery_command_line(conn_test_name, conn_test_file_name, conn_test_password_file):
    """Assemble the tabquery invocation for a single connectors test."""
    global tab_cli_exe
    cmdline = [
        tab_cli_exe,
        "--conn-test", conn_test_name,
        "--conn-test-file", conn_test_file_name,
    ]
    # The password file is optional.
    if conn_test_password_file:
        cmdline += ["--conn-test-password-file", conn_test_password_file]
    return cmdline
class TabqueryCommandLine(object):
    """Builds the tabquerycli command line for a unit of test work.

    Subclasses (site extensions) can override ``extend_command_line`` to
    append extra arguments without reimplementing the full builder.
    """
    def extend_command_line(self, cmdline, work):
        # Hook for subclasses; the base implementation adds nothing.
        pass
    def build_tabquery_command_line(self, work):
        """Build the command line string for calling tabquerycli."""
        global tab_cli_exe
        # Logical tests pass query files; expression tests pass expression files.
        cli_arg = "--query-file-list" if work.test_config.logical else "--expression-file-list"
        cmdline = [tab_cli_exe]
        # A run-time config may point at its own tabquery binary, overriding
        # the module-global executable.
        if work.test_config.tested_run_time_config is not None and work.test_config.tested_run_time_config.has_customized_tabquery_path():
            cmdline = [work.test_config.tested_run_time_config.tabquery_paths.get_path(sys.platform)]
        cmdline_base = [cli_arg, work.test_list_path]
        cmdline.extend(cmdline_base)
        tds_arg = ["-d", work.test_config.tds]
        cmdline.extend(tds_arg)
        cmdline.extend(["--combined"])
        # Only pass a password file if one actually exists on disk.
        password_file = work.test_set.get_password_file_name()
        if os.path.isfile(password_file):
            password_arg = ["--password-file", password_file]
            cmdline.extend(password_arg)
        if work.test_config.output_dir:
            cmdline.extend(["--output-dir", work.test_config.output_dir])
        #Save all the log files from the core Tableau process.
        cmdline.extend(["-DLogDir=" + work.test_config.log_dir])
        cmdline.extend(["-DOverride=ProtocolServerNewLog"])
        # User-supplied -D overrides are space-separated and appended verbatim.
        if work.test_config.d_override:
            for override in work.test_config.d_override.split(' '):
                cmdline.extend([override])
        # Respect an explicit -DLogicalQueryRewriteDisable override if the user
        # already provided one; otherwise install the default below.
        logical_rewrite_iter = next((i for i in cmdline if i.find('-DLogicalQueryRewriteDisable') != -1), None)
        if logical_rewrite_iter == None:
            #Disable constant expression folding. This will bypass the VizEngine for certain simple calculations. This way we run a full database query
            #that tests what you would expect.
            cmdline.extend(["-DLogicalQueryRewriteDisable=Funcall:RewriteConstantFuncall"])
        # LogicalQuery cache can cache results across multiple expressions, and prevent
        # issuance of queries to the underlying database, so disable it.
        cmdline.extend(["-DInMemoryLogicalCacheDisable"])
        self.extend_command_line(cmdline, work)
        work.test_config.command_line = cmdline
        return cmdline
def tabquerycli_exists(tabquery_cli_path: TabQueryPath = None):
    """Return True if a tabquery binary exists at the explicit path or at
    the configured module-global location."""
    global tab_cli_exe
    # An explicitly supplied path is checked first.
    if tabquery_cli_path:
        candidate = tabquery_cli_path.get_path(sys.platform)
        if os.path.isfile(candidate):
            logging.debug("Found tabquery at [{0}]".format(candidate))
            return True
    # Fall back to the path resolved by configure_tabquery_path().
    if os.path.isfile(tab_cli_exe):
        logging.debug("Found tabquery at [{0}]".format(tab_cli_exe))
        return True
    logging.debug("Could not find tabquery at [{0}]".format(tab_cli_exe))
    return False
|
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 13 23:30:09 2022
@author: Daniel Bresnahan
"""
import os
import pandas as pd
class CornDataset():
    """Corn with and without NCLB (Northern Corn Leaf Blight) dataset.

    Wraps an annotations CSV; indexing yields a binary label derived from
    the annotation columns.
    """

    def __init__(self, csv_file, root_dir, transform=None):
        """
        Parameters
        ----------
        csv_file : String
            Path to CSV File with Annotations.
        root_dir : String
            Directory with all the images.
        transform : callable, optional
            Optional Transformation to apply to an image
        """
        self.img_labels = pd.read_csv(csv_file)
        self.root_dir = root_dir
        self.transform = transform

    def __len__(self):
        """Number of annotated rows."""
        return len(self.img_labels)

    def __getitem__(self, idx):
        """Return a binary label for row *idx*: 1 if any of the annotation
        columns (columns 1-4) is nonzero, else 0."""
        score = sum(self.img_labels.iloc[idx, 1:5])
        return 1 if score else score
if __name__=="__main__":
    # Ad-hoc class-balance report over the test annotations.
    # NOTE(review): absolute Windows paths - only runs on the author's machine.
    csv = "E:\\Coding\\Dataset\\annotations_test.csv"
    root_dir = "E:\\Coding\\Dataset"
    data = CornDataset(csv, root_dir)
    num_pos = 0
    num_neg = 0
    # Tally how many samples receive each binary label.
    for i in range(len(data)):
        label = data.__getitem__(i)
        if label == 0:
            num_neg += 1
        else:
            num_pos += 1
    print("Number Positive Samples: {} \n Number Negative Samples: {}".format(num_pos, num_neg))
|
import os
import unittest
import numpy as np
import pandas as pd
from portformer import BreakpointAPI
from portformer.errors import PortformerInvalidAPIKeyError, PortformerMissingAPIKeyError
class TestBreakpointAPI(unittest.TestCase):
    """Integration tests for the Portformer Breakpoint API client.

    Most tests hit the live service and rely on the BREAKPOINT_API_KEY
    environment variable for authentication.
    """
    def setUp(self):
        # Read environment variable = BREAKPOINT_API_KEY
        self.api = BreakpointAPI(api_key=None)
    def test_api_key_config(self):
        """Config from ENV VAR and from parameter"""
        cur_env_var = os.environ.get("BREAKPOINT_API_KEY", None)
        os.environ["BREAKPOINT_API_KEY"] = "ENVTEST"
        api = BreakpointAPI()
        self.assertEqual(api.api_key, "ENVTEST")
        # An explicit api_key parameter overrides the environment variable.
        api = BreakpointAPI(api_key="TEST")
        self.assertEqual(api.api_key, "TEST")
        # reset BREAKPOINT_API_KEY
        # NOTE(review): when the variable was originally unset, "ENVTEST" is
        # left behind in os.environ - confirm this leak is acceptable.
        if cur_env_var is not None:
            os.environ["BREAKPOINT_API_KEY"] = cur_env_var
    def test_requires_auth_key(self):
        """Should return 401 / 403 Not authenticated if given a bad or missing api_keys"""
        api = BreakpointAPI(api_key="NOT-A-VALID-KEY")
        self.assertRaises(
            PortformerInvalidAPIKeyError, api.forecast, ("TSLA",),
        )
        api = BreakpointAPI(api_key=None)
        api.api_key = None  # force non in case environ variable is set
        self.assertRaises(
            PortformerMissingAPIKeyError, api.forecast, ("TSLA",),
        )
    def test_example_forecast(self):
        """Get Latest AAPL forecasts."""
        breakpoint_forecast = self.api.forecast("AAPL")
        self.assertEqual(breakpoint_forecast["ticker"], "AAPL")
        self.assertIsNotNone(breakpoint_forecast["as_of_date"])
        self.assertIsNotNone(breakpoint_forecast["mu"])
        self.assertIsNotNone(breakpoint_forecast["std"])
    def test_example_historical_forecasts(self):
        """Get Historical TSLA forecasts"""
        historical_breakpoints = self.api.historical_forecasts(
            "TSLA", start_date="2020-02-01", end_date="2020-04-01"
        )
        self.assertGreater(len(historical_breakpoints["agg"]), 0)
    def test_example_cross_sectional_forecasts(self):
        """Get Latest SPY AGG GLD forecasts."""
        breakpoint_cross_section = self.api.cross_sectional_forecasts(
            tickers=["SPY", "AGG", "GLD"]
        )
        # Index the per-ticker results by symbol for easy assertions.
        results = {x["ticker"]: x["sharpe"] for x in breakpoint_cross_section}
        self.assertIsNotNone(results["SPY"])
        self.assertIsNotNone(results["AGG"])
        self.assertIsNotNone(results["GLD"])
    def test_example_crypto_forecasts(self):
        """Get Crypto Universe Bitcoin forecasts"""
        btc = self.api.crypto_forecasts(ticker="BTCUSD")
        self.assertEqual(btc["ticker"], "BTCUSD")
        self.assertIsNotNone(btc["as_of_date"])
        self.assertIsNotNone(btc["mu"])
        self.assertIsNotNone(btc["std"])
    def test_example_crypto_universe(self):
        """Get full crypto_universe list"""
        universe = self.api.crypto_universe()
        self.assertGreater(len(universe), 0)
    def test_example_custom(self):
        """API request with custom timeseries data"""
        N = 200
        seed = 42
        # Generate a random price series (seeded for reproducibility).
        np.random.seed(seed)
        data = np.exp(pd.Series(np.random.normal(size=(N,)) * 0.01).cumsum())
        data.index = pd.bdate_range("2020-01-01", periods=N)
        bp = self.api.custom_timeseries_forecasts(
            data,
            name=None,
            history_timedelta=None,
            tform="log-diff",
            no_whitten=False,
            seed=seed,
        )
        self.assertIsNotNone(bp)
|
import numpy as np
# from numpy import fft
import pandas as pd
# from scipy import signal as sig
# from cmath import phase
# import math
from fft_utils import FFTFeatureExtractor
import logging
# import matplotlib as mpl
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from argparse import ArgumentParser
# mpl.rcParams['figure.figsize'] = (8, 6)
# mpl.rcParams['axes.grid'] = False
def main():
    """Fit a linear model on FFT-filtered NYC-taxi passenger counts and plot it.

    Reads the Kaggle NYC taxi ``train.csv``, aggregates passenger counts per
    minute, extracts dominant frequencies with ``FFTFeatureExtractor``,
    regresses the standardized counts on time + filtered residuals, and plots
    the first ``--num_days`` days of actual vs. predicted signal.
    """
    parser = ArgumentParser()
    # BUGFIX: the option previously had no default, so omitting it made
    # ``N = 24 * 60 * args.num_days`` raise TypeError on None.
    parser.add_argument('--num_days', help='Number of days to visualize into the future',
                        type=int, default=1)
    args = parser.parse_args()
    print(args.num_days)
    day = 24*60  # minutes per day (the series is minute-granular)
    # NYC taxi dataset
    train_df = pd.read_csv('../sample-data/train/train.csv')
    # test_df = pd.read_csv('../sample-data/test/test.csv')
    train_df.pickup_datetime = pd.to_datetime(train_df.pickup_datetime)
    # BUGFIX: the dropoff conversion was duplicated; converting once is enough.
    train_df.dropoff_datetime = pd.to_datetime(train_df.dropoff_datetime)
    train_df.store_and_fwd_flag = train_df.store_and_fwd_flag.apply(lambda x: 1 if x=='Y' else 0)
    # Per-minute totals of passengers and trip durations.
    ts_train = train_df.groupby(pd.Grouper(key='pickup_datetime', freq='T'))[['passenger_count','trip_duration']].sum().reset_index()
    ts_train['time_min'] = (ts_train['pickup_datetime'] - min(ts_train['pickup_datetime'])).dt.total_seconds()/60
    ts_train['time_sec'] = (ts_train['pickup_datetime'] - min(ts_train['pickup_datetime'])).dt.total_seconds()
    date_time = ts_train.pop('pickup_datetime')
    timestamp_s = date_time.map(pd.Timestamp.timestamp)
    # Daily-periodicity features.
    ts_train['day_sin'] = np.sin(timestamp_s * (2 * np.pi / day))
    ts_train['day_cos'] = np.cos(timestamp_s * (2 * np.pi / day))
    ts_train['pass_count_standardized'] = (ts_train['passenger_count'] - np.mean(ts_train['passenger_count'])) / np.std(ts_train['passenger_count'])
    fft = FFTFeatureExtractor(ts_train['pass_count_standardized'], time_series=date_time)
    plt.show()  # no figures open yet; presumably left for interactive use — TODO confirm
    fft.fft_transform(freqlim_max=.005, timelim_max=48*60)
    # print(ts_train.head())
    x = fft.frequency_table_viewer()
    # print(x)
    filtered_residuals = fft.ifft_transform()
    w = ts_train.join(pd.DataFrame(filtered_residuals.real)).rename(columns={0:'filtered_residuals'})
    print(w)
    # Linear regression of the standardized counts on time + filtered residuals.
    X = w[['time_min','filtered_residuals']]
    y = w['pass_count_standardized']
    modelNew = LinearRegression()
    modelNew.fit(X, y)
    y_pred = modelNew.predict(X)
    N = 24 * 60 * args.num_days  # number of minutes to visualize
    plt.figure(figsize=(10,4))
    plt.plot(X['time_min'][:N], y[:N], linewidth=1, label='Original Signal')
    plt.plot(X['time_min'][:N], y_pred[:N], linewidth=1, label='Predicted Signal')
    plt.legend(loc='upper right')
    plt.suptitle('First {} Days'.format(int(N/24/60)))
    plt.grid()
    plt.xticks(rotation=90)
    plt.tight_layout()
    # plt.show()
    print()
    # Decompose the signal into pure Fourier terms and compare residual stats.
    fft.fourier_terms_df_creator()
    decomposedResult = fft.decompose_df_into_pure_freq(signal=ts_train['pass_count_standardized'], time_min=ts_train['time_min'])
    decomposedResult['FT_All_Std'] = (decomposedResult['FT_All'] - np.mean(decomposedResult['FT_All'])) / np.std(decomposedResult['FT_All'])
    decomposedResult['pass_count_std-FT_All_Std'] = (decomposedResult['pass_count_standardized'] - decomposedResult['FT_All_Std'])
    print("Mean and standard deviation of standardized passenger count")
    print(np.mean(decomposedResult['pass_count_standardized']), np.std(decomposedResult['pass_count_standardized']))
    print("Mean and standard deviation of standardized passenger count subtract standardized total frequency")
    print(np.mean(decomposedResult['pass_count_std-FT_All_Std']), np.std(decomposedResult['pass_count_std-FT_All_Std']))
    print("Mean and standard deviation of Inverse FTT-transformed peaks of FFT-transformed standardized passenger counts")
    print(np.mean(filtered_residuals), np.std(filtered_residuals))
    plt.figure(figsize=(10,4))
    # plt.plot(X['time_min'][:N], y[:N], linewidth=1, label='Original Signal')
    plt.plot(X['time_min'][:N], decomposedResult['pass_count_std-FT_All_Std'][:N], linewidth=1, label='Predicted Signal')
    plt.legend(loc='upper right')
    plt.suptitle('First {} Days'.format(int(N/24/60)))
    plt.grid()
    plt.xticks(rotation=90)
    plt.tight_layout()
    plt.show()
    print()
    print(decomposedResult.head())
if __name__ == "__main__":
    main()
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 11 14:53:06 2021
@author: angus
"""
import numpy as np
from time import sleep
import base64
from io import BytesIO
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import pandas as pd
### Chrome flags required to run headless inside Streamlit/containers
chrome_options = Options()
for _flag in ('--headless', '--no-sandbox', '--disable-dev-shm-usage'):
    chrome_options.add_argument(_flag)
def infinite_query(ticker, xq_exten, sleep_time, freq = '全部', stock_data = False, statement = False):
    '''Heavily Xueqiu-customized scraper: refreshes the page until the
    needed table(s) can be parsed out of the HTML.

    in: str, str, int, str, bool, bool
    out: dataframe or list of dataframes

    ticker     -- stock symbol as it appears in the Xueqiu URL
    xq_exten   -- URL suffix selecting the page/tab to scrape
    sleep_time -- seconds to wait after selecting a frequency
    freq       -- frequency button text to click ('全部' means click nothing)
    stock_data -- True when gathering HKEX stock data (extra button click)
    statement  -- True to page through and merge multi-page statements
    '''
    driver = webdriver.Chrome(options=chrome_options) ### use google chrome
    driver.get("https://xueqiu.com/snowman/S/" + ticker + xq_exten) ### go to website
    sleep(1) ### gives time for page to load. This is a xueqiu specific solution
    # time.sleep(sleep_time) ### gives time for page to load. This is a xueqiu specific solution
    if stock_data == True: ### This is for gathering HKEX stock data
        try:
            int(ticker) ### only numeric (HKEX) tickers get caught up in this logic
            sleep(1) ### gives time for page to load. This is a xueqiu specific solution
            button= driver.find_element_by_xpath('/html/body/div/div[2]/div[2]/div[5]/a')### selects button
            button.click()
        except ValueError:
            # non-numeric ticker: not HKEX, nothing to click
            pass
        else:
            pass
    if freq == '全部': ### '全部' (all) is the default view, no frequency click needed
        pass
    else:
        path = "//span[contains(@class,'btn') and contains(text(), '"+ freq +"')]"
        button= driver.find_element_by_xpath(path)### selects the frequency button
        button.click()
    sleep(sleep_time) ### gives time for page to load. This is a xueqiu specific solution
    html = driver.page_source ## gather and read HTML
    if statement == True:
        ### initialize dataframe that accumulates all statement pages
        statements = pd.DataFrame()
        ### gathers first chart
        html = driver.page_source ## gather and read HTML
        statement1 = None
        # keep refreshing until the page actually contains a parseable table
        while statement1 is None:
            try:
                statement1 = pd.read_html(html)
            except ValueError:
                driver.refresh()
                sleep(4)
                html = driver.page_source## gather and read HTML
                try:
                    statement1 = pd.read_html(html)
                except ValueError:
                    statement1 = None
        statement1 = statement1 [0]
        statement1 = statement1.set_index(statement1.columns [0]) ### sets an index so that the data is merged rather than directly concated
        # statement1 = statement1.iloc [:,1:]
        statements = pd.concat([statements,statement1], ignore_index=False, axis = 1)
        statements = statements.set_index(statements.columns [0]) ### sets an index so that the data is merged rather than directly concated
        # statements = statements.iloc [:,1:]
        statement1l = statement1.values.tolist()
        ### press button to gather next chart
        path = "/html/body/div/div[2]/div[2]/div/div[1]/div[2]/span[2]" ## this presses the 下一页 (next page) button. It uses copy x-path from chrome inspect
        button= driver.find_element_by_xpath(path)### selects button
        button.click()
        sleep(.5)
        html = driver.page_source ## gather and read HTML
        statement2 = pd.read_html(html)
        statement2 = statement2 [0]
        statement2 = statement2.set_index(statement2.columns [0]) ### sets an index so that the data is merged rather than directly concated
        statement2l = statement2.values.tolist()
        ### compare first chart with second chart; identical pages mean the last page was reached
        comparison = statement1l == statement2l
        while comparison is False:
            statement2_edit = statement2.iloc [:,2:]
            statements = pd.concat([statements,statement2_edit], ignore_index=False, axis = 1)
            ### the just-fetched page becomes the reference for the next comparison
            statement1 = statement2
            statement1l = statement1.values.tolist()
            ### press button to gather next chart
            path = "/html/body/div/div[2]/div[2]/div/div[1]/div[2]/span[2]" ## this presses the 下一页 (next page) button. It uses copy x-path from chrome inspect
            button= driver.find_element_by_xpath(path)### selects button
            button.click()
            sleep(1)
            html = driver.page_source ## gather and read HTML
            statement2 = pd.read_html(html)
            statement2 = statement2 [0]
            statement2 = statement2.set_index(statement2.columns [0]) ### sets an index so that the data is merged rather than directly concated
            # statement2 = statement2.iloc [:,1:]
            statement2l = statement2.values.tolist()
            comparison = statement1l == statement2l
        statements = statements.reset_index()
        statements = convert_table(statements)
        table = statements
    else:
        table = None
        # refresh-retry until any table parses out of the page
        while table is None:
            try:
                table = pd.read_html(html)
            except ValueError:
                driver.refresh()
                sleep(4)
                html = driver.page_source## gather and read HTML
                try:
                    table = pd.read_html(html)
                except ValueError:
                    table = None
    driver.delete_all_cookies()
    driver.quit()
    return table
def convert(chinese):
    """Convert a Chinese-numeral string to an int.

    Handles both plain digit runs (一二三 -> 123) and positional forms
    with unit characters (三百二十五 -> 325).
    in: string
    out: int
    """
    numbers = {'零':0, '一':1, '二':2, '三':3, '四':4, '五':5, '六':6, '七':7, '八':8, '九':9, '壹':1, '贰':2, '叁':3, '肆':4, '伍':5, '陆':6, '柒':7, '捌':8, '玖':9, '两':2, '廿':20, '卅':30, '卌':40, '虚':50, '圆':60, '近':70, '枯':80, '无':90}
    units = {'个':1, '十':10, '百':100, '千':1000, '万':10000, '亿':100000000, '拾':10, '佰':100, '仟':1000}
    # First pass: if no unit character occurs, read it as a plain base-10
    # digit sequence.
    total = 0
    digits_only = True
    for pos, ch in enumerate(chinese):
        if ch in units or ch in ('廿', '卅', '卌', '虚', '圆', '近', '枯', '无'):
            digits_only = False
            break
        if ch in numbers:
            total = total * 10 + numbers[ch]
    if digits_only:
        return total
    # Second pass: positional notation. Each digit (or a leading 十) is
    # scaled by the growing run of unit characters that follows it.
    total = 0
    for pos, ch in enumerate(chinese):
        leading_ten = ch == '十' and (pos == 0 or chinese[pos - 1] not in numbers or chinese[pos - 1] == '零')
        if ch in numbers or leading_ten:
            if ch == '十' and (pos == 0 or (ch == '十' and chinese[pos - 1] not in numbers) or chinese[pos - 1] == '零'):
                segment = 10  # bare 十 acts as the digit 1 in the tens place
            else:
                segment = numbers[ch]
            unit = '个'
            for nxt in range(pos + 1, len(chinese)):
                if chinese[nxt] in units and units[chinese[nxt]] >= units[unit]:
                    segment, unit = segment * units[chinese[nxt]], chinese[nxt]
            total += segment
    return total
def convert_table(table):
    """Convert every column after the first from '<number><unit-char>'
    strings into floats.

    The last character of each cell is treated as a Chinese unit suffix;
    it is prefixed with 一 and resolved through :func:`convert`, then
    multiplied back onto the numeric prefix.
    in: dataframe
    out: dataframe
    """
    for column in table.columns[1:]:
        cells = table[column]
        # Split each cell into numeric prefix and one-character unit suffix.
        unit_strings = ('一' + cells.str[-1]).astype(str)
        numeric_part = cells.str[:-1]
        multipliers = pd.DataFrame([convert(unit) for unit in unit_strings],
                                   columns=[column])
        numeric_frame = pd.DataFrame(numeric_part)
        # Blank prefixes become NaN before the float cast.
        numeric_frame = numeric_frame.replace(r'^\s*$', np.nan, regex=True)
        numeric_frame = numeric_frame.astype(float)
        converted = multipliers.mul(numeric_frame.values).round(2)
        table[column] = converted
    return table
def to_excel(df):
    """Serialize a dataframe to the bytes of an in-memory xlsx workbook.

    in: dataframe
    out: processed_data (bytes of the finished workbook)
    """
    output = BytesIO()
    # BUGFIX: ExcelWriter.save() was deprecated and removed in pandas 2.0;
    # the context manager closes and finalizes the workbook on all versions.
    with pd.ExcelWriter(output, engine='xlsxwriter') as writer:
        df.to_excel(writer, sheet_name='Sheet1', index = True, header = False)
    processed_data = output.getvalue()
    return processed_data
def get_table_download_link(df): ### Hack that lets streamlit users download the dataframe
    """Generate an HTML anchor that downloads *df* as ``extract.xlsx``.

    in: dataframe
    out: href string
    """
    workbook_bytes = to_excel(df)
    encoded = base64.b64encode(workbook_bytes).decode()  # bytes b'abc' -> str 'abc'
    return f'<a href="data:application/octet-stream;base64,{encoded}" download="extract.xlsx">Download xlsx</a>'
|
'''
General grab bag of helper functions
'''
import numpy as np
import os
#from .settings import DEFAULT_AWRAL_MASK
from .precision import iround, quantize, aquantize, sanitize_cell, sanitize_geo_array
def _DEP_load_mask(fn_mask):
    '''
    Identify the AWAP cells required for the continental AWRA-L run.
    Returns a list of 2 element tuples, with each element the indices of a AWAP cell.
    '''
    # BUGFIX: referenced undefined ``load_mask_grid``; the helper defined in
    # this module is ``_DEP_load_mask_grid``.
    return list(zip(*np.where(np.logical_not(_DEP_load_mask_grid(fn_mask)))))
def _DEP_load_mask_grid(fn_mask):
    '''
    Load a mask grid from a .flt or .h5 file, dispatching on extension.
    Raises for any other extension.
    '''
    # BUGFIX: called undefined ``_load_mask_flt``/``_load_mask_h5``; the
    # helpers in this module carry the ``_DEP`` prefix.
    ext = os.path.splitext(fn_mask)[1]
    if ext == '.flt':
        return _DEP_load_mask_flt(fn_mask)
    elif ext == '.h5':
        return _DEP_load_mask_h5(fn_mask)
    else:
        raise Exception("unknown mask grid format: %s" % fn_mask)
def _DEP_load_mask_h5(fn_mask):
    '''
    Return a boolean grid: True where the HDF5 mask value is <= 0.
    '''
    import h5py
    # BUGFIX: the file handle was never closed; the [:] read materializes
    # the dataset before the context manager closes the file.
    with h5py.File(fn_mask, 'r') as h:
        return h['parameters']['mask'][:] <= 0
def _DEP_load_mask_flt(fn_mask):
    '''
    Return a boolean grid: True where band 1 of the raster is <= 0.
    '''
    import osgeo.gdal as gd
    dataset = gd.Open(fn_mask)
    band = dataset.GetRasterBand(1)
    return band.ReadAsArray() <= 0
def _DEP_load_meta():
    '''
    Load the AWRA-L output metadata table from the packaged awral_outputs.csv.
    '''
    import pandas as _pd
    import os as _os
    # from settings import AWRAPATH as _AWRAPATH
    #TODO - metadata csv should it be a module
    p = _os.path.join(_os.path.dirname(__file__),'data','awral_outputs.csv')
    # BUGFIX: DataFrame.from_csv was deprecated in pandas 0.21 and removed in
    # 1.0; read_csv with index_col=0, parse_dates=True matches its defaults.
    output_meta = _pd.read_csv(p, index_col=0, parse_dates=True)
    # Read input metadata into dataframe as well and concat it with output metadata
    # input_meta = _pd.read_csv(_os.path.join(_AWRAPATH,"Landscape/Metadata/awraL_inputs.csv"), index_col=0, parse_dates=True)
    # return _pd.concat([output_meta, input_meta])
    return output_meta
def print_error(message):
    """Write *message* (plus a newline) to stderr."""
    import sys
    sys.stderr.write(f"{message}\n")
class IndexGetter:
    '''
    Shorthand factory for index expressions.

    ``IndexGetter()[10:, 5]`` evaluates to ``(slice(10, None), 5)`` --
    it simply hands back whatever appears inside the brackets.
    '''
    def __getitem__(self, item_spec):
        return item_spec
class Indexer:
    '''
    Adapter exposing arbitrary getter/setter callables through the
    subscript protocol: ``obj[idx]`` calls ``getter_fn(idx)`` and
    ``obj[idx] = v`` calls ``setter_fn(idx, v)``.
    '''

    def __init__(self, getter_fn, setter_fn=None):
        # setter_fn may be omitted for read-only indexers
        self.getter_fn = getter_fn
        self.setter_fn = setter_fn

    def __getitem__(self, idx):
        return self.getter_fn(idx)

    def __setitem__(self, idx, value):
        return self.setter_fn(idx, value)
# Module-level convenience instance: ``index[10:, 5]`` -> (slice(10, None), 5)
index = IndexGetter()
def as_int(n):
    """Floor *n* (toward negative infinity) and return it as a builtin int."""
    floored = np.floor(n)
    return int(floored)
def shuffle(source, indices):
    """Return a new list with *source* reordered by the given index sequence."""
    reordered = []
    for position in indices:
        reordered.append(source[position])
    return reordered
"""
@file
@brief Profiling helpers
"""
import os
from io import StringIO
import cProfile
import pstats
import site
def _process_pstats(ps, clean_text):
"""
Converts class `Stats <https://docs.python.org/3/library/
profile.html#pstats.Stats>`_ into something
readable for a dataframe.
"""
def add_rows(rows, d):
tt1, tt2 = 0, 0
for k, v in d.items():
stin = 0
stall = 0
row = {
'file': "%s:%d" % (clean_text(k[0]), k[1]),
'fct': k[2],
'ncalls1': v[0],
'ncalls2': v[1],
'tin': v[2],
'tall': v[3]
}
stin += v[2]
stall += v[3]
if len(v) == 5:
t1, t2 = add_rows(rows, v[-1])
stin += t1
stall += t2
row['cum_tin'] = stin
row['cum_tall'] = stall
rows.append(row)
tt1 += stin
tt2 += stall
return tt1, tt2
rows = []
add_rows(rows, ps.stats)
return rows
def profile(fct, sort='cumulative', rootrem=None, as_df=False,
            pyinst_format=None, **kwargs):
    """
    Profiles the execution of a function.

    @param      fct             function to profile
    @param      sort            see `sort_stats <https://docs.python.org/3/library/
                                profile.html#pstats.Stats.sort_stats>`_
    @param      rootrem         root to remove in filenames
    @param      as_df           return the results as a dataframe and not text
    @param      pyinst_format   format for :epkg:`pyinstrument`, if not empty,
                                the function uses this module or raises an exception if not
                                installed, the options are *text*, *textu* (text with colors),
                                *json*, *html*
    @param      kwargs          additional parameters used to create the profiler
    @return                     raw results, statistics text dump (or dataframe is *as_df* is True)

    .. plot::

        import matplotlib.pyplot as plt
        from pyquickhelper.pycode.profiling import profile
        from pyquickhelper.texthelper import compare_module_version

        def fctm():
            return compare_module_version('0.20.4', '0.22.dev0')

        pr, df = profile(lambda: [fctm() for i in range(0, 1000)], as_df=True)
        ax = df[['namefct', 'cum_tall']].head(n=15).set_index(
            'namefct').plot(kind='bar', figsize=(8, 3), rot=30)
        ax.set_title("example of a graph")
        for la in ax.get_xticklabels():
            la.set_horizontalalignment('right');
        plt.show()
    """
    # Default path: stdlib cProfile; the pyinstrument branch is below.
    if pyinst_format is None:
        pr = cProfile.Profile(**kwargs)
        pr.enable()
        fct()
        pr.disable()
        # Dump the textual statistics into an in-memory buffer.
        s = StringIO()
        ps = pstats.Stats(pr, stream=s).sort_stats(sort)
        ps.print_stats()
        res = s.getvalue()
        # Locate site-packages so absolute paths can be shortened; some
        # embedded interpreters lack getsitepackages, hence the fallback
        # that derives it from numpy's location.
        try:
            pack = site.getsitepackages()
        except AttributeError:  # pragma: no cover
            import numpy
            pack = os.path.normpath(os.path.abspath(
                os.path.join(os.path.dirname(numpy.__file__), "..")))
            pack = [pack]
        pack_ = os.path.normpath(os.path.join(pack[-1], '..'))

        def clean_text(res):
            # Shorten well-known prefixes, then apply the caller-supplied
            # rootrem replacement(s): a single string or a list of strings /
            # (old, new) pairs.
            res = res.replace(pack[-1], "site-packages")
            res = res.replace(pack_, "lib")
            if rootrem is not None:
                if isinstance(rootrem, str):
                    res = res.replace(rootrem, '')
                else:
                    for sub in rootrem:
                        if isinstance(sub, str):
                            res = res.replace(sub, '')
                        elif isinstance(sub, tuple) and len(sub) == 2:
                            res = res.replace(sub[0], sub[1])
                        else:
                            raise TypeError(
                                "rootrem must contains strings or tuple not {0}".format(rootrem))
            return res

        if as_df:
            # Build a dataframe aggregated by (short function name, file).
            def better_name(row):
                # Long function names stand alone; short ones get the file
                # basename prefixed for disambiguation.
                if len(row['fct']) > 15:
                    return "{}-{}".format(row['file'].split(':')[-1], row['fct'])
                name = row['file'].replace("\\", "/")
                return "{}-{}".format(name.split('/')[-1], row['fct'])

            rows = _process_pstats(ps, clean_text)
            import pandas
            df = pandas.DataFrame(rows)
            df = df[['fct', 'file', 'ncalls1', 'ncalls2', 'tin', 'cum_tin',
                     'tall', 'cum_tall']]
            df['namefct'] = df.apply(lambda row: better_name(row), axis=1)
            df = df.groupby(['namefct', 'file'], as_index=False).sum().sort_values(
                'cum_tall', ascending=False).reset_index(drop=True)
            return ps, df
        else:
            res = clean_text(res)
            return ps, res
    elif as_df:
        raise ValueError(  # pragma: no cover
            "as_df is not a compatible option with pyinst_format")
    else:
        # pyinstrument branch: third-party profiler with selectable output.
        try:
            from pyinstrument import Profiler
        except ImportError as e:  # pragma: no cover
            raise ImportError("pyinstrument is not installed.") from e

        profiler = Profiler(**kwargs)
        profiler.start()
        fct()
        profiler.stop()

        if pyinst_format == "text":
            return profiler, profiler.output_text(unicode=False, color=False)
        elif pyinst_format == "textu":
            return profiler, profiler.output_text(unicode=True, color=True)
        elif pyinst_format == "json":
            from pyinstrument.renderers import JSONRenderer
            return profiler, profiler.output(JSONRenderer())
        elif pyinst_format == "html":
            return profiler, profiler.output_html()
        else:
            raise ValueError("Unknown format '{}'.".format(pyinst_format))
|
import os
from git import Repo, Actor
from conda_build.conda_interface import (VersionOrder, MatchSpec, get_installed_version, root_dir, get_index, Resolve)
from .utils import tmp_directory
def update_me():
    """
    Update the webservice on Heroku by pushing a commit to this repo.

    Compares the installed versions of the pinned packages with the newest
    conda-forge builds; when any is outdated, pushes an empty commit to the
    webservice repository to trigger a rebuild.
    """
    pkgs = ["conda-build", "nwb-extensions-smithy", "conda-forge-pinning"]
    installed_vers = get_installed_version(root_dir, pkgs)
    index = get_index(channel_urls=['conda-forge'])
    r = Resolve(index)
    # Collect packages whose newest conda-forge version beats the installed one.
    to_install = {}
    for pkg in pkgs:
        available_versions = [p.version for p in r.get_pkgs(MatchSpec(pkg))]
        available_versions = sorted(available_versions, key=VersionOrder)
        latest_version = available_versions[-1]
        print(latest_version, installed_vers[pkg])
        if VersionOrder(latest_version) > VersionOrder(installed_vers[pkg]):
            to_install[pkg] = latest_version
    if not to_install:
        return
    with tmp_directory() as tmp_dir:
        repo_name = "nwb-extensions-webservices"
        clone_dir = os.path.join(tmp_dir, repo_name)
        # Token-authenticated push URL (GH_TOKEN must be set in the environment).
        url = "https://{}@github.com/conda-forge/{}.git".format(
            os.environ['GH_TOKEN'], repo_name)
        repo = Repo.clone_from(url, clone_dir)
        msg_vers = ", ".join(["{}={}".format(k, v) for k, v in to_install.items()])
        author = Actor("nwb-extensions-admin", "nwbexten" + "sions" + "@g" + "mail.c" + "om")
        # BUGFIX: ``author`` was constructed but never used, so commits were
        # attributed to whatever the local git config held; pass it explicitly.
        repo.index.commit("Empty commit to rebuild for {}".format(msg_vers),
                          author=author, committer=author)
        repo.git.push("origin", "master")
|
from random import choice, randint

# Test-data generator: a path graph of N nodes with uniform edge cost 100,
# terminated by a "0 0" sentinel line.
N = 10000
K = 233333333
CMAX = 1000
T = [1]
# Previous variant (kept for reference): random tree edges with random costs.
# for i in range(1, N):
#     v = i + 1
#     f = choice(T)
#     L.append("{} {} {}".format(f, v, randint(1, CMAX)))
#     T.append(v)
L = ["{} {} 100".format(node, node + 1) for node in range(1, N)]
with open("data.in", "w") as out:
    out.write("{} {}\n".format(N, K))
    out.write("\n".join(L))
    out.write("\n0 0\n")
|
from pyxmpp.clientstream import ClientStream
from pyxmpp.exceptions import ClientStreamError
from pyxmpp.exceptions import FatalStreamError
from dispatcher import StreamDispatcher
import logging
class ClientStreamAsyncore(ClientStream):
    """ClientStream variant whose raw writes are queued on an asyncore-style
    dispatcher buffer instead of being written to a blocking socket."""

    def __init__(self, jid, password=None, server=None, port=None,
                 auth_methods=("sasl:DIGEST-MD5",), tls_settings=None, keepalive=0, owner=None):
        ClientStream.__init__(
            self, jid=jid, password=password, server=server, port=port, auth_methods=auth_methods,
            tls_settings=tls_settings, keepalive=keepalive, owner=owner)
        self.__logger = logging.getLogger("iabot.xmpp.ClientStreamAsyncore")

    def _write_raw(self, data):
        # Append outgoing bytes to the dispatcher buffer; the dispatcher is
        # responsible for flushing them to the socket asynchronously.
        logging.getLogger("pyxmpp.Stream.out").debug("OUT: %r", data)
        self.dispatcher.buffer += data

    def _connect(self, server=None, port=None):
        """Resolve server/port defaults and attach a StreamDispatcher.

        Raises ClientStreamError for incomplete JIDs or when an SRV
        (service) lookup would be required.
        """
        if not self.my_jid.node or not self.my_jid.resource:
            # BUGFIX: use the call-style raise (valid on Python 2 and 3)
            # instead of the Python-2-only ``raise Cls, msg`` form.
            raise ClientStreamError("Client JID must have username and resource")
        if not server:
            server = self.server
        if not port:
            port = self.port
        if server:
            self.__logger.debug("server: %r", (server,))
            service = None
        else:
            service = "xmpp-client"
        if port is None:
            port = 5222
        if server is None:
            server = self.my_jid.domain
        self.me = self.my_jid
        # Having to deal with a service would be painful, and isn't needed
        if service:
            raise ClientStreamError("IABot cannot deal with SRV record lookups")
        if self.my_jid.domain is None:
            # BUGFIX: referenced undefined name ``addr``; the resolved
            # ``server`` is the address we connect to — TODO confirm intent.
            to = str(server)
        else:
            to = self.my_jid.domain
        self.dispatcher = StreamDispatcher(self, server, port)
        self._connect_socket(sock=True, to=to)
        # self.initiator=1
        # self._send_stream_start()

    # We need support for custom ``from`` stanzas: only fill in the sender
    # when the stanza does not already carry one.
    def fix_out_stanza(self, stanza):
        if not stanza.get_from():
            stanza.set_from(self.my_jid)
|
#Author: Ulya Bayram
#email : ulya.bayram@comu.edu.tr
#
# The original versions of these scripts can be found from the StellarGraph website
#------------------------------------------------------------------------------------------------------
#
#------------------------------------------------------------------------------------------------------
#
#These code are writen for a research project, published in OIR. If you use any of them, please cite:
#Ulya Bayram, Runia Roy, Aqil Assalil, Lamia Ben Hiba,
#"The Unknown Knowns: A Graph-Based Approach for Temporal COVID-19 Literature Mining",
#Online Information Review (OIR), COVID-19 Special Issue, 2021.
#
#------------------------------------------------------------------------------------------------------
# Code to split the graph into training and test parts for running node2vec, and other link prediction methods
import pandas as pd
#from codetiming import Timer
import numpy as np
import networkx as nx
import stellargraph as sg
from math import isclose
import os
from stellargraph import StellarGraph, datasets
from stellargraph.data import EdgeSplitter
from collections import Counter
from sklearn.model_selection import train_test_split
def splitSampleGraph():
    """Sample positive/negative edges from the post-COVID graph and write
    train/validation/test splits to CSV, five independent runs."""
    print('Graph post 2020')
    full_graph = nx.read_gml('../../graphs/graph_postCOVID_final_netx.gml.gz')

    def _dump(edges, labels, path):
        # Persist one edge split as (node1, node2, labels) rows.
        arr = np.array(edges)
        frame = pd.DataFrame({'node1': arr[:, 0], 'node2': arr[:, 1], 'labels': labels})
        frame.to_csv(path)

    for run in range(1, 6):
        print('Current run ' + str(run))
        # Sample a fraction p of edges (plus an equal number of negatives);
        # the splitter returns the reduced graph with sampled links removed.
        splitter = EdgeSplitter(full_graph)
        reduced_graph, sampled_edges, sample_labels = splitter.train_test_split(p=0.5, method="global")
        nx.write_gml(reduced_graph, '../../graphs/graph_sampled_' + str(run) + '.gml.gz')
        del reduced_graph
        # Split 1: carve off the test set from the rest.
        comp_edges, test_edges, comp_labels, test_labels = train_test_split(
            sampled_edges, sample_labels, train_size=0.65, test_size=0.35)
        # Split 2: divide the remainder into training and validation.
        train_edges, val_edges, train_labels, val_labels = train_test_split(
            comp_edges, comp_labels, train_size=0.77, test_size=0.23)
        _dump(train_edges, train_labels, '../../graphs/graph_train_edges_sampled_' + str(run) + '.csv')
        print('Number of training samples (positive) ' + str(len(train_labels)/2.0))
        _dump(val_edges, val_labels, '../../graphs/graph_val_edges_sampled_' + str(run) + '.csv')
        print('Number of validation samples (positive) ' + str(len(val_labels)/2.0))
        _dump(test_edges, test_labels, '../../graphs/graph_test_edges_sampled_' + str(run) + '.csv')
        print('Number of test samples (positive) ' + str(len(test_labels)/2.0))
|
import discord
import datetime as dt
from datetime import datetime, timedelta, timezone
import time
from discord.ext import tasks, commands
import random
import os
client = discord.Client()
# member name -> epoch seconds of their most recent voice-channel join
pretime_dict = {}
# member name -> accumulated voice-chat seconds for the current week
memberlist = {}
# member name -> number of days this week the member joined voice chat
inmemberlist = {}
# Japan Standard Time (UTC+9), used for all schedule checks
JST = timezone(timedelta(hours=+9), 'JST')
# bot token is injected via environment variable
token = os.environ['DISCORD_BOT_TOKEN']
# Bot startup handler
@client.event
async def on_ready():
    """Announce startup, reset both in-memory tallies, and set presence."""
    print('起動完了しました!')
    print(client.user.name)
    print(client.user.id)
    print('------')
    channel = client.get_channel(682141572317446167)
    await channel.send('今から活動開始します!')
    # A restart wipes the in-memory dicts, so rebuild them and tell the channel.
    await Resetvclist()
    await channel.send('再起動に伴い総接続時間をリセットしました')
    await Resetinlist()
    await channel.send('再起動に伴いIn率をリセットしました')
    activity = discord.Game(name='おしごと🍎')
    await client.change_presence(activity=activity)
# Reset the weekly total-connection-time tally (also run on a schedule)
async def Resetvclist():
    """Rebuild ``memberlist`` with a zeroed entry for every non-bot member."""
    global memberlist
    membername = [member.name for member in client.get_all_members() if not member.bot]  # non-bot user names become the dict keys
    # BUGFIX: the zero list was hard-coded to 17 entries, so zip() silently
    # dropped members beyond the 17th and later lookups raised KeyError.
    memberlist = dict.fromkeys(membername, 0)
    channel = client.get_channel(682141572317446167)
    await channel.send('総接続時間をリセットしたよ!')
async def Resetinlist():
    """Rebuild ``inmemberlist`` with a zeroed entry for every non-bot member."""
    global inmemberlist
    inmembername = [member.name for member in client.get_all_members() if not member.bot]  # non-bot user names become the dict keys
    # BUGFIX: the zero list was hard-coded to 17 entries, so zip() silently
    # dropped members beyond the 17th and later lookups raised KeyError.
    inmemberlist = dict.fromkeys(inmembername, 0)
    channel = client.get_channel(682141572317446167)
    await channel.send('In率をリセットしたよ!')
# Post each member's weekly total voice-chat time to the channel
async def Sendvclist():
    """Post every member's accumulated call seconds, then flag who is
    above/below one hour (3600 s)."""
    channel = client.get_channel(682141572317446167)
    for memberkey, membervalue in memberlist.items():
        await channel.send(f'ユーザー名: {memberkey} 通話時間: {membervalue} 秒')
    # One message per member; {name} renders as a one-element set literal,
    # which is how the message intentionally displays the name.
    for memberkey60, membervalue60 in memberlist.items():
        if membervalue60 >= 3600:
            vc60 = {memberkey60}
            await channel.send(f'総接続時間が60分以上のユーザー: {vc60}')
        elif membervalue60 < 3600:
            vc0 = {memberkey60}
            await channel.send(f'総接続時間が60分未満のユーザー: {vc0}')
async def Sendinlist():
    """Post every member's weekly in-rate (days), then flag who reached
    4 or more days."""
    channel = client.get_channel(682141572317446167)
    for inkey, invalue in inmemberlist.items():
        await channel.send(f'ユーザー名: {inkey} In率: {invalue} 日')
    # One message per member; {name} renders as a one-element set literal.
    for inkey4, invalue4 in inmemberlist.items():
        if invalue4 >= 4:
            in4 = {inkey4}
            await channel.send(f'In率が4日以上のユーザー: {in4}')
        elif invalue4 < 4:
            in0 = {inkey4}
            await channel.send(f'In率が4日未満のユーザー: {in0}')
# Detect and record which members joined voice chat during the past day
async def Incheck():
    """Increment the in-rate day counter for every member with any call time."""
    global inmemberlist
    channel = client.get_channel(682141572317446167)
    for memberkey0, membervalue0 in memberlist.items():  # anyone with >0 seconds counts as "in"
        # BUGFIX: the original ``else: return`` aborted the whole scan (and
        # skipped the confirmation message) at the first member with zero
        # call time; members with no time are simply skipped instead.
        if membervalue0 > 0:
            inmemberlist[memberkey0] = inmemberlist[memberkey0] + 1
    print(inmemberlist)
    await channel.send('昨日の0時0分より今までにInした人を記録したよ!')
# Runs once a minute; on Monday 00:00 JST, publish and reset the weekly stats
@tasks.loop(seconds=60)
async def weekloop():
    """Weekly rollover: dump voice-time and in-rate lists, then clear both."""
    checktime = datetime.now(JST).strftime('%a-%H:%M')
    channel = client.get_channel(682141572317446167)
    if checktime == 'Mon-00:00':
        await channel.send('月曜日の0時0分になったから総接続時間を出力してデータをクリアするね!')
        await Sendvclist()
        await Resetvclist()
        await Sendinlist()
        await Resetinlist()
# Runs once a minute; at 01:00 JST record who was in voice chat yesterday
@tasks.loop(seconds=60)
async def dayloop():
    """Daily check at 01:00 JST: record which members joined voice chat."""
    checkday = datetime.now(JST).strftime('%H:%M')
    if checkday == '01:00':
        channel = client.get_channel(682141572317446167)
        await channel.send('前日、Inしたかどうかを検知して記録するね!')
        await Incheck()
# Handler for every voice-channel join / leave / move event
@client.event
async def on_voice_state_update(member, before, after):
    """Announce joins/leaves/moves and accumulate per-member call time."""
    global pretime_dict # join-timestamp dict (member name -> epoch seconds)
    global memberlist # weekly call-time tally (member name -> seconds)
    channel = client.get_channel(682141572317446167)
    # Channel ids of the private one-on-one ("個通") rooms
    oneroom = [681867519379767322, 681867557627756613, 681867619246145550, 681867705329909765, 681867763505037321, 681867861937225729, 681867973127962792, 681868176501506079]
    if member.guild.id == 681853809789501440 and (before.channel != after.channel): # only react to the target server
        if not member.bot: # ignore bot accounts
            print('ボイスチャンネルに変化があったよ!')
            now = datetime.now(JST)
            if before.channel is None: # member joined a channel
                if not after.channel.id in oneroom:
                    # Record the join time so the leave handler can compute duration.
                    pretime_dict[member.name] = time.time()
                    msg = f'{now:%m/%d-%H:%M} に {member.name} さんが {after.channel.name} に参加したよ!' # join message
                    await channel.send(msg)
                    print(msg)
                    print(f'入室を検知したため辞書を更新したよ!{pretime_dict}')
                # Member joined a private room directly
                elif after.channel.id in oneroom:
                    pretime_dict[member.name] = time.time()
                    msg = f'{now:%m/%d-%H:%M} に {member.name} さんが 個通部屋 {after.channel.name} に入室したよ!' # private-room join message
                    await channel.send(msg)
                    print(msg)
                    print(f'入室を検知したため辞書を更新したよ!{pretime_dict}')
            elif after.channel is None: # member left voice chat entirely
                duration_time = time.time() - pretime_dict[member.name] # elapsed seconds since join; assumes a join was recorded — TODO confirm restart-safety
                roundingtime = round((duration_time / 1), 1) # round to one decimal place
                # Manual conversion of the duration into hours / minutes / seconds
                endhours = 0
                endminutes = 0
                endseconds = roundingtime
                # Between one minute and one hour
                if 3600 > roundingtime >= 60:
                    endminutes = roundingtime / 60
                    endseconds = roundingtime % 60
                # One hour or longer
                elif roundingtime >= 3600:
                    endhours = roundingtime / 3600
                    interimendminutes = roundingtime % 3600
                    endminutes = interimendminutes / 60
                    endseconds = interimendminutes % 60
                # Leave message including the formatted call duration
                msg = f'{now:%m/%d-%H:%M} に {member.name} さんが {before.channel.name} から退出したよ! 通話時間は {int(endhours)} 時間 {int(endminutes)} 分 {int(endseconds)} 秒だったよ!'
                await channel.send(msg)
                print(msg)
                print(f'退室を検知したため辞書を更新したよ!{pretime_dict}')
                # Accumulate this call into the weekly tally
                memberlist[member.name] = memberlist[member.name] + int(roundingtime)
                await channel.send('総接続時間を更新したよ!')
            # Moved into a private room from another channel
            elif after.channel.id in oneroom:
                msg = f'{now:%m/%d-%H:%M} に {member.name} さんが 個通部屋 {after.channel.name} に入室したよ!'
                await channel.send(msg)
                print(msg)
            # Moved between two ordinary channels
            elif before.channel != after.channel:
                msg = f'{now:%m/%d-%H:%M} に {member.name} さんが {before.channel.name} から {after.channel.name} に移動したよ!'
                await channel.send(msg)
                print(msg)
# Pool of conversation topics served (randomly) by the ?wadai command
wadai = [ # topic list
    "修学旅行みたいに恋バナ...とかどう?",
    "趣味の話とかしようよ!",
    "自己紹介に書いてない自分のこととかあったりしない?",
    "フォルダのネタ画像出してみて笑!",
    "推しの子とかの話はどうかな?",
    "最近欲しいものってなんかあるー?",
    "明日の予定教えて!",
    "今週末の予定は!?",
    "将来の夢の話しよう!",
    "お悩み相談室開いて...みる?",
    "縛りありのしりとりとかしてみようよ!",
    "わざわざ言うほどでもないけど自慢できることある?",
    "心理テストとかやってみない?",
    "なにかゲームしようよ! そうだなぁ、英語禁止ゲームとかどう?",
    "私はゲームがしたいなぁーー 濁音禁止ゲームやりたいな!",
    "過去の恋愛の失敗談とか...ある?",
    "学生時代にやってた部活とかある?",
    "子供のころどんな性格だった?",
    "小学生の時どんな遊びが流行ったー?",
    "好きだった給食ってなに!?",
    "駄菓子何が好きだった?",
    "卒業文集何書いた?",
    "移動教室で一番テンション上がったとこどこ!?",
    "好きな食べ物なに?",
    "出身地どこ?",
    "出身地の観光スポット教えてよ!",
    "方言で喋ってみよ!",
    "休日何してるー?",
    "今なら笑える失敗談とか話してみる?",
    "生で見たことある有名人いる?",
    "もし宝くじで一億円当たったらどう使う?",
    "もしも、1日だけ性別が変わったら何をしてみたい?",
    "飼うなら猫?犬?どっち!",
    "ハンバーガーならマクド派?モス派?",
    "目玉焼きに何かける?",
    "朝ごはん、パン派?ごはん派?",
    "学生時代、部活とかやってた?",
    "学生の頃の、これはうちの学校だけだろ!っていう校則とかあったw?",
    "名前の由来教えてよ!",
    "今日の夜ごはん何?",
    "最後に付き合ったのいつ?",
    ]
# ここからコマンド関連の処理
@client.event
async def on_message(message):
    """Handle chat commands.

    Supported commands: ?help, ?wadai, ?count, ?members, ?resetvclist,
    ?resetinlist, ?vc, ?in.  Messages sent by the bot itself are ignored.
    Mutates the module-level ``memberlist`` / ``inmemberlist`` dicts.
    """
    global memberlist
    global inmemberlist
    # Ignore our own messages so the bot never reacts to itself.
    if client.user == message.author:
        return
    # ?help: show a usage embed listing every command.
    if message.content == '?help':
        authorname = 'れんあいのくにの乙女🍎'
        authorurl = 'https://github.com/WinterProduce/discordpy-startup/blob/master/discordbot.py'
        authoricon = 'https://cdn.discordapp.com/attachments/508795281299603469/684325828112547850/image_-_2.jpg'
        embed = discord.Embed(title='私の使い方だよ!', description='コマンドと使い方をお見せするね!', color=0X0000FF)
        embed.add_field(name='?help', value='あなたが今見ているこれを表示するよ!', inline=False)
        embed.add_field(name='?wadai', value='みんなに話題を提供するよ!', inline=False)
        embed.add_field(name='?count', value='サーバーのメンバーカウントを表示するよ!', inline=False)
        embed.add_field(name='?members', value='メンバー一覧を表示するよ!', inline=False)
        embed.add_field(name='?vc', value='全員のおしゃべりした時間を表示するよ!', inline=False)
        embed.add_field(name='?in', value='全員のIn率を表示するよ!', inline=False)
        embed.add_field(name='?resetvclist', value='総接続時間をリセットするよ!', inline=False)
        embed.add_field(name='?resetinlist', value='In率リストをリセットするよ!', inline=False)
        embed.set_thumbnail(url='https://cdn.discordapp.com/attachments/508795281299603469/684324816525983775/ERn70g_UUAAUx-1.png')
        embed.set_author(name=authorname, url=authorurl, icon_url=authoricon)
        await message.channel.send(embed=embed)
    # ?wadai: send one random topic from the wadai list.
    if message.content == '?wadai':
        choice = random.choice(wadai)
        await message.channel.send(choice)
    # ?count: report total members, humans, and bots separately.
    if message.content == '?count':
        guild = message.guild
        member_count = guild.member_count
        await message.channel.send(f'メンバー数:{member_count}')
        user_count = sum(1 for member in guild.members if not member.bot)
        await message.channel.send(f'ユーザ数:{user_count}')
        bot_count = sum(1 for member in guild.members if member.bot)
        await message.channel.send(f'BOT数:{bot_count}')
    # ?members: list every tracked member name.
    if message.content == '?members':
        # BUG FIX: was "for memberkey, in memberlist.keys():" which tried to
        # unpack each key (a str) as a 1-tuple and raised ValueError.
        for memberkey in memberlist.keys():
            await message.channel.send(f'メンバー一覧 : {memberkey}')
    # ?resetvclist: zero the accumulated voice-time dict (admins only).
    if message.content == '?resetvclist':
        if message.author.guild_permissions.administrator:
            membername = [member.name for member in client.get_all_members() if not member.bot]
            # BUG FIX: previously zipped against a hard-coded list of 17
            # zeros, silently dropping every member past the 17th.
            memberlist = dict.fromkeys(membername, 0)
            await message.channel.send('総接続時間をリセットしたよ!')
        else:
            await message.channel.send('君の権限だと実行できないよ!')
    # ?resetinlist: zero the In-rate dict (admins only).
    if message.content == '?resetinlist':
        if message.author.guild_permissions.administrator:
            inmembername = [member.name for member in client.get_all_members() if not member.bot]
            # BUG FIX: same hard-coded 17-zero list as above.
            inmemberlist = dict.fromkeys(inmembername, 0)
            await message.channel.send('In率リストをリセットしたよ!')
        else:
            await message.channel.send('君の権限だと実行できないよ!')
    # ?vc: report per-member voice time, then classify around the 60-min mark.
    if message.content == '?vc':
        channel = client.get_channel(682141572317446167)
        for memberkey, membervalue in memberlist.items():
            await channel.send(f'ユーザー名: {memberkey} 通話時間: {membervalue} 秒')
        for memberkey60, membervalue60 in memberlist.items():
            # BUG FIX: the name was previously wrapped in a one-element set,
            # so messages rendered as {'name'} instead of the bare name.
            if membervalue60 >= 3600:
                await channel.send(f'総接続時間が60分以上のユーザー: {memberkey60}')
            else:
                await channel.send(f'総接続時間が60分未満のユーザー: {memberkey60}')
    # ?in: report per-member In-rate, then classify around the 4-day mark.
    if message.content == '?in':
        channel = client.get_channel(682141572317446167)
        for inkey, invalue in inmemberlist.items():
            await channel.send(f'ユーザー名: {inkey} In率: {invalue} 日')
        for inkey4, invalue4 in inmemberlist.items():
            # BUG FIX: same one-element-set rendering issue as ?vc.
            if invalue4 >= 4:
                await channel.send(f'In率が4日以上のユーザー: {inkey4}')
            else:
                await channel.send(f'In率が4日未満のユーザー: {inkey4}')
# Start the periodic background loops (weekly / daily tasks).
weekloop.start()
dayloop.start()
# Start the bot; blocks until the client disconnects.
client.run(token)
|
"""
Lightgbm algorithm wrapper.
"""
import lightgbm as lgb
def lgb_train_cv(params, df_train, df_val, predictors, target='target',
                 objective='binary', metrics='auc', feval=None,
                 early_stopping_rounds=20, num_boost_round=3000,
                 verbose_eval=10, categorical_features=None):
    """Train an lgb model with early stopping on a validation set.

    :param params: parameters used to train the lgb model (override defaults)
    :type params: dict
    :param df_train: input dataframe
    :type df_train: pd.DataFrame
    :param df_val: cross-validation dataframe
    :type df_val: pd.DataFrame
    :param predictors: list of predictor columns
    :type predictors: list[str]
    :param target: name of target column
    :type target: str
    :param objective: objective to train on.
    :type objective: str
    :param metrics: evaluation metrics to be monitored while CV
    :type metrics: str
    :param feval: customized evaluation function
    :type feval: callable or None
    :param early_stopping_rounds: a value activates early stopping
    :type early_stopping_rounds: int or None
    :param num_boost_round: number of boosting iterations
    :type num_boost_round: int
    :param verbose_eval: print the eval metric every this many rounds
        (or True/False).
    :type verbose_eval: int or bool
    :param categorical_features: list of categorical columns
    :type categorical_features: list[str]
    :returns: the boosted model, the estimated ideal number of boosting
        rounds, and the cross-validation score.
    :rtype: tuple(lightgbm.Booster, int, float)
    """
    lgb_params = {
        'boosting_type': 'gbdt',
        'objective': objective,
        # BUG FIX: 'metric' appeared twice in this dict literal; the second
        # entry silently overwrote the first. Kept a single entry.
        'metric': metrics,
        'learning_rate': 0.2,
        # 'is_unbalance': 'true',  # replaced with scale_pos_weight
        'num_leaves': 31,  # should be smaller than 2^(max_depth)
        'max_depth': -1,  # -1 means no limit
        'min_child_samples': 20,  # minimum data needed in a child (min_data_in_leaf)
        'max_bin': 255,  # number of bucketed bins for feature values
        'subsample': 0.6,  # subsample ratio of the training instances
        'subsample_freq': 0,  # frequency of subsample; <=0 disables it
        'colsample_bytree': 0.3,  # subsample ratio of columns per tree
        'min_child_weight': 5,  # min sum of instance weight (hessian) in a leaf
        'subsample_for_bin': 200000,  # number of samples for constructing bins
        'min_split_gain': 0,  # min_gain_to_split regularization
        'reg_alpha': 0,  # L1 regularization term on weights
        'reg_lambda': 0,  # L2 regularization term on weights
        'nthread': 7,
        'verbose': 0,
    }
    lgb_params.update(params)  # caller-supplied params override the defaults

    print("Preparing validation datasets")
    xgtrain = lgb.Dataset(
        df_train[predictors].values, label=df_train[target].values,
        feature_name=predictors, categorical_feature=categorical_features
    )
    xgvalid = lgb.Dataset(
        df_val[predictors].values, label=df_val[target].values,
        feature_name=predictors, categorical_feature=categorical_features
    )

    evals_results = {}
    bst1 = lgb.train(
        lgb_params, xgtrain, valid_sets=[xgtrain, xgvalid],
        valid_names=['train', 'valid'],
        evals_result=evals_results,
        num_boost_round=num_boost_round,
        early_stopping_rounds=early_stopping_rounds,
        # BUG FIX: verbose_eval was accepted as a parameter but a hard-coded
        # 10 was passed here; honor the caller's value.
        verbose_eval=verbose_eval, feval=feval
    )

    print("\nModel Report")
    print("bst1.best_iteration: ", bst1.best_iteration)
    # best_iteration is 1-based; the metric history list is 0-based.
    eval_score = evals_results['valid'][metrics][bst1.best_iteration - 1]
    print(metrics + ":", eval_score)
    return (bst1, bst1.best_iteration, eval_score)
/usr/local/lib/python2.7/_abcoll.py |
# Mapping of GDAL to Numpy data types.
#
# Since 0.13 we are not importing numpy here and data types are strings.
# Happily strings can be used throughout Numpy and so existing code will
# not break.
#
# Within Rasterio, to test data types, we use Numpy's dtype() factory to
# do something like this:
#
#   if np.dtype(destination.dtype) == np.dtype(rasterio.uint8): ...
#
bool_ = 'bool'
ubyte = uint8 = 'uint8'
uint16 = 'uint16'
int16 = 'int16'
uint32 = 'uint32'
int32 = 'int32'
float32 = 'float32'
float64 = 'float64'
complex_ = 'complex'
complex64 = 'complex64'
complex128 = 'complex128'
# Not supported:
# GDT_CInt16 = 8, GDT_CInt32 = 9, GDT_CFloat32 = 10, GDT_CFloat64 = 11
dtype_fwd = {
    0: None,  # GDT_Unknown
    1: ubyte,  # GDT_Byte
    2: uint16,  # GDT_UInt16
    3: int16,  # GDT_Int16
    4: uint32,  # GDT_UInt32
    5: int32,  # GDT_Int32
    6: float32,  # GDT_Float32
    7: float64,  # GDT_Float64
    8: complex_,  # GDT_CInt16
    9: complex_,  # GDT_CInt32
    10: complex64,  # GDT_CFloat32
    11: complex128}  # GDT_CFloat64
# Reverse map: dtype name -> GDAL type code. Note 'complex' maps to both
# 8 and 9 in dtype_fwd, so the later entry (9) wins here.
dtype_rev = dict((v, k) for k, v in dtype_fwd.items())
dtype_rev['uint8'] = 1
typename_fwd = {
    0: 'Unknown',
    1: 'Byte',
    2: 'UInt16',
    3: 'Int16',
    4: 'UInt32',
    5: 'Int32',
    6: 'Float32',
    7: 'Float64',
    8: 'CInt16',
    9: 'CInt32',
    10: 'CFloat32',
    11: 'CFloat64'}
typename_rev = dict((v, k) for k, v in typename_fwd.items())
def _gdal_typename(dt):
    """Map a dtype name (or a numpy scalar type) to its GDAL type name."""
    try:
        code = dtype_rev[dt]
    except KeyError:
        # dt was not a plain name; treat it as a numpy scalar type and
        # look up its instantiated dtype name instead.
        code = dtype_rev[dt().dtype.name]
    return typename_fwd[code]
def check_dtype(dt):
    """Return True if dt (a dtype name or numpy scalar type) is supported."""
    if dt not in dtype_rev:
        try:
            return dt().dtype.name in dtype_rev
        except Exception:
            # FIX: was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt. dt is neither a known name nor a numpy
            # scalar type here.
            return False
    return True
def get_minimum_int_dtype(values):
    """
    Uses range checking to determine the minimum integer data type required
    to represent values.

    :param values: numpy array (anything with .min()/.max())
    :return: named data type (a string, e.g. 'uint16') that can later be
        used to create a numpy dtype
    """
    min_value = values.min()
    max_value = values.max()

    if min_value >= 0:
        # Unsigned types, smallest first. (The literals returned here are
        # identical to the module-level dtype name constants.)
        if max_value <= 255:
            return 'uint8'
        elif max_value <= 65535:
            return 'uint16'
        elif max_value <= 4294967295:
            return 'uint32'
        # BUG FIX: previously fell through and returned None for values
        # above the uint32 range.
        return 'uint64'
    elif min_value >= -32768 and max_value <= 32767:
        return 'int16'
    elif min_value >= -2147483648 and max_value <= 2147483647:
        return 'int32'
    # BUG FIX: previously returned None for values outside the int32 range.
    return 'int64'
|
import numpy as np
class DataPreProcessing:
    """Placeholder for data pre-processing utilities (no behavior yet)."""
    pass
# Copyright (c) 2015 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
import sgtk
from sgtk.platform.qt import QtCore, QtGui
class SmallOverlayWidget(QtGui.QWidget):
    """
    Simpler spinner widget.

    Paints a semi-transparent backdrop and an animated arc spinner on top
    of its parent widget, resizing with it.
    """

    # Widget states: idle/hidden or actively spinning.
    MODE_OFF = 0
    MODE_SPIN = 1

    def __init__(self, parent=None):
        """
        Constructor

        :param parent: Widget this overlay should cover.
        """
        QtGui.QWidget.__init__(self, parent)
        self._bundle = sgtk.platform.current_bundle()

        # hook up a listener to the parent window so we
        # can resize the overlay at the same time as the parent window
        # is being resized.
        # FIX: renamed local from `filter`, which shadowed the builtin.
        resize_filter = ResizeEventFilter(parent)
        resize_filter.resized.connect(self._on_parent_resized)
        parent.installEventFilter(resize_filter)

        # make it transparent to mouse events
        self.setAttribute(QtCore.Qt.WA_TransparentForMouseEvents)

        # turn off the widget
        self.setVisible(False)
        self._mode = self.MODE_OFF

        # setup spinner timer driving the animation frames
        self._timer = QtCore.QTimer(self)
        self._timer.timeout.connect(self._on_animation)
        self._spin_angle = 0

    ############################################################################################
    # public interface

    def start_spin(self):
        """
        Turn on spinning
        """
        self._timer.start(40)  # one frame every 40 ms (~25 fps)
        self.setVisible(True)
        self._mode = self.MODE_SPIN

    def hide(self):
        """
        Hide the overlay.

        NOTE(review): this overrides QWidget.hide() so that hiding also
        stops the animation timer -- appears intentional; confirm callers
        never rely on base-class hide semantics.
        """
        self._timer.stop()
        self._mode = self.MODE_OFF
        self.setVisible(False)

    ############################################################################################
    # internal methods

    def _on_parent_resized(self):
        """
        Special slot hooked up to the event filter.
        When associated widget is resized this slot is being called.
        """
        # resize overlay to exactly cover the parent
        self.resize(self.parentWidget().size())

    def _on_animation(self):
        """
        Spinner async callback to help animate the progress spinner.
        """
        # Advance by 2 degrees per frame, wrapping at 90 (the arc repeats).
        self._spin_angle += 2
        if self._spin_angle == 90:
            self._spin_angle = 0
        self.repaint()

    def paintEvent(self, event):
        """
        Render the UI.
        """
        if self._mode == self.MODE_OFF:
            return

        painter = QtGui.QPainter()
        painter.begin(self)
        try:
            # set up semi transparent backdrop covering the whole widget
            painter.setRenderHint(QtGui.QPainter.Antialiasing)
            overlay_color = QtGui.QColor(30, 30, 30, 100)
            painter.setBrush(QtGui.QBrush(overlay_color))
            painter.setPen(QtGui.QPen(overlay_color))
            painter.drawRect(0, 0, painter.device().width(), painter.device().height())

            # show the spinner: a 20x20 arc centered in the widget, drawn in
            # the toolkit highlight color
            painter.translate((painter.device().width() / 2) - 10,
                              (painter.device().height() / 2) - 10)
            pen = QtGui.QPen(QtGui.QColor(self._bundle.style_constants["SG_HIGHLIGHT_COLOR"]))
            pen.setWidth(1)
            painter.setPen(pen)
            r = QtCore.QRectF(0.0, 0.0, 20.0, 20.0)
            # Qt angles are in 1/16ths of a degree.
            start_angle = (0 + self._spin_angle) * 4 * 16
            span_angle = 340 * 16
            painter.drawArc(r, start_angle, span_angle)
        finally:
            painter.end()
class ResizeEventFilter(QtCore.QObject):
    """
    Event filter that watches a widget and fires ``resized`` each time the
    widget receives a resize event, letting the overlay wrapper track the
    monitored widget's geometry.
    """

    resized = QtCore.Signal()

    def eventFilter(self, obj, event):
        # Re-broadcast resize events through our signal; always return
        # False so the event continues normal delivery (never consumed).
        is_resize = event.type() == QtCore.QEvent.Resize
        if is_resize:
            self.resized.emit()
        return False
|
#!/usr/bin/env python
from util import *
from regress import *
from loaddata import *
import openopt
from collections import defaultdict
import argparse

# Market half-days and exchange holiday breaks (YYYYMMDD); used below to cut
# sessions short / skip timestamps.
halfdays = ['20111125', '20120703', '20121123', '20121224']
breaks = ['20110705', '20120102', '20120705', '20130103']

parser = argparse.ArgumentParser(description='G')
parser.add_argument("--start", action="store", dest="start", default=None)  # backtest start date (YYYYMMDD)
parser.add_argument("--end", action="store", dest="end", default=None)  # backtest end date (YYYYMMDD)
parser.add_argument("--fill", action="store", dest='fill', default='mid')  # fill model: 'mid' or 'vwap'
parser.add_argument("--slipbps", action="store", dest='slipbps', default=0.0001)  # slippage in bps of traded notional
parser.add_argument("--fcast", action="store", dest='fcast', default=None)  # comma-separated dir:forecast pairs
parser.add_argument("--weights", action="store", dest='weights', default=None)  # optional initial blend weights
args = parser.parse_args()
# Fraction of each bar's traded volume we allow ourselves to take.
participation = 0.015
cols = ['split', 'div', 'close', 'iclose', 'bvwap_b', 'bvolume',
        'tradable_med_volume_21_y', 'close_y']
cache_df = load_cache(dateparser.parse(args.start), dateparser.parse(args.end), cols)

# Per-bar traded volume: diff of the cumulative bvolume per security; a
# negative diff means the cumulative counter reset, so fall back to the raw
# value for that bar.
cache_df['bvolume_d'] = cache_df['bvolume'].groupby(level='sid').diff()
cache_df.loc[cache_df['bvolume_d'] < 0, 'bvolume_d'] = cache_df['bvolume']
cache_df = push_data(cache_df, 'bvolume_d')
# Cap per-bar trade notional at `participation` of the bar's traded notional.
cache_df['max_trade_size'] = cache_df['bvolume_d_n'] * cache_df['iclose'] * participation
cache_df['min_trade_size'] = -1 * cache_df['max_trade_size']
cache_df = push_data(cache_df, 'bvwap_b')
cache_df = push_data(cache_df, 'iclose')

# Load each forecast's optimizer output files and merge them into one frame.
trades_df = None
forecasts = list()
fcasts = args.fcast.split(",")
for pair in fcasts:
    fdir, fcast = pair.split(":")
    # BUG FIX: a botched 2to3 conversion left bare `print` statements with
    # the message on the following line (a no-op expression statement).
    # Restored the intended print calls throughout this script.
    print("Loading {} {}".format(fdir, fcast))
    forecasts.append(fcast)
    flist = list()
    for ff in sorted(glob.glob("./" + fdir + "/opt/opt." + fcast + ".*.csv")):
        # FIX: made the pattern fragment a raw string (invalid escape
        # sequences \. and \d in a plain string literal).
        m = re.match(r".*opt\." + fcast + r"\.(\d{8})_\d{6}.csv", str(ff))
        if m is None: continue
        d1 = int(m.group(1))
        if d1 < int(args.start) or d1 > int(args.end): continue
        print("Loading {}".format(ff))
        flist.append(pd.read_csv(ff, parse_dates=True))
    fcast_trades_df = pd.concat(flist)
    fcast_trades_df['iclose_ts'] = pd.to_datetime(fcast_trades_df['iclose_ts'])
    # BUG FIX: DataFrame.sort() was removed from pandas; sort_index() is the
    # equivalent for sorting on the (iclose_ts, sid) index.
    fcast_trades_df = fcast_trades_df.set_index(['iclose_ts', 'sid']).sort_index()
    if trades_df is None:
        trades_df = fcast_trades_df
        trades_df['traded_' + fcast] = trades_df['traded']
        trades_df['shares_' + fcast] = trades_df['shares']
    else:
        trades_df = pd.merge(trades_df, fcast_trades_df, how='outer',
                             left_index=True, right_index=True,
                             suffixes=['', '_dead'])
        trades_df['traded_' + fcast] = trades_df['traded_dead']
        # Forward-fill per-sid share targets across timestamps the forecast
        # did not emit, then default to flat.
        trades_df['shares_' + fcast] = trades_df['shares_dead'].unstack().fillna(method='ffill').stack().fillna(0)
    trades_df = remove_dup_cols(trades_df)

# Attach market data columns to the trade rows.
trades_df = pd.merge(trades_df.reset_index(), cache_df.reset_index(), how='left',
                     left_on=['iclose_ts', 'sid'], right_on=['iclose_ts', 'sid'],
                     suffixes=['', '_dead'])
trades_df = remove_dup_cols(trades_df)
trades_df.set_index(['iclose_ts', 'sid'], inplace=True)
cache_df = None  # release the cache frame

# Position limits: at most max_adv of median tradable notional, capped at $1M.
max_dollars = 1e6
max_adv = 0.02
trades_df['max_notional'] = (trades_df['tradable_med_volume_21_y'] * trades_df['close_y'] * max_adv).clip(0, max_dollars)
trades_df['min_notional'] = (-1 * trades_df['tradable_med_volume_21_y'] * trades_df['close_y'] * max_adv).clip(-max_dollars, 0)

# Bookkeeping columns filled in by objective().
trades_df['cash'] = 0
trades_df['traded'] = 0
trades_df['shares'] = 0
trades_df['pnl'] = 0
trades_df['cum_pnl'] = 0
trades_df['day_pnl'] = 0

if args.fill == "vwap":
    print("Filling at vwap...")
    trades_df['fillprice'] = trades_df['bvwap_b_n']
    print("Bad count: {}".format(len(trades_df) - len(trades_df[trades_df['fillprice'] > 0])))
    # BUG FIX: .ix was removed from pandas; .loc is the equivalent here.
    trades_df.loc[(trades_df['fillprice'] <= 0) | (trades_df['fillprice'].isnull()), 'fillprice'] = trades_df['iclose']
else:
    print("Filling at mid...")
    trades_df['fillprice'] = trades_df['iclose']
trades_df.replace([np.inf, -np.inf], np.nan, inplace=True)
def objective(weights):
    """Backtest the weighted blend of the loaded forecasts.

    :param weights: one weight per entry of the module-level ``forecasts``
    :return: annualized Sharpe ratio of daily returns minus a small penalty
        on weight dispersion (maximized by the optimizer).

    Reads the module-level ``trades_df``, ``forecasts``, ``halfdays`` and
    ``args``; prints a per-day trace as it runs.
    """
    ii = 0
    for fcast in forecasts:
        # BUG FIX (2to3): restored bare-`print`-plus-string no-ops to real
        # print calls throughout this function.
        print("Weight {}: {}".format(fcast, weights[ii]))
        ii += 1

    # Per-day accumulators: notional held, pnl delta, dollars traded.
    day_bucket = {
        'not': defaultdict(int),
        'pnl': defaultdict(int),
        'trd': defaultdict(int),
    }
    lastgroup_df = None
    lastday = None
    pnl_last_day_tot = 0
    totslip = 0

    # Walk the book forward one timestamp at a time.
    for ts, group_df in trades_df.groupby(level='iclose_ts'):
        dayname = ts.strftime("%Y%m%d")
        if int(dayname) > 20121227: continue
        monthname = ts.strftime("%Y%m")
        weekdayname = ts.weekday()
        timename = ts.strftime("%H%M")
        # Skip bars after the early close on half-days.
        if dayname in halfdays and int(timename) > 1245:
            continue

        if lastgroup_df is not None:
            # Carry the previous bar's state forward as *_last columns.
            for col in lastgroup_df.columns:
                if col == "sid": continue
                lastgroup_df[col + "_last"] = lastgroup_df[col]
                del lastgroup_df[col]
            group_df = pd.concat([group_df.reset_index().set_index('sid'),
                                  lastgroup_df.reset_index().set_index('sid')],
                                 join='outer', axis=1, verify_integrity=True)
            group_df['iclose_ts'] = ts
            # NOTE(review): this statement is a no-op -- set_index(...,
            # inplace=True) acts on the temporary returned by reset_index()
            # and the result is discarded. Left unchanged to preserve the
            # existing behavior; confirm intent before "fixing".
            group_df.reset_index().set_index(['iclose_ts', 'sid'], inplace=True)
            # NOTE(review): `lastday` is never assigned in this function, so
            # this dividend/split adjustment branch can never run as written.
            if dayname != lastday and lastday is not None:
                group_df['cash_last'] += group_df['shares_last'] * group_df['div'].fillna(0)
                group_df['shares_last'] *= group_df['split'].fillna(1)
        else:
            group_df['shares_last'] = 0
            group_df['cash_last'] = 0

        # Target position: weighted sum of each forecast's share targets.
        ii = 0
        for fcast in forecasts:
            group_df['shares'] += group_df['shares_' + fcast].fillna(0) * weights[ii]
            ii += 1

        group_df['shares_traded'] = group_df['shares'] - group_df['shares_last'].fillna(0)
        group_df['dollars_traded'] = group_df['shares_traded'] * group_df['fillprice'] * -1.0
        group_df['cash'] = group_df['cash_last'] + group_df['dollars_traded']

        # Mark at the official close on the last bar of the session,
        # otherwise at the pushed intraday close.
        markPrice = 'iclose_n'
        if ts.strftime("%H%M") == "1545" or (dayname in halfdays and timename == "1245"):
            markPrice = 'close'

        # Charge slippage proportional to traded notional.
        group_df['slip'] = np.abs(group_df['dollars_traded']).fillna(0) * float(args.slipbps)
        totslip += group_df['slip'].sum()
        group_df['cash'] = group_df['cash'] - group_df['slip']

        group_df['pnl'] = group_df['shares'] * group_df[markPrice] + group_df['cash'].fillna(0)
        notional = np.abs(group_df['shares'] * group_df[markPrice]).dropna().sum()
        group_df['lsnot'] = group_df['shares'] * group_df[markPrice]
        pnl_tot = group_df['pnl'].dropna().sum()

        traded = np.abs(group_df['dollars_traded']).fillna(0).sum()
        day_bucket['trd'][dayname] += traded

        # End-of-day accounting on the closing bar.
        if markPrice == 'close' and notional > 0:
            delta = pnl_tot - pnl_last_day_tot
            ret = delta / notional
            daytraded = day_bucket['trd'][dayname]
            # NOTE(review): 'position' is assumed to come from the opt csv
            # files -- confirm the column exists in all inputs.
            notional2 = np.sum(np.abs((group_df['close'] * group_df['position'] / group_df['iclose'])))
            print("{}: {} {} {} {:.4f} {:.2f} {}".format(
                ts, notional, pnl_tot, delta, ret, daytraded / notional, notional2))
            day_bucket['pnl'][dayname] = delta
            day_bucket['not'][dayname] = notional
            pnl_last_day_tot = pnl_tot

        # State carried to the next bar.
        lastgroup_df = group_df.reset_index()[['shares', 'cash', 'pnl', 'sid', 'target']]

    # Build daily return series and summary statistics.
    nots = pd.DataFrame([[d, v] for d, v in sorted(day_bucket['not'].items())],
                        columns=['date', 'notional'])
    nots.set_index(keys=['date'], inplace=True)
    pnl_df = pd.DataFrame([[d, v] for d, v in sorted(day_bucket['pnl'].items())],
                          columns=['date', 'pnl'])
    pnl_df.set_index(['date'], inplace=True)
    rets = pd.merge(pnl_df, nots, left_index=True, right_index=True)
    print("Total Pnl: ${:.0f}K".format(rets['pnl'].sum() / 1000.0))
    rets['day_rets'] = rets['pnl'] / rets['notional']
    rets['day_rets'].replace([np.inf, -np.inf], np.nan, inplace=True)
    rets['day_rets'].fillna(0, inplace=True)
    rets['cum_ret'] = (1 + rets['day_rets']).dropna().cumprod()

    # Annualize assuming 252 trading days.
    mean = rets['day_rets'].mean() * 252
    std = rets['day_rets'].std() * math.sqrt(252)
    sharpe = mean / std
    print("Day mean: {:.4f} std: {:.4f} sharpe: {:.4f} avg Notional: ${:.0f}K".format(
        mean, std, sharpe, rets['notional'].mean() / 1000.0))

    # Penalize dispersed weights slightly to prefer balanced blends.
    penalty = 0.05 * np.std(weights)
    print("penalty: {}".format(penalty))
    print()
    return sharpe - penalty
# Seed the optimizer: equal 0.5 weights unless the caller supplied some.
if args.weights is None:
    initial_weights = np.ones(len(forecasts)) * .5
else:
    initial_weights = np.array([float(x) for x in args.weights.split(",")])
# Each forecast weight is constrained to [0, 1].
lb = np.ones(len(forecasts)) * 0.0
ub = np.ones(len(forecasts))
plotit = False
p = openopt.NSP(goal='max', f=objective, x0=initial_weights, lb=lb, ub=ub, plot=plotit)
p.ftol = 0.001
p.maxFunEvals = 150
r = p.solve('ralg')

if (r.stopcase == -1 or r.isFeasible == False):
    # NOTE(review): the original (2to3-mangled) code called
    # objective_detail(target, *g_params) here, but neither name is defined
    # anywhere in this script -- it could only raise NameError. Disabled so
    # the intended exception below is actually raised.
    # print(objective_detail(target, *g_params))
    raise Exception("Optimization failed")

# BUG FIX (2to3): restored broken bare-print statements to real calls.
print(r.xf)
ii = 0
for fcast in forecasts:
    print("{}: {}".format(fcast, r.xf[ii]))
    ii += 1
|
import unittest
import magics
#from prog_edu_assistant_tools import magics
class TestCutPrompt(unittest.TestCase):
    """Tests for magics.MyMagics.CutPrompt marker stripping."""

    def testCutPrompt_intact(self):
        # Input without any markers is returned unchanged.
        self.assertEqual(magics.MyMagics.CutPrompt('abc'), 'abc')
        self.assertEqual(magics.MyMagics.CutPrompt('\nabc\ncde\n'), '\nabc\ncde\n')

    def testCutPrompt_solution_markers(self):
        # BEGIN/END SOLUTION marker lines are removed; the solution body
        # between them is kept.
        self.assertEqual(magics.MyMagics.CutPrompt('''
aaa
# BEGIN SOLUTION
xxx
# END SOLUTION
bbb
'''), '''
aaa
xxx
bbb
''')
        # Multiple solution blocks are all handled.
        self.assertEqual(magics.MyMagics.CutPrompt('''
aaa
# BEGIN SOLUTION
xxx
# END SOLUTION
bbb
# BEGIN SOLUTION
yyy
# END SOLUTION
ccc
'''), '''
aaa
xxx
bbb
yyy
ccc
''')

    def testCutPrompt_prompt(self):
        # PROMPT blocks (markers and contents) are removed entirely.
        self.assertEqual(magics.MyMagics.CutPrompt('''
aaa
""" # BEGIN PROMPT
xxx
""" # END PROMPT
bbb
""" # BEGIN PROMPT
yyy
""" # END PROMPT
ccc
'''), '''
aaa
bbb
ccc
''')

if __name__ == '__main__':
    unittest.main()
|
import json
import time
from base64 import b64encode
from pprint import pprint
from urllib.parse import urljoin
import requests
import telepot
from celery import Celery
from peewee import fn
from retry import retry
from conf import Config
from model import DarkNet_DataSale, DarkNet_IMGS, DarkNet_Notice, DarkNet_User
# Route Telegram API traffic through the configured proxy, then create the bot.
telepot.api.set_proxy(Config.telegram_proxy)
bot = telepot.Bot(Config.telegram_token)
# Celery application backed by a Redis broker; the tasks below run under it.
app = Celery(
    'darknet', broker=f'redis://{Config.redis_host}:{Config.redis_port}//')
@app.task()
def telegram(msg, sid, rid):
    """Send *msg* to chat *rid*, then mark notice *sid* as telegram-notified."""
    bot.sendMessage(rid, msg)
    mark_sent = DarkNet_Notice.update({'telegram': True}).where(
        DarkNet_Notice.sid == sid)
    mark_sent.execute()
@app.task()
def telegram_withpic(pic, details, sid, rid):
    """Send photo *pic* captioned *details* to chat *rid*, then mark notice *sid*."""
    # bot.sendDocument(rid,pic,details) # unpretty~
    bot.sendPhoto(rid, pic, details)
    mark_sent = DarkNet_Notice.update({'telegram': True}).where(
        DarkNet_Notice.sid == sid)
    mark_sent.execute()
@app.task()
def logreport(msg):
    # Forward a log line to the configured report group chat.
    bot.sendMessage(Config.ReportGroupID,msg)
|
# Copyright 2016 - Parsely, Inc. (d/b/a Parse.ly)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''base_bolt.py'''
import copy
from heronpy.api.tuple import TupleHelper
from heronpy.api.component.component_spec import HeronComponentSpec
from heronpy.api.component.base_component import BaseComponent
from heronpy.api.stream import Stream
class BaseBolt(BaseComponent):
    """BaseBolt class

    This is the base for heron bolt, which wraps the implementation of
    publicly available methods. This includes:

    - <classmethod> spec()
    - emit()
    - <staticmethod> is_tick()
    - ack()
    - fail()

    They are compatible with StreamParse API.
    """
    # pylint: disable=no-member
    @classmethod
    def spec(cls, name=None, inputs=None, par=1, config=None, optional_outputs=None):
        """Register this bolt to the topology and create ``HeronComponentSpec``

        This method takes an optional ``optional_outputs`` argument for supporting
        dynamic output fields declaration. However, it is recommended that
        ``outputs`` should be declared as an attribute of your ``Bolt`` subclass.
        Also, some ways of declaring inputs is not supported in this
        implementation; please read the documentation below.

        :type name: str
        :param name: Name of this bolt.
        :type inputs: dict or list
        :param inputs: Streams that feed into this Bolt.
                       Four forms of this are acceptable:
                       1. A `dict` mapping from ``HeronComponentSpec`` to ``Grouping``.
                          In this case, default stream is used.
                       2. A `dict` mapping from ``GlobalStreamId`` to ``Grouping``.
                          This ``GlobalStreamId`` object itself is different from StreamParse, because
                          Heron does not use thrift, although its constructor method is compatible.
                       3. A `list` of ``HeronComponentSpec``. In this case, default stream with
                          SHUFFLE grouping is used.
                       4. A `list` of ``GlobalStreamId``. In this case, SHUFFLE grouping is used.
        :type par: int
        :param par: Parallelism hint for this spout.
        :type config: dict
        :param config: Component-specific config settings.
        :type optional_outputs: list of (str or Stream) or tuple of (str or Stream)
        :param optional_outputs: Additional output fields for this bolt. These fields are added to
                                 existing ``outputs`` class attributes of your bolt. This is an optional
                                 argument, and exists only for supporting dynamic output field
                                 declaration.
        """
        python_class_path = "%s.%s" % (cls.__module__, cls.__name__)

        if hasattr(cls, 'outputs'):
            # avoid modification to cls.outputs
            _outputs = copy.copy(cls.outputs)
        else:
            _outputs = []

        if optional_outputs is not None:
            assert isinstance(optional_outputs, (list, tuple))
            for out in optional_outputs:
                assert isinstance(out, (str, Stream))
                _outputs.append(out)

        return HeronComponentSpec(name, python_class_path, is_spout=False, par=par,
                                  inputs=inputs, outputs=_outputs, config=config)

    def emit(self, tup, stream=Stream.DEFAULT_STREAM_ID,
             anchors=None, direct_task=None, need_task_ids=False):
        """Emits a new tuple from this Bolt

        It is compatible with StreamParse API.

        :type tup: list or tuple
        :param tup: the new output Tuple to send from this bolt,
                    which should contain only serializable data.
        :type stream: str
        :param stream: the ID of the stream to emit this Tuple to.
                       Leave empty to emit to the default stream.
        :type anchors: list
        :param anchors: a list of HeronTuples to which the emitted Tuples should be anchored.
        :type direct_task: int
        :param direct_task: the task to send the Tuple to if performing a direct emit.
        :type need_task_ids: bool
        :param need_task_ids: indicate whether or not you would like the task IDs the Tuple was emitted.
        :return: the delegate's return value (NOTE(review): previously the
                 result was discarded, so ``need_task_ids=True`` could never
                 return the task ids; confirm against the delegate's contract).
        """
        return self.delegate.emit(tup, stream, anchors, direct_task, need_task_ids)

    @staticmethod
    def is_tick(tup):
        """Returns whether or not a given HeronTuple is a tick Tuple

        It is compatible with StreamParse API.
        """
        return tup.stream == TupleHelper.TICK_TUPLE_ID

    def ack(self, tup):
        """Indicate that processing of a Tuple has succeeded

        It is compatible with StreamParse API.
        """
        self.delegate.ack(tup)

    def fail(self, tup):
        """Indicate that processing of a Tuple has failed

        It is compatible with StreamParse API.
        """
        self.delegate.fail(tup)
|
"""
public_account
==============
Account public key and address.
License
-------
Copyright 2019 NEM
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import annotations
import typing
from .address import Address
from ..blockchain.network_type import NetworkType
from ... import util
from ...util.signature import ed25519
__all__ = ['PublicAccount']
@util.inherit_doc
@util.dataclass(frozen=True)
class PublicAccount(util.Object):
    """
    Describe public account information via public key and account address.

    :param address: Address for the account.
    :param public_key: Hex-encoded public key (with or without '0x' prefix).
    """

    # Account address; also carries the network type.
    address: Address
    # Hex-encoded ed25519 public key, normalized to 64 hex characters.
    public_key: str
    # Size of the serialized public key in catbuffer format (32 bytes).
    CATBUFFER_SIZE: typing.ClassVar[int] = 32 * util.U8_BYTES

    def __init__(
        self,
        address: Address,
        public_key: str,
    ) -> None:
        """
        :param address: Address for the account.
        :param public_key: Hex-encoded public key (with or without '0x' prefix).
        :raises ValueError: If the normalized key is not 64 hex characters.
        """
        # Normalize to hex text; a 32-byte key is 64 hex characters.
        public_key = util.encode_hex(public_key)
        if len(public_key) != 64:
            raise ValueError("Invalid public key length")
        # NOTE(review): _set presumably assigns fields despite the frozen
        # dataclass — confirm against util.Object.
        self._set('address', address)
        self._set('public_key', public_key)

    @property
    def network_type(self) -> NetworkType:
        """Get network type."""
        return self.address.network_type

    @classmethod
    def create_from_public_key(
        cls,
        public_key: typing.AnyStr,
        network_type: NetworkType,
    ):
        """
        Create PublicAccount from the public key and network type.

        :param public_key: Hex-encoded or raw bytes for public key.
        :param network_type: Network type for address.
        :return: PublicAccount object.
        """
        public_key = util.encode_hex(public_key)
        # The account address is derived from the public key and network type.
        address = Address.create_from_public_key(public_key, network_type)
        return cls(address, public_key)

    def verify_signature(
        self,
        data: typing.AnyStr,
        signature: typing.AnyStr
    ) -> bool:
        """
        Verify a signature.

        :param data: Hex-encoded or raw bytes used to generate signature.
        :param signature: Hex-encoded or raw bytes for the 64-byte signature.
        :return: Boolean representing if the signature was verified.
        :raises ValueError: If the decoded signature is not 64 bytes long.
        """
        data = util.decode_hex(data, with_prefix=True)
        signature = util.decode_hex(signature, with_prefix=True)
        if len(signature) != 64:
            raise ValueError("Signature length is incorrect.")
        public_key = util.unhexlify(self.public_key)
        # Uses the SHA3 flavor of ed25519 (see util.signature.ed25519).
        key = ed25519.sha3.VerifyingKey(public_key)
        try:
            key.verify(signature, data)
            return True
        except ed25519.sha3.BadSignatureError:
            return False

    def verify_transaction(
        self,
        transaction: typing.AnyStr
    ) -> bool:
        """
        Verify signed transaction data.

        :param transaction: Hex-encoded or raw bytes for transaction data.
        :return: Boolean representing if the transaction signature was verified.
        """
        transaction = util.decode_hex(transaction, with_prefix=True)
        # Skip first 100 bytes.
        # uint32_t size
        # uint8_t[64] signature
        # uint8_t[32] signer
        data = transaction[100:]
        signature = transaction[4:68]
        return self.verify_signature(data, signature)
# Type alias: an ordered collection of PublicAccount objects.
PublicAccountList = typing.Sequence[PublicAccount]
|
import os
import sys
from dolfin import MPI, timings, TimingClear, TimingType, File, dump_timings_to_xml
from subprocess import check_output, CalledProcessError
from distutils.util import strtobool
import json
import time
import mpi4py
# Colored printing functions for strings that use universal ANSI escape sequences.
# fail: bold red, pass: bold green, warn: bold yellow,
# info: bold blue, bold: bold white
def get_code_ver():
    """Return the current git revision suffixed with the work-tree state.

    :return: '<sha>-clean' when the work tree matches HEAD, or '<sha>-dirty'
             (after printing loud warnings) when uncommitted changes exist.
    """
    version = check_output(['git', 'rev-parse', 'HEAD']).strip().decode('utf-8')
    status = 'clean'
    try:
        # git diff-index exits non-zero when the work tree differs from HEAD.
        check_output(['git', 'diff-index', '--quiet', '--exit-code', 'HEAD', '--'])
    except CalledProcessError:
        status = 'dirty'
        ColorPrint.print_warn('*** Warning: you are (or I am) working with a dirty repository ')
        ColorPrint.print_warn('*** with outstanding uncommitted changes.')
        ColorPrint.print_warn('*** Consider the importance of housekeeping.')
        ColorPrint.print_warn('*** Clean and commit.')
        ColorPrint.print_warn('*** And consider yourself warned.')
    return version + '-' + status
def get_petsc_ver():
    """Return the installed petsc4py version string."""
    import petsc4py
    return petsc4py.__version__
def get_slepc_ver():
    """Return the installed slepc4py version string."""
    import slepc4py
    return slepc4py.__version__
def get_dolfin_ver():
    """Return the installed dolfin version string."""
    import dolfin
    return dolfin.__version__
def get_versions():
    """Collect version strings for the solver stack and this code's git state."""
    return {
        "petsc": get_petsc_ver(),
        "slepc": get_slepc_ver(),
        "dolfin": get_dolfin_ver(),
        "mechanics": get_code_ver(),
    }
def check_bool(parameter, bool_val=True):
    """Parse *parameter* as a boolean string and compare it to *bool_val*.

    Accepts the same spellings as the deprecated distutils.util.strtobool
    (removed with distutils in Python 3.12), so the logic is inlined here:
    truthy: y/yes/t/true/on/1, falsy: n/no/f/false/off/0 (case-insensitive).

    :param parameter: any object; converted with str() before parsing.
    :param bool_val: expected boolean value (default True).
    :return: True when the parsed value equals bool_val.
    :raises ValueError: when the string is not a recognized truth value.
    """
    value = str(parameter).lower()
    if value in ('y', 'yes', 't', 'true', 'on', '1'):
        parsed = True
    elif value in ('n', 'no', 'f', 'false', 'off', '0'):
        parsed = False
    else:
        # Same message format as distutils.util.strtobool.
        raise ValueError("invalid truth value %r" % (value,))
    return parsed == bool_val
class ColorPrint:
    """Colored printing helpers using universal ANSI escape sequences.

    fail: bold red, pass: bold green, warn: bold yellow,
    info: bold blue, bold: bold white.
    fail/warn go to stderr, the rest to stdout; all output is emitted on
    MPI rank 0 only.
    """

    @staticmethod
    def _emit(stream, color_code, message, end):
        # Shared writer: print only on rank 0 to avoid duplicated lines in
        # parallel runs; wrap the stripped message in bold ANSI color codes.
        if MPI.comm_world.rank == 0:
            stream.write("\x1b[1;" + color_code + "m" + message.strip() + "\x1b[0m" + end)

    @staticmethod
    def print_fail(message, end="\n"):
        ColorPrint._emit(sys.stderr, "31", message, end)

    @staticmethod
    def print_pass(message, end="\n"):
        ColorPrint._emit(sys.stdout, "32", message, end)

    @staticmethod
    def print_warn(message, end="\n"):
        ColorPrint._emit(sys.stderr, "33", message, end)

    @staticmethod
    def print_info(message, end="\n"):
        ColorPrint._emit(sys.stdout, "34", message, end)

    @staticmethod
    def print_bold(message, end="\n"):
        ColorPrint._emit(sys.stdout, "37", message, end)
def collect_timings(outdir, tic):
    """Aggregate dolfin wall-clock timings across MPI ranks, print them,
    store them as XML on rank 0, and record total elapsed time as JSON.

    :param outdir: directory receiving the timing output files.
    :param tic: start time (time.time()) used to compute the elapsed wall time.
    """
    # list_timings(TimingClear.keep, [TimingType.wall, TimingType.system])
    # t = timings(TimingClear.keep, [TimingType.wall, TimingType.user, TimingType.system])
    t = timings(TimingClear.keep, [TimingType.wall])
    # Use different MPI reductions
    t_sum = MPI.sum(MPI.comm_world, t)
    # t_min = MPI.min(MPI.comm_world, t)
    # t_max = MPI.max(MPI.comm_world, t)
    t_avg = MPI.avg(MPI.comm_world, t)
    # Print aggregate timings to screen
    print('\n'+t_sum.str(True))
    # print('\n'+t_min.str(True))
    # print('\n'+t_max.str(True))
    print('\n'+t_avg.str(True))
    # Store to XML file on rank 0
    if MPI.rank(MPI.comm_world) == 0:
        f = File(MPI.comm_self, os.path.join(outdir, "timings_aggregate.xml"))
        f << t_sum
        # f << t_min
        # f << t_max
        f << t_avg
        dump_timings_to_xml(os.path.join(outdir, "timings_avg_min_max.xml"), TimingClear.clear)
    # Wall time since `tic`, plus the communicator size, saved on rank 0.
    elapsed = time.time() - tic
    comm = mpi4py.MPI.COMM_WORLD
    rank = comm.Get_rank()
    size = comm.Get_size()
    if rank == 0:
        # NOTE(review): despite the .pkl extension this file contains JSON
        # text, not pickle data — consider renaming to timings.json (that
        # would change the output path, so confirm downstream readers first).
        with open(os.path.join(outdir, 'timings.pkl'), 'w') as f:
            json.dump({'elapsed': elapsed, 'size': size}, f)
    pass
|
# -*- coding: utf-8 -*-
# Copyright 2020 ICON Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Optional
from unittest.mock import Mock, PropertyMock, call
import pytest
from iconservice.base.address import AddressPrefix, GOVERNANCE_SCORE_ADDRESS, Address, \
SYSTEM_SCORE_ADDRESS
from iconservice.base.block import Block
from iconservice.base.exception import InvalidParamsException, ExceptionCode
from iconservice.base.message import Message
from iconservice.base.transaction import Transaction
from iconservice.base.type_converter import TypeConverter
from iconservice.deploy import engine as isde
from iconservice.deploy.icon_score_deployer import IconScoreDeployer
from iconservice.deploy.storage import IconScoreDeployTXParams, IconScoreDeployInfo, Storage
from iconservice.icon_constant import DeployType, IconScoreContextType, Revision
from iconservice.icon_constant import IconServiceFlag
from iconservice.iconscore.context.context import ContextContainer
from iconservice.iconscore.icon_score_context import IconScoreContext
from iconservice.iconscore.icon_score_context_util import IconScoreContextUtil
from iconservice.iconscore.icon_score_mapper import IconScoreMapper
from iconservice.iconscore.icon_score_step import IconScoreStepCounter, StepType
from iconservice.utils import ContextStorage
from tests import create_address, create_tx_hash, create_block_hash
# Test actors shared (read-only) across this module: two externally-owned
# accounts and one contract address.
EOA1 = create_address(AddressPrefix.EOA)
EOA2 = create_address(AddressPrefix.EOA)
SCORE_ADDRESS = create_address(AddressPrefix.CONTRACT)
@pytest.fixture(scope="module")
def mock_engine():
    # Module-scoped: one deploy Engine instance is reused by every test below.
    engine = isde.Engine()
    return engine
@pytest.fixture(scope="function")
def context():
    """Build a fresh DIRECT-mode IconScoreContext for each test, push it
    onto the ContextContainer stack, and pop it during teardown."""
    ctx = IconScoreContext(IconScoreContextType.DIRECT)
    # Minimal tx/block/message environment originating from EOA1.
    ctx.tx = Transaction(tx_hash=create_tx_hash(), origin=EOA1)
    ctx.block = Block(block_height=0, block_hash=create_block_hash(), timestamp=0, prev_hash=None)
    ctx.msg = Message(sender=EOA1, value=0)
    ctx.icon_score_mapper = IconScoreMapper()
    ctx.new_icon_score_mapper = {}
    ctx.step_counter = IconScoreStepCounter(1, {}, 1000, False)
    ctx.event_logs = []
    ctx.traces = []
    ctx.current_address = EOA1
    # Only the deploy storage is mocked; the other storages are unused here.
    IconScoreContext.storage = ContextStorage(deploy=Mock(spec=Storage), fee=None, icx=None, iiss=None, prep=None,
                                              issue=None, meta=None, rc=None, inv=None)
    ContextContainer._push_context(ctx)
    yield ctx
    ContextContainer._pop_context()
@pytest.mark.parametrize("data",
                         [{"contentType": "application/tbears", "content": "path"}, {}])
def test_invoke_install(context, mock_engine, mocker, data):
    """invoke() on the system SCORE address installs a new SCORE: a new
    address is generated, CONTRACT_CREATE/CONTRACT_SET steps are charged,
    and _invoke() is delegated to with the new address."""
    new_score_address = create_address(1)
    mocker.patch.object(isde, "generate_score_address_for_tbears", return_value=new_score_address)
    mocker.patch.object(isde, "generate_score_address", return_value=new_score_address)
    mocker.patch.object(IconScoreContextUtil, "get_deploy_info", return_value=None)
    mocker.patch.object(IconScoreStepCounter, "apply_step")
    mocker.patch.object(isde.Engine, "_invoke")
    # Expected step charges: fixed CONTRACT_CREATE, then CONTRACT_SET sized
    # by the deploy content length.
    expected_apply_step_args_list = list()
    expected_apply_step_args_list.append(call(StepType.CONTRACT_CREATE, 1))
    content_size = len(data.get("content", ""))
    expected_apply_step_args_list.append(call(StepType.CONTRACT_SET, content_size))
    ret = mock_engine.invoke(context, SYSTEM_SCORE_ADDRESS, data)
    # The address generator used depends on the deploy content type.
    if data.get("contentType") == "application/tbears":
        isde.generate_score_address_for_tbears.assert_called_with("path")
    else:
        isde.generate_score_address.assert_called_with(context.tx.origin, context.tx.timestamp, context.tx.nonce)
    IconScoreContextUtil.get_deploy_info.assert_called_with(context, new_score_address)
    apply_step_args_list = IconScoreStepCounter.apply_step.call_args_list
    assert expected_apply_step_args_list == apply_step_args_list
    mock_engine._invoke.assert_called_with(context=context, to=SYSTEM_SCORE_ADDRESS,
                                           icon_score_address=new_score_address, data=data)
    assert ret == new_score_address
    mocker.stopall()
def test_invoke_update(context, mock_engine, mocker):
    """invoke() on an existing SCORE address performs an update:
    CONTRACT_UPDATE/CONTRACT_SET steps are charged and _invoke() is
    delegated to with the same address."""
    score_address = create_address(1)
    data = {}
    mocker.patch.object(isde.Engine, "_invoke")
    mocker.patch.object(IconScoreStepCounter, "apply_step")
    expected_apply_step_args_list = list()
    expected_apply_step_args_list.append(call(StepType.CONTRACT_UPDATE, 1))
    content_size = len(data.get("content", ""))
    expected_apply_step_args_list.append(call(StepType.CONTRACT_SET, content_size))
    ret = mock_engine.invoke(context, score_address, data)
    assert expected_apply_step_args_list == IconScoreStepCounter.apply_step.call_args_list
    mock_engine._invoke.assert_called_with(context=context, to=score_address,
                                           icon_score_address=score_address, data=data)
    assert ret == score_address
    mocker.stopall()
@pytest.mark.parametrize("to,score_address,expect",
                         [(SYSTEM_SCORE_ADDRESS, SYSTEM_SCORE_ADDRESS, pytest.raises(AssertionError)),
                          (SYSTEM_SCORE_ADDRESS, None, pytest.raises(AssertionError))])
def test_invoke_invalid_score_addresses(context, mock_engine, mocker, to, score_address, expect):
    """case when icon_score_address is in (None, ZERO_ADDRESS): _invoke()
    fails an assertion before any deploy work happens."""
    mocker.patch.object(isde.Engine, "_is_audit_needed", return_value=True)
    mocker.patch.object(isde.Engine, "deploy")
    with expect:
        mock_engine._invoke(context, to, score_address, {})
    # Nothing should have been persisted or deployed.
    context.storage.deploy.put_deploy_info_and_tx_params.assert_not_called()
    mock_engine._is_audit_needed.assert_not_called()
    mock_engine.deploy.assert_not_called()
    mocker.stopall()
@pytest.mark.parametrize("to,score_address,deploy_type",
                         [(SYSTEM_SCORE_ADDRESS, GOVERNANCE_SCORE_ADDRESS, DeployType.INSTALL),
                          (GOVERNANCE_SCORE_ADDRESS, GOVERNANCE_SCORE_ADDRESS, DeployType.UPDATE)])
def test_invoke_valid_score_addresses(context, mock_engine, mocker, to, score_address, deploy_type):
    """case when icon_score_address is not in (None, ZERO_ADDRESS): deploy
    params are stored and, with audit disabled, deploy() runs immediately."""
    mocker.patch.object(isde.Engine, "_is_audit_needed", return_value=False)
    mocker.patch.object(isde.Engine, "deploy")
    mock_engine._invoke(context, to, score_address, {})
    context.storage.deploy.put_deploy_info_and_tx_params.assert_called_with(context, score_address, deploy_type,
                                                                            context.tx.origin, context.tx.hash, {})
    mock_engine._is_audit_needed.assert_called_with(context, score_address)
    mock_engine.deploy.assert_called_with(context, context.tx.hash)
    mocker.stopall()
@pytest.mark.parametrize("deploy_data, call_method",
                         [
                             ({"contentType": "application/tbears", 'content': '0x1234'},
                              "_write_score_to_score_deploy_path_on_tbears_mode"),
                             ({"contentType": "application/zip", 'content': '0x1234'},
                              "_write_score_to_score_deploy_path"),
                         ])
def test_write_score_to_filesystem(mock_engine, context, mocker, deploy_data, call_method):
    """_write_score_to_filesystem() dispatches to the writer that matches
    the deploy content type (tbears vs. zip)."""
    content = '0x1234'
    mocker.patch.object(isde.Engine, call_method)
    mock_engine._write_score_to_filesystem(context, GOVERNANCE_SCORE_ADDRESS, context.tx.hash, deploy_data)
    method = getattr(mock_engine, call_method)
    method.assert_called_with(context, GOVERNANCE_SCORE_ADDRESS, context.tx.hash, content)
    mocker.stopall()
@pytest.mark.parametrize("get_score_info_return_value", [None, 'dummy'])
def test_create_score_info(mock_engine, context, mocker, get_score_info_return_value):
    """_create_score_info() forwards the existing score_db when score info
    is already present, and None when it is not."""
    score_info = None if get_score_info_return_value is None else Mock(score_db=get_score_info_return_value)
    mocker.patch.object(IconScoreContextUtil, "create_score_info")
    mocker.patch.object(IconScoreContextUtil, "get_score_info", return_value=score_info)
    mock_engine._create_score_info(context, GOVERNANCE_SCORE_ADDRESS, context.tx.hash)
    IconScoreContextUtil.create_score_info. \
        assert_called_with(context, GOVERNANCE_SCORE_ADDRESS, context.tx.hash, get_score_info_return_value)
    mocker.stopall()
def test_write_score_to_score_deploy_path_on_tbears_mode(mock_engine, context, mocker):
    """tbears mode deploys by symlinking the content path into the SCORE
    deploy path (after creating the score directory)."""
    score_deploy_path = 'score_deploy_path'
    score_path = 'score_path'
    mocker.patch("iconservice.deploy.engine.get_score_deploy_path", return_value=score_deploy_path)
    mocker.patch("iconservice.deploy.engine.get_score_path", return_value=score_path)
    mocker.patch.object(os, "symlink")
    mocker.patch.object(os, "makedirs")
    mock_engine._write_score_to_score_deploy_path_on_tbears_mode(context, GOVERNANCE_SCORE_ADDRESS,
                                                                 context.tx.hash, None)
    isde.get_score_path.assert_called_with(context.score_root_path, GOVERNANCE_SCORE_ADDRESS)
    os.makedirs.assert_called_with(score_path, exist_ok=True)
    os.symlink.assert_called_with(None, score_deploy_path, target_is_directory=True)
    mocker.stopall()
def test_on_deploy(mock_engine, context, mocker):
    """Case when deploy_info is not None, zip, revision0, score validator flag False, SCORE is not None"""
    mocker.patch.object(IconScoreContextUtil, 'validate_score_package')
    mock_score = Mock()
    mock_score.owner = EOA1
    # Deploy payload and the tx params pointing at SCORE_ADDRESS.
    deploy_params = {"a": 1}
    deploy_data = {"params": deploy_params}
    deploy_info = Mock(spec=IconScoreDeployInfo)
    deploy_type = 'deploy_type'
    next_tx_hash = b'\00\01' * 16
    deploy_info.configure_mock(next_tx_hash=next_tx_hash)
    tx_params = Mock(spec=IconScoreDeployTXParams)
    tx_params.configure_mock(score_address=SCORE_ADDRESS, deploy_data=deploy_data, deploy_type=deploy_type,
                             params=deploy_params)
    # _on_deploy must restore the context's msg/tx after it runs.
    backup_msg, backup_tx = context.msg, context.tx
    mocker.patch.object(mock_engine, "_write_score_to_filesystem")
    score_info = Mock()
    score_info.configure_mock(get_score=Mock(return_value=mock_score))
    mocker.patch.object(mock_engine, "_create_score_info", return_value=score_info)
    context.storage.deploy.get_deploy_info = Mock(return_value=deploy_info)
    mocker.patch.object(mock_engine, "_initialize_score")
    mock_engine._on_deploy(context, tx_params)
    # Full pipeline: look up deploy info, write files, validate the package,
    # build the score info, fetch the SCORE object and initialize it.
    context.storage.deploy.get_deploy_info.assert_called_with(context, SCORE_ADDRESS)
    mock_engine._write_score_to_filesystem.assert_called_with(context, SCORE_ADDRESS, next_tx_hash, deploy_data)
    IconScoreContextUtil.validate_score_package.assert_called_with(context, SCORE_ADDRESS,
                                                                   next_tx_hash)
    mock_engine._create_score_info.assert_called_with(context, SCORE_ADDRESS, next_tx_hash)
    score_info.get_score.assert_called_with(context.revision)
    mock_engine._initialize_score.assert_called_with(context, deploy_type, mock_score, deploy_params)
    assert context.msg == backup_msg
    assert context.tx == backup_tx
    mocker.stopall()
class TestIsAuditNeeded:
    """Tests for Engine._is_audit_needed across revisions, owners and flags."""

    @staticmethod
    def set_test(mocker, is_service_flag_one_return_value: bool, get_owner_return_value: Address, revision: int):
        # Common patches: audit service flag, SCORE owner and chain revision.
        mocker.patch.object(IconScoreContextUtil, "is_service_flag_on", return_value=is_service_flag_one_return_value)
        mocker.patch.object(IconScoreContextUtil, "get_owner", return_value=get_owner_return_value)
        mocker.patch.object(IconScoreContext, "revision", PropertyMock(return_value=revision))

    @pytest.mark.parametrize("audit_flag", [True, False])
    def test_is_audit_needed_case_revision0(self, mock_engine, context, mocker, audit_flag):
        """At revision 0 the audit service flag alone decides the result."""
        self.set_test(mocker, audit_flag, EOA1, 0)
        result = mock_engine._is_audit_needed(context, SCORE_ADDRESS)
        IconScoreContextUtil.get_owner.assert_called_with(context, SCORE_ADDRESS)
        IconScoreContextUtil.is_service_flag_on.assert_called_with(context, IconServiceFlag.AUDIT)
        assert result is audit_flag
        mocker.stopall()

    @pytest.mark.parametrize("owner", [EOA1, EOA2])
    @pytest.mark.parametrize("score_address", [SCORE_ADDRESS, GOVERNANCE_SCORE_ADDRESS])
    @pytest.mark.parametrize("audit_flag", [True, False])
    @pytest.mark.parametrize("revision", [revision.value for revision in Revision if revision.value >= 2])
    def test_is_audit_needed_case_revision_gt2(self, mock_engine, context, mocker,
                                               audit_flag, owner, score_address, revision):
        """
        case when revision >= 2
        tx sender = EOA1
        Audit is needed when the flag is on, unless the sender owns the
        governance SCORE being deployed.
        """
        self.set_test(mocker, audit_flag, owner, revision)
        is_owner = owner == EOA1
        is_system_score = score_address == GOVERNANCE_SCORE_ADDRESS
        result = mock_engine._is_audit_needed(context, score_address)
        IconScoreContextUtil.get_owner.assert_called_with(context, score_address)
        IconScoreContextUtil.is_service_flag_on.assert_called_with(context, IconServiceFlag.AUDIT)
        assert result is (audit_flag and not (is_system_score and is_owner))
        mocker.stopall()
class TestDeploy:
    """Tests for Engine.deploy()."""

    @staticmethod
    def set_test(mocker, get_deploy_tx_param_return_value: Optional[IconScoreDeployTXParams]):
        # Patch the internals deploy() touches; the tx params lookup result
        # is the behavior under test.
        mocker.patch.object(isde.Engine, "_score_deploy")
        mocker.patch.object(IconScoreContext.storage.deploy, "update_score_info")
        mocker.patch.object(IconScoreContext.storage.deploy, "get_deploy_tx_params",
                            return_value=get_deploy_tx_param_return_value)

    def test_deploy_case_tx_param_none(self, mock_engine, context, mocker):
        """case when tx_param is None: InvalidParamsException, nothing deployed"""
        self.set_test(mocker, get_deploy_tx_param_return_value=None)
        with pytest.raises(InvalidParamsException) as e:
            mock_engine.deploy(context, context.tx.hash)
        context.storage.deploy.get_deploy_tx_params.assert_called_with(context, context.tx.hash)
        assert e.value.code == ExceptionCode.INVALID_PARAMETER
        mock_engine._score_deploy.assert_not_called()
        context.storage.deploy.update_score_info.assert_not_called()
        mocker.stopall()

    def test_deploy_case_tx_param_not_none(self, mock_engine, context, mocker):
        """case when tx_param is not None: _score_deploy runs and score info is updated"""
        tx_params = Mock(spec=IconScoreDeployTXParams)
        tx_params.configure_mock(score_address=GOVERNANCE_SCORE_ADDRESS)
        self.set_test(mocker, get_deploy_tx_param_return_value=tx_params)
        mock_engine.deploy(context, context.tx.hash)
        mock_engine._score_deploy.assert_called_with(context, tx_params)
        context.storage.deploy. \
            update_score_info.assert_called_with(context, GOVERNANCE_SCORE_ADDRESS, context.tx.hash)
        mocker.stopall()
class TestScoreDeploy:
    """Tests for Engine._score_deploy() content-type handling."""

    # Hex-encoded deploy content shared by all cases.
    content = f"0x{b'content'.hex()}"

    @staticmethod
    def set_test(mocker, mock_engine):
        mocker.patch.object(mock_engine, "_on_deploy")

    @pytest.mark.parametrize("tbears_mode", [True, False])
    def test_score_deploy_tbears_mode(self, mock_engine, context, mocker, tbears_mode):
        """A content type valid for the current mode reaches _on_deploy."""
        context.legacy_tbears_mode = tbears_mode
        if tbears_mode:
            deploy_data = {'contentType': 'application/tbears', 'content': self.content}
        else:
            deploy_data = {'contentType': 'application/zip', 'content': self.content}
        tx_params = Mock(spec=IconScoreDeployTXParams)
        tx_params.configure_mock(deploy_data=deploy_data)
        self.set_test(mocker, mock_engine)
        mock_engine._score_deploy(context, tx_params)
        mock_engine._on_deploy.assert_called_with(context, tx_params)
        mocker.stopall()

    @pytest.mark.parametrize("content_type", [
        "application/tbears",
        "wrong/content"
    ])
    def test_score_deploy_invalid_content_type(self, mock_engine, context, mocker, content_type):
        """Unsupported content types (including tbears outside tbears mode)
        raise InvalidParamsException and never reach _on_deploy."""
        context.legacy_tbears_mode = False
        tx_params = Mock(spec=IconScoreDeployTXParams)
        tx_params.configure_mock(deploy_data={'contentType': content_type, 'content': self.content})
        self.set_test(mocker, mock_engine)
        with pytest.raises(InvalidParamsException) as e:
            mock_engine._score_deploy(context, tx_params)
        assert e.value.code == ExceptionCode.INVALID_PARAMETER
        assert e.value.message == f"Invalid contentType: {content_type}"
        mock_engine._on_deploy.assert_not_called()
        mocker.stopall()
class TestWriteScoreToScoreDeployPath:
    """Tests for Engine._write_score_to_score_deploy_path across revisions."""

    score_path = "score_path"
    score_deploy_path = "score_deploy_path"

    @staticmethod
    def set_test(mocker, score_path: str, score_deploy_path: str, revision: int):
        # Patch both deployers, the path helpers and the chain revision.
        mocker.patch.object(IconScoreDeployer, "deploy")
        mocker.patch.object(IconScoreDeployer, "deploy_legacy")
        mocker.patch("iconservice.deploy.engine.remove_path")
        mocker.patch("iconservice.deploy.engine.get_score_deploy_path", return_value=score_deploy_path)
        mocker.patch.object(os.path, "join", return_value=score_path)
        mocker.patch.object(IconScoreContext, "revision", PropertyMock(return_value=revision))

    @pytest.mark.parametrize('revision', [revision.value for revision in Revision if revision.value >= 3])
    def test_write_score_to_score_deploy_path_revision_ge3(self, mock_engine, context, mocker, revision):
        """revision >= 3: the stale score path is removed before deploy()."""
        self.set_test(mocker, self.score_path, self.score_deploy_path, revision)
        mock_engine._write_score_to_score_deploy_path(context, GOVERNANCE_SCORE_ADDRESS, context.tx.hash, None)
        isde.get_score_deploy_path.assert_called_with(context.score_root_path, GOVERNANCE_SCORE_ADDRESS,
                                                      context.tx.hash)
        os.path.join.assert_called_with(context.score_root_path, GOVERNANCE_SCORE_ADDRESS.to_bytes().hex(),
                                        f"0x{context.tx.hash.hex()}")
        isde.remove_path.assert_called_with(self.score_path)
        IconScoreDeployer.deploy.assert_called_with(self.score_deploy_path, None, revision)
        mocker.stopall()

    def test_write_score_to_score_deploy_path_revision_2(self, mock_engine, context, mocker):
        """revision 2: deploy() runs without the remove_path cleanup."""
        self.set_test(mocker, self.score_path, self.score_deploy_path, 2)
        mock_engine._write_score_to_score_deploy_path(context, GOVERNANCE_SCORE_ADDRESS, context.tx.hash, None)
        isde.get_score_deploy_path.assert_called_with(context.score_root_path,
                                                      GOVERNANCE_SCORE_ADDRESS, context.tx.hash)
        os.path.join.assert_not_called()
        isde.remove_path.assert_not_called()
        IconScoreDeployer.deploy.assert_called_with(self.score_deploy_path, None, 2)
        mocker.stopall()

    @pytest.mark.parametrize('revision', [revision.value for revision in Revision if 0 <= revision.value < 2])
    def test_write_score_to_score_deploy_path_revision_lt2(self, mock_engine, context, mocker, revision):
        """case when revision < 2: the legacy deployer is used instead"""
        self.set_test(mocker, self.score_path, self.score_deploy_path, revision)
        mock_engine._write_score_to_score_deploy_path(context, GOVERNANCE_SCORE_ADDRESS, context.tx.hash, None)
        isde.get_score_deploy_path.assert_called_with(context.score_root_path,
                                                      GOVERNANCE_SCORE_ADDRESS, context.tx.hash)
        os.path.join.assert_not_called()
        isde.remove_path.assert_not_called()
        IconScoreDeployer.deploy.assert_not_called()
        IconScoreDeployer.deploy_legacy.assert_called_with(self.score_deploy_path, None)
        mocker.stopall()
class TestInitializeScore:
    """Tests for Engine._initialize_score() dispatching on DeployType."""

    # Class-level mock SCORE shared by the tests; its lifecycle hooks are
    # re-bound to fresh mocks in set_test() before every test.
    mock_score = Mock()
    on_install = None
    on_update = None
    on_invalid = None
    params = {"param1": "0x1", "param2": "string"}

    def set_test(self, mocker):
        mocker.patch.object(TypeConverter, "adjust_params_to_method")
        self.mock_score.on_install = self.on_install = Mock()
        self.mock_score.on_update = self.on_update = Mock()
        self.mock_score.on_invalid = self.on_invalid = Mock()

    @pytest.mark.skip("TypeConverter is replaced with convert_score_parameters()")
    def test_initialize_score_on_install(self, mock_engine, mocker):
        """case on_install: only the install hook runs, with converted params"""
        deploy_type = DeployType.INSTALL
        self.set_test(mocker)
        mock_engine._initialize_score(deploy_type, self.mock_score, self.params)
        TypeConverter.adjust_params_to_method.assert_called_with(self.on_install, self.params)
        self.on_install.assert_called_with(**self.params)
        self.on_update.assert_not_called()
        self.on_invalid.assert_not_called()
        mocker.stopall()

    @pytest.mark.skip("TypeConverter is replaced with convert_score_parameters()")
    def test_initialize_score_case_on_update(self, mock_engine, mocker):
        """case on_update: only the update hook runs, with converted params"""
        deploy_type = DeployType.UPDATE
        self.set_test(mocker)
        mock_engine._initialize_score(deploy_type, self.mock_score, self.params)
        TypeConverter.adjust_params_to_method.assert_called_with(self.on_update, self.params)
        self.on_install.assert_not_called()
        self.on_update.assert_called_with(**self.params)
        self.on_invalid.assert_not_called()
        mocker.stopall()

    def test_initialize_score_invalid(self, mock_engine, mocker):
        """case invalid method name: InvalidParamsException, no hook runs"""
        deploy_type = 'invalid'
        context = Mock(spec=["revision"])
        context.revision = 0
        self.set_test(mocker)
        with pytest.raises(InvalidParamsException) as e:
            mock_engine._initialize_score(
                context, deploy_type, self.mock_score, self.params)
        assert e.value.code == ExceptionCode.INVALID_PARAMETER
        assert e.value.message == f"Invalid deployType: {deploy_type}"
        TypeConverter.adjust_params_to_method.assert_not_called()
        self.on_install.assert_not_called()
        self.on_update.assert_not_called()
        self.on_invalid.assert_not_called()
        mocker.stopall()
|
from distutils.core import setup

# Packaging metadata for django-rest-apscheduler.
# NOTE(review): `install_requires` is only honored by setuptools, not plain
# distutils — consider `from setuptools import setup`.
setup(
    name='rest_apscheduler',
    # Package names must use dotted module paths, not filesystem separators.
    packages=['rest_apscheduler', 'rest_apscheduler.migrations'],
    version='0.1.3',
    license='MIT',
    description='You can use this package only for django and can schedule jobs using any database and maintain record.',
    author='Ronak Jain',
    author_email='jronak515@gmail.com',
    url='https://github.com/Ronakjain515/django-rest-apscheduler.git',
    # Keep the release tag in sync with `version` above (previously it still
    # pointed at the 0.1.2 tag) — confirm the 0.1.3 tag exists before release.
    download_url='https://github.com/Ronakjain515/django-rest-apscheduler/archive/refs/tags/0.1.3.tar.gz',
    keywords=['django', 'rest', 'restframework', 'apscheduler', 'scheduler'],
    install_requires=[
        'apscheduler'
    ],
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
    ],
)
|
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.6
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Import Scripts
# %reload_ext autoreload
# %autoreload 2
import sys
sys.path.insert(0, '../src/utils')
import utils
# Packages
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# Show all columns when displaying DataFrames in the notebook.
pd.options.display.max_columns = 999
# -

# ### Start connection with db
#
#

con = utils.connect_to_database()

# ### Understanding the items data

# Find the number of items in the database
query = """
select count(*)
from raw.item_solicitado
"""
df = pd.read_sql_query(query, con).head(1)
df.head()

# Number of items procured over the year
query = """
select count(*), cast(extract(year from fecha_publicacion) as int) as year
from raw.item_solicitado
group by extract(year from fecha_publicacion)
order by year;
"""
df = pd.read_sql_query(query, con)
plt.bar('year', 'count', data = df)

# Number of categories in producto_nombre_convocante
query = """
with convocante as
(select distinct producto_nombre_convocante
from raw.item_solicitado)
select count(*) from convocante;
"""
pd.read_sql_query(query, con)

# Number of categories in producto_nombre_catalogo
query = """
with convocante as
(select distinct producto_nombre_catalogo
from raw.item_solicitado)
select count(*) from convocante;
"""
pd.read_sql_query(query, con)

# What are the most frequent items procured by producto_nombre_catalogo
query = """
select distinct producto_nombre_catalogo, count(producto_nombre_catalogo) as freq
from raw.item_solicitado
group by producto_nombre_catalogo
order by freq desc;
"""
df = pd.read_sql_query(query, con)
df[:10]

# Cumulative graph of product categories
df['cumsum'] = np.cumsum(df['freq'])
df['perc'] = df['cumsum']/np.sum(df.freq)
df['index'] = np.arange(len(df))
fig, ax = plt.subplots()
# NOTE(review): df.index here is the DataFrame's positional index, not the
# 'index' column created above — they coincide (both 0..n-1) but confirm
# which one is intended.
ax.fill_between(df.index, 0, df.perc,)
# Red line marks where the cumulative share of procurements reaches 80%.
plt.axvline(x=df['index'].loc[df['perc']>=0.8].iloc[0], color='red')
plt.xlabel('Items ordered by frequency in procurement')
plt.ylabel('Cumulative percentage % of total procurements')

# Number of types of presentation of items
query = """
with presentation as
(select distinct presentacion
from raw.item_solicitado)
select count(*) from presentation;
"""
pd.read_sql_query(query, con)

# Most frequent types of presentation
query = """
select distinct presentacion, count(presentacion) as freq
from raw.item_solicitado
group by presentacion
order by freq desc
limit 10;
"""
pd.read_sql_query(query,con)

# Number of units of measure
query = """
with units as
(select distinct unidad_medida
from raw.item_solicitado)
select count(*) from units;
"""
pd.read_sql_query(query, con)

# Distribution of value of items
query = """
select monto * precio_unitario_estimado as total_value
from raw.item_solicitado;
"""
df = pd.read_sql_query(query, con)
df['total_value'].describe()

# ### Creating path to save data in shared folder
# NOTE(review): 'data_outpt' looks like a typo for 'data_output' — confirm
# the intended folder name before relying on this path.
path = utils.path_to_shared('wen', 'data_outpt', 'test', 'csv')
df.to_csv(path)
|
from nipype.interfaces.base import BaseInterface, \
BaseInterfaceInputSpec, traits, File, TraitedSpec
from nipype.utils.filemanip import split_filename
import nibabel as nb
import numpy as np
import os
class RestAverageInputSpec(BaseInterfaceInputSpec):
    # Inputs for RestAverage: a 4D functional run plus an optional block
    # design description (task/rest lengths in volumes).
    func = File(exists=True, desc='functional to analyze', mandatory=True)
    task = traits.Int(desc='length of task, volumes')
    rest = traits.Int(desc='length of rest, volumes')
    trim = traits.Int(desc='number of buffer volumes to remove from rest (allowing return to baseline)')
class RestAverageOutputSpec(TraitedSpec):
    # Output images written by RestAverage._run_interface.
    # NOTE(review): `mandatory` is unusual on an output spec — presumably
    # ignored by traits; confirm against nipype conventions.
    tsnr = File(exists=True, desc='temporal snr', mandatory=True)
    mean = File(exists=True, desc='mean', mandatory=True)
    noise = File(exists=True, desc='noise', mandatory=True)
class RestAverage(BaseInterface):
    """Compute mean, temporal SNR and noise (std) images from the rest
    portion of a functional run.

    When both ``task`` and ``rest`` lengths are set, an alternating
    rest/task block design is assumed: task volumes and ``trim`` buffer
    volumes at each edge of every rest block are excluded from averaging.
    Otherwise the whole run is treated as rest.
    """
    input_spec = RestAverageInputSpec
    output_spec = RestAverageOutputSpec

    def _run_interface(self, runtime):
        # Load the 4D functional image (last axis is time).
        fname = self.inputs.func
        _, base, _ = split_filename(fname)
        img = nb.load(fname)
        # NOTE: img.get_data() is deprecated in recent nibabel (use
        # get_fdata()); kept for compatibility with the targeted version.
        func = np.array(img.get_data()).astype(float)
        nvols = func.shape[3]
        if self.inputs.rest > 0 and self.inputs.task > 0:
            # Assume a block design: build a per-volume mask (1 = exclude).
            if not self.inputs.trim:
                self.inputs.trim = 1
            volsrest = np.zeros([1, self.inputs.rest])
            volstask = np.ones([1, self.inputs.task])
            # Mark `trim` buffer volumes at both edges of each rest block
            # as excluded so the signal can return to baseline.
            volsrest[:, :self.inputs.trim] = 1
            volsrest[:, -self.inputs.trim:] = 1
            # One rest+task period, tiled just enough to cover the run and
            # truncated to nvols. (The original computed the repetition
            # count from a 2-element list length, over-tiling before the
            # truncation; the result is identical.)
            period = np.concatenate((volsrest, volstask), axis=1)[0]
            reps = int(np.ceil(float(nvols) / len(period)))
            activity = np.tile(period, reps)[0:nvols]
            # Always include the first and last volumes of the run.
            activity[0] = 0
            activity[-1] = 0
        else:
            # Assume resting state: every volume contributes.
            activity = np.zeros([nvols, ])
        # Average/std over the non-excluded (rest) volumes only.
        rest_vols = func[:, :, :, activity != 1]
        imgmean = np.mean(rest_vols, axis=3)
        imgstd = np.std(rest_vols, axis=3)
        # Save mean, tSNR (mean/std) and noise (std) images.
        new_img = nb.Nifti1Image(imgmean, img.affine, img.header)
        nb.save(new_img, base + '_mean.nii.gz')
        new_img = nb.Nifti1Image(imgmean / imgstd, img.affine, img.header)
        nb.save(new_img, base + '_tsnr.nii.gz')
        new_img = nb.Nifti1Image(imgstd, img.affine, img.header)
        nb.save(new_img, base + '_noise.nii.gz')
        return runtime

    def _list_outputs(self):
        """Report absolute paths of the three images written by _run_interface."""
        outputs = self._outputs().get()
        fname = self.inputs.func
        _, base, _ = split_filename(fname)
        outputs["tsnr"] = os.path.abspath(base + '_tsnr.nii.gz')
        outputs["mean"] = os.path.abspath(base + '_mean.nii.gz')
        outputs["noise"] = os.path.abspath(base + '_noise.nii.gz')
        return outputs
|
#!_PYTHONLOC
#
# (C) COPYRIGHT 2005-2021 Al von Ruff, Bill Longley, Uzume, Ahasuerus and Dirk Stoecker
# ALL RIGHTS RESERVED
#
# The copyright notice above does not evidence any actual or
# intended publication of such source code.
#
# Version: $Revision$
# Date: $Date$
from SQLparsing import *
from awardClass import *
from common import *
from library import *
from login import *
def displayCommon(title, user):
    # Print the record ID line shared by the title display pages.
    printRecordID('Title', title[TITLE_PUBID], user.id)
def PrintOneVariantType(variants, title, authors, translation):
    # Render one column of variant titles as an HTML table.
    # `translation` toggles the extra Language column.
    print '<td class="variants">'
    print '<table class="variantscolumn">'
    print '<tr class="table2">'
    print '<th>Year</th>'
    if translation:
        print '<th>Language</th>'
    print '<th>Title</th>'
    print '</tr>'
    # Alternate row background colors (table1/table2 CSS classes).
    bgcolor = 0
    for variant in variants:
        PrintOneVariant(variant, title, authors, bgcolor, translation)
        bgcolor ^= 1
    print '</table>'
    print '</td>'
def PrintOneVariant(variant, parent, parent_authors, bgcolor, translation):
    # Output one HTML table row for a variant title. `bgcolor` (0/1)
    # stripes the row background; `translation` adds a Language cell.
    print '<tr class="table%d">' % (bgcolor+1)
    # Display variant year and VT notes in a mouseover bubble
    print '<td>'
    if variant[TITLE_NOTE]:
        bubble_values = []
        notes = SQLgetNotes(variant[TITLE_NOTE])
        bubble_values.append(FormatNote(notes, '', 'full', 0, '', 0))
        print ISFDBMouseover(bubble_values, convertTitleYear(variant), '', INFO_SIGN)
    else:
        print convertTitleYear(variant)
    print '</td>'
    # Display translation language
    if translation:
        print '<td>'
        print LANGUAGES[int(variant[TITLE_LANGUAGE])]
        print '</td>'
    # Display variant title and, optionally, type and author(s)
    print '<td>'
    output = ISFDBLink('title.cgi', variant[TITLE_PUBID], variant[TITLE_TITLE])
    # Display the title type of the variant only if:
    # 1. it's different from the title type of the parent
    # 2. it is not SERIAL
    # 3. it is not a COVERART title reprinted as an INTERIORART title
    # 4. it is not an INTERIORART title reprinted as a COVERART title
    if ((variant[TITLE_TTYPE] != parent[TITLE_TTYPE])
        and (variant[TITLE_TTYPE] != 'SERIAL')
        and not ((variant[TITLE_TTYPE] == 'INTERIORART' and parent[TITLE_TTYPE] == 'COVERART'))
        and not ((variant[TITLE_TTYPE] == 'COVERART' and parent[TITLE_TTYPE] == 'INTERIORART'))):
        output += ' [%s]' % variant[TITLE_TTYPE]
    print output
    variant_authors = SQLTitleBriefAuthorRecords(variant[TITLE_PUBID])
    # Display the variant's authors only if they are different from the parent title's authors
    if set(variant_authors) != set(parent_authors):
        displayVariantAuthors(variant_authors, '', None)
    print '</td>'
    print '</tr>'
    return
def PrintReviews(reviews, title_language):
# First, create a four level dictionary of review Titles sorted by date, review ID, pub date and pub ID
dict = AutoVivification()
for review in reviews:
review_date = review[TITLE_YEAR]
# Change '0000-00-00' to '9999-99-99' so that they appear last
if review_date == '0000-00-00':
review_date = '9999-99-99'
# If this review's month is 00, display it after the reviews whose month is not 00
if review_date[5:7] == '00':
review_date = review_date[:4] + '-13-' + review_date[8:]
# Retrieve pubs for this review
review_id = review[TITLE_PUBID]
#
# Retrieve all pubs for this review
review_pubs = SQLGetPubsByTitle(review_id)
# Re-sort pubs so that the ones with a '0000-00-00' date appear last
for pub in review_pubs:
pub_date=pub[PUB_YEAR]
if pub_date == '0000-00-00':
pub_date = '9999-99-99'
# If this pub's month is 00, display it after the pubs whose month is not 00
if pub_date[5:7] == '00':
pub_date = pub_date[:4] + '-13-' + pub_date[8:]
dict[review_date][review_id][pub_date][pub[PUB_PUBID]] = [pub, review]
# If no eligible pubs were found, don't print anything
if not dict:
return
print '<div class="ContentBox">'
print '<h3 class="contentheader">Reviews</h3>'
print '<ul class="noindent">'
for review_date in sorted(dict.keys()):
for review_id in dict[review_date]:
# Initialize the counter of publications for this review
pub_counter = 0
for pub_date in sorted(dict[review_date][review_id]):
for pub_id in dict[review_date][review_id][pub_date]:
pub = dict[review_date][review_id][pub_date][pub_id][0]
review = dict[review_date][review_id][pub_date][pub_id][1]
display_review_date = review_date
if display_review_date == '9999-99-99':
display_review_date = '0000-00-00'
display_pub_date = pub_date
if display_pub_date == '9999-99-99':
display_pub_date = '0000-00-00'
#
pub_counter += 1
if pub_counter == 1:
print '<li>%s ' % ISFDBLink('title.cgi', review_id, 'Review')
review_language = review[TITLE_LANGUAGE]
# Only display the review language if both titles' languages are defined and are different
if review_language and title_language and (review_language != title_language):
print ' [%s] ' % (LANGUAGES[int(review_language)])
print ' by '
PrintAllAuthors(review_id)
print ' (%s) ' % convertYear(display_review_date[0:4])
output = ' in %s' % ISFDBLink('pl.cgi', pub[PUB_PUBID], pub[PUB_TITLE])
if display_review_date != display_pub_date:
output += ', (%s)' % convertYear(display_pub_date[0:4])
print output
else:
if pub_counter == 2:
print ', reprinted in:'
output = '<li>    %s' % ISFDBLink('pl.cgi', pub[PUB_PUBID], pub[PUB_TITLE])
output += ', (%s)' % convertYear(display_pub_date[0:4])
print output
print '</ul>'
print '</div>'
return
if __name__ == '__main__':
    # CGI entry point: render the full bibliographic display page for one title.
    title_id = SESSION.Parameter(0, 'int')
    user = User()
    user.load()
    # Get the variant display option:
    # 0 means display all variants
    # 1 means do not display translations, but display same-language variants
    # 2 means do not display any variants, either translated or same-language
    if user.display_title_translations:
        default_variant_display = 0
    else:
        default_variant_display = 1
    variant_display = SESSION.Parameter(1, 'int', default_variant_display, (0, 1, 2))
    ########################################
    # STEP 1 - Get the title record
    ########################################
    title = SQLloadTitle(title_id)
    if not title:
        if SQLDeletedTitle(title_id):
            SESSION.DisplayError('This title has been deleted. See %s for details.' % ISFDBLink('title_history.cgi', title_id, 'Edit History'))
        else:
            SESSION.DisplayError('Unknown Title Record')
    browser_title = "Title: " + title[TITLE_TITLE]
    PrintHeader(browser_title)
    PrintNavbar('title', title[TITLE_TTYPE], title_id, 'title.cgi', title_id)
    SQLupdateTitleViews(title_id)
    # Retrieve this title's variants
    titles = SQLgetTitleVariants(title_id)
    print '<div class="ContentBox">'
    # Transliterated title(s)
    trans_titles = SQLloadTransTitles(title_id)
    # The heading line depends on the title type
    if title[TITLE_TTYPE] == 'REVIEW':
        reviewed_title = SQLfindReviewedTitle(title_id)
        # If this is a VT'd REVIEW and not linked to a regular title, check
        # if its parent is linked to a regular title
        if not reviewed_title and title[TITLE_PARENT]:
            parent_title = SQLloadTitle(title[TITLE_PARENT])
            reviewed_title = SQLfindReviewedTitle(parent_title[TITLE_PUBID])
        if reviewed_title:
            trans_titles_dict = {reviewed_title: trans_titles}
            print '<b>Review of:</b> %s' % ISFDBLink('title.cgi', reviewed_title,
                                                     title[TITLE_TITLE], False, '', trans_titles_dict)
        else:
            print "<b>Review of:</b>", ISFDBMouseover(trans_titles, title[TITLE_TITLE], '')
        displayCommon(title, user)
        authors = SQLReviewBriefAuthorRecords(title_id)
        displayPersonLabel('Author', authors)
        displayPersons(authors)
    elif title[TITLE_TTYPE] == 'INTERVIEW':
        print "<b>Interview Title:</b>", ISFDBMouseover(trans_titles, title[TITLE_TITLE], '')
        displayCommon(title, user)
        authors = SQLInterviewBriefAuthorRecords(title_id)
        displayPersonLabel('Interviewee', authors)
        displayPersons(authors)
    else:
        print "<b>Title:</b>", ISFDBMouseover(trans_titles, title[TITLE_TITLE], '')
        displayCommon(title, user)
    ########################################
    # STEP 2 - Get the title's authors
    ########################################
    authors = SQLTitleBriefAuthorRecords(title_id)
    # The label for the credited people depends on the title type
    if title[TITLE_TTYPE] in ('ANTHOLOGY', 'EDITOR'):
        displayPersonLabel('Editor', authors)
    elif title[TITLE_TTYPE] == 'REVIEW':
        displayPersonLabel('Reviewer', authors)
    elif title[TITLE_TTYPE] == 'INTERVIEW':
        displayPersonLabel('Interviewer', authors)
    else:
        displayPersonLabel('Author', authors)
    displayPersons(authors)
    print '<br>'
    print '<b>Date:</b> ', convertDate(title[TITLE_YEAR], 1)
    # Variant information: link to the parent title, noting any differences
    if title[TITLE_PARENT]:
        parent_title = SQLloadTitle(title[TITLE_PARENT])
        if parent_title == []:
            print "<br>"
            print '<b>Variant Title ERROR:</b> Parent Title=%d' % title[TITLE_PARENT]
        else:
            print "<br>"
            label = 'Variant Title of'
            if title[TITLE_TTYPE] == 'COVERART' and parent_title[TITLE_TTYPE] == 'INTERIORART':
                label = '%s interior art' % label
            if parent_title[TITLE_TTYPE] == 'COVERART' and title[TITLE_TTYPE] == 'INTERIORART':
                label = '%s cover art for' % label
            print '<b>%s:</b> %s' % (label, ISFDBLink('title.cgi', title[TITLE_PARENT], parent_title[TITLE_TITLE]))
            # Show the parent's language/year only when they differ from this title's
            if parent_title[TITLE_LANGUAGE] and title[TITLE_LANGUAGE] != parent_title[TITLE_LANGUAGE]:
                print '[%s]' % LANGUAGES[int(parent_title[TITLE_LANGUAGE])]
            if title[TITLE_YEAR] != parent_title[TITLE_YEAR]:
                print '(%s)' % convertYear(parent_title[TITLE_YEAR][:4])
            vauthors = SQLTitleBriefAuthorRecords(parent_title[TITLE_PUBID])
            # Show the parent's authors only when they differ from this title's
            if set(authors) != set(vauthors):
                output = ' (by '
                counter = 0
                for vauthor in vauthors:
                    if counter:
                        output += ' <b>and</b> '
                    output += ISFDBLink('ea.cgi', vauthor[0], vauthor[1])
                    counter += 1
                output += ')'
                print output
            print ' [may list more publications, awards, reviews, votes and covers]'
    if title[TITLE_TTYPE]:
        print "<br>"
        print "<b>Type:</b>", title[TITLE_TTYPE]
        # Title-level flags displayed as bracketed annotations after the type
        if title[TITLE_JVN] == 'Yes':
            print ' [juvenile]'
        if title[TITLE_NVZ] == 'Yes':
            print ' [novelization]'
        if title[TITLE_NON_GENRE] == 'Yes':
            print ' [non-genre]'
        if title[TITLE_GRAPHIC] == 'Yes':
            print ' [graphic format]'
    if title[TITLE_STORYLEN]:
        print "<br>"
        print "<b>Length:</b>"
        print title[TITLE_STORYLEN]
    if title[TITLE_CONTENT]:
        print "<br>"
        print "<b>Content:</b>"
        print title[TITLE_CONTENT]
    # Series data: use this title's series, falling back to the parent's
    if title[TITLE_SERIES]:
        series = SQLget1Series(title[TITLE_SERIES])
        print '<br>'
        print '<b>Series:</b> %s' % ISFDBLink('pe.cgi', series[SERIES_PUBID], series[SERIES_NAME])
        if title[TITLE_SERIESNUM] is not None:
            print "<br>"
            output = '<b>Series Number:</b> %d' % title[TITLE_SERIESNUM]
            if title[TITLE_SERIESNUM_2] is not None:
                output += '.%s' % title[TITLE_SERIESNUM_2]
            print output
    elif title[TITLE_PARENT]:
        parent_title = SQLloadTitle(title[TITLE_PARENT])
        if parent_title == []:
            # Already generated an error message above
            pass
        else:
            if parent_title[TITLE_SERIES]:
                series = SQLget1Series(parent_title[TITLE_SERIES])
                print '<br>'
                print '<b>Series:</b> %s' % ISFDBLink('pe.cgi', series[SERIES_PUBID], series[SERIES_NAME])
                if parent_title[TITLE_SERIESNUM] is not None:
                    print "<br>"
                    output = '<b>Series Number:</b> %d' % parent_title[TITLE_SERIESNUM]
                    if parent_title[TITLE_SERIESNUM_2] is not None:
                        output += '.%s' % parent_title[TITLE_SERIESNUM_2]
                    print output
    # Webpages
    webpages = SQLloadTitleWebpages(int(title_id))
    PrintWebPages(webpages, '<br>')
    if title[TITLE_LANGUAGE]:
        print '<br><b>Language:</b> %s' % (LANGUAGES[int(title[TITLE_LANGUAGE])])
    # Notes/synopsis print their own breaks; only emit one if neither was shown
    br_required = 1
    if title[TITLE_NOTE]:
        note = SQLgetNotes(title[TITLE_NOTE])
        print FormatNote(note, "Note", 'short', title_id, 'Title')
        br_required = 0
    if title[TITLE_SYNOP]:
        note = SQLgetNotes(title[TITLE_SYNOP])
        print FormatNote(note, "Synopsis", 'short', title_id, 'Synopsis')
        br_required = 0
    if br_required:
        print '<br>'
    # Votes
    (vote_count, average_vote, composite_vote_count, composite_average_vote, user_vote) = SQLLoadVotes(title[TITLE_PUBID], titles, user.id)
    print '<b>User Rating:</b>'
    if composite_vote_count:
        if vote_count:
            if vote_count > 1:
                plural = 's'
            else:
                plural = ''
            print ' %2.2f (%d vote%s)' % (average_vote, vote_count, plural)
        elif composite_vote_count:
            print 'None.'
        # Composite numbers include votes cast on variants/translations
        if composite_vote_count != vote_count:
            if composite_vote_count > 1:
                plural = 's'
            else:
                plural = ''
            print ' <b>Including variants and translations:</b> %2.2f (%d vote%s)' % (composite_average_vote, composite_vote_count, plural)
        print '<b>Your vote:</b>'
        if user_vote:
            print user_vote
        else:
            print 'Not cast'
    else:
        print 'This title has no votes.'
    print ISFDBLink('edit/vote.cgi', title[TITLE_PUBID], '<b>VOTE</b>', False, 'class="inverted"')
    # Retrieve all tags for this title and its parent (if present)
    tags = SQLgetAllTitleTags(title[TITLE_PUBID], title[TITLE_PARENT], int(user.id))
    print '<br>'
    print '<b>Current Tags:</b>'
    if not tags:
        print 'None'
    else:
        first = 1
        output = ''
        for tag in tags:
            if first:
                output = '%s (%d)' % (ISFDBLink('tag.cgi', tag[0], tag[1]), tag[2])
                first = 0
            else:
                output += ', %s (%d)' % (ISFDBLink('tag.cgi', tag[0], tag[1]), tag[2])
        print output
        if SQLisUserModerator(user.id):
            print ISFDBLink('mod/tag_breakdown.cgi', title[TITLE_PUBID], 'View Tag Breakdown', False, 'class="inverted" ')
    # Only allow adding tags if the current title is not a variant of another one
    if not title[TITLE_PARENT]:
        if user.id:
            my_tags = SQLgetUserTags(title_id, user.id)
            print '<br>'
            print '<form method="post" action="%s:/%s/edit/addquicktag.cgi" name="quicktag">' % (PROTOCOL, HTFAKE)
            # We need a div here because "strict" HTML rules only allow block level elements in forms
            print '<div class="quicktag">'
            print '<b>Add quick tag:</b> '
            print '<select name="tag">'
            print '<option value="">select a value</option>'
            options = ['alternate history',
                       'fantasy',
                       'historical fantasy',
                       'horror',
                       'juvenile fantasy',
                       'juvenile sf',
                       'military sf',
                       'near future',
                       'parallel universe',
                       'paranormal romance',
                       'science fiction',
                       'space opera',
                       'steampunk',
                       'time travel',
                       'urban fantasy',
                       'vampires',
                       'werewolf',
                       'young-adult fantasy',
                       'young-adult horror',
                       'young-adult sf',
                       'zombies']
            # Create an all-lowercase version of "my tag" list
            my_tags_lower = []
            for my_tag in my_tags:
                my_tags_lower.append(my_tag.lower())
            for option in options:
                # Ignore any tags that this user has already used for this title
                if option.lower() in my_tags_lower:
                    continue
                print '<option>%s</option>' % (option)
            print '</select>'
            print '<input NAME="title_id" VALUE="%d" TYPE="HIDDEN">' % int(title_id)
            print '<input type="Submit" VALUE="Submit Tag">'
            print '%s ' % ISFDBLink('edit/edittags.cgi', title[TITLE_PUBID], '<b>or manage Tags</b>', False, 'class="inverted"')
            print '</div>'
            print '</form>'
        else:
            print '%s ' % ISFDBLink('edit/edittags.cgi', title[TITLE_PUBID], '<b>Add Tags</b>', False, 'class="inverted"')
    print '</div>'
    ########################################
    # STEP 3 - Get any variants
    ########################################
    if titles:
        headers = []
        variants = []
        translations = []
        serials = []
        translated_serials = []
        coverart = []
        translated_coverart = []
        interiorart = []
        translated_interiorart = []
        print '<div class="ContentBox">'
        print '<h3 class="contentheader">Other Titles</h3>'
        # Split the list of variants into four lists:
        #   variants
        #   translations
        #   serializations
        #   translated serializations
        # Each list is displayed in its own table column
        for variant in titles:
            # A variant counts as "same language" when either language is unknown
            if not variant[TITLE_LANGUAGE]:
                same_language = 1
            elif not title[TITLE_LANGUAGE]:
                same_language = 1
            elif variant[TITLE_LANGUAGE] == title[TITLE_LANGUAGE]:
                same_language = 1
            else:
                same_language = 0
            if variant[TITLE_TTYPE] == 'SERIAL':
                if same_language:
                    serials.append(variant)
                else:
                    translated_serials.append(variant)
            elif variant[TITLE_TTYPE] == 'COVERART' and title[TITLE_TTYPE] == 'INTERIORART':
                if same_language:
                    coverart.append(variant)
                else:
                    translated_coverart.append(variant)
            elif variant[TITLE_TTYPE] == 'INTERIORART' and title[TITLE_TTYPE] == 'COVERART':
                if same_language:
                    interiorart.append(variant)
                else:
                    translated_interiorart.append(variant)
            else :
                if same_language:
                    variants.append(variant)
                else:
                    translations.append(variant)
        if variants:
            headers.append('Variant Titles')
        if translations:
            headers.append('Translations')
        if serials:
            headers.append('Serializations')
        if translated_serials:
            headers.append('Translated Serializations')
        if coverart:
            headers.append('As Cover Art')
        if translated_coverart:
            headers.append('Translated as Cover Art')
        if interiorart:
            headers.append('As Interior Art')
        if translated_interiorart:
            headers.append('Translated as Interior Art')
        print '<table>'
        print '<tr class="table2">'
        for header in headers:
            print '<th>%s</th>' % header
        print '</tr>'
        print '<tr>'
        # Column order matches the order in which headers were appended above
        if variants:
            PrintOneVariantType(variants, title, authors, 0)
        if translations:
            PrintOneVariantType(translations, title, authors, 1)
        if serials:
            PrintOneVariantType(serials, title, authors, 0)
        if translated_serials:
            PrintOneVariantType(translated_serials, title, authors, 1)
        if coverart:
            PrintOneVariantType(coverart, title, authors, 0)
        if translated_coverart:
            PrintOneVariantType(translated_coverart, title, authors, 1)
        if interiorart:
            PrintOneVariantType(interiorart, title, authors, 0)
        if translated_interiorart:
            PrintOneVariantType(translated_interiorart, title, authors, 1)
        print '</tr>'
        print '</table>'
        print '</div>'
    ########################################
    # STEP 4 - Get the title's award data
    ########################################
    if not user.suppress_awards:
        awards_list = SQLTitleAwards(title_id)
        if awards_list:
            print '<div class="ContentBox">'
            print '<h3 class="contentheader">Awards</h3>'
            award = awards(db)
            award.PrintAwardTable(awards_list, 0)
            print '</div>'
    ########################################################
    # STEP 5 - Get the title's pub data and display all pubs
    ########################################################
    print '<div class="ContentBox">'
    print '<h3 class="contentheader">Publications</h3>'
    retrieval_function = SQLGetPubsByTitle
    # If there are variants and/or translations, let the user limit
    # the list of displayed pubs in different ways
    if titles:
        # Map each display option to (status line, link text, retrieval function)
        options = {}
        options[0] = ('Displaying all variants and translations',
                      'Display all variants and translations',
                      SQLGetPubsByTitle)
        options[1] = ('Not displaying translations',
                      'Do not display translations',
                      SQLGetPubsByTitleNoTranslations)
        options[2] = ('Not displaying variants or translations',
                      'Do not display variants or translations',
                      SQLGetPubsByTitleNoParent)
        output = options[variant_display][0]
        retrieval_function = options[variant_display][2]
        # Offer links for the display options other than the current one
        for option_number in sorted(options.keys()):
            if option_number != variant_display:
                output += ' %s ' % BULLET
                output += ISFDBLink('title.cgi',
                                    '%d+%d' % (int(title_id), option_number),
                                    options[option_number][1],
                                    False,
                                    '',
                                    {})
        print output
        print '<p>'
    pubs = retrieval_function(title_id)
    PrintPubsTable(pubs, "title", user)
    ###################################################################
    # STEP 6a - Display cover art of the pubs for COVERART records only
    ###################################################################
    if title[TITLE_TTYPE] == 'COVERART':
        # IDs of this title and all of its variants
        title_ids = [title_id]
        for one_title in titles:
            title_ids.append(one_title[TITLE_PUBID])
        for pub in pubs:
            # Skip pubs without a cover image
            if not pub[PUB_IMAGE]:
                continue
            # Skip pubs with INTERIORART variants of this cover art title
            covers = SQLPubCovers(pub[PUB_PUBID])
            eligible = 0
            for cover in covers:
                cover_id = cover[TITLE_PUBID]
                if cover_id in title_ids:
                    eligible = 1
            if eligible:
                print ISFDBLink("pl.cgi", pub[PUB_PUBID], '<img src="%s" alt="Coverart" class="scans">' % pub[PUB_IMAGE].split("|")[0])
    #####################################################################
    # STEP 6b - Display a link to the cover page for non-COVERART records
    #####################################################################
    elif pubs:
        for pub in pubs:
            if pub[PUB_IMAGE]:
                if user.covers_display:
                    # User preference: show every cover inline
                    print ISFDBLink("pl.cgi", pub[PUB_PUBID], '<img src="%s" alt="Coverart" class="scans">' % pub[PUB_IMAGE].split("|")[0])
                else:
                    # Otherwise show a single link to the covers page and stop
                    print ISFDBLink('titlecovers.cgi', title[TITLE_PUBID], '<b>View all covers for %s</b>' % title[TITLE_TITLE])
                    if user.id:
                        print ' (or change %s to always display covers on this page)' % ISFDBLink('mypreferences.cgi', '', 'User Preferences')
                    else:
                        print ' (logged in users can change User Preferences to always display covers on this page)'
                    break
    print '</div>'
    ########################################
    # STEP 7 - Get the title's reviews
    ########################################
    if user.suppress_reviews:
        pass
    else:
        reviews = SQLloadTitleReviews(title[TITLE_PUBID])
        if len(reviews):
            PrintReviews(reviews, title[TITLE_LANGUAGE])
    ###################################################################################
    # STEP 8 - Print bibliographic warnings only if this user's Preferences call for it
    ###################################################################################
    if user.id and not user.suppress_bibliographic_warnings and len(pubs):
        print '<div class="ContentBox">'
        print '<h3 class="contentheader">Bibliographic Warnings</h3>'
        print '<ul class="noindent">'
        nonefound = 1
        for pub in pubs:
            # Check to make sure that if the title is a collection, then the
            # pub is a collection (unless it is an omnibus)
            if title[TITLE_TTYPE] == 'COLLECTION':
                if (pub[PUB_CTYPE] != 'COLLECTION') and (pub[PUB_CTYPE] != 'OMNIBUS'):
                    print '<li> Type Mismatch (Pub=<i>%s</i>, should be <i>%s</i>): ' % (pub[PUB_CTYPE], title[TITLE_TTYPE])
                    print ISFDBLink("pl.cgi", pub[PUB_PUBID], pub[PUB_TITLE])
                    nonefound = 0
            # Check to make sure that if the title is an anthology, then the
            # pub is an anthology (unless it is an omnibus)
            if title[TITLE_TTYPE] == 'ANTHOLOGY':
                if (pub[PUB_CTYPE] != 'ANTHOLOGY') and (pub[PUB_CTYPE] != 'OMNIBUS'):
                    print '<li> Type Mismatch (Pub=<i>%s</i>, should be <i>%s</i>): ' % (pub[PUB_CTYPE], title[TITLE_TTYPE])
                    print ISFDBLink("pl.cgi", pub[PUB_PUBID], pub[PUB_TITLE])
                    nonefound = 0
            if pub[PUB_YEAR] == '0000-00-00':
                print '<li> Unknown Publication Date:'
                print '%s (%s)' % (ISFDBLink("pl.cgi", pub[PUB_PUBID], pub[PUB_TITLE]), convertDate(pub[PUB_YEAR], 1))
                nonefound = 0
            year_num = int(pub[PUB_YEAR][0:4])
            # If the book has an ISBN or a catalog ID or is an e-book, magazine, fanzine, then we are OK
            if (pub[PUB_ISBN]
                    or pub[PUB_CATALOG]
                    or pub[PUB_PTYPE] in ('ebook', 'digital audio download')
                    or pub[PUB_CTYPE] in ('MAGAZINE', 'FANZINE')):
                pass
            elif year_num > 1950:
                # Do not check for catalog ID/ISBN for hardcovers published prior to 1972
                if (year_num < 1972) and (pub[PUB_PTYPE] == 'hc'):
                    pass
                else:
                    print '<li> Missing ISBN/Catalog ID:'
                    print '%s (%s)' % (ISFDBLink("pl.cgi", pub[PUB_PUBID], pub[PUB_TITLE]), convertDate(pub[PUB_YEAR], 1))
                    nonefound = 0
            if pub[PUB_PRICE] or pub[PUB_PTYPE] == 'webzine':
                pass
            else:
                print '<li> Missing price:'
                print '%s (%s)' % (ISFDBLink("pl.cgi", pub[PUB_PUBID], pub[PUB_TITLE]), convertDate(pub[PUB_YEAR], 1))
                nonefound = 0
            # Page counts are not expected for electronic/audio formats
            if (pub[PUB_PAGES]
                    or (pub[PUB_PTYPE] in ('ebook', 'webzine'))
                    or ('audio' in pub[PUB_PTYPE])
                    or ('digital' in pub[PUB_PTYPE])):
                pass
            else:
                print '<li> Missing page count:'
                print '%s (%s)' % (ISFDBLink("pl.cgi", pub[PUB_PUBID], pub[PUB_TITLE]), convertDate(pub[PUB_YEAR], 1))
                nonefound = 0
            if pub[PUB_PTYPE]:
                if pub[PUB_PTYPE] == 'unknown':
                    print '<li> Unknown publication format:'
                    print '%s (%s)' % (ISFDBLink("pl.cgi", pub[PUB_PUBID], pub[PUB_TITLE]), convertDate(pub[PUB_YEAR], 1))
                    nonefound = 0
        if nonefound:
            print "<li> None."
        print "</ul>"
        print '</div>'
    PrintTrailer('title', title_id, title_id)
|
##############################################
#### Author: Hunter Gregal ####
##############################################
##To Do
##add geolocate by zip ...Google Maps API?
##Crack user ID hash scheme
import requests
import json
# Coordinates of the town to query -- presumably Burlington, VT given the
# values; confirm before reuse.
lat = "44.4758800"
lon = "-73.2120700"
# Fetch nearby messages. NOTE(review): userID looks like a hard-coded device
# hash (see "Crack user ID hash scheme" in the To Do above) -- verify it is
# still accepted by the API.
raw = requests.get("https://us-east-api.yikyakapi.net/api/getMessages?lat="+lat+"&long="+lon+"&userID=92C0B0D7EBFDC55AD108F88F67A47D34")
data = json.loads(raw.content)
# Print each message with its coordinates and a Google Maps link
for x in data["messages"]:
    print "MESSAGE: " + x["message"]
    print "COORDINATES: " + str(x["latitude"]) + "," + str(x["longitude"])
    print "MAP: " + "http://www.google.com/maps/place/"+ str(x["latitude"]) + "," + str(x["longitude"])
|
import os
import argparse
import numpy as np
from scipy.io import savemat, loadmat
from PIL import Image
from tqdm import tqdm
# Command-line configuration for building superpixel adjacency matrices.
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', default='../data/VOCSegmentation2012/VOCdevkit/VOC2012', help='root to the images')
parser.add_argument('--sp_folder', default='300SuperpixelSegmentation', help='sp folder')
parser.add_argument('--save_folder', default='300AdjacencyMatrix', help='Adjacency Matrix')
# FIX: type=int -- without it a command-line value arrives as str, and the
# later comparison `mat.max() > args.n_segments` raises TypeError in Python 3.
parser.add_argument('-n', '--n_segments', default=300, type=int, help='n_segments')
def form_adj(mat, max_label):
    """Build a boolean adjacency matrix between superpixel labels.

    :param mat: 2-D integer label map (one superpixel label per pixel)
    :param max_label: labels greater than this are clamped to ``max_label``
    :return: ``(max_label+1, max_label+1)`` boolean array where ``N[i, j]``
        is True iff labels ``i`` and ``j`` touch (8-connectivity). The
        diagonal and column 0 (the zero-padding value) are forced to False.
    """
    h, w = mat.shape
    # Zero-pad so every interior pixel has a full 3x3 neighborhood
    M = np.pad(mat, pad_width=((1, 1), (1, 1)))
    # FIX: use the builtin `bool` -- the `np.bool` alias was removed in NumPy 1.24
    N = np.zeros((max_label + 1, max_label + 1), dtype=bool)
    for row in range(1, h + 1):
        for col in range(1, w + 1):
            label = M[row, col]
            nbrs = M[row - 1:row + 2, col - 1:col + 2].flatten()
            # Clamp out-of-range labels so they remain valid indices
            if label > max_label:
                label = max_label
            nbrs[nbrs > max_label] = max_label
            N[label][nbrs] = True
    # A label is not its own neighbor, and label 0 is the padding value
    np.fill_diagonal(N, False)
    N[:, 0] = False
    return N
def main():
    """Convert every superpixel .mat label map into a 1-bit adjacency PNG."""
    args = parser.parse_args()
    args.sp_path = os.path.join(args.root_path, args.sp_folder)
    args.save_path = os.path.join(args.root_path, args.save_folder)
    if not os.path.exists(args.save_path):
        os.makedirs(args.save_path)
    print(args)

    # Track the largest label seen overall and how many files exceed n_segments
    max_segment = 0
    count = 0
    file_names = [f[:-4] for f in os.listdir(args.sp_path) if f.endswith('.mat')]
    for name in tqdm(file_names):
        label_map = loadmat(os.path.join(args.sp_path, name + '.mat'))['data']
        file_max = label_map.max()
        if file_max > max_segment:
            max_segment = file_max
        if file_max > args.n_segments:
            count += 1
        adjacency = form_adj(label_map, max_label=args.n_segments)
        # Save as a 1-bit image: pixel (i, j) encodes adjacency of labels i and j
        Image.fromarray(adjacency, mode='1').save(os.path.join(args.save_path, name + '.png'))
    print("max_segment: {}, count: {}".format(max_segment, count))


if __name__ == '__main__':
    main()
## Test
# image = Image.open(img1_path)
# segments = slic(img_as_float(image), n_segments = 1000, start_label=1)
# # show the output of SLIC
# plt.imshow(mark_boundaries(image, segments))
# plt.axis("off")
# plt.show()
# seg=torch.FloatTensor(segments).unsqueeze(0).unsqueeze(0)
# r=torch.nn.functional.interpolate(seg, size=500, mode='nearest').to(torch.int32)
# plt.imshow(mark_boundaries(image.resize((500,500)), r[0,0].numpy()))
# plt.axis("off")
# plt.show()
## Test
# image = Image.open(img1_path)
# segments = slic(img_as_float(image), n_segments = 1000, start_label=1)
# # show the output of SLIC
# plt.imshow(mark_boundaries(image, segments))
# plt.axis("off")
# plt.show()
# seg=torch.FloatTensor(segments).unsqueeze(0).unsqueeze(0)
# r=torch.nn.functional.interpolate(seg, size=500, mode='nearest').to(torch.int32)
# plt.imshow(mark_boundaries(image.resize((500,500)), r[0,0].numpy()))
# plt.axis("off")
# plt.show() |
from requests_html import HTMLSession
import json
class InstagramGrabber:
    """Scrape the public image URLs from an Instagram profile page."""

    def __init__(self, username):
        # username: Instagram handle without the leading '@'
        self.username = username

    def getLinks(self):
        """Return a list of og:image URLs for the profile's recent posts.

        Returns an empty list when the profile data does not have the
        expected shape (private profile, page-layout change, etc.).
        """
        session = HTMLSession()
        r = session.get('https://instagram.com/' + self.username)
        # NOTE(review): assumes the second <body> script holds
        # "window._sharedData = {...};" -- confirm against the current page layout
        l = r.html.find('body > script:nth-child(2)')[0].text
        # Strip the 21-character "window._sharedData = " prefix and trailing ";"
        json_str = l[21:]
        json_str = json_str[:-1]
        json_parsed = json.loads(json_str)
        shortcodes = []
        try:
            images = json_parsed['entry_data']['ProfilePage'][0]['graphql']['user']['edge_owner_to_timeline_media']['edges']
            for image in images:
                node = image['node']
                shortcode = node['shortcode']
                shortcodes.append(shortcode)
            links = []
            for sc in shortcodes:
                r = session.get('https://instagram.com/p/' + sc + '/?taken-by=' + self.username)
                img = r.html.find('head > meta[property="og:image"]')
                if len(img) > 0:
                    img = img[0]
                    links.append(img.attrs['content'])
            return links
        # FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. Keep the best-effort behavior but only for
        # ordinary errors.
        except Exception:
            return []
# This file is part of COFFEE
#
# COFFEE is Copyright (c) 2014, Imperial College London.
# Please see the AUTHORS file in the main source directory for
# a full list of copyright holders. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * The name of Imperial College London or that of other
# contributors may not be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTERS
# ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import, print_function, division
from six.moves import zip
from collections import Counter
from itertools import combinations
from operator import itemgetter
from .base import *
from .utils import *
from coffee.visitors import *
from .hoister import Hoister
from .expander import Expander
from .factorizer import Factorizer
from .logger import warn
class ExpressionRewriter(object):
"""Provide operations to re-write an expression:
* Loop-invariant code motion: find and hoist sub-expressions which are
invariant with respect to a loop
* Expansion: transform an expression ``(a + b)*c`` into ``(a*c + b*c)``
* Factorization: transform an expression ``a*b + a*c`` into ``a*(b+c)``"""
    def __init__(self, stmt, expr_info, header=None, hoisted=None):
        """Initialize the ExpressionRewriter.

        :param stmt: the node whose rvalue is the expression for rewriting
        :param expr_info: ``MetaExpr`` object describing the expression
        :param header: the kernel's top node
        :param hoisted: dictionary that tracks all hoisted expressions
        """
        self.stmt = stmt
        self.expr_info = expr_info
        # Default to a fresh Root when no kernel top node is supplied
        self.header = header or Root()
        # Tracker of hoisted statements; may be shared with other rewriters
        self.hoisted = hoisted if hoisted is not None else StmtTracker()
        # Engines implementing the individual rewrite operations
        self.codemotion = Hoister(self.stmt, self.expr_info, self.header, self.hoisted)
        self.expander = Expander(self.stmt)
        self.factorizer = Factorizer(self.stmt)
def licm(self, mode='normal', **kwargs):
"""Perform generalized loop-invariant code motion, a transformation
detailed in a paper available at:
http://dl.acm.org/citation.cfm?id=2687415
:param mode: drive code motion by specifying what subexpressions should
be hoisted and where.
* normal: (default) all subexpressions that depend on one loop at most
* aggressive: all subexpressions, depending on any number of loops.
This may require introducing N-dimensional temporaries.
* incremental: apply, in sequence, only_const, only_outlinear, and
one sweep for each linear dimension
* only_const: only all constant subexpressions
* only_linear: only all subexpressions depending on linear loops
* only_outlinear: only all subexpressions independent of linear loops
* reductions: all sub-expressions that are redundantly computed within
a reduction loop; if possible, pull the reduction loop out of
the nest.
:param kwargs:
* look_ahead: (default: False) should be set to True if only a projection
of the hoistable subexpressions is needed (i.e., hoisting not performed)
* max_sharing: (default: False) should be set to True if hoisting should be
avoided in case the same set of symbols appears in different hoistable
sub-expressions. By not hoisting, factorization opportunities are preserved
* iterative: (default: True) should be set to False if interested in
hoisting only the smallest subexpressions matching /mode/
* lda: an up-to-date loop dependence analysis, as returned by a call
to ``loops_analysis(node, 'dim'). By providing this information, loop
dependence analysis can be avoided, thus speeding up the transformation.
* global_cse: (default: False) search for common sub-expressions across
all previously hoisted terms. Note that no data dependency analysis is
performed, so this is at caller's risk.
* with_promotion: compute hoistable subexpressions within clone loops
even though this doesn't necessarily result in fewer operations.
Examples
========
1) With mode='normal': ::
for i
for j
for k
a[j][k] += (b[i][j] + c[i][j])*(d[i][k] + e[i][k])
Redundancies are spotted along both the i and j dimensions, resulting in: ::
for i
for k
ct1[k] = d[i][k] + e[i][k]
for j
ct2 = b[i][j] + c[i][j]
for k
a[j][k] += ct2*ct1[k]
2) With mode='reductions'.
Consider the following loop nest: ::
for i
for j
a[j] += b[j]*c[i]
By unrolling the loops, one clearly sees that: ::
a[0] += b[0]*c[0] + b[0]*c[1] + b[0]*c[2] + ...
a[1] += b[1]*c[0] + b[1]*c[1] + b[1]*c[2] + ...
Which is identical to: ::
ct = c[0] + c[1] + c[2] + ...
a[0] += b[0]*ct
a[1] += b[1]*ct
Thus, the original loop nest is simplified as: ::
for i
ct += c[i]
for j
a[j] += b[j]*ct
"""
dimension = self.expr_info.dimension
dims = set(self.expr_info.dims)
linear_dims = set(self.expr_info.linear_dims)
out_linear_dims = set(self.expr_info.out_linear_dims)
if kwargs.get('look_ahead'):
hoist = self.codemotion.extract
else:
hoist = self.codemotion.licm
if mode == 'normal':
should_extract = lambda d: d != dims
hoist(should_extract, **kwargs)
elif mode == 'reductions':
should_extract = lambda d: d != dims
# Expansion and reassociation may create hoistable reduction loops
candidates = self.expr_info.reduction_loops
if not candidates:
return self
candidate = candidates[-1]
if candidate.size == 1:
# Otherwise the operation count will just end up increasing
return
self.expand(mode='all')
lda = loops_analysis(self.header, value='dim')
non_candidates = {l.dim for l in candidates[:-1]}
self.reassociate(lambda i: not lda[i].intersection(non_candidates))
hoist(should_extract, with_promotion=True, lda=lda)
self.codemotion.trim(candidate)
elif mode == 'incremental':
lda = kwargs.get('lda') or loops_analysis(self.header, value='dim')
should_extract = lambda d: not (d and d.issubset(dims))
hoist(should_extract, lda=lda)
should_extract = lambda d: d.issubset(out_linear_dims)
hoist(should_extract, lda=lda)
for i in range(1, dimension):
should_extract = lambda d: len(d.intersection(linear_dims)) <= i
hoist(should_extract, lda=lda, **kwargs)
elif mode == 'only_const':
should_extract = lambda d: not (d and d.issubset(dims))
hoist(should_extract, **kwargs)
elif mode == 'only_outlinear':
should_extract = lambda d: d.issubset(out_linear_dims)
hoist(should_extract, **kwargs)
elif mode == 'only_linear':
should_extract = lambda d: not d.issubset(out_linear_dims) and d != linear_dims
hoist(should_extract, **kwargs)
elif mode == 'aggressive':
should_extract = lambda d: True
self.reassociate()
hoist(should_extract, with_promotion=True, **kwargs)
else:
warn('Skipping unknown licm strategy.')
return self
return self
def expand(self, mode='standard', **kwargs):
    """Expand expressions based on different rules. For example: ::

        (X[i] + Y[j])*F + ...

    can be expanded into: ::

        (X[i]*F + Y[j]*F) + ...

    The expanded term could also be lifted. For example, if we have: ::

        Y[j] = f(...)
        (X[i]*Y[j])*F + ...

    where ``Y`` was produced by code motion, expansion results in: ::

        Y[j] = f(...)*F
        (X[i]*Y[j]) + ...

    Reasons for expanding expressions include:

    * Exposing factorization opportunities
    * Exposing higher level operations (e.g., matrix multiplies)
    * Relieving register pressure

    :param mode: multiple expansion strategies are possible

        * mode == 'standard': expand along the loop dimension appearing most
          often in different symbols
        * mode == 'dimensions': expand along the loop dimensions provided in
          /kwargs['dimensions']/
        * mode == 'all': expand when symbols depend on at least one of the
          expression's dimensions
        * mode == 'linear': expand when symbols depend on the expression's
          linear loops.
        * mode == 'outlinear': expand when symbols are independent of the
          expression's linear loops.
    :param kwargs:
        * subexprs: an iterator of subexpressions rooted in /self.stmt/. If
          provided, expansion will be performed only within these trees,
          rather than within the whole expression.
        * lda: an up-to-date loop dependence analysis, as returned by a call
          to ``loops_analysis(node, 'symbol', 'dim')``. By providing this
          information, loop dependence analysis can be avoided, thus
          speeding up the transformation.
    :return: self, to allow chaining of rewrite steps (or None on an
        unknown mode).
    """
    if mode == 'standard':
        symbols = Find(Symbol).visit(self.stmt.rvalue)[Symbol]
        # The heuristics privileges linear dimensions
        dims = self.expr_info.out_linear_dims
        if not dims or self.expr_info.dimension >= 2:
            dims = self.expr_info.linear_dims
        # Get the dimension occurring most often
        occurrences = [tuple(r for r in s.rank if r in dims) for s in symbols]
        occurrences = [i for i in occurrences if i]
        if not occurrences:
            # No symbol depends on a candidate dimension: nothing to expand
            return self
        # Finally, establish the expansion dimension
        dimension = Counter(occurrences).most_common(1)[0][0]
        should_expand = lambda n: set(dimension).issubset(set(n.rank))
    elif mode == 'dimensions':
        dimensions = kwargs.get('dimensions', ())
        should_expand = lambda n: set(dimensions).issubset(set(n.rank))
    elif mode in ['all', 'linear', 'outlinear']:
        # These strategies need per-symbol loop dependences; reuse the
        # caller-provided analysis when available to avoid recomputation
        lda = kwargs.get('lda') or loops_analysis(self.expr_info.outermost_loop,
                                                  key='symbol', value='dim')
        if mode == 'all':
            should_expand = lambda n: lda.get(n.symbol) and \
                any(r in self.expr_info.dims for r in lda[n.symbol])
        elif mode == 'linear':
            should_expand = lambda n: lda.get(n.symbol) and \
                any(r in self.expr_info.linear_dims for r in lda[n.symbol])
        elif mode == 'outlinear':
            should_expand = lambda n: lda.get(n.symbol) and \
                not lda[n.symbol].issubset(set(self.expr_info.linear_dims))
    else:
        warn('Skipping unknown expansion strategy.')
        return
    # Delegate the actual AST transformation to the expander
    self.expander.expand(should_expand, **kwargs)
    return self
def factorize(self, mode='standard', **kwargs):
    """Factorize terms in the expression. For example: ::

        A[i]*B[j] + A[i]*C[j]

    becomes ::

        A[i]*(B[j] + C[j]).

    :param mode: multiple factorization strategies are possible. Note that
        different strategies may expose different code motion opportunities

        * mode == 'standard': factorize symbols along the dimension that appears
          most often in the expression.
        * mode == 'dimensions': factorize symbols along the loop dimensions provided
          in /kwargs['dimensions']/
        * mode == 'all': factorize symbols depending on at least one of the
          expression's dimensions.
        * mode == 'linear': factorize symbols depending on the expression's
          linear loops.
        * mode == 'outlinear': factorize symbols independent of the expression's
          linear loops.
        * mode == 'constants': factorize symbols independent of any loops enclosing
          the expression.
        * mode == 'adhoc': factorize only symbols in /kwargs['adhoc']/ (details below)
        * mode == 'heuristic': no global factorization rule is used; rather, within
          each Sum tree, factorize the symbols appearing most often in that tree
    :param kwargs:
        * subexprs: an iterator of subexpressions rooted in /self.stmt/. If
          provided, factorization will be performed only within these trees,
          rather than within the whole expression
        * adhoc: a list of symbols that can be factorized and, for each symbol,
          a list of symbols that can be grouped. For example, if we have
          ``kwargs['adhoc'] = [(A, [B, C]), (D, [E, F, G])]``, and the
          expression is ``A*B + D*E + A*C + A*F``, the result will be
          ``A*(B+C) + A*F + D*E``. If the A's list were empty, all of the
          three symbols B, C, and F would be factorized. Recall that this
          option is ignored unless ``mode == 'adhoc'``.
        * lda: an up-to-date loop dependence analysis, as returned by a call
          to ``loops_analysis(node, 'symbol', 'dim')``. By providing this
          information, loop dependence analysis can be avoided, thus
          speeding up the transformation.
    :return: self, to allow chaining of rewrite steps (or None on an
        unknown mode).
    """
    if mode == 'standard':
        symbols = Find(Symbol).visit(self.stmt.rvalue)[Symbol]
        # The heuristics privileges linear dimensions
        dims = self.expr_info.out_linear_dims
        if not dims or self.expr_info.dimension >= 2:
            dims = self.expr_info.linear_dims
        # Get the dimension occurring most often
        occurrences = [tuple(r for r in s.rank if r in dims) for s in symbols]
        occurrences = [i for i in occurrences if i]
        if not occurrences:
            # No symbol depends on a candidate dimension: nothing to factorize
            return self
        # Finally, establish the factorization dimension
        dimension = Counter(occurrences).most_common(1)[0][0]
        should_factorize = lambda n: set(dimension).issubset(set(n.rank))
    elif mode == 'dimensions':
        dimensions = kwargs.get('dimensions', ())
        should_factorize = lambda n: set(dimensions).issubset(set(n.rank))
    elif mode == 'adhoc':
        adhoc = kwargs.get('adhoc')
        if not adhoc:
            return self
        should_factorize = lambda n: n.urepr in adhoc
    elif mode == 'heuristic':
        # The factorizer makes per-Sum-tree decisions; no global predicate
        kwargs['heuristic'] = True
        should_factorize = lambda n: False
    elif mode in ['all', 'linear', 'outlinear', 'constants']:
        # These strategies need per-symbol loop dependences; reuse the
        # caller-provided analysis when available to avoid recomputation
        lda = kwargs.get('lda') or loops_analysis(self.expr_info.outermost_loop,
                                                  key='symbol', value='dim')
        if mode == 'all':
            should_factorize = lambda n: lda.get(n.symbol) and \
                any(r in self.expr_info.dims for r in lda[n.symbol])
        elif mode == 'linear':
            should_factorize = lambda n: lda.get(n.symbol) and \
                any(r in self.expr_info.linear_dims for r in lda[n.symbol])
        elif mode == 'outlinear':
            should_factorize = lambda n: lda.get(n.symbol) and \
                not lda[n.symbol].issubset(set(self.expr_info.linear_dims))
        elif mode == 'constants':
            should_factorize = lambda n: not lda.get(n.symbol)
    else:
        warn('Skipping unknown factorization strategy.')
        return
    # Perform the factorization
    self.factorizer.factorize(should_factorize, **kwargs)
    return self
def reassociate(self, reorder=None):
    """Reorder symbols in associative operations following a convention.
    By default, the convention is to order the symbols based on their rank.
    For example, the terms in the expression ::

        a*b[i]*c[i][j]*d

    are reordered as ::

        a*d*b[i]*c[i][j]

    This is achieved by reorganizing the AST of the expression.

    :param reorder: a callable used as the sort key for the symbols of each
        product; defaults to ordering by ``(rank, dim)``.
    :return: self, to allow chaining of rewrite steps.
    """
    def _reassociate(node, parent):
        # Recursive walk: only products get their factors reordered; other
        # operators are traversed transparently
        if isinstance(node, (Symbol, Div)):
            # Leaves (and divisions, whose operand order matters) are untouched
            return
        elif isinstance(node, (Sum, Sub, FunCall, Ternary)):
            for n in node.children:
                _reassociate(n, node)
        elif isinstance(node, Prod):
            children = explore_operator(node)
            # Reassociate symbols
            symbols = [n for n, p in children if isinstance(n, Symbol)]
            # Capture the other children and recur on them
            other_nodes = [(n, p) for n, p in children if not isinstance(n, Symbol)]
            for n, p in other_nodes:
                _reassociate(n, p)
            # Create the reassociated product and modify the original AST:
            # non-symbol factors keep their relative order, sorted symbols
            # are appended after them
            children = list(zip(*other_nodes))[0] if other_nodes else ()
            children += tuple(sorted(symbols, key=reorder))
            reassociated_node = ast_make_expr(Prod, children, balance=False)
            parent.children[parent.children.index(node)] = reassociated_node
        else:
            warn('Unexpected node %s while reassociating' % typ(node))

    reorder = reorder if reorder else lambda n: (n.rank, n.dim)
    _reassociate(self.stmt.rvalue, self.stmt)
    return self
def replacediv(self):
    """Replace divisions by a constant with multiplications.

    Divisions whose right operand is a numeric constant (or a digit-only
    string symbol) are rewritten as a product by the reciprocal; other
    divisions by a symbol are rewritten as ``left * (1.0 / right)``.

    :return: self, to allow chaining of rewrite steps.
    """
    divisions = Find(Div).visit(self.stmt.rvalue)[Div]
    to_replace = {}
    for i in divisions:
        if isinstance(i.right, Symbol):
            if isinstance(i.right.symbol, (int, float)):
                # Use a float literal so the reciprocal never truncates:
                # under Python 2 semantics `1 / 2` would yield 0. This also
                # matches the explicit float conversion in the branch below.
                to_replace[i] = Prod(i.left, 1.0 / i.right.symbol)
            elif isinstance(i.right.symbol, str) and i.right.symbol.isdigit():
                to_replace[i] = Prod(i.left, 1.0 / float(i.right.symbol))
            else:
                # Non-constant divisor: keep the division, but hoist it into
                # a reciprocal factor
                to_replace[i] = Prod(i.left, Div(1.0, i.right))
    ast_replace(self.stmt, to_replace, copy=True, mode='symbol')
    return self
def preevaluate(self):
    """Preevaluate subexpressions whose values are compile-time constants.
    In this process, reduction loops might be removed if the reduction itself
    could be pre-evaluated."""
    # Aliases
    stmt, expr_info = self.stmt, self.expr_info

    # Simplify reduction loops
    if not isinstance(stmt, (Incr, Decr, IMul, IDiv)):
        # Not a reduction expression, give up
        return
    expr_syms = Find(Symbol).visit(stmt.rvalue)[Symbol]
    reduction_loops = expr_info.out_linear_loops_info
    if any([not is_perfect_loop(l) for l, p in reduction_loops]):
        # Unsafe if not a perfect loop nest
        return
    # The following check is because it is unsafe to simplify if non-loop or
    # non-constant dimensions are present
    hoisted_stmts = self.hoisted.all_stmts
    hoisted_syms = [Find(Symbol).visit(h)[Symbol] for h in hoisted_stmts]
    hoisted_dims = [s.rank for s in flatten(hoisted_syms)]
    hoisted_dims = set([r for r in flatten(hoisted_dims) if not is_const_dim(r)])
    if any(d not in expr_info.dims for d in hoisted_dims):
        # Non-loop dimension or non-constant dimension found, e.g. A[i], with /i/
        # not being a loop iteration variable
        return
    for i, (l, p) in enumerate(reduction_loops):
        syms_dep = SymbolDependencies().visit(l, **SymbolDependencies.default_args)
        if not all([(tuple(syms_dep[s]) == expr_info.loops and s.dim == len(expr_info.loops))
                    for s in expr_syms if syms_dep[s]]):
            # A sufficient (although not necessary) condition for loop reduction to
            # be safe is that all symbols in the expression are either constants or
            # tensors assuming a distinct value in each point of the iteration space.
            # So if this condition fails, we give up
            return
        # At this point, tensors can be reduced along the reducible dimensions
        reducible_syms = [s for s in expr_syms if not s.is_const]
        # All involved symbols must result from hoisting
        if not all([s.symbol in self.hoisted for s in reducible_syms]):
            return
        # Replace hoisted assignments with reductions
        finder = Find(Assign, stop_when_found=True, with_parent=True)
        for hoisted_loop in self.hoisted.all_loops:
            for assign, parent in finder.visit(hoisted_loop)[Assign]:
                sym, expr = assign.children
                decl = self.hoisted[sym.symbol].decl
                if sym.symbol in [s.symbol for s in reducible_syms]:
                    # Turn the assignment into an increment, so the hoisted
                    # table accumulates over the (soon removed) reduction loop
                    parent.children[parent.children.index(assign)] = Incr(sym, expr)
                    sym.rank = self.expr_info.linear_dims
                    decl.sym.rank = decl.sym.rank[i+1:]
        # Remove the reduction loop
        p.children[p.children.index(l)] = l.body[0]
        # Update symbols' ranks
        for s in reducible_syms:
            s.rank = self.expr_info.linear_dims
        # Update expression metadata
        self.expr_info._loops_info.remove((l, p))

    # Precompute constant expressions
    decls = visit(self.header, info_items=['decls'])['decls']
    evaluator = Evaluate(decls, any(d.nonzero for s, d in decls.items()))
    for hoisted_loop in self.hoisted.all_loops:
        evals = evaluator.visit(hoisted_loop, **Evaluate.default_args)
        # First, find out identical tables
        mapper = defaultdict(list)
        for s, values in evals.items():
            mapper[str(values)].append(s)
        # Then, map identical tables to a single symbol
        for values, symbols in mapper.items():
            to_replace = {s: symbols[0] for s in symbols[1:]}
            ast_replace(self.stmt, to_replace, copy=True)
            # Clean up
            for s in symbols[1:]:
                s_decl = self.hoisted[s.symbol].decl
                self.header.children.remove(s_decl)
                self.hoisted.pop(s.symbol)
                evals.pop(s)
        # Finally, update the hoisted symbols
        for s, values in evals.items():
            hoisted = self.hoisted[s.symbol]
            # The table is now a compile-time constant initializer
            hoisted.decl.init = values
            hoisted.decl.qual = ['static', 'const']
            self.hoisted.pop(s.symbol)
            # Move all decls at the top of the kernel
            self.header.children.remove(hoisted.decl)
            self.header.children.insert(0, hoisted.decl)
        self.header.children.insert(0, FlatBlock("// Preevaluated tables"))
        # Clean up
        self.header.children.remove(hoisted_loop)
    return self
def sharing_graph_rewrite(self):
    """Rewrite the expression based on its sharing graph. Details in the
    paper:

        An algorithm for the optimization of finite element integration loops
        (Luporini et. al.)
    """
    linear_dims = self.expr_info.linear_dims
    other_dims = self.expr_info.out_linear_dims

    # Maximize visibility of linear symbols
    self.expand(mode='all')

    # Make sure that potential reductions are not hidden away
    lda = loops_analysis(self.header, value='dim')
    self.reassociate(lambda i: (not lda[i]) + lda[i].issubset(set(other_dims)))

    # Construct the sharing graph: one node per linear symbol, one edge per
    # co-occurrence of two linear symbols in a summand
    nodes, edges = [], []
    for i in summands(self.stmt.rvalue):
        symbols = [i] if isinstance(i, Symbol) else list(zip(*explore_operator(i)))[0]
        lsymbols = [s for s in symbols if any(d in lda[s] for d in linear_dims)]
        lsymbols = [s.urepr for s in lsymbols]
        nodes.extend([j for j in lsymbols if j not in nodes])
        edges.extend(combinations(lsymbols, r=2))
    sgraph = nx.Graph(edges)

    # Transform everything outside the sharing graph (pure linear, no ambiguity)
    isolated = [n for n in nodes if n not in sgraph.nodes()]
    for n in isolated:
        # NOTE(review): the comprehension variable shadows the loop variable,
        # so the same adhoc dict is built on every iteration — confirm intended
        self.factorize(mode='adhoc', adhoc={n: [] for n in nodes})
        self.licm('only_const').licm('only_outlinear')

    # Transform the expression based on the sharing graph
    nodes = [n for n in nodes if n in sgraph.nodes()]
    if not (nodes and all(sgraph.degree(n) > 0 for n in nodes)):
        # Degenerate graph: fall back to the local heuristic
        self.factorize(mode='heuristic')
        self.licm('only_const').licm('only_outlinear')
        # NOTE(review): returns None here, unlike the `return self` below
        return

    # Use short variable names otherwise Pulp might complain
    nodes_vars = {i: n for i, n in enumerate(nodes)}
    vars_nodes = {n: i for i, n in nodes_vars.items()}
    edges = [(vars_nodes[i], vars_nodes[j]) for i, j in edges]

    import pulp as ilp

    def setup():
        # Build the ILP: x[i] == 1 iff symbol i is chosen for factorization;
        # y[(i, j)] orients each sharing edge towards the factorized endpoint
        # ... declare variables
        x = ilp.LpVariable.dicts('x', nodes_vars.keys(), 0, 1, ilp.LpBinary)
        y = ilp.LpVariable.dicts('y',
                                 [(i, j) for i, j in edges] + [(j, i) for i, j in edges],
                                 0, 1, ilp.LpBinary)
        limits = defaultdict(int)
        for i, j in edges:
            limits[i] += 1
            limits[j] += 1

        # ... define the problem
        prob = ilp.LpProblem("Factorizer", ilp.LpMinimize)

        # ... define the constraints
        for i in nodes_vars:
            prob += ilp.lpSum(y[(i, j)] for j in nodes_vars if (i, j) in y) <= limits[i]*x[i]

        for i, j in edges:
            prob += y[(i, j)] + y[(j, i)] == 1

        # ... define the objective function (min number of factorizations)
        prob += ilp.lpSum(x[i] for i in nodes_vars)

        return x, prob

    # Solve the ILP problem to find out the minimal-cost factorization strategy
    x, prob = setup()
    prob.solve(ilp.GLPK(msg=0))

    # Also attempt to find another optimal factorization, but with
    # additional constraints on the reduction dimensions. This may help in
    # later rewrite steps
    if len(other_dims) > 1:
        z, prob = setup()
        for i, n in nodes_vars.items():
            if not set(n[1]).intersection(set(other_dims[:-1])):
                prob += z[i] == 0
        prob.solve(ilp.GLPK(msg=0))
        if ilp.LpStatus[prob.status] == 'Optimal':
            x = z

    # ... finally, apply the transformations. Observe that:
    # 1) the order (first /nodes/, than /other_nodes/) in which
    #    the factorizations are carried out is crucial
    # 2) sorting /nodes/ and /other_nodes/ locally ensures guarantees
    #    deterministic output code
    # 3) precedence is given to outer reduction loops; this maximises the
    #    impact of later transformations, while not affecting this pass
    # 4) with_promotion is set to true if there exist potential reductions
    #    to simplify
    nodes = [nodes_vars[n] for n, v in x.items() if v.value() == 1]
    other_nodes = [nodes_vars[n] for n, v in x.items() if nodes_vars[n] not in nodes]
    for n in sorted(nodes, key=itemgetter(1)) + sorted(other_nodes):
        self.factorize(mode='adhoc', adhoc={n: []})
        self.licm('incremental', with_promotion=len(other_dims) > 1)
    return self
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
from contextlib import contextmanager
import peewee
from flask import abort
from peewee import Model
import utils
from services import peewee_mysql
from utils.log import app_logger
class MySQLBaseModel(Model):
    """Base peewee model binding all subclasses to the shared MySQL database."""

    class Meta:
        # Every model inheriting from this base uses the peewee_mysql connection.
        database = peewee_mysql
@contextmanager
def sa_session_scope(session, commit=False):
    """Provide a transactional scope around a series of operations
    for sqlalchemy.

    :param session: the SQLAlchemy session to manage.
    :param commit: when True, commit after the block completes successfully.
    :raises: re-raises any exception from the block (or the commit itself)
        after rolling back and logging it.

    The session is always closed on exit.
    """
    try:
        yield session
        # The commit sits inside the try block on purpose: a failing commit
        # is rolled back, logged and re-raised like any other error.
        if commit:
            session.commit()
    except Exception as e:
        session.rollback()
        app_logger.error(e)
        raise
    finally:
        session.close()
def model2dict(model, pop=(), orm='peewee'):
    """Convert an ORM model instance into a plain dict.

    :param model: the model instance (peewee or sqlalchemy); falsy values
        yield None.
    :param pop: iterable of field names to drop from the result.
    :param orm: either 'peewee' (default) or 'sqlalchemy'.
    :return: dict mapping field name to value, with date/datetime values
        rendered as strings, or None when *model* is falsy.

    NOTE: the returned dict aliases the model's own data dict, so the
    replacements and pops below mutate the model instance as well.
    """
    if not model:
        return None
    data = model.__dict__
    if orm == 'sqlalchemy':
        # Drop SQLAlchemy's internal bookkeeping attribute.
        data.pop('_sa_instance_state', None)
    elif orm == 'peewee':
        # peewee renamed the per-instance field store between major versions.
        if peewee.__version__.startswith('3'):
            data = model.__data__
        elif peewee.__version__.startswith('2'):
            data = model._data
    # items() instead of iteritems(): dict.iteritems() does not exist on
    # Python 3 and raised AttributeError there. datetime.datetime is a
    # subclass of datetime.date, so the second check deliberately overrides
    # the first for full datetime values.
    for k, v in data.items():
        if isinstance(v, datetime.date):
            data[k] = utils.datetime2str(v, '%Y-%m-%d')
        if isinstance(v, datetime.datetime):
            data[k] = utils.datetime2str(v)
    for field in pop:
        data.pop(field, None)
    return data
def get_object_or_404(model, *expressions):
    """Fetch a single *model* row matching *expressions*, or abort the
    current Flask request with HTTP 404 when no row matches."""
    try:
        return model.get(*expressions)
    except model.DoesNotExist:
        abort(404)
|
from flask import Blueprint

# Blueprint for the "tran" feature.
tran = Blueprint('tran', __name__)

# Imported after the blueprint is created so the view modules' route
# decorators can register themselves against `tran` (intentional circular
# import pattern used by Flask blueprints).
from . import views
|
# Generated by Django 3.0.14 on 2021-05-19 17:22

from django.db import migrations


class Migration(migrations.Migration):
    """Remove the ldv_sales field from the modelyearreport model."""

    dependencies = [
        ('api', '0102_modelyearreportassessment_modelyearreportassessmentcomment_modelyearreportassessmentdescriptions'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='modelyearreport',
            name='ldv_sales',
        ),
    ]
|
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import OpaqueFunction
from launch_ros.actions import Node
def launch_setup(context, *args, **kwargs):
    """Create the bt_navigator node and a lifecycle manager driving it.

    Runs as an OpaqueFunction so the launch *context* is available at
    evaluation time; returns the list of actions to add to the launch
    description.
    """
    bt_example_dir = get_package_share_directory('nav2_bt_example')
    nav_params_file = os.path.join(bt_example_dir, 'config', 'params.yaml')
    # Nodes whose lifecycle transitions the manager will drive.
    lifecycle_nodes = ['bt_navigator']
    nav = Node(
        package='nav2_bt_navigator',
        executable='bt_navigator',
        name='bt_navigator',
        output='screen',
        parameters=[nav_params_file],)
    lfc = Node(
        package='nav2_lifecycle_manager',
        executable='lifecycle_manager',
        name='lifecycle_manager_navigation',
        output='screen',
        parameters=[{'use_sim_time': True},
                    {'autostart': True},
                    {'node_names': lifecycle_nodes}])
    return [nav, lfc]
def generate_launch_description():
    """Build the launch description.

    Node construction is deferred to launch_setup via an OpaqueFunction so
    it executes with a live launch context.
    """
    setup_action = OpaqueFunction(function=launch_setup)
    return LaunchDescription([setup_action])
|
from pngdata.core import encode, decode
|
import pytest
from django.core.files.base import ContentFile
try:
from wagtail.core.models import Page
except ImportError:
from wagtail.wagtailcore.models import Page
from wagtail_svgmap.models import ImageMap
from wagtail_svgmap.tests.utils import EXAMPLE2_SVG_DATA, IDS_IN_EXAMPLE2_SVG, IDS_IN_EXAMPLE_SVG
@pytest.mark.django_db
def test_id_caching(example_svg_upload):
    """A freshly created ImageMap caches the SVG's element ids and its size."""
    map = ImageMap.objects.create(svg=example_svg_upload)
    assert map.ids == IDS_IN_EXAMPLE_SVG
    assert map.size == (588, 588)
@pytest.mark.django_db
def test_image_replacing(example_svg_upload):
    """Replacing the SVG file refreshes the cached element ids."""
    map = ImageMap.objects.create(svg=example_svg_upload)
    assert map.ids == IDS_IN_EXAMPLE_SVG
    # Swap in the second example file and persist the change.
    map.svg.save('example2.svg', ContentFile(EXAMPLE2_SVG_DATA))
    map.save()
    map.refresh_from_db()
    assert map.ids == IDS_IN_EXAMPLE2_SVG
@pytest.mark.django_db
def test_image_replacing_with_region(example_svg_upload):
    """
    Test that replacing an image with a new one won't crash if the element IDs change.

    Refs https://github.com/City-of-Helsinki/wagtail-svgmap/issues/11 (#11)
    """
    map = ImageMap.objects.create(svg=example_svg_upload)
    map.regions.create(element_id='red', link_external='https://google.com/')
    # The replacement SVG has no 'red' element, so the region goes stale.
    map.svg.save('example2.svg', ContentFile(EXAMPLE2_SVG_DATA))
    assert 'https://google.com' not in map.rendered_svg  # can't be there as 'red' is not there
@pytest.mark.django_db
def test_rendering(root_page, example_svg_upload, dummy_wagtail_doc):
    """Rendered SVG links external URLs, wagtail pages and documents,
    with their targets, into the mapped regions."""
    page = Page(title="nnep", slug="nnep")
    page.set_url_path(root_page)
    root_page.add_child(instance=page)
    page.save()
    assert page.url
    map = ImageMap.objects.create(svg=example_svg_upload)
    # One region per supported link kind.
    map.regions.create(element_id='green', link_external='/foobar', target='_blank')
    map.regions.create(element_id='blue', link_page=page, target='_top')
    map.regions.create(element_id='red', link_document=dummy_wagtail_doc)
    svg = map.rendered_svg
    assert '/foobar' in svg
    assert '_blank' in svg
    assert 'nnep' in svg
    assert '_top' in svg
    assert ('documents/%s' % dummy_wagtail_doc.pk) in svg
@pytest.mark.django_db
def test_auto_recache(root_page, example_svg_upload):
    """Saving a linked page re-renders any ImageMap that references it."""
    page = Page(title="nnep", slug="nnep")
    page.set_url_path(root_page)
    root_page.add_child(instance=page)
    page.save()
    assert page.url
    map = ImageMap.objects.create(svg=example_svg_upload)
    map.regions.create(element_id='blue', link_page=page)
    map.recache_svg(save=True)
    assert 'nnep' in map.rendered_svg
    # Changing the slug changes the page URL embedded in the SVG.
    page.slug = 'ffflop'
    page.save()  # The `post_save` triggers will get called...
    assert 'ffflop' in ImageMap.objects.get(pk=map.pk).rendered_svg
|
from zeus import factories
from zeus.constants import Status
from zeus.tasks import process_artifact
def test_aggregates_upon_completion(mocker, default_job):
    """Processing a queued artifact marks it finished and hands it to the
    manager exactly once."""
    manager = mocker.Mock()
    artifact = factories.ArtifactFactory(job=default_job, queued=True)
    process_artifact(artifact_id=artifact.id, manager=manager)
    assert artifact.status == Status.finished
    manager.process.assert_called_once_with(artifact)
|
import logging
from django.core.management.base import BaseCommand
from handler.event_bot import event_bot
from events.settings import PROXY
logger = logging.getLogger(__name__)
class Command(BaseCommand):
    """Management command that starts the Telegram event bot with the given
    token; the proxy configuration comes from project settings."""

    def add_arguments(self, parser):
        # Required positional bot API token.
        parser.add_argument('token', type=str)

    def handle(self, *args, **options):
        token = options['token']
        # Blocks for the lifetime of the bot's polling loop.
        event_bot(token, PROXY)
|
"""
Unit tests for the Subject factory
"""
import os
from django.test import TestCase
from richie.apps.courses.factories import SubjectFactory
class SubjectFactoriesTestCase(TestCase):
    """
    Unit test suite to validate the behavior of the Subject factory
    """

    def test_factories_subject_logo(self):
        """
        The SubjectFactory should be able to generate a plugin with a realistic fake logo.
        """
        subject = SubjectFactory(fill_logo=True)

        # Check that the logo plugin was created as expected
        logo = subject.extended_object.placeholders.get(slot="logo")
        self.assertEqual(logo.cmsplugin_set.count(), 1)

        # The logo plugin should point to one of our fixtures images
        logo_plugin = logo.cmsplugin_set.get(plugin_type="PicturePlugin")
        self.assertIn(
            "logo",
            os.path.basename(logo_plugin.djangocms_picture_picture.picture.file.name),
        )

    def test_factories_subject_banner(self):
        """
        The SubjectFactory should be able to generate a plugin with a realistic fake banner.
        """
        subject = SubjectFactory(fill_banner=True)

        # Check that the banner plugin was created as expected
        banner = subject.extended_object.placeholders.get(slot="banner")
        self.assertEqual(banner.cmsplugin_set.count(), 1)

        # The banner plugin should point to one of our fixtures images
        banner_plugin = banner.cmsplugin_set.get(plugin_type="PicturePlugin")
        self.assertIn(
            "banner",
            os.path.basename(banner_plugin.djangocms_picture_picture.picture.file.name),
        )

    def test_factories_subject_description(self):
        """
        The SubjectFactory should be able to generate a plugin with a realistic fake description.
        """
        subject = SubjectFactory(fill_description=True)

        # Check that the description plugin was created as expected
        description = subject.extended_object.placeholders.get(slot="description")
        self.assertEqual(description.cmsplugin_set.count(), 1)

        # The description plugin should contain paragraphs
        description_plugin = description.cmsplugin_set.get(plugin_type="CKEditorPlugin")
        self.assertIn("<p>", description_plugin.simple_text_ckeditor_simpletext.body)
|
import asyncio
from scp import user
from scp.utils import parser
from pyrogram.types import (
Message,
)
# Name under which this plugin is registered by the loader.
__PLUGIN__ = 'create'

# Help text shown by the help system, rendered as a KanTeX document.
__DOC__ = str(
    user.md.KanTeXDocument(
        user.md.Section(
            'create',
            user.md.SubSection(
                'create a super group / channel / bot',
                user.md.Code('(*prefix)create {type} {title}'),
            ),
        ),
    ),
)

# Serializes the BotFather conversation across concurrent command invocations.
convLock = asyncio.Lock()
@user.on_message(user.sudo & user.command('create'))
async def create_handler(_, message: Message):
    """Handle the ``create`` command: create a supergroup, channel or bot.

    Usage: ``(prefix)create {group|channel|bot} {title}``. For groups and
    channels, replies with the new chat's id, type and invite link. For
    bots, talks to BotFather and replies with the generated token.
    """
    if len(message.command) == 1:
        # Bare command with no arguments: nothing to do.
        return await message.delete()
    arg = message.text.split(None, 1)[1].split(None, 1)
    if len(arg) == 1:
        return await message.reply(
            user.md.KanTeXDocument(
                user.md.Section(
                    'Error',
                    user.md.Italic('title is not given'),
                ),
            ),
            quote=True,
        )
    if arg[0].lower() not in ['group', 'channel', 'bot']:
        return await message.reply(
            user.md.KanTeXDocument(
                user.md.Section(
                    'Error',
                    user.md.Italic(
                        f'{arg[0].lower()}'
                        'is not in [\'group\', \'channel\', \'bot\']',
                    ),
                ),
            ),
            quote=True,
        )
    if arg[0].lower() == 'group':
        chat = await user.create_supergroup(title=arg[1])
    elif arg[0].lower() == 'channel':
        chat = await user.create_channel(title=arg[1])
    elif arg[0].lower() == 'bot':
        # Serialize the BotFather conversation so parallel invocations
        # cannot interleave their answers.
        async with convLock:
            await user.send_message('BotFather', '/newbot')
            await asyncio.sleep(0.5)
            await user.send_message('Botfather', arg[1])
            await asyncio.sleep(0.5)
            botName = arg[1].replace(' ', '_')
            ans = await user.ask('Botfather', botName)
            if not ans.text.startswith('Done!'):
                return await message.reply(
                    user.md.KanTeXDocument(
                        user.md.Section(
                            'Error',
                            user.md.Italic(
                                f'@{botName} is taken',
                            ),
                        ),
                    ),
                    quote=True,
                )
            validate, token = parser.checkToken(ans.text)
            if validate:
                return await message.reply(
                    user.md.KanTeXDocument(
                        user.md.Section(
                            'Generated Token',
                            user.md.KeyValueItem(
                                user.md.Code(
                                    '@' + botName,
                                ),
                                user.md.Code(token),
                            ),
                            user.md.KeyValueItem(
                                user.md.Bold('Link'),
                                f't.me/{botName}',
                            ),
                        ),
                    ),
                    quote=True,
                )
            # BUG FIX: previously, when no token could be parsed from
            # BotFather's reply, control fell through to the invite-link
            # code below with `chat` unbound, raising NameError. Report
            # the failure explicitly instead.
            return await message.reply(
                user.md.KanTeXDocument(
                    user.md.Section(
                        'Error',
                        user.md.Italic(
                            f'could not parse a token for @{botName}',
                        ),
                    ),
                ),
                quote=True,
            )
    # Group/channel path: report the new chat's details and invite link.
    link = await user.export_chat_invite_link(chat.id)
    await message.reply(
        user.md.KanTeXDocument(
            user.md.Section(
                'Create Chat',
                user.md.SubSection(
                    chat.title,
                    user.md.KeyValueItem(
                        user.md.Bold(
                            'id',
                        ), user.md.Code(chat.id),
                    ),
                    user.md.KeyValueItem(
                        user.md.Bold(
                            'type',
                        ), user.md.Code(chat.type),
                    ),
                    user.md.KeyValueItem(user.md.Bold('link'), link),
                ),
            ),
        ),
    )
|
#!/usr/bin/env python3
"""
Generate f32x4 floating-point arithmetic operation cases.
"""
from simd_f32x4_arith import Simdf32x4ArithmeticCase
from simd_float_op import FloatingPointArithOp
class F64ArithOp(FloatingPointArithOp):
    """Float arithmetic op specialization for f64: the largest finite
    double takes the place of the f32 maximum."""
    maximum = '0x1.fffffffffffffp+1023'
class Simdf64x2ArithmeticCase(Simdf32x4ArithmeticCase):
    """Generates the f64x2 arithmetic spec tests by specializing the f32x4
    generator to two 64-bit lanes."""

    LANE_LEN = 2
    LANE_TYPE = 'f64x2'

    floatOp = F64ArithOp()

    # Interesting f64 inputs; every magnitude appears with both signs.
    # BUG FIX: the original tuple listed the minimal positive subnormal
    # '0x0.0000000000001p-1022' twice; the second entry is now its negative
    # counterpart, restoring the +/- pairing used by every other value.
    FLOAT_NUMBERS = (
        '0x0p+0', '-0x0p+0', '0x1p-1022', '-0x1p-1022', '0x1p-1', '-0x1p-1', '0x1p+0', '-0x1p+0',
        '0x1.921fb54442d18p+2', '-0x1.921fb54442d18p+2', '0x1.fffffffffffffp+1023', '-0x1.fffffffffffffp+1023',
        '0x0.0000000000001p-1022', '-0x0.0000000000001p-1022', 'inf', '-inf'
    )

    # Decimal and hexadecimal float literal spellings exercised verbatim.
    LITERAL_NUMBERS = ('0123456789', '0123456789e019', '0123456789e+019', '0123456789e-019',
                       '0123456789.', '0123456789.e019', '0123456789.e+019', '0123456789.e-019',
                       '0123456789.0123456789', '0123456789.0123456789e019',
                       '0123456789.0123456789e+019', '0123456789.0123456789e-019',
                       '0x0123456789ABCDEFabcdef', '0x0123456789ABCDEFabcdefp019',
                       '0x0123456789ABCDEFabcdefp+019', '0x0123456789ABCDEFabcdefp-019',
                       '0x0123456789ABCDEFabcdef.', '0x0123456789ABCDEFabcdef.p019',
                       '0x0123456789ABCDEFabcdef.p+019', '0x0123456789ABCDEFabcdef.p-019',
                       '0x0123456789ABCDEFabcdef.0123456789ABCDEFabcdef',
                       '0x0123456789ABCDEFabcdef.0123456789ABCDEFabcdefp019',
                       '0x0123456789ABCDEFabcdef.0123456789ABCDEFabcdefp+019',
                       '0x0123456789ABCDEFabcdef.0123456789ABCDEFabcdefp-019'
                       )

    # NaN spellings, including f64-width payload NaNs.
    NAN_NUMBERS = ('nan', '-nan', 'nan:0x4000000000000', '-nan:0x4000000000000')

    @staticmethod
    def v128_const(lane, value):
        """Return a v128.const s-expression with *value* splatted into both lanes."""
        return '(v128.const {lane_type} {value})'.format(lane_type=lane, value=' '.join([str(value)] * 2))

    @property
    def combine_ternary_arith_test_data(self):
        """Operand triples plus expected result for chained two-op tests."""
        return {
            'add-sub': [
                ['1.125'] * 2, ['0.25'] * 2, ['0.125'] * 2, ['1.0'] * 2
            ],
            'sub-add': [
                ['1.125'] * 2, ['0.25'] * 2, ['0.125'] * 2, ['1.25'] * 2
            ],
            'mul-add': [
                ['1.25'] * 2, ['0.25'] * 2, ['0.25'] * 2, ['0.375'] * 2
            ],
            'mul-sub': [
                ['1.125'] * 2, ['0.125'] * 2, ['0.25'] * 2, ['0.25'] * 2
            ],
            'div-add': [
                ['1.125'] * 2, ['0.125'] * 2, ['0.25'] * 2, ['5.0'] * 2
            ],
            'div-sub': [
                ['1.125'] * 2, ['0.125'] * 2, ['0.25'] * 2, ['4.0'] * 2
            ],
            'mul-div': [
                ['1.125'] * 2, ['0.125'] * 2, ['0.25'] * 2, ['2.25'] * 2
            ],
            'div-mul': [
                ['1.125'] * 2, ['4'] * 2, ['0.25'] * 2, ['18.0'] * 2
            ]
        }

    @property
    def combine_binary_arith_test_data(self):
        """Operand pairs plus expected result for binary-op + unary-op tests."""
        return {
            'add-neg': [
                ['1.125'] * 2, ['0.125'] * 2, ['-1.0'] * 2
            ],
            'sub-neg': [
                ['1.125'] * 2, ['0.125'] * 2, ['-1.25'] * 2
            ],
            'mul-neg': [
                ['1.5'] * 2, ['0.25'] * 2, ['-0.375'] * 2
            ],
            'div-neg': [
                ['1.5'] * 2, ['0.25'] * 2, ['-6'] * 2
            ],
            'add-sqrt': [
                ['2.25'] * 2, ['0.25'] * 2, ['1.75'] * 2
            ],
            'sub-sqrt': [
                ['2.25'] * 2, ['0.25'] * 2, ['1.25'] * 2
            ],
            'mul-sqrt': [
                ['2.25'] * 2, ['0.25'] * 2, ['0.375'] * 2
            ],
            'div-sqrt': [
                ['2.25'] * 2, ['0.25'] * 2, ['6'] * 2
            ]
        }

    def get_invalid_cases(self):
        """Reuse the f32x4 invalid cases, rewritten for 64-bit lanes."""
        return super().get_invalid_cases().replace('32', '64')

    @property
    def mixed_sqrt_nan_test_data(self):
        """Per-test operand rows; the last row is the expected result."""
        return {
            'neg_canon': [
                ('nan', '1.0'), ('nan:canonical', '-1.0'),
            ],
            'sqrt_canon': [
                ('4.0', '-nan'), ('2.0', 'nan:canonical'),
            ],
            'add_arith': [
                ('nan:0x8000000000000', '1.0'), ('nan', '1.0'),
                ('nan:arithmetic', '2.0'),
            ],
            'sub_arith': [
                ('1.0', '-1.0'), ('-nan', '1.0'), ('nan', '-2.0'),
            ],
            'mul_mixed': [
                ('nan:0x8000000000000', '1.0'), ('2.0', 'nan'),
                ('nan:arithmetic', 'nan:canonical')
            ],
            'div_mixed': [
                ('nan', '1.0'), ('2.0', '-nan:0x8000000000000'),
                ('nan:canonical', 'nan:arithmetic')
            ]
        }

    def mixed_nan_test(self, cases):
        """Mixed f64x2 tests when only expects NaNs in a subset of lanes."""
        mixed_cases = [
            '\n;; Mixed f64x2 tests when some lanes are NaNs', '(module']
        # Emit one exported function per test case inside a single module.
        for test_type, test_data in sorted(self.mixed_sqrt_nan_test_data.items()):
            op = test_type.split('_')[0]
            if op in self.UNARY_OPS:
                mixed_cases.extend([
                    ' (func (export "{lane}_{t}") (result v128)'.format(lane=self.LANE_TYPE, t=test_type),
                    ' ({lane}.{op} (v128.const {lane} {param})))'.format(
                        lane=self.LANE_TYPE, op=op, param=' '.join(test_data[0]))])
            if op in self.BINARY_OPS:
                mixed_cases.extend([
                    ' (func (export "{lane}_{t}") (result v128)'.format(lane=self.LANE_TYPE, t=test_type),
                    ' ({lane}.{op} (v128.const {lane} {param1}) (v128.const {lane} {param2})))'.format(
                        lane=self.LANE_TYPE, op=op,
                        param1=' '.join(test_data[0]),
                        param2=' '.join(test_data[1]))])
        mixed_cases.append(')\n')
        # Then one assert_return per exported function, using the last data
        # row as the expected lanes.
        for test_type, test_data in sorted(self.mixed_sqrt_nan_test_data.items()):
            mixed_cases.append('(assert_return (invoke "{lane}_{t}") (v128.const {lane} {result}))'.format(
                lane=self.LANE_TYPE, t=test_type, result=' '.join(test_data[-1])
            ))
        cases.extend(mixed_cases)
def gen_test_cases():
    """Instantiate the f64x2 generator and emit its spec test file."""
    simd_f64x2_arith = Simdf64x2ArithmeticCase()
    simd_f64x2_arith.gen_test_cases()


if __name__ == '__main__':
    gen_test_cases()
class Metric(object):
    """Abstract base for all metric implementations.

    Subclasses must override both methods; the base versions only raise
    NotImplementedError.
    """

    def calculate(self, node):
        """Compute this metric for the given node.

        The result may be a number, a string, or any other measurable
        value describing the code.
        """
        message = '{}.calculate'.format(type(self).__name__)
        raise NotImplementedError(message)

    def get_metric_name(self):
        """Return the full, human-readable name of this metric."""
        message = '{}.get_metric_name'.format(type(self).__name__)
        raise NotImplementedError(message)
|
"""
ccextractor-web | TestUpload.py
Author : Saurabh Shrivastava
Email : saurabh.shrivastava54+ccextractorweb[at]gmail.com
Link : https://github.com/saurabhshri
"""
import unittest
from run import app, createConfig
class TestUpload(unittest.TestCase):
    """Access-control smoke tests for the dashboard routes."""

    def setUp(self):
        # Make sure the Flask app is configured before each test runs.
        createConfig()

    def test_if_without_login_redirected_to_login_page(self):
        """An anonymous request to /dashboard must redirect (302) to the login page."""
        response = app.test_client().get('/dashboard')
        self.assertEqual(response.status_code, 302)
        self.assertIn(b'<a href="/login?next=mod_dashboard.dashboard">/login?next=mod_dashboard.dashboard</a>', response.data)
|
import argparse
import sys
from typing import Sequence
from exabel_data_sdk import ExabelClient
from exabel_data_sdk.client.api.bulk_insert import BulkInsertFailedError
from exabel_data_sdk.client.api.data_classes.entity import Entity
from exabel_data_sdk.scripts.csv_script import CsvScript
from exabel_data_sdk.util.resource_name_normalization import normalize_resource_name
class LoadEntitiesFromCsv(CsvScript):
    """
    Processes a CSV file with entities and creates them in the Exabel API.
    The CSV file should have a header line specifying the column names.
    The command line argument --name-column specifies the column from which to read
    the entity names. The entity names are automatically normalized to create a valid
    resource name for the entity.
    For instance, if the entity type is "brand", and the namespace is "acme", and the entity name
    is "Spring & Vine", the generated resource name will be:
    entityTypes/brand/entities/acme.Spring_Vine
    Optionally, another column may specify a display name for the entity, and another column
    may give a description for the entity.
    """
    def __init__(self, argv: Sequence[str], description: str):
        """Register the command line arguments specific to entity loading."""
        super().__init__(argv, description)
        self.parser.add_argument(
            "--entity-type",
            required=False,
            type=str,
            help="The type of the entities to be loaded. Must already exist in the data model. "
            "If not specified, defaults to the same value as the name_column argument.",
        )
        self.parser.add_argument(
            "--name-column",
            required=False,
            type=str,
            help="The column name for the entity name. "
            "If not specified, defaults to the first column in the file.",
        )
        self.parser.add_argument(
            "--display-name-column",
            required=False,
            type=str,
            help="The column name for the entity's display name. "
            "If not specified, uses the entity name",
        )
        self.parser.add_argument(
            "--description-column",
            required=False,
            type=str,
            help="The column name for the entity description. "
            "If not specified, no description is provided.",
        )
    def run_script(self, client: ExabelClient, args: argparse.Namespace) -> None:
        """Read the CSV file and create the entities through the Exabel API."""
        if args.dry_run:
            print("Running dry-run...")
        print("Loading entities from", args.filename)
        # Fall back to column index 0 when --name-column is absent; the real
        # column name is resolved after the file has been read (below).
        name_col_ref = args.name_column or 0
        # These columns must be read as strings, not inferred numeric types.
        string_columns = {
            name_col_ref,
            args.display_name_column or name_col_ref,
        }
        if args.description_column:
            string_columns.add(args.description_column)
        entities_df = self.read_csv(args, string_columns=string_columns)
        name_col = args.name_column or entities_df.columns[0]
        display_name_col = args.display_name_column or name_col
        description_col = args.description_column
        # --entity-type defaults to the name column's name.
        entity_type_name = f"entityTypes/{args.entity_type or name_col}"
        entity_type = client.entity_api.get_entity_type(entity_type_name)
        if not entity_type:
            # Abort: nothing can be created under an unknown entity type.
            print("Failure: Did not find entity type", entity_type_name)
            print("Available entity types are:")
            print(client.entity_api.list_entity_types())
            sys.exit(1)
        entities = [
            Entity(
                name=f"{entity_type_name}/entities/{args.namespace}."
                f"{normalize_resource_name(row[name_col])}",
                display_name=row[display_name_col],
                description=row[description_col] if description_col else "",
            )
            for _, row in entities_df.iterrows()
        ]
        if args.dry_run:
            # Dry run: show what would be created, then stop.
            print("Loading", len(entities), "entities")
            print(entities)
            return
        try:
            client.entity_api.bulk_create_entities(entities, entity_type_name, threads=args.threads)
        except BulkInsertFailedError:
            # An error summary has already been printed.
            pass
if __name__ == "__main__":
    LoadEntitiesFromCsv(sys.argv, "Upload entities file.").run()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 3/22/18 4:23 PM
# @Author : Yuecheng Jing
# @Site : www.nanosparrow.com
# @File : ValidNumber.py
# @Software: PyCharm
class Solution(object):
    def isNumber(self, s):
        """
        Decide whether the string is a valid number using a table-driven DFA.
        :type s: str
        :rtype: bool
        """
        # DFA states. Numbering starts at 1; state 0 is the implicit start
        # state and owns the first row of the transition table.
        status_start_space = 1
        status_start_number_without_dot = 2
        status_start_number_with_dot = 3
        status_dot_after_number = 4
        status_dot_before_number = 5
        status_e = 6
        status_end_number_with_dot = 7
        status_end_number_with_e = 8
        status_end_space = 9
        status_symbol_before_e = 10
        status_symbol_after_e = 11
        status_not_number = 12
        # Flattened 12x5 transition table: each row lists the successor
        # state for the five input classes, in this column order:
        # input_space input_number input_dot input_e input_symbol
        transtionMatrix = [
            status_start_space, status_start_number_without_dot, status_dot_before_number, status_not_number, status_symbol_before_e, # status = 0
            status_start_space, status_start_number_without_dot, status_dot_before_number, status_not_number, status_symbol_before_e, # status_start_space
            status_end_space, status_start_number_without_dot, status_dot_after_number, status_e, status_not_number, # status_start_number_without_dot
            status_end_space, status_start_number_with_dot, status_not_number, status_e, status_not_number, # status_start_number_with_dot
            status_end_space, status_end_number_with_dot, status_not_number, status_e, status_not_number, # status_dot_after_number
            status_not_number, status_start_number_with_dot, status_not_number, status_not_number, status_not_number, # status_dot_before_number
            status_not_number, status_end_number_with_e, status_not_number, status_not_number, status_symbol_after_e, # status_e
            status_end_space, status_end_number_with_dot, status_not_number, status_e, status_not_number, # status_end_number_with_dot
            status_end_space, status_end_number_with_e, status_not_number, status_not_number, status_not_number, # status_end_number_with_e
            status_end_space, status_not_number, status_not_number, status_not_number, status_not_number, # status_end_space
            status_not_number, status_start_number_without_dot, status_dot_before_number, status_not_number, status_not_number, # status_symbol_before_e
            status_not_number, status_end_number_with_e, status_not_number, status_not_number, status_not_number, # status_symbol_after_e
        ]
        current_status = 0
        for c in s:
            input_type = self.inputType(c)
            if input_type == 5: # input_other: any foreign character is fatal
                return False
            # Row = current state, column = input class.
            current_status = transtionMatrix[current_status * 5 + input_type]
            if current_status == status_not_number:
                return False
        # Accept only states that represent a completed number (possibly
        # followed by trailing spaces).
        if current_status == status_start_number_without_dot or \
           current_status == status_start_number_with_dot or \
           current_status == status_end_number_with_dot or \
           current_status == status_end_number_with_e or \
           current_status == status_end_space or \
           current_status == status_dot_after_number:
            return True
        return False
    def inputType(self, char):
        """Classify one character into the DFA's six input classes (0-5)."""
        input_space = 0
        input_number = 1
        input_dot = 2
        input_e = 3
        input_symbol = 4
        input_other = 5
        if char == ' ':
            return input_space
        elif char == '.':
            return input_dot
        elif char == 'e':
            return input_e
        elif '0' <= char <= '9':
            return input_number
        elif char == '-' or char == '+':
            return input_symbol
        else:
            return input_other
|
from output.models.nist_data.list_pkg.duration.schema_instance.nistschema_sv_iv_list_duration_min_length_2_xsd.nistschema_sv_iv_list_duration_min_length_2 import NistschemaSvIvListDurationMinLength2
__all__ = [
"NistschemaSvIvListDurationMinLength2",
]
|
from __future__ import absolute_import
from .gp import GP
from .gp_classifier import GPClassifier
__all__ = ["GP", "GPClassifier"]
|
''' Testes do formatador '''
import unittest
from service.qry_options_builder import QueryOptionsBuilder
class OptionsBuilderTest(unittest.TestCase):
    ''' Tests that query options are built correctly from request args '''
    def test_no_categories(self):
        ''' Checks that the mandatory categories parameter is enforced '''
        self.assertRaises(ValueError, QueryOptionsBuilder.build_options, {})
    def test_full_args(self):
        ''' Checks that all parameters are parsed into the expected options '''
        # In "filtros", "\," escapes a literal comma inside a single token.
        r_args = {
            "categorias": 'a,b',
            "valor": 'c,d',
            "agregacao": 'e,f',
            "ordenacao": 'g,h',
            "filtros": r'eq-o-comma\,separated,and,eq-p-q',
            "pivot": 'i,j',
            "limit": '10',
            "offset": '11',
            "calcs": 'k,l',
            "partition": 'm,n',
            "theme": 't'
        }
        opts = QueryOptionsBuilder.build_options(r_args)
        self.assertEqual(
            opts,
            {
                "categorias": ['a', 'b'],
                "valor": ['c', 'd'],
                "agregacao": ['e', 'f'],
                "ordenacao": ['g', 'h'],
                "where": ['eq-o-comma,separated', 'and', 'eq-p-q'],
                "pivot": ['i', 'j'],
                "limit": '10',
                "offset": '11',
                "calcs": ['k', 'l'],
                "partition": ['m', 'n'],
                "theme": 't'
            }
        )
    def test_main_theme(self):
        ''' Checks that the theme option defaults to None when absent '''
        r_args = {
            "categorias": 'a,b',
            "valor": 'c,d',
            "agregacao": 'e,f',
            "ordenacao": 'g,h',
            "where": r'eq-o-comma\,separated,and,eq-p-q',
            "pivot": 'i,j',
            "limit": '10',
            "offset": '11',
            "calcs": 'k,l',
            "partition": 'm,n'
        }
        opts = QueryOptionsBuilder.build_options(r_args)
        self.assertEqual(opts.get('theme'), None)
    def test_chart_args(self):
        ''' Checks that chart parameters are built correctly (S/N flags
            become booleans) '''
        r_args = {
            "categorias": 'a,b',
            "valor": 'c,d',
            "agregacao": 'e,f',
            "ordenacao": 'g,h',
            "filtros": r'eq-o-comma\,separated,and,eq-p-q',
            "pivot": 'i,j',
            "limit": '10',
            "offset": '11',
            "calcs": 'k,l',
            "partition": 'm,n',
            "theme": 't',
            "as_image": 'N',
            "from_viewconf": 'S'
        }
        opts = QueryOptionsBuilder.build_options(r_args)
        self.assertEqual(
            opts,
            {
                "categorias": ['a', 'b'],
                "valor": ['c', 'd'],
                "agregacao": ['e', 'f'],
                "ordenacao": ['g', 'h'],
                "where": ['eq-o-comma,separated', 'and', 'eq-p-q'],
                "pivot": ['i', 'j'],
                "limit": '10',
                "offset": '11',
                "calcs": ['k', 'l'],
                "partition": ['m', 'n'],
                "theme": 't',
                "as_image": False,
                "from_viewconf": True
            }
        )
|
import json
import datetime
import requests
import pymysql
import pymongo
def insert_category(conn):
    """Insert the goods categories into the database."""
    # Category ids and their display names.
    categories = {
        66: "手机",
        327: "腕表配饰",
        65: "电脑办公",
        67: "相机单反",
        217: "平板数码",
        179: "运动户外",
        255: "家电家居",
        1000: "其他",
    }
    # The statement is loop-invariant, so build it once.
    sql = "insert into goods_category (category_id, category_name, create_time) values (%s, %s, %s)"
    with conn.cursor() as cursor:
        for cat_id, cat_name in categories.items():
            now = datetime.datetime.now()
            stamp = datetime.datetime.strftime(now, "%Y-%m-%d %H:%M:%S")
            cursor.execute(sql, (cat_id, cat_name, stamp))
            conn.commit()
def insert_brand(conn):
    """Fetch brand data for each category from the API and insert it into the DB.

    Brands are de-duplicated by brand_id; localized names fall back to the
    generic brand name, and unknown categories map to the catch-all id 1000.
    """
    brand_list = []
    category_id_list = [66, 327, 65, 67, 217, 179, 255]
    for category_id in category_id_list:
        try:
            brand_url = "https://channel.fenqile.com/product/query_filter_list.json?line_type=category_id_1&category_id={category_id}"
            res = requests.get(brand_url.format(category_id=category_id))
            # All brand dicts for this category.
            brands = json.loads(res.content.decode("utf-8")).get("brand_list")
            brand_list += brands
        # Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit
        # still propagate instead of being swallowed.
        except Exception:
            print("出错了:category_id:", category_id)
            print()
            continue
    sql = "insert into goods_brand values (%s, %s, %s, %s, %s, %s)"
    with conn.cursor() as cursor:
        # Ids already inserted, used to skip duplicates.
        brand_set = set()
        for brand in brand_list:
            brand_id = int(brand.get("brand_id"))
            print(brand_id)
            if brand_id not in brand_set:
                t = datetime.datetime.now()
                create_time = datetime.datetime.strftime(t, "%Y-%m-%d %H:%M:%S")
                brand_name = brand.get("brand_name")
                # Fall back to the generic name when a localized one is missing.
                brand_name_ch = brand.get("brand_name_ch") if brand.get("brand_name_ch") else brand_name
                brand_name_en = brand.get("brand_name_en") if brand.get("brand_name_en") else brand_name
                category_id = int(brand.get("category_id_1"))
                # Unknown categories go into the catch-all bucket 1000.
                category_id = category_id if category_id in category_id_list else 1000
                result = cursor.execute(sql, (brand_id, create_time, brand_name, brand_name_ch, brand_name_en, category_id))
                print(result)
                conn.commit()
                # Record the id so later duplicates are skipped.
                brand_set.add(brand_id)
def insert_goods(conn, GOODS):
    """Copy goods documents from the MongoDB collection into the MySQL goods table.

    The first 16 columns are read directly from each document's detail_data;
    the last five (gift_list, fe_params, slider_imgs, detail_imgs,
    create_time) need per-field conversion.
    """
    # Column keys in table order (the goods id column is auto-generated).
    kws = ("product_name", "category_id_1", "brand_id", "product_desc",
           "short_product_name", "sku_key_1", "sku_key_2", "sku_key_3", "product_flag",
           "min_firstpay", "is_product_up_down", "real_amount", "mart_amount", "fq_num",
           "product_info", "delivery_time", "gift_list", "fe_params", "slider_imgs",
           "detail_imgs", "create_time")
    sql = "insert into goods (good_name,category_id,brand_id,product_name,short_product_name," \
          "sku_key_1,sku_key_2,sku_key_3,product_flag,min_firstpay,is_product_up_down,real_amount," \
          "mart_amount,fq_num,product_info,delivery_time,gift_list,fe_params,slider_imgs,detail_imgs," \
          "create_time) values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
    # Hoisted out of the per-good loop: previously the two SELECTs ran once
    # per good, costing two extra round trips for every document.
    with conn.cursor() as cursor:
        cursor.execute("select brand_id from goods_brand")
        all_brand_ids = [brand_id[0] for brand_id in cursor.fetchall()]
        cursor.execute("select category_id from goods_category")
        all_category_ids = [category_id[0] for category_id in cursor.fetchall()]
    # Fetch the documents from MongoDB.
    goods = GOODS.find()
    for good in goods:
        try:
            data = []
            for kw in kws[:-5]:
                data.append(good["detail_data"].get(kw))
            # Flatten the last gift entry into a space-separated string.
            gift_list = " ".join([str(s) for s in good["detail_data"].get("gift_list")[-1].values()])
            data.append(gift_list)
            data.append(json.dumps(good["detail_data"].get("fe_params")))
            data.append("||".join(good["slider_imgs"]))
            data.append("||".join(good["detail_imgs"]))
            t = datetime.datetime.now()
            data.append(datetime.datetime.strftime(t, "%Y-%m-%d %H:%M:%S"))
            # Default missing ids, then remap unknown ids to the catch-all
            # category (1000) / brand (10000) rows.
            data[1] = data[1] if data[1] else 1000
            data[2] = data[2] if data[2] else 10000
            data[1] = 1000 if int(data[1]) not in all_category_ids else int(data[1])
            data[2] = 10000 if int(data[2]) not in all_brand_ids else int(data[2])
            with conn.cursor() as cursor:
                cursor.execute(sql, tuple(data))
            conn.commit()
        except Exception as e:
            # Best-effort load: log the failed document and keep going.
            print(e)
            continue
def main():
    """Entry point: open the MySQL/MongoDB connections and load the goods."""
    # MySQL connection.
    # NOTE(review): credentials are hard-coded — move to config/env vars.
    conn = pymysql.connect(host="127.0.0.1", port=3306, user="root", password="123456",
                           db="test", charset="utf8", autocommit=False)
    # One-off category load (currently disabled):
    # insert_category(conn)
    # One-off brand load (currently disabled):
    # insert_brand(conn)
    # Load the goods documents from MongoDB.
    CONN = pymongo.MongoClient(host='10.7.152.75', port=27017)
    GOODS = CONN["fenqile"]["goods"]
    insert_goods(conn, GOODS)
    conn.close()
if __name__ == "__main__":
    main()
|
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
#
from asyncio import sleep
from pylast import User, WSError
from re import sub
from urllib import parse
from os import environ
from sys import setrecursionlimit
from telethon.errors import AboutTooLongError
from telethon.tl.functions.account import UpdateProfileRequest
from telethon.tl.functions.users import GetFullUserRequest
from telethon.tl.types import User as Userbot
from telethon.errors.rpcerrorlist import FloodWaitError
from userbot import CMD_HELP, BOTLOG, BOTLOG_CHATID, DEFAULT_BIO, BIO_PREFIX, lastfm, LASTFM_USERNAME, bot
from userbot.events import register
# =================== CONSTANT ===================
# User-visible status messages (Telegram monospace markdown).
LFM_BIO_ENABLED = "```last.fm current music to bio is now enabled.```"
LFM_BIO_DISABLED = "```last.fm current music to bio is now disabled. Bio reverted to default.```"
LFM_BIO_RUNNING = "```last.fm current music to bio is already running.```"
LFM_BIO_ERR = "```No option specified.```"
LFM_LOG_ENABLED = "```last.fm logging to bot log is now enabled.```"
LFM_LOG_DISABLED = "```last.fm logging to bot log is now disabled.```"
LFM_LOG_ERR = "```No option specified.```"
ERROR_MSG = "```last.fm module halted, got an unexpected error.```"
# Caches of the most recent scrobble and of our own user id (0 = unset).
ARTIST = 0
SONG = 0
USER_ID = 0
# Optional text prepended to the generated bio.
if BIO_PREFIX:
    BIOPREFIX = BIO_PREFIX
else:
    BIOPREFIX = None
LASTFMCHECK = False  # True while the bio-updater loop should keep running
RUNNING = False      # True while the loop body is actively executing
LastLog = False      # True when bio changes are mirrored to the bot-log chat
# ================================================
@register(outgoing=True, pattern="^.lastfm$")
async def last_fm(lastFM):
    """ For .lastfm command, fetch scrobble data from last.fm and edit the
    message with the current track (or the three most recent scrobbles). """
    await lastFM.edit("Processing...")
    preview = None
    playing = User(LASTFM_USERNAME, lastfm).get_now_playing()
    username = f"https://www.last.fm/user/{LASTFM_USERNAME}"
    if playing is not None:
        # Currently scrobbling: include cover art when available.
        try:
            image = User(LASTFM_USERNAME,
                         lastfm).get_now_playing().get_cover_image()
        except IndexError:
            # No cover art for this track.
            image = None
            pass
        tags = await gettags(isNowPlaying=True, playing=playing)
        # Turn the track title into a YouTube search link.
        rectrack = parse.quote_plus(f"{playing}")
        rectrack = sub("^", "https://www.youtube.com/results?search_query=",
                       rectrack)
        if image:
            output = f"[]({image})@KensurBoiii is now listening to:\n\n• [{playing}]({rectrack})\n`{tags}`\n\nCheck him out [here]({username}) "
            preview = True
        else:
            output = f"@KensurBoiii is now listening to:\n\n• [{playing}]({rectrack})\n`{tags}`\n\nCheck him out [here]({username}) "
    else:
        # Nothing playing: list the three most recent scrobbles instead.
        recent = User(LASTFM_USERNAME, lastfm).get_recent_tracks(limit=3)
        playing = User(LASTFM_USERNAME, lastfm).get_now_playing()
        output = f"@KensurBoiii was last listening to:\n\n"
        for i, track in enumerate(recent):
            print(i)
            printable = await artist_and_song(track)
            tags = await gettags(track)
            rectrack = parse.quote_plus(str(printable))
            rectrack = sub("^",
                           "https://www.youtube.com/results?search_query=",
                           rectrack)
            output += f"• [{printable}]({rectrack})\n"
            if tags:
                output += f"`{tags}`\n\n"
    # Enable the link preview only when cover art was attached.
    if preview is not None:
        await lastFM.edit(f"{output}", parse_mode='md', link_preview=True)
    else:
        await lastFM.edit(f"{output}", parse_mode='md')
async def gettags(track=None, isNowPlaying=None, playing=None):
    """Build a hashtag string from the top five last.fm tags of a track.

    Falls back to the artist's top tags when the track itself has none.
    Words inside a multi-word tag stay joined by underscores, while tags
    are separated from each other by spaces.
    """
    source = playing if isNowPlaying else track.track
    top = source.get_top_tags()
    if not top:
        top = source.artist.get_top_tags()
    joined = "".join(" #" + str(t.item) for t in top[:5])
    # Drop the single leading separator space.
    if joined.startswith(" "):
        joined = joined[1:]
    # Underscore every space, then restore the inter-tag separators.
    joined = joined.replace(" ", "_").replace("_#", " #")
    return joined
async def artist_and_song(track):
    """Render a played-track record as its printable track string."""
    return str(track.track)
async def get_curr_track(lfmbio):
    """Background loop: mirror the current last.fm track into the profile bio.

    Runs while LASTFMCHECK is set; polls last.fm roughly every 2 seconds and
    restores DEFAULT_BIO when nothing is playing.
    """
    global ARTIST
    global SONG
    global LASTFMCHECK
    global RUNNING
    global USER_ID
    oldartist = ""
    oldsong = ""
    while LASTFMCHECK:
        try:
            # Resolve and cache our own user id on the first iteration.
            if USER_ID == 0:
                USER_ID = (await lfmbio.client.get_me()).id
            user_info = await bot(GetFullUserRequest(USER_ID))
            RUNNING = True
            playing = User(LASTFM_USERNAME, lastfm).get_now_playing()
            # When nothing is playing, playing is None and get_title()
            # raises AttributeError, handled below by resetting the bio.
            SONG = playing.get_title()
            ARTIST = playing.get_artist()
            # The previous track is persisted in the process environment so
            # the bio is only rewritten when the track actually changes.
            oldsong = environ.get("oldsong", None)
            oldartist = environ.get("oldartist", None)
            if playing is not None and SONG != oldsong and ARTIST != oldartist:
                environ["oldsong"] = str(SONG)
                environ["oldartist"] = str(ARTIST)
                # NOTE(review): lfmbio is rebound from the event object to
                # the bio string here; the event is not used again after this.
                if BIOPREFIX:
                    lfmbio = f"{BIOPREFIX} 🎧: {ARTIST} - {SONG}"
                else:
                    lfmbio = f"🎧: {ARTIST} - {SONG}"
                try:
                    if BOTLOG and LastLog:
                        await bot.send_message(
                            BOTLOG_CHATID,
                            f"Attempted to change bio to\n{lfmbio}")
                    await bot(UpdateProfileRequest(about=lfmbio))
                except AboutTooLongError:
                    # Telegram rejected the long bio; retry with song only.
                    short_bio = f"🎧: {SONG}"
                    await bot(UpdateProfileRequest(about=short_bio))
            else:
                if playing is None and user_info.about != DEFAULT_BIO:
                    await sleep(6)
                    await bot(UpdateProfileRequest(about=DEFAULT_BIO))
                    if BOTLOG and LastLog:
                        await bot.send_message(
                            BOTLOG_CHATID, f"Reset bio back to\n{DEFAULT_BIO}")
        except AttributeError:
            # Nothing scrobbling; restore the default bio if needed.
            try:
                if user_info.about != DEFAULT_BIO:
                    await sleep(6)
                    await bot(UpdateProfileRequest(about=DEFAULT_BIO))
                    if BOTLOG and LastLog:
                        await bot.send_message(
                            BOTLOG_CHATID, f"Reset bio back to\n{DEFAULT_BIO}")
            except FloodWaitError as err:
                if BOTLOG and LastLog:
                    await bot.send_message(BOTLOG_CHATID,
                                           f"Error changing bio:\n{err}")
        except FloodWaitError as err:
            if BOTLOG and LastLog:
                await bot.send_message(BOTLOG_CHATID,
                                       f"Error changing bio:\n{err}")
        except WSError as err:
            if BOTLOG and LastLog:
                await bot.send_message(BOTLOG_CHATID,
                                       f"Error changing bio:\n{err}")
        await sleep(2)
    RUNNING = False
@register(outgoing=True, pattern=r"^.lastbio (on|off)")
async def lastbio(lfmbio):
    """Toggle the last.fm now-playing-to-bio updater via .lastbio on/off."""
    global LASTFMCHECK
    global RUNNING
    choice = lfmbio.pattern_match.group(1).lower()
    if choice == "on":
        setrecursionlimit(700000)
        if LASTFMCHECK:
            await lfmbio.edit(LFM_BIO_RUNNING)
            return
        LASTFMCHECK = True
        environ["errorcheck"] = "0"
        await lfmbio.edit(LFM_BIO_ENABLED)
        await sleep(4)
        # Hand control to the polling loop until it is switched off.
        await get_curr_track(lfmbio)
    elif choice == "off":
        LASTFMCHECK = False
        RUNNING = False
        await bot(UpdateProfileRequest(about=DEFAULT_BIO))
        await lfmbio.edit(LFM_BIO_DISABLED)
    else:
        await lfmbio.edit(LFM_BIO_ERR)
@register(outgoing=True, pattern=r"^.lastlog (on|off)")
async def lastlog(lstlog):
    """Toggle mirroring of bio updates into the bot-log group."""
    global LastLog
    choice = lstlog.pattern_match.group(1).lower()
    LastLog = False
    if choice == "on":
        LastLog = True
        await lstlog.edit(LFM_LOG_ENABLED)
    elif choice == "off":
        LastLog = False
        await lstlog.edit(LFM_LOG_DISABLED)
    else:
        await lstlog.edit(LFM_LOG_ERR)
CMD_HELP.update({
'lastfm':
".lastfm\
\nUsage: Shows currently scrobbling track or most recent scrobbles if nothing is playing.\
\n\nlastbio: .lastbio <on/off>\
\nUsage: Enables/Disables last.fm current playing to bio.\
\n\nlastlog: .lastlog <on/off>\
\nUsage: Enable/Disable last.fm bio logging in the bot-log group."
})
|
class card:
    """A stored card record: identifier, linked account, PIN and status.

    The Java-style get*/set* accessors are preserved for backward
    compatibility with existing callers; attributes can also be read and
    written directly.
    """
    def __init__(self, card, account, description, pin, seq, auth, active):
        self.card = card                # card number / identifier
        self.account = account          # account the card is linked to
        self.description = description  # free-form label for the card
        self.pin = pin                  # PIN code
        self.seq = seq                  # card sequence number
        self.auth = auth                # authorization state
        self.active = active            # whether the card may be used
    def __repr__(self):
        # The PIN is deliberately omitted so instances can be logged safely.
        return ('card(card={!r}, account={!r}, description={!r}, seq={!r}, '
                'auth={!r}, active={!r})').format(self.card, self.account,
                                                  self.description, self.seq,
                                                  self.auth, self.active)
    def setActive(self, v):
        self.active = v
    def setAccount(self, v):
        self.account = v
    def setDescription(self, v):
        self.description = v
    def setPin(self, v):
        self.pin = v
    def setSeq(self, v):
        self.seq = v
    def setAuth(self, v):
        self.auth = v
    def getCard(self):
        return self.card
    def getAccount(self):
        return self.account
    def getDescription(self):
        return self.description
    def getPin(self):
        return self.pin
    def getSeq(self):
        return self.seq
    def getAuth(self):
        return self.auth
    def getActive(self):
        return self.active
|
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.models import User
from django.contrib.sites.shortcuts import get_current_site
from django.core.mail import send_mail
from django.core.paginator import Paginator
from django.shortcuts import render, redirect
from django.template.loader import render_to_string
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode
from rest_framework import viewsets
from authentication.forms import UserSignupForm, UserSigninForm
from authentication.serializer import UserSerializer
from authentication.tokens import activation_token
from pharmacies import settings
from pharmacies.permission import IsAdmin
from shop.models import Category, Product
def homepage(request):
    """Render the shop landing page with paginated, newest-first products."""
    products_all = Product.objects.filter(active=True)
    categories = Category.objects.filter(active=True)
    # Same active filter, ordered for the paginated listing.
    products = Product.objects.filter(active=True).order_by('-created')
    featured_products = Product.objects.filter(featured=True)
    # Six products per page; ?page= selects the page.
    paginator = Paginator(products, 6)
    page = request.GET.get('page')
    products = paginator.get_page(page)
    return render(request, 'shop/base.html',
                  {'products_all': products_all, 'categories': categories, 'product': products,
                   'featured_products': featured_products})
def signup(req):
    """Handle user registration and send the account-activation e-mail."""
    if req.method == "POST":
        form = UserSignupForm(req.POST)
        if form.is_valid():
            # Keep the user deactivated until the e-mail link is clicked.
            user = form.save(commit=False)
            user.is_active = False
            user.save()
            site = get_current_site(req)
            mail_subject = "Confirmation message"
            message = render_to_string('authentication/activate_mail.html', {
                "user": user,
                'domain': site.domain,
                'uid': urlsafe_base64_encode(force_bytes(user.pk)),
                'token': activation_token.make_token(user)
            })
            to_email = form.cleaned_data.get('email')
            to_list = [to_email]
            from_email = settings.EMAIL_HOST_USER
            # fail_silently: a mail outage should not break registration.
            send_mail(mail_subject, message, from_email, to_list, fail_silently=True)
            messages.success(req, "Thanks for your registration! A confirmation link has been sent to your mail")
    else:
        form = UserSignupForm()
    return render(req, 'authentication/users_signup.html', {'form': form})
def activate(req, uidb64, token):
    """Confirm a signup via the e-mailed activation link.

    Decodes the uid, verifies the token, and activates the account; any
    malformed or stale link falls through to an error message.
    """
    try:
        uid = urlsafe_base64_decode(uidb64).decode()
        user = User.objects.get(id=uid)
    # User.DoesNotExist was previously uncaught and produced a 500 for
    # links pointing at a deleted/unknown user; treat it as invalid too.
    except (TypeError, ValueError, User.DoesNotExist):
        user = None
    if user and activation_token.check_token(user, token):
        user.is_active = True
        user.save()
        messages.info(req, 'Your account activated! Now login')
        return redirect("authentication:login")
    else:
        messages.error(req, "Activation link is invalid")
        # Previously fell through returning None, which makes Django raise
        # "view didn't return an HttpResponse"; redirect to login instead
        # so the queued error message is displayed.
        return redirect("authentication:login")
def signin(request):
    """Authenticate a user from the sign-in form and start a session."""
    if request.method != "POST":
        form = UserSigninForm()
        return render(request, 'authentication/users_signin.html', {'form': form})
    form = UserSigninForm(request.POST)
    username = form['username'].value()
    password = form['password'].value()
    user = authenticate(username=username, password=password)
    if user:
        login(request, user)
        # Honor the ?next= redirect when one was supplied.
        redirect_url = request.GET.get('next', 'shop:home')
        return redirect(redirect_url)
    messages.error(request, 'Invalid username or password')
    return render(request, 'authentication/users_signin.html', {'form': form})
def signout(request):
    """Log the current user out and return to the login page."""
    logout(request)
    messages.success(request, 'Logged out successfully!')
    return redirect('authentication:login')
"""
API with permissions
"""
class UserViewSet(viewsets.ModelViewSet):
    """CRUD API over Django users; access restricted to admin callers."""
    queryset = User.objects.all()
    serializer_class = UserSerializer
    permission_classes = [IsAdmin]
|
import pandas as pd
import numpy as np
import copy
from tqdm import tqdm
from typing import List, Dict
import model as m
from db import PostgresDB as db
from sql import script_get_customer_data, \
script_get_customer_last_price, \
script_update_customer_stocks
class Customer():
    """Customer-population model for the cheese-market simulation."""
    # Customer population mix by family type
    customer_proportion = m.CUSTOMER_PROPORTION
    # Number of people in each family type
    customer_number_people = m.CUSTOMER_NUMBER_PEOPLE
    # Distribution of customer financial-wealth levels
    customer_financial_wealth = m.CUSTOMER_FINANCIAL_WEALTH
    # Choice list for a customer's current cheese stock
    customer_current_stock_cheese = m.CUSTOMER_CURRENT_STOCK_CHEESE
    # Average cheese consumption per person per day, in grams
    customer_avg_cheese_consumption = m.CUSTOMER_AVG_CHEESE_CONSUMPTION
    # Consumption split across the different kinds of cheese
    customer_kind_cheese = m.CUSTOMER_KIND_CHEESE
    # Sensitivity to marketing campaigns
    customer_sensitivity_marketing_campaign = m.CUSTOMER_SENSITIVITY_MARKETING_CAMPAIGN
    # Readiness to temporarily stop eating cheese
    customer_stop_eating_cheese = m.CUSTOMER_STOP_EATING_CHEESE
    # Readiness to switch to a cheaper kind of cheese
    customer_switching_another_cheese = m.CUSTOMER_SWITCHING_ANOTHER_CHEESE
    # Readiness to hunt for the old price at other grocery chains
    customer_looking_cheese = m.CUSTOMER_LOOKING_CHEESE
    # Price-change threshold that the customer actually notices
    customer_significant_price_change = m.CUSTOMER_SIGNIFICANT_PRICE_CHANGE
    # -------------------------------------------------------------------------------------------------------------------
    # City retailer mix
    retailer_proportion = m.RETAILER_PROPORTION
    def __init__(self):
        pass
    @classmethod
    def generation_list_customer_proportion(cls, list_with_proportion: List = customer_proportion) -> List[str]:
        """Expand the proportion mapping into the full list of customer categories."""
        list_customer = []
        for key, value in list_with_proportion.items():
            for _ in range(value):
                list_customer.append(key)
        return list_customer
    @classmethod
    def choice_val(cls, d):
        """Pick a random value from the given collection.

        Lists are sampled uniformly; dicts are sampled with probabilities
        proportional to their values; other types yield None.
        """
        if isinstance(d, list):
            return np.random.choice(d)
        elif isinstance(d, dict):
            return np.random.choice(list(d.keys()),
                                    1,
                                    p=[round(i / sum(d.values()), 2) for i in d.values()])[0]
        else:
            pass
    @classmethod
    def generation_dataframe_customer(cls) -> tuple:
        """Generate random customer data; returns (customer_df, stock_df)."""
        # Customer section
        list_customer_id = []
        list_family_type = []
        list_number_people = []
        list_financial_wealth = []
        # Cheese section
        list_favorite_kind_cheese = []
        list_current_stock_cheese = []
        list_avg_cheese_consumption = []
        list_stop_eating_cheese = []
        list_switching_another_cheese = []
        # Retailer section
        list_basic_retailer = []
        list_significant_price_change = []
        list_looking_cheese = []
        list_sensitivity_marketing_campaign = []
        # Keys section
        list_key_kind_cheese_retailer = []
        # Starting customer id
        customer_id_val = 1
        for customer_family_type_val in tqdm(cls.generation_list_customer_proportion()):
            # Customer section
            list_customer_id.append(customer_id_val)
            list_family_type.append(customer_family_type_val)
            list_number_people.append(cls.customer_number_people.get(customer_family_type_val))
            list_financial_wealth.append(cls.choice_val(cls.customer_financial_wealth))
            # Cheese section
            # For wealthy customers ("высокий" = high), exclude processed
            # cheese products ("продукты сырные") from the choice.
            if list_financial_wealth[-1] == "высокий":
                customer_kind_cheese_copy = copy.deepcopy(cls.customer_kind_cheese)
                del customer_kind_cheese_copy["продукты сырные"]
                list_favorite_kind_cheese.append(cls.choice_val(customer_kind_cheese_copy))
                customer_kind_cheese_copy.clear()
            else:
                list_favorite_kind_cheese.append(cls.choice_val(cls.customer_kind_cheese))
            list_current_stock_cheese.append(cls.choice_val(cls.customer_current_stock_cheese))
            list_avg_cheese_consumption.append(cls.customer_number_people.get(customer_family_type_val) *
                                               cls.customer_avg_cheese_consumption)
            list_stop_eating_cheese.append(cls.choice_val(cls.customer_stop_eating_cheese))
            list_switching_another_cheese.append(cls.choice_val(cls.customer_switching_another_cheese))
            # Retailer section
            list_basic_retailer.append(cls.choice_val(cls.retailer_proportion))
            list_significant_price_change.append(cls.choice_val(cls.customer_significant_price_change))
            list_looking_cheese.append(cls.choice_val(cls.customer_looking_cheese))
            list_sensitivity_marketing_campaign.append(cls.choice_val(cls.customer_sensitivity_marketing_campaign))
            # Keys section: "retailer|cheese_kind" composite key.
            list_key_kind_cheese_retailer.append("|".join([list_basic_retailer[-1], list_favorite_kind_cheese[-1]]))
            customer_id_val = customer_id_val + 1
        customer_data_dict = {"customer_id": list_customer_id,
                              "family_type": list_family_type,
                              "number_people": list_number_people,
                              "financial_wealth": list_financial_wealth,
                              "favorite_kind_cheese": list_favorite_kind_cheese,
                              "avg_cheese_consumption": list_avg_cheese_consumption,
                              "stop_eating_cheese": list_stop_eating_cheese,
                              "switching_another_cheese": list_switching_another_cheese,
                              "basic_retailer": list_basic_retailer,
                              "significant_price_change": list_significant_price_change,
                              "looking_cheese": list_looking_cheese,
                              "sensitivity_marketing_campaign": list_sensitivity_marketing_campaign,
                              "key": list_key_kind_cheese_retailer}
        stock_cheese_data_dict = {"customer_id": list_customer_id,
                                  "current_value": list_current_stock_cheese}
        customer_df = pd.DataFrame(data=customer_data_dict)
        stock_df = pd.DataFrame(data=stock_cheese_data_dict)
        return customer_df, stock_df
    @classmethod
    def get_customer_data(cls, customer_id: int, script=script_get_customer_data) -> Dict:
        """Fetch a customer's record by id from the database."""
        customer_data_dict = db().get_customer_data(script, customer_id)
        return customer_data_dict
    @classmethod
    def get_customer_last_price(cls, customer_id: int, model: int, script=script_get_customer_last_price) -> int:
        """Fetch the last cheese price this customer actually paid."""
        last_price_current = db().get_customer_last_price(script, customer_id=customer_id, model=model)
        return last_price_current
    @classmethod
    def update_customer_stocks(cls, customer_id: int, balance_cheese: int,
                               script=script_update_customer_stocks) -> None:
        """Persist the customer's updated cheese stock balance."""
        db().update_customer_stocks(script, customer_id=customer_id, current_value=balance_cheese)
|
import ds_format as ds
import json
from ds_format.cmd import UsageError, NumpyEncoder
def get(*args, **opts):
    """Print metadata addressed by a "var/attr" style path from a NetCDF file.

    Path forms: "/" prints all dataset attributes, "/attr" one dataset
    attribute, "var" all attributes of a variable, "var/attr" one variable
    attribute.
    """
    if len(args) != 2:
        raise TypeError('Usage: get <path> <input>')
    parts = args[0].split('/')
    filename = args[1]
    d = ds.from_netcdf(filename, [])
    meta = d['.']
    if len(parts) == 1:
        print(json.dumps(meta[parts[0]], sort_keys=True, indent=4, cls=NumpyEncoder))
    elif len(parts) == 2:
        var, attr = parts
        if var == '' and attr == '':
            print(json.dumps(meta['.'], sort_keys=True, indent=4, cls=NumpyEncoder))
        elif var == '':
            print(meta['.'][attr])
        else:
            print(meta[var][attr])
|
from copy import deepcopy
from eth2spec.test.context import spec_state_test, with_all_phases
from eth2spec.test.helpers.state import (
next_epoch,
next_slot
)
from eth2spec.test.helpers.block import apply_empty_block
from eth2spec.test.helpers.attestations import (
add_attestation_to_state,
fill_aggregate_attestation,
get_valid_attestation,
sign_attestation,
)
from eth2spec.test.phase_0.epoch_processing.run_epoch_process_base import run_epoch_processing_with
def run_process_crosslinks(spec, state):
    """Run only the 'process_crosslinks' epoch-processing step (yields test parts)."""
    yield from run_epoch_processing_with(spec, state, 'process_crosslinks')
@with_all_phases
@spec_state_test
def test_no_attestations(spec, state):
    yield from run_process_crosslinks(spec, state)
    # With no attestations, every shard's crosslink must carry over unchanged.
    for shard in range(spec.SHARD_COUNT):
        assert state.previous_crosslinks[shard] == state.current_crosslinks[shard]
@with_all_phases
@spec_state_test
def test_single_crosslink_update_from_current_epoch(spec, state):
    next_epoch(spec, state)
    attestation = get_valid_attestation(spec, state, signed=True)
    fill_aggregate_attestation(spec, state, attestation)
    # Include as early as allowed so it counts as a current-epoch attestation.
    add_attestation_to_state(spec, state, attestation, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY)
    assert len(state.current_epoch_attestations) == 1
    shard = attestation.data.crosslink.shard
    pre_crosslink = deepcopy(state.current_crosslinks[shard])
    yield from run_process_crosslinks(spec, state)
    # The attested shard's crosslink must have been rotated and replaced.
    assert state.previous_crosslinks[shard] != state.current_crosslinks[shard]
    assert pre_crosslink != state.current_crosslinks[shard]
@with_all_phases
@spec_state_test
def test_single_crosslink_update_from_previous_epoch(spec, state):
    """An attestation landing in the previous epoch must update the crosslink and reward its committee."""
    next_epoch(spec, state)

    attestation = get_valid_attestation(spec, state, signed=True)
    fill_aggregate_attestation(spec, state, attestation)
    # Include it a whole epoch later, so it counts as a previous-epoch attestation.
    add_attestation_to_state(spec, state, attestation, state.slot + spec.SLOTS_PER_EPOCH)

    assert len(state.previous_epoch_attestations) == 1

    shard = attestation.data.crosslink.shard
    crosslink_before = deepcopy(state.current_crosslinks[shard])
    crosslink_deltas = spec.get_crosslink_deltas(state)

    yield from run_process_crosslinks(spec, state)

    assert state.previous_crosslinks[shard] != state.current_crosslinks[shard]
    assert crosslink_before != state.current_crosslinks[shard]

    # ensure rewarded: every committee member got a reward and no penalty
    committee = spec.get_crosslink_committee(
        state,
        attestation.data.target.epoch,
        attestation.data.crosslink.shard)
    for validator_index in committee:
        assert crosslink_deltas[0][validator_index] > 0
        assert crosslink_deltas[1][validator_index] == 0
@with_all_phases
@spec_state_test
def test_double_late_crosslink(spec, state):
    """A second attestation arriving after the crosslink was already formed
    must not overwrite it, and its committee is penalised, not rewarded."""
    # Needs one committee per shard; otherwise the shard-matching loop below is unreliable.
    if spec.get_committee_count(state, spec.get_current_epoch(state)) < spec.SHARD_COUNT:
        print("warning: ignoring test, test-assumptions are incompatible with configuration")
        return

    next_epoch(spec, state)
    state.slot += 4

    attestation_1 = get_valid_attestation(spec, state, signed=True)
    fill_aggregate_attestation(spec, state, attestation_1)

    # add attestation_1 to next epoch
    next_epoch(spec, state)
    add_attestation_to_state(spec, state, attestation_1, state.slot + 1)

    # Walk forward until an attestation for the SAME shard as attestation_1 is found.
    # NOTE(review): if no slot in the epoch yields a matching shard, the loop
    # exhausts without `break` and the later use of `attestation_2` relies on the
    # last loop iteration's (unsigned) value — presumably guarded by the
    # committee-count check above; confirm.
    for _ in range(spec.SLOTS_PER_EPOCH):
        attestation_2 = get_valid_attestation(spec, state)
        if attestation_2.data.crosslink.shard == attestation_1.data.crosslink.shard:
            sign_attestation(spec, state, attestation_2)
            break
        next_slot(spec, state)
    apply_empty_block(spec, state)

    fill_aggregate_attestation(spec, state, attestation_2)

    # add attestation_2 in the next epoch after attestation_1 has
    # already updated the relevant crosslink
    next_epoch(spec, state)
    add_attestation_to_state(spec, state, attestation_2, state.slot + 1)

    assert len(state.previous_epoch_attestations) == 1
    assert len(state.current_epoch_attestations) == 0

    crosslink_deltas = spec.get_crosslink_deltas(state)

    yield from run_process_crosslinks(spec, state)

    shard = attestation_2.data.crosslink.shard

    # ensure that the current crosslinks were not updated by the second attestation
    assert state.previous_crosslinks[shard] == state.current_crosslinks[shard]
    # ensure no reward, only penalties for the failed crosslink
    for index in spec.get_crosslink_committee(
            state,
            attestation_2.data.target.epoch,
            attestation_2.data.crosslink.shard):
        assert crosslink_deltas[0][index] == 0
        assert crosslink_deltas[1][index] > 0
|
"""
Module with model definitions.
"""
import pickle
import numpy as np
class Model:
    """
    Machine learning model, built from layers
    """

    def __init__(self, layers):
        """
        Constructor
        :param layers: list of Layers instances
        """
        self.layers = layers
        # Each layer is built against the output shape of the previous one;
        # the first layer receives None and must infer/own its input shape.
        shape = None
        for layer in self.layers:
            layer.build(shape)
            shape = layer.output_shape

    def predict(self, x):
        """Run a forward pass through every layer and return the final activations."""
        activation = x
        for layer in self.layers:
            activation = layer.forward(activation)
        return activation

    def get_loss(self, labels, predictions):
        """
        Compute categorical crossentropy loss
        :param labels: batch of labels
        :param predictions: batch of predictions
        :return: mean loss of the batch predictions
        """
        # Clip to avoid log(0); epsilon keeps the loss finite at the extremes.
        epsilon = 1e-7
        safe_predictions = np.clip(predictions, epsilon, 1 - epsilon)
        return np.mean(-np.sum(labels * np.log(safe_predictions), axis=1))

    def train(self, x, y, learning_rate):
        """Run one training step: forward pass, then backpropagate gradients.

        :param x: batch of inputs
        :param y: batch of one-hot labels
        :param learning_rate: step size passed to each layer's update
        """
        activation = x
        for layer in self.layers:
            activation = layer.train_forward(activation)
        gradients = self.layers[-1].get_output_layer_error_gradients(y)
        for layer in reversed(self.layers[:-1]):
            gradients = layer.train_backward(gradients, learning_rate)

    def get_accuracy(self, x, y):
        """Return the fraction of samples whose argmax prediction matches the label."""
        predictions = self.predict(x)
        correct = sum(
            int(np.argmax(prediction) == np.argmax(truth))
            for prediction, truth in zip(predictions, y))
        return correct / len(predictions)

    def save(self, path):
        """
        Save model at provided path
        :param path:
        """
        with open(path, "wb") as file:
            pickle.dump(self, file)

    @staticmethod
    def load(path):
        """
        Load model from file

        SECURITY NOTE: pickle.load executes arbitrary code from the file —
        only load model files from trusted sources.
        :param path:
        :return: Model instance
        """
        with open(path, "rb") as file:
            loaded = pickle.load(file)
        if not isinstance(loaded, Model):
            raise ValueError("Object at path isn't a Model instance")
        return loaded
|
class SayPlugin:
    """Plugin that echoes the 'query' entity of the NLP analysis back in a success response."""

    def __init__(self, config):
        super(SayPlugin, self).__init__()
        self.config = config

    # pylint: disable=no-self-use
    def execute(self, context):
        """Return {'result': {'query': ...}, 'status': 'success'} from the context's entities."""
        entities = context.nlp_analysis.entities
        payload = {"query": entities["query"]}
        return {
            'result': payload,
            'status': 'success',
        }
|
import hashlib
import os
from StringIO import StringIO
from tarfile import TarFile
from tempfile import NamedTemporaryFile
from urllib import quote
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import Client, TestCase
from tardis.tardis_portal.models import Experiment
class TarDownloadTestCase(TestCase):
    """Integration test for streaming a whole experiment download as a tar archive.

    NOTE(review): Python 2 code (`StringIO`, `urllib.quote`,
    `django.core.urlresolvers`, md5 over a str).
    """

    def setUp(self):
        # create user
        self.testuser = User(username='testuser')
        self.testuser.save()
        # create test experiment; the long repeated title exercises long paths
        self.exp = Experiment(title='tar download test' * 15,
                              created_by=self.testuser,
                              public_access=Experiment.PUBLIC_ACCESS_FULL)
        self.exp.save()
        # create test dataset
        self.ds = self.exp.datasets.create(
            description="testing tar download dataset")
        datafile_content = "\n".join(['some data %d' % i
                                      for i in range(1000)])
        filesize = len(datafile_content)
        md5sum = hashlib.md5(datafile_content).hexdigest()
        # create test datafiles and datafile objects
        self.dfs = []
        for i in range(20):
            # NOTE: the inner comprehension deliberately reuses `i`, producing a
            # 4-level nested directory (testdir<i>/.../testdir<i+3>) per file.
            df = self.ds.datafile_set.create(
                filename='testfile%d.txt' % i,
                mimetype='text/plain',
                size=filesize,
                md5sum=md5sum,
                directory='/'.join([
                    'testdir%d' % i
                    for i in range(i, i + 4)
                ]))
            df.file_object = StringIO(datafile_content)
            # re-read from DB so verification state set on save is visible
            df.refresh_from_db()
            self.dfs.append(df)
        # mock client
        self.client = Client()

    def tearDown(self):
        # delete created objects and files
        [ds.delete() for ds in self.exp.datasets.all()]
        self.exp.delete()

    def test_tar_experiment_download(self):
        # all fixture datafiles must have been verified on ingest
        self.assertTrue(all(df.verified for df in self.dfs))
        response = self.client.get(reverse(
            'tardis.tardis_portal.download.streaming_download_experiment',
            args=(self.exp.id, 'tar')))
        with NamedTemporaryFile('w') as tarfile:
            # materialise the streamed response, then compare its size with the
            # Content-Length header the view promised
            for c in response.streaming_content:
                tarfile.write(c)
            tarfile.flush()
            self.assertEqual(int(response['Content-Length']),
                             os.stat(tarfile.name).st_size)
            tf = TarFile(tarfile.name)
            for df in self.dfs:
                full_path = os.path.join(
                    self.exp.title.replace(' ', '_'),
                    quote(self.ds.description, safe=''),
                    df.directory, df.filename)
                # docker has a file path limit of ~240 characters
                if os.environ.get('DOCKER_BUILD', 'false') != 'true':
                    tf.extract(full_path, '/tmp')
                    self.assertEqual(
                        os.stat(os.path.join('/tmp', full_path)).st_size,
                        int(df.size))
|
from abc import ABC
from datetime import datetime, timedelta
from unittest.mock import patch
from django.contrib.auth.models import Permission
from django.core.exceptions import ValidationError
from django.test import TestCase
from django.utils import timezone
from django.utils.dateparse import parse_time
from news.models import Event, TimePlace
from users.models import User
from util.locale_utils import parse_datetime_localized
from ...models.course import Printer3DCourse
from ...models.machine import Machine, MachineType
from ...models.reservation import Quota, Reservation, ReservationRule
class ReservationTestBase(TestCase, ABC):
    """Shared fixture setup and assertion helpers for reservation test cases."""

    def init_objs(self, machine_type: MachineType):
        # Fixture objects: one machine, one user with a quota and a course
        # registration, one all-day reservation rule, and one event time place.
        self.machine_type = machine_type
        self.machine = Machine.objects.create(name="C1", location="Printer room", status=Machine.Status.AVAILABLE,
                                              machine_type=self.machine_type)
        self.user = User.objects.create_user("User", "user@makentnu.no", "user_pass")
        self.user_quota = Quota.objects.create(user=self.user, ignore_rules=False, number_of_reservations=2,
                                               machine_type=self.machine_type)
        self.course_registration = Printer3DCourse.objects.create(user=self.user, username=self.user.username,
                                                                  date=datetime.now().date(),
                                                                  name=self.user.get_full_name())
        # Maximum reservation length (hours) permitted by the rule below
        self.max_time_reservation = 5
        ReservationRule.objects.create(machine_type=self.machine_type, start_time=parse_time("00:00"), end_time=parse_time("23:59"),
                                       days_changed=6, start_days=1, max_hours=self.max_time_reservation,
                                       max_inside_border_crossed=self.max_time_reservation)
        self.event = Event.objects.create(title="TEST EVENT")
        self.timeplace = TimePlace.objects.create(publication_time=timezone.now(),
                                                  start_time=timezone.now() + timedelta(seconds=1),
                                                  end_time=timezone.now() + timedelta(minutes=1),
                                                  event=self.event)

    def check_reservation_invalid(self, reservation, error_message):
        # An invalid reservation must both fail validate() and raise on save()
        self.assertFalse(reservation.validate(), error_message)
        try:
            reservation.save()
            self.fail(error_message)
        except ValidationError:
            pass

    def check_reservation_valid(self, reservation, error_message):
        # A valid reservation must both pass validate() and save without raising
        self.assertTrue(reservation.validate(), error_message)
        try:
            reservation.save()
        except ValidationError:
            self.fail(error_message)

    def create_reservation(self, relative_start_time: timedelta, relative_end_time: timedelta, event: Event = None,
                           user: User = None, machine: Machine = None, special=False, special_text=""):
        # Build (but do not save) a reservation with times relative to "now";
        # machine/user default to the fixture objects.
        machine = machine or self.machine
        user = user or self.user
        return Reservation(user=user, machine=machine, event=event, start_time=timezone.now() + relative_start_time,
                           end_time=timezone.now() + relative_end_time, special=special, special_text=special_text)
class TestReservation(ReservationTestBase):
    """Validation and permission tests for `Reservation` on a basic machine type.

    Bug fix: two tests previously assigned `self.user_quota.max_number_of_reservations`,
    but the `Quota` field used throughout this file is `number_of_reservations`
    (see `init_objs` and the other tests) — the assignment only created a dead
    instance attribute and the intended quota limit was never applied.
    """

    def setUp(self):
        # See the `0015_machinetype.py` migration for which MachineTypes are created by default
        super().init_objs(MachineType.objects.get(pk=1))

    @staticmethod
    def save_past_reservation(reservation):
        # Temporarily bypass validation so a reservation in the past can be
        # persisted for test setup, then restore the original validator.
        validate_function = reservation.validate
        reservation.validate = lambda: True
        reservation.save()
        reservation.validate = validate_function

    def set_reservation_future_limit_days(self, days):
        # Remember the class-level limit so it can be restored after the test.
        self.reservation_future_limit_days = Reservation.RESERVATION_FUTURE_LIMIT_DAYS
        Reservation.RESERVATION_FUTURE_LIMIT_DAYS = days

    def reset_reservation_future_limit_days(self):
        Reservation.RESERVATION_FUTURE_LIMIT_DAYS = self.reservation_future_limit_days

    def give_user_event_permission(self):
        self.user.user_permissions.add(Permission.objects.get(name="Can create event reservation"))

    def test_can_create_reservation(self):
        self.check_reservation_valid(self.create_reservation(timedelta(hours=1), timedelta(hours=2)),
                                     "Reservations should be saveable")

    def test_not_allowed_user_cannot_create_reservation(self):
        self.course_registration.delete()
        self.user.refresh_from_db()
        self.assertFalse(self.machine_type.can_user_use(self.user))
        self.check_reservation_invalid(
            self.create_reservation(timedelta(hours=1), timedelta(hours=2)),
            "Uses that cannot use a machine, should not be able to reserve it"
        )

    def test_reserve_end_time_before_start_time(self):
        self.check_reservation_invalid(self.create_reservation(timedelta(hours=1), timedelta(minutes=30)),
                                       "Reservations should not be able to end before they start")

    def test_reserve_longer_than_maximum_user_time(self):
        self.check_reservation_invalid(
            self.create_reservation(timedelta(hours=1), timedelta(hours=self.max_time_reservation + 1.1)),
            "Reservations should not be allowed to exceed the maximum allowed time for the user")

    def test_reserve_in_the_past(self):
        self.check_reservation_invalid(
            self.create_reservation(timedelta(hours=-1), timedelta(hours=1)),
            "A reservation is invalid if it starts in the past, even though it ends in the future"
        )
        self.check_reservation_invalid(
            self.create_reservation(timedelta(hours=-1), timedelta(hours=-0.5)),
            "A reservation is invalid if it is completely in the past"
        )

    def test_make_more_than_one_reservation(self):
        self.user_quota.number_of_reservations = 5
        self.user_quota.save()
        self.assertTrue(Quota.can_create_new_reservation(self.user, self.machine_type))
        for reservation_number in range(5):
            self.check_reservation_valid(self.create_reservation(timedelta(days=reservation_number, hours=1),
                                                                 timedelta(days=reservation_number, hours=2)),
                                         "User should be able to make as many reservations as allowed")
        self.assertFalse(Quota.can_create_new_reservation(self.user, self.machine_type))

    def test_make_more_than_allowed_number_of_reservations(self):
        self.user_quota.number_of_reservations = 5
        self.user_quota.save()
        for reservation_number in range(5):
            self.check_reservation_valid(self.create_reservation(timedelta(days=reservation_number, hours=1),
                                                                 timedelta(days=reservation_number, hours=2)),
                                         "User should be able to make as many reservations as allowed")
        self.check_reservation_invalid(self.create_reservation(timedelta(days=5, hours=1), timedelta(days=5, hours=2)),
                                       "User should not be able to make more reservations than allowed")

    @patch("django.utils.timezone.now")
    def test_disallow_overlapping_reservations(self, now_mock):
        # Freeze "now" so the relative time windows below are deterministic
        now_mock.return_value = parse_datetime_localized("2018-03-12 12:00")
        self.user_quota.number_of_reservations = 3
        self.user_quota.save()
        self.check_reservation_valid(self.create_reservation(timedelta(hours=1), timedelta(hours=2)),
                                     "Saving should be valid")
        # Start before, end inside
        self.check_reservation_invalid(self.create_reservation(timedelta(minutes=50), timedelta(hours=1, minutes=50)),
                                       "Reservation should not be able to end inside another")
        # Start inside, end after
        self.check_reservation_invalid(
            self.create_reservation(timedelta(hours=1, minutes=10), timedelta(hours=2, minutes=10)),
            "Reservation should not be able to end inside another")
        # Start inside, end inside
        self.check_reservation_invalid(
            self.create_reservation(timedelta(hours=1, minutes=10), timedelta(hours=1, minutes=50)),
            "Reservation should not be able to start and end inside another")
        # Start before, end after
        self.check_reservation_invalid(self.create_reservation(timedelta(minutes=50), timedelta(hours=2, minutes=10)),
                                       "Reservation should not be able to encapsulate another")
        # End at the start time of other
        self.check_reservation_valid(self.create_reservation(timedelta(hours=0), timedelta(hours=1)),
                                     "A reservation should be allowed to end at the same time another one starts")
        # Start at the end time of other
        self.check_reservation_valid(self.create_reservation(timedelta(hours=2), timedelta(hours=3)),
                                     "A reservation should be allowed to start at the same time another one ends")

    def test_make_event_without_event_permission(self):
        self.check_reservation_invalid(
            self.create_reservation(timedelta(hours=1), timedelta(hours=2), event=self.timeplace),
            "Should not be able to make event reservation without correct permission")

    def test_make_event_with_event_permission(self):
        self.give_user_event_permission()
        # FIX: was `max_number_of_reservations` — a nonexistent attribute; the
        # Quota field is `number_of_reservations`, and it must be 1 for the
        # second event reservation below to prove events don't count against it.
        self.user_quota.number_of_reservations = 1
        self.user_quota.save()
        self.check_reservation_valid(
            self.create_reservation(timedelta(hours=1), timedelta(hours=2), event=self.timeplace),
            "User with the correct permission should be allowed to create an event reservation")
        self.check_reservation_valid(
            self.create_reservation(timedelta(days=1, hours=1), timedelta(days=1, hours=2), event=self.timeplace),
            "Event reservations should not count towards the total number of reservations")

    def test_make_event_reservation_with_longer_than_user_max_time(self):
        self.give_user_event_permission()
        self.check_reservation_valid(
            self.create_reservation(timedelta(hours=1), timedelta(hours=self.max_time_reservation + 2),
                                    event=self.timeplace),
            "User should be able to make event reservations longer than their maximum reservation time")

    def test_change_event_while_maximum_booked(self):
        # FIX: was `max_number_of_reservations` — a nonexistent attribute; the
        # Quota field is `number_of_reservations`, and it must be 1 so the user
        # is actually at their maximum when the reservation is changed.
        self.user_quota.number_of_reservations = 1
        self.user_quota.save()
        reservation = self.create_reservation(timedelta(hours=1), timedelta(hours=2))
        self.check_reservation_valid(reservation, "Reservation should be valid")
        reservation.end_time = timezone.now() + timedelta(hours=3)
        self.check_reservation_valid(reservation,
                                     "Changing a reservation with the maximum number of reservations should be valid")

    def test_same_time_separate_machines(self):
        additional_printer = Machine.objects.create(name="C2", location="Printer room Mackerspace U1", status=Machine.Status.AVAILABLE,
                                                    machine_type=self.machine_type)
        Machine.objects.create(name="C3", location="Printer room Mackerspace U1", status=Machine.Status.AVAILABLE,
                               machine_type=self.machine_type)
        self.check_reservation_valid(self.create_reservation(timedelta(hours=1), timedelta(hours=2)),
                                     "Saving a single reservation should be valid")
        self.check_reservation_valid(
            self.create_reservation(timedelta(hours=1), timedelta(hours=2), machine=additional_printer),
            "Reservations on different printers should be able to overlap in time")

    def test_can_owner_change_future_reservation(self):
        self.assertTrue(self.create_reservation(timedelta(hours=1), timedelta(hours=2)).can_change(self.user))

    def test_can_owner_change_started_reservation(self):
        self.assertFalse(self.create_reservation(timedelta(hours=-1), timedelta(hours=2)).can_change(self.user))

    def test_can_owner_change_end_time_of_started_reservation(self):
        reservation = self.create_reservation(timedelta(hours=-2), timedelta(hours=2))
        self.save_past_reservation(reservation)
        self.assertTrue(reservation.can_change_end_time(self.user))
        reservation.end_time = timezone.now() + timedelta(hours=1)
        self.check_reservation_valid(reservation, "Should be able to change end time of started reservation")
        reservation.end_time = timezone.now() + timedelta(hours=-1)
        self.check_reservation_invalid(reservation,
                                       "Should not be able to change end time of started reservation to before the current time")

    def test_can_owner_change_end_time_of_ended_reservation(self):
        self.assertFalse(
            self.create_reservation(timedelta(hours=-3), timedelta(hours=-1)).can_change_end_time(self.user))

    def test_can_owner_change_started_event_reservation(self):
        self.give_user_event_permission()
        self.assertTrue(
            self.create_reservation(timedelta(hours=-1), timedelta(hours=2), event=self.timeplace).can_change(
                self.user))

    def test_can_owner_change_started_special_reservation(self):
        self.give_user_event_permission()
        self.assertTrue(self.create_reservation(timedelta(hours=-1), timedelta(hours=2), special=True,
                                                special_text="Test").can_change(self.user))

    def test_can_other_user_change_future_reservation(self):
        user2 = User.objects.create_user("test", "user2@makentnu.no", "test_pass")
        reservation = self.create_reservation(timedelta(hours=1), timedelta(hours=2))
        self.assertTrue(reservation.can_change(self.user))
        self.assertFalse(reservation.can_change(user2))

    def test_can_user_with_event_reservation_change_other_user_non_event_reservation(self):
        user2 = User.objects.create_user("test", "user2@makentnu.no", "test_pass")
        user2.user_permissions.add(Permission.objects.get(name="Can create event reservation"))
        reservation = self.create_reservation(timedelta(hours=1), timedelta(hours=2))
        self.assertTrue(reservation.can_change(self.user))
        self.assertFalse(reservation.can_change(user2))

    def test_can_user_with_event_reservation_change_other_user_event_reservation(self):
        self.give_user_event_permission()
        user2 = User.objects.create_user("test", "user2@makentnu.no", "test_pass")
        user2.user_permissions.add(Permission.objects.get(name="Can create event reservation"))
        reservation = self.create_reservation(timedelta(hours=1), timedelta(hours=2), event=self.timeplace)
        self.assertTrue(reservation.can_change(self.user))
        self.assertTrue(reservation.can_change(user2))

    def test_can_user_with_event_reservation_change_other_user_special_reservation(self):
        self.give_user_event_permission()
        user2 = User.objects.create_user("test", "user2@makentnu.no", "test_pass")
        user2.user_permissions.add(Permission.objects.get(name="Can create event reservation"))
        reservation = self.create_reservation(timedelta(hours=1), timedelta(hours=2), special=True, special_text="Test")
        self.assertTrue(reservation.can_change(self.user))
        self.assertTrue(reservation.can_change(user2))

    def test_can_user_without_event_reservation_change_other_user_special_reservation(self):
        self.give_user_event_permission()
        user2 = User.objects.create_user("test", "user2@makentnu.no", "test_pass")
        reservation = self.create_reservation(timedelta(hours=1), timedelta(hours=2), special=True, special_text="Test")
        self.assertTrue(reservation.can_change(self.user))
        self.assertFalse(reservation.can_change(user2))

    def test_can_user_without_event_reservation_change_other_user_event_reservation(self):
        self.give_user_event_permission()
        user2 = User.objects.create_user("test", "user2@makentnu.no", "test_pass")
        reservation = self.create_reservation(timedelta(hours=1), timedelta(hours=2), event=self.timeplace)
        self.assertTrue(reservation.can_change(self.user))
        self.assertFalse(reservation.can_change(user2))

    def test_can_delete_future_reservation(self):
        self.assertTrue(self.create_reservation(timedelta(hours=1), timedelta(hours=2)).can_delete(self.user))

    def test_cannot_delete_started_reservation(self):
        self.assertFalse(self.create_reservation(timedelta(hours=-1), timedelta(hours=2)).can_delete(self.user))

    def test_is_within_allowed_period_for_reservation(self):
        self.set_reservation_future_limit_days(7)
        reservation = self.create_reservation(timedelta(hours=1), timedelta(hours=2))
        self.assertTrue(reservation.is_within_allowed_period())
        reservation.end_time = timezone.now() + timedelta(days=7, minutes=2)
        self.assertFalse(reservation.is_within_allowed_period())
        self.reset_reservation_future_limit_days()

    def test_create_reservation_too_far_in_the_future(self):
        self.set_reservation_future_limit_days(7)
        self.check_reservation_invalid(self.create_reservation(timedelta(days=7), timedelta(days=7, hours=1)),
                                       "Reservation is too far in the future and should not be valid")
        self.reset_reservation_future_limit_days()

    def test_make_event_reservation_too_far_in_the_future(self):
        self.set_reservation_future_limit_days(7)
        self.give_user_event_permission()
        self.check_reservation_valid(
            self.create_reservation(timedelta(days=7), timedelta(days=7, hours=1), event=self.timeplace),
            "Event reservations are always valid no matter how far in the future they are")
        self.reset_reservation_future_limit_days()
class TestAdvancedMachineReservation(ReservationTestBase):
    """Tests that only users with the advanced course may reserve advanced machines.

    Bug fix: the assertion messages were wrong — "Uses" instead of "Users", and
    the test for the *valid* case reused the "should not be able to reserve"
    message from the invalid cases, which would mislead anyone debugging a
    failure.
    """

    def setUp(self):
        # See the `0015_machinetype.py` migration for which MachineTypes are created by default
        super().init_objs(MachineType.objects.get(pk=6))

    def test_booking_advanced_printer_without_any_course(self):
        user2 = User.objects.create_user("test", "user2@makentnu.no", "test_pass")
        self.check_reservation_invalid(self.create_reservation(timedelta(hours=1), timedelta(hours=2), user=user2),
                                       "Users that cannot use a machine should not be able to reserve it")

    def test_booking_advanced_printer_with_normal_course(self):
        self.check_reservation_invalid(self.create_reservation(timedelta(hours=1), timedelta(hours=2)),
                                       "Users that cannot use a machine should not be able to reserve it")

    def test_booking_advanced_printer_with_advanced_course(self):
        self.course_registration.advanced_course = True
        self.course_registration.save()
        self.check_reservation_valid(self.create_reservation(timedelta(hours=1), timedelta(hours=2)),
                                     "Users with the advanced course should be able to reserve advanced machines")
|
import numpy as np
from scipy.signal import savgol_filter
import matplotlib.pyplot as plt
import MadDog
# Global data series; repopulated by `fill_data()` in the script body at the
# bottom of this file.
x = []
y = []
def generate():
    """Return a shuffled array of 11 evenly spaced base points plus 2 random outliers.

    Base points span [0, 5]; outliers are random integers in [10, 20).
    """
    base_points = np.linspace(0, 5, 11)
    outlier_points = np.random.randint(10, 20, 2)
    samples = np.concatenate((base_points, outlier_points))
    np.random.shuffle(samples)
    return samples
def fill_data():
    """Build two outlier-filtered random series, each prefixed with a leading zero.

    Returns a (x, y) tuple; each series comes from `MadDog.find_outliers`
    applied to freshly generated data.
    """
    xs = np.concatenate((np.array([0]), MadDog.find_outliers(generate())))
    ys = np.concatenate((np.array([0]), MadDog.find_outliers(generate())))
    return xs, ys
def savitzky(x, y, ploy_nom):
    """Smooth both series with a Savitzky-Golay filter and return them as a tuple.

    Bug fix: `ploy_nom` (the polynomial order, sic) was previously ignored and
    the order was hard-coded to 10, even though the call site passes 2.

    :param x: first data series
    :param y: second data series
    :param ploy_nom: polynomial order of the local least-squares fit
    :return: (filtered_x, filtered_y)
    """
    # Window spans all but one sample, matching the original behavior.
    return (savgol_filter(x, len(x) - 1, ploy_nom),
            savgol_filter(y, len(y) - 1, ploy_nom))
def map(x_filtered, y_filtered, x, y, title="title"):
    """Show 2D-histogram heatmaps: first of the raw series, then of the filtered one.

    NOTE(review): shadows the built-in `map`; `title` is accepted but never
    used — both kept for interface compatibility.
    """
    # Heatmap of the raw data
    heat_raw, x_edges, y_edges = np.histogram2d(x, y, bins=50)
    raw_extent = [x_edges[0], x_edges[-1], y_edges[0], y_edges[-1]]
    plt.clf()
    plt.imshow(heat_raw.T, extent=raw_extent, origin='lower')
    plt.show()
    # Heatmap of the filtered data
    heat_filtered, x_edges, y_edges = np.histogram2d(x_filtered, y_filtered, bins=50)
    filtered_extent = [x_edges[0], x_edges[-1], y_edges[0], y_edges[-1]]
    plt.clf()
    plt.imshow(heat_filtered.T, extent=filtered_extent, origin='lower')
    plt.show()
def show(x_filtered, y_filtered, x, y, title="Lorem ipsum"):
    """Plot the filtered (red) and raw (green) series together on one labelled figure."""
    figure = plt.figure()
    figure.subplots()
    plt.plot(x_filtered, y_filtered, 'red', marker="o")
    plt.plot(x, y, 'green', marker="o")
    plt.subplots_adjust(bottom=0.25)
    plt.xlabel('x')
    plt.ylabel('y')
    plt.title(title)
    plt.legend(["Filter", "Raw"])
    plt.show()
# --- Script body: generate data, smooth it, and display both series ---
# Generating the noisy signal
x, y = fill_data()
print(len(y))
# Savitzky-Golay filter
x_filtered, y_filtered = savitzky(x, y, 2)
print("X unfiltered>> ", x)
print("Y unfiltered>> ", y)
print("X filtered>> ", x_filtered)
print("Y filtered>> ", y_filtered)
show(x_filtered, y_filtered, x, y)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.