text stringlengths 8 6.05M |
|---|
# CHALLENGE: https://www.hackerrank.com/challenges/30-2d-arrays
def build_arr():
    """Read a 6x6 grid of integers from stdin, one row per line.

    Each line holds space-separated integers; returns a list of 6 rows.
    """
    rows = []
    for _ in range(6):
        rows.append([int(token) for token in input().strip().split(' ')])
    return rows
def is_square_arr(arr):
    """Return True if ``arr`` is an n x n 2D list; raise ValueError otherwise."""
    size = len(arr)
    for row in arr:
        if len(row) != size:
            raise ValueError('Input is not a n * n 2d array')
    return True
def calc_hourglass_sums(arr):
    """Return the sums of every 3x3 "hourglass" in the square grid ``arr``.

    An hourglass is the top row of 3, the centre of the middle row, and the
    bottom row of 3. Raises ValueError (same contract as ``is_square_arr``)
    when the grid is not n x n.
    """
    # Validation inlined from is_square_arr: identical check and message.
    n = len(arr)
    for row in arr:
        if len(row) != n:
            raise ValueError('Input is not a n * n 2d array')
    sums = []
    for top in range(n - 2):
        for left in range(n - 2):
            top_row = sum(arr[top][left:left + 3])
            bottom_row = sum(arr[top + 2][left:left + 3])
            middle = arr[top + 1][left + 1]
            sums.append(top_row + middle + bottom_row)
    return sums
# Script entry point: read the 6x6 grid from stdin and print the maximum
# hourglass sum (HackerRank "Day 11: 2D Arrays" challenge).
arr = build_arr()
sums = calc_hourglass_sums(arr)
print(max(sums))
|
"""cryptarchive server"""
from twisted.internet import threads
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.internet.protocol import Factory
from twisted.protocols.basic import IntNStringReceiver
from cryptarchive.challenge import Challenge
from cryptarchive import constants
from cryptarchive.usermanager import UserManager
class CryptarchiveServerProtocol(IntNStringReceiver):
"""
The Protocol for the cryptarchive server.
For security reasons, each protocol instance is one-user only.
"""
structFormat = constants.MESSAGE_LENGTH_PREFIX
prefixLength = constants.MESSAGE_LENGTH_PREFIX_LENGTH
MAX_LENGTH = constants.MAX_MESSAGE_LENGTH
# state constants
STATE_IGNORE = 0
STATE_WAIT_VERSION = 5
STATE_WAIT_USERNAME = 1
STATE_WAIT_CHALLENGE_RESPONSE = 2
STATE_READY = 3
STATE_WRITING = 4
def connectionMade(self):
self.state = self.STATE_WAIT_VERSION
self.userid = None
self.challenge = None
self.cur_f = None
IntNStringReceiver.connectionMade(self)
@inlineCallbacks
def stringReceived(self, s):
"""called when a string was received."""
if len(s) == 0:
# ignore message
pass
if self.state == self.STATE_IGNORE:
# ignore message
pass
elif self.state == self.STATE_WAIT_VERSION:
if s != constants.COM_VERSION:
self.sendString("VERSION-FAIL")
self.transport.loseConnection()
else:
self.state = self.STATE_WAIT_USERNAME
self.sendString("VERSION-OK")
elif self.state == self.STATE_WAIT_USERNAME:
# username received
self.userid = self.factory.usermanager.get_userid(s)
# send challenge
self.challenge = yield self.factory.get_challenge(self.userid)
ser = self.challenge.dumps()
self.state = self.STATE_WAIT_CHALLENGE_RESPONSE
self.sendString(ser)
elif self.state == self.STATE_WAIT_CHALLENGE_RESPONSE:
valid = self.challenge.check_solution(s)
if valid:
self.state = self.STATE_READY
self.sendString("AUTH-OK")
else:
self.state = self.STATE_IGNORE
self.sendString("AUTH-FAIL")
self.transport.loseConnection()
elif self.state == self.STATE_READY:
action, data = s[0], s[1:]
if action == constants.ACTION_GET:
p = self.factory.usermanager.get_file_path(self.userid, data)
if not p.exists():
self.sendString("E")
else:
self.sendString("O")
with p.open("rb") as fin:
yield self.async_send(fin)
self.state = self.STATE_IGNORE
self.transport.loseConnection()
elif action == constants.ACTION_SET:
p = self.factory.usermanager.get_file_path(self.userid, data)
self.cur_f = p.open("wb")
self.state = self.STATE_WRITING
elif action == constants.ACTION_DELETE:
p = self.factory.usermanager.get_file_path(self.userid, data)
if not p.exists:
res = constants.RESPONSE_ERROR + "File not found!"
else:
try:
p.remove()
except Exception as e:
res = constants.RESPONSE_ERROR + repr(e)
else:
res = constants.RESPONSE_OK + "File deleted."
self.state = self.STATE_IGNORE
self.sendString(res)
self.transport.loseConnection()
else:
self.transport.loseConnection()
elif self.state == self.STATE_WRITING:
self.cur_f.write(s)
else:
self.transport.loseConnection()
raise RuntimeError("Logic Error: reached invalid state!")
def connectionLost(self, reason):
"""called when the connection was lost."""
if self.cur_f is not None:
self.cur_f.close()
self.cur_f = None
IntNStringReceiver.connectionLost(self, reason)
@inlineCallbacks
def async_send(self, fin):
"""
Send file asynchroneous.
:param fin: file to read
:type fin: file
:return: deferred which will be called when finished
:rtype: Deferred
"""
while True:
n = 32 * 1024 # 32 KB
data = yield threads.deferToThread(fin.read, n)
if not data:
break
self.sendString(data)
def lengthLimitExceeded(self, length):
"""called when the length limit is exceeded."""
if self.factory.verbose:
print "WARNING: Message length exceeds self.MAX_LENGTH: " + str(length)
self.transport.loseConnection()
class CryptarchiveServerFactory(Factory):
    """
    Factory for the cryptarchive server.
    :param path: path where the files will be stored.
    :type path: str or FilePath
    """
    protocol = CryptarchiveServerProtocol

    def __init__(self, path, verbose=False):
        self.verbose = verbose
        self.usermanager = UserManager(path)

    def buildProtocol(self, addr):
        """build a protocol for the communication with the client"""
        proto = self.protocol()
        proto.factory = self
        return proto

    @inlineCallbacks
    def get_challenge(self, userid):
        """returns a challenge for the user (a Deferred firing with it)."""
        if not self.usermanager.user_exists(userid):
            # unknown user: hand out an unsolvable challenge — presumably so
            # that clients cannot probe which usernames exist
            returnValue(Challenge.generate_unsolvable_challenge())
        authblock_path = self.usermanager.get_authblock_path(userid)
        hash_path = self.usermanager.get_hash_path(userid)
        authblock = yield self.load_file_in_thread(authblock_path)
        expected_hash = yield self.load_file_in_thread(hash_path)
        returnValue(Challenge.generate_challenge(authblock, expected_hash))

    def load_file_in_thread(self, path):
        """
        Load the specified file in a worker thread.
        :param path: the path of the file to load
        :type path: FilePath
        :return: a deferred which will be called with the file content
        :rtype: Deferred
        """
        return threads.deferToThread(path.getContent)
|
def count_prime_digits(num):
    """Return how many digits of ``num`` are prime (i.e. 2, 3, 5 or 7).

    BUGFIX vs. the original loop: the digit 0 was wrongly counted as prime
    (its trial-division range was empty, leaving flag == 0), and negative
    input caused an infinite loop. abs() handles negatives; a digit lookup
    replaces the trial division.
    """
    count = 0
    for digit in str(abs(num)):
        if digit in '2357':
            count += 1
    return count


if __name__ == '__main__':
    # Guarded so importing this module does not block on stdin.
    num = int(input("enter a number: "))
    print(count_prime_digits(num))
import json
import re
from django import forms
from django.core import validators
from gim.core.models import (LabelType, LABELTYPE_EDITMODE, Label,
GITHUB_STATUS_CHOICES, Milestone, Repository)
from gim.front.mixins.forms import LinkedToRepositoryFormMixin, LinkedToUserFormMixin
from gim.front.utils import FailRegexValidator
from gim.front.widgets import EnclosedInput
class LabelTypeEditForm(LinkedToRepositoryFormMixin):
    """Form to create/edit a LabelType (a "group" of labels) of a repository.

    A group can be configured in one of three edit modes: a raw regex, a
    format string, or an explicit list of labels. The two latter modes are
    converted into the final regex when the form is cleaned (see ``clean``).
    """

    format_string = forms.CharField(
        required=False,
        label=u'Format',
        # Typos fixed: "optionnally" -> "optionally", "an number" -> "a number"
        help_text=u'Write the format for labels to match this group, inserting the '
                  u'strings <strong>{label}</strong> for the part to display, and '
                  u'optionally <strong>{order}</strong> if your labels include a '
                  u'number to order them<br />If you want the label to be a '
                  u'number that serves as the order too, use '
                  u'<strong>{ordered-label}</strong>',
    )

    # Applied to `format_string` only when edit_mode == FORMAT
    # (see `_clean_fields`).
    format_string_validators = [
        validators.RegexValidator(
            re.compile(r'\{(?:ordered\-)?label\}'),
            'Must contain a "{label}" or "{ordered-label}" part',
            'no-label'
        ),
        # TODO: check many label or order
        FailRegexValidator(
            re.compile(r'(?:\{label\}.*\{ordered-label\})|(?:\{ordered-label\}.*\{label\})'),
            # BUGFIX: the closing quote was misplaced ('"{label"}') in this
            # user-facing message
            'If "{ordered-label}" is present, must not contain "{label}"',
            'ordered-label-and-label'
        ),
        FailRegexValidator(
            re.compile(r'(?:\{order\}.*\{ordered-label\})|(?:\{ordered-label\}.*\{order\})'),
            # BUGFIX: same misplaced quote ('"{order"}')
            'If "{ordered-label}" is present, must not contain "{order}"',
            'ordered-label-and-order'
        ),
    ]

    labels_list = forms.CharField(
        required=False,
        label=u'Labels',
        # Typo fixed: "coma" -> "comma"
        help_text=u'Choose which labels to add in this group. You can also add new ones (use a comma to separate them)',
    )

    class Meta:
        model = LabelType
        fields = ('name', 'edit_mode', 'regex', 'format_string', 'labels_list', 'is_metric')
        widgets = {
            'regex': forms.TextInput,
        }

    def __init__(self, *args, **kwargs):
        # Pre-fill format_string / labels_list from the instance's stored
        # edit_details, depending on its current edit mode.
        if kwargs.get('instance') and kwargs['instance'].edit_mode != LABELTYPE_EDITMODE.REGEX and kwargs['instance'].edit_details:
            if 'initial' not in kwargs:
                kwargs['initial'] = {}
            if kwargs['instance'].edit_mode == LABELTYPE_EDITMODE.FORMAT:
                kwargs['initial']['format_string'] = kwargs['instance'].edit_details.get('format_string', '')
            elif kwargs['instance'].edit_mode == LABELTYPE_EDITMODE.LIST:
                kwargs['initial']['labels_list'] = kwargs['instance'].edit_details.get('labels_list', '')
        super(LabelTypeEditForm, self).__init__(*args, **kwargs)
        self.fields['name'].widget.attrs['placeholder'] = 'Choose a name for this group'
        self.fields['regex'].required = False  # will be tested depending on the edit_mode
        self.fields['edit_mode'].widget.attrs['class'] = 'uniform'
        self.fields['edit_mode'].help_text = 'Changing mode don\'t keep your configuration, except when changing to "Regular Expression" (each mode convert its configuration to a regular expression)'
        # expose existing labels to the javascript widget
        self.fields['labels_list'].widget.attrs.update({
            'data-labels': self.get_labels_json(),
        })

    def get_labels_json(self):
        """Return the repository's labels as a JSON dict keyed by name."""
        data = {l.name: {'name': l.name, 'color': l.color}
                for l in self.repository.labels.all()}
        return json.dumps(data)

    def _clean_fields(self):
        """
        First check the edit_mode then set required flag, and validators, for
        the edit field corresponding to the chosen edit_mode
        """
        self.edit_mode_value = None
        try:
            # get the value of the edit_mode field
            edit_mode_field = self.fields['edit_mode']
            self.edit_mode_value = edit_mode_field.widget.value_from_datadict(
                self.data, self.files, self.add_prefix('edit_mode'))
            self.edit_mode_value = edit_mode_field.clean(self.edit_mode_value)
            # adapt required attribute and validators of other fields
            if self.edit_mode_value == LABELTYPE_EDITMODE.REGEX:
                self.fields['regex'].required = True
            elif self.edit_mode_value == LABELTYPE_EDITMODE.FORMAT:
                self.fields['format_string'].required = True
                self.fields['format_string'].validators = self.format_string_validators
            elif self.edit_mode_value == LABELTYPE_EDITMODE.LIST:
                self.fields['labels_list'].required = True
        except Exception:
            # an invalid edit_mode will be reported by the regular field
            # cleaning below
            pass
        # then finally launch the real clean_field process
        super(LabelTypeEditForm, self)._clean_fields()

    def clean(self):
        """
        Create final regex based on input values for other modes
        """
        data = super(LabelTypeEditForm, self).clean()
        # ROBUSTNESS: .get — 'is_metric' is absent from cleaned data if it
        # failed field-level validation (previously a KeyError).
        if data.get('is_metric'):
            metric_error = False
            if self.edit_mode_value == LABELTYPE_EDITMODE.LIST:
                # a plain list of labels can never carry an order
                data['is_metric'] = False
            elif self.edit_mode_value == LABELTYPE_EDITMODE.FORMAT:
                format_string = data.get('format_string')
                metric_error = bool(format_string) and not('{order}' in format_string or
                                                           '{ordered-label}' in format_string)
            elif self.edit_mode_value == LABELTYPE_EDITMODE.REGEX:
                regex = data.get('regex')
                metric_error = bool(regex) and '(?P<order>\d+)' not in regex
            if metric_error:
                if not self._errors:
                    self.add_error('is_metric', u'You can only set a group as metric if it has an order')
                return data
        # convert the format / list configuration into the final regex
        if self.edit_mode_value == LABELTYPE_EDITMODE.FORMAT and data.get('format_string'):
            data['regex'] = LabelType.regex_from_format(data['format_string'])
        if self.edit_mode_value == LABELTYPE_EDITMODE.LIST and data.get('labels_list'):
            data['regex'] = LabelType.regex_from_list(data['labels_list'])
        return data

    def save(self, *args, **kwargs):
        """
        Reset the edit_details json field that keeps the mode configuration
        """
        self.instance.edit_details = {}
        if self.instance.edit_mode == LABELTYPE_EDITMODE.FORMAT:
            self.instance.edit_details = {'format_string': self.cleaned_data['format_string']}
        elif self.instance.edit_mode == LABELTYPE_EDITMODE.LIST:
            # NOTE: `unicode` => this module targets Python 2
            labels = ','.join(sorted(self.cleaned_data['labels_list'].split(','), key=unicode.lower))
            self.instance.edit_details = {'labels_list': labels}
        return super(LabelTypeEditForm, self).save(*args, **kwargs)
class LabelTypePreviewForm(LabelTypeEditForm):
    """Variant of LabelTypeEditForm used for previews only."""

    def clean(self):
        data = super(LabelTypePreviewForm, self).clean()
        # previews must not fail on (or trigger) unicity checks
        self._validate_unique = False
        return data
class LabelEditForm(LinkedToRepositoryFormMixin):
    """Form to create/edit a single Label of a repository."""

    # 6 hex digits, case-insensitive, without the leading '#'
    color_validator = validators.RegexValidator(
        re.compile('^[0-9a-f]{6}$', flags=re.IGNORECASE),
        'Must be a valid hex color (without the #)',
        'invalid-color'
    )
    # commas are refused — presumably because labels lists are comma-joined
    # elsewhere (see LabelTypeEditForm.labels_list); confirm
    label_name_validator = validators.RegexValidator(
        re.compile('^[^\,]+$'),
        'Must not contain a comma (",")',
        'comma-refused'
    )

    class Meta:
        model = Label
        fields = ('name', 'color', )

    def __init__(self, *args, **kwargs):
        super(LabelEditForm, self).__init__(*args, **kwargs)
        # 'name' may be absent — presumably guarded for subclasses that
        # alter Meta.fields; confirm
        if 'name' in self.fields:
            self.fields['name'].validators = [self.label_name_validator]
        self.fields['color'].validators = [self.color_validator]

    def clean_name(self):
        # normalize: strip whitespace, map None to ''
        return (self.cleaned_data.get('name') or '').strip()

    def save(self, commit=True):
        """
        Set the github status
        """
        # mark the instance so the github sync layer knows whether to push
        # an update or a creation
        if self.instance.pk:
            self.instance.github_status = GITHUB_STATUS_CHOICES.WAITING_UPDATE
        else:
            self.instance.github_status = GITHUB_STATUS_CHOICES.WAITING_CREATE
        return super(LabelEditForm, self).save(commit)
class TypedLabelEditForm(LabelEditForm):
    """Form to edit a label that belongs to a LabelType (a label group).

    The user types only the typed_name/order parts; ``clean`` computes the
    final full 'name' according to the group's edit mode.
    """

    class Meta(LabelEditForm.Meta):
        fields = ('label_type', 'order', 'typed_name', 'color', 'name')

    def __init__(self, *args, **kwargs):
        super(TypedLabelEditForm, self).__init__(*args, **kwargs)
        # same constraint as 'name': commas are refused
        self.fields['typed_name'].validators = [self.label_name_validator]

    def clean_label_type(self):
        # refuse a label-type from another repository, and refuse changing
        # the type of an existing label
        label_type = self.cleaned_data.get('label_type')
        if (not label_type
            or label_type.repository_id != self.repository.id
            or self.instance and self.instance.pk and label_type.id != self.instance.label_type_id
        ):
            raise forms.ValidationError('Impossible to save this label')
        return label_type

    def clean_typed_name(self):
        # normalize: strip whitespace, map None to ''
        return (self.cleaned_data.get('typed_name') or '').strip()

    def clean(self):
        """Compute the final 'name' from typed_name + the group's mode."""
        cleaned_data = super(TypedLabelEditForm, self).clean()
        label_type = cleaned_data.get('label_type')
        typed_name = cleaned_data.get('typed_name')
        if label_type and typed_name :  # if not, we are in error mode
            if label_type.edit_mode == LABELTYPE_EDITMODE.REGEX:
                if not label_type.match(typed_name):
                    raise forms.ValidationError('This label does not match the regex')
            if label_type.edit_mode == LABELTYPE_EDITMODE.FORMAT:
                # try to get the full label name, will raise ValidationError if problem
                cleaned_data['name'] = label_type.create_from_format(
                    typed_name,
                    cleaned_data.get('order')
                )
            else:  # label_type.edit_mode == LABELTYPE_EDITMODE.LIST:
                # (REGEX mode also lands here: the typed name is the name)
                cleaned_data['name'] = typed_name
            # remember the name if changed to remove it from the list
            if self.instance and self.instance.name != cleaned_data['name']:
                self._old_label_name = self.instance.name
        return cleaned_data

    @property
    def errors(self):
        # report 'typed_name' errors under the 'name' key, which is what the
        # template renders.
        # NOTE(review): super(LabelEditForm, self) skips LabelEditForm in the
        # MRO — harmless only as long as LabelEditForm does not override
        # `errors`; presumably TypedLabelEditForm was intended. Confirm.
        errors = super(LabelEditForm, self).errors
        if errors:
            if 'typed_name' in errors:
                if 'name' not in errors:
                    errors['name'] = errors['typed_name']
                del errors['typed_name']
        return errors

    def save(self, commit=True):
        label = super(TypedLabelEditForm, self).save(commit=commit)
        # manage the list of labels if we have this kind of label-type
        label_type = self.cleaned_data['label_type']
        if label_type.edit_mode == LABELTYPE_EDITMODE.LIST:
            labels_list = label_type.edit_details['labels_list'].split(u',')
            type_updated = False
            # if the label changed its name, we remove the old one from the list
            if hasattr(self, '_old_label_name') and self._old_label_name in labels_list:
                labels_list.remove(self._old_label_name)
                type_updated = True
            # if the label is new in the type list, add it
            if label.name not in labels_list:
                labels_list.append(label.name)
                type_updated = True
            if type_updated:
                # keep the group's stored list and its regex in sync
                label_type.edit_details['labels_list'] = u','.join(labels_list)
                label_type.regex = label_type.regex_from_list(labels_list)
                label_type.save()
        return label
class DueOnWidget(EnclosedInput, forms.DateInput):
    """Text input for a milestone due date, with datepicker/clear addons."""

    def __init__(self, attrs=None):
        attrs = attrs or {}
        # defaults only — values explicitly passed by the caller win
        attrs.setdefault('placeholder', 'yyyy-mm-dd')
        attrs.setdefault('maxlength', 10)
        super(DueOnWidget, self).__init__(
            attrs=attrs,
            input_type='text',
            prepend='icon-th',
            append='icon-remove',
            addons_titles={
                'prepend': 'Click to open a datepicker',
                'append': 'Click to clear the due-on date',
            },
            format='%Y-%m-%d',
            parent_classes=['date', 'due_on'],
        )
class MilestoneEditForm(LinkedToRepositoryFormMixin):
    """Form to create/edit a Milestone; exposes its state as an 'open' flag."""

    open = forms.BooleanField(required=False)

    class Meta:
        model = Milestone
        fields = ('title', 'description', 'due_on', 'open')

    def __init__(self, *args, **kwargs):
        # pre-check the "open" field unless the instance is explicitly closed
        inst = kwargs.get('instance')
        if not (inst and inst.state and inst.state != 'open'):
            kwargs.setdefault('initial', {})['open'] = True
        super(MilestoneEditForm, self).__init__(*args, **kwargs)
        self.fields['title'].widget = forms.TextInput()
        self.fields['due_on'].widget = DueOnWidget()

    def save(self, commit=True):
        """
        Set the github status, and the state
        """
        self.instance.state = 'open' if self.cleaned_data['open'] else 'closed'
        # mark the instance so the github sync layer pushes the right call
        self.instance.github_status = (GITHUB_STATUS_CHOICES.WAITING_UPDATE
                                       if self.instance.pk
                                       else GITHUB_STATUS_CHOICES.WAITING_CREATE)
        return super(MilestoneEditForm, self).save(commit)
class MilestoneCreateForm(LinkedToUserFormMixin, MilestoneEditForm):
    # attribute name used to store the current user on the milestone —
    # presumably consumed by LinkedToUserFormMixin; confirm in the mixin
    user_attribute = 'creator'
class HookToggleForm(forms.Form):
    """Form carrying a single hidden boolean flag, ``hook_set``."""
    hook_set = forms.BooleanField(required=False, widget=forms.HiddenInput)
class MainMetricForm(forms.ModelForm):
    """Form to select the "main metric" label-type of a repository."""

    class Meta:
        model = Repository
        fields = ['main_metric']

    def __init__(self, *args, **kwargs):
        super(MainMetricForm, self).__init__(*args, **kwargs)
        # only offer label-types flagged as metric, in a mode that allows it
        self.fields['main_metric'].queryset = self.instance.label_types.filter(
            is_metric=True, edit_mode__in=LABELTYPE_EDITMODE.MAYBE_METRIC.values.keys())
        self.fields['main_metric'].empty_label = '--- No main metric ---'

    def save(self, commit=True):
        """Persist only the ``main_metric`` field.

        BUGFIX: now returns the saved instance, as ModelForm.save() callers
        expect (previously returned None). ``commit`` is intentionally
        ignored: the instance is always saved, and only this field.
        """
        self.instance.save(update_fields=['main_metric'])
        return self.instance

    def clean_main_metric(self):
        """Refuse a group that cannot actually be used as a metric."""
        label_type = self.cleaned_data.get('main_metric')
        if label_type:
            if not label_type.can_be_metric():
                raise forms.ValidationError('The group "%s" cannot be used as a metric' % label_type)
        return label_type
|
"""CRUD operations."""
from model import db, User, Comment, FavRecycler, connect_to_db
import materials
def create_user(name, email, password):
    """Create, persist and return a new User."""
    new_user = User(name=name, email=email, password=password)
    db.session.add(new_user)
    db.session.commit()
    return new_user
def get_user_by_id(user_id):
    """Return the User with the given primary key, or None."""
    query = User.query.filter(User.user_id == user_id)
    return query.first()
def get_user_by_email(email):
    """Return the User with the given email address, or None."""
    query = User.query.filter(User.email == email)
    return query.first()
def get_favorited_recyclers(user_id):
    """Return every FavRecycler row belonging to the given user."""
    query = FavRecycler.query.filter(FavRecycler.user_id == user_id)
    return query.all()
def is_recycler_favorited(location_id, user_id):
    """Return True if the user has favorited the recycler at location_id.

    Idiom fix: `if X: return True / return False` collapsed into a single
    `is not None` expression (one query either way).
    """
    match = FavRecycler.query.filter(
        FavRecycler.location_id == location_id,
        FavRecycler.user_id == user_id).first()
    return match is not None
def get_favorited_location_ids_list(user_id):
    """Return the location IDs of every recycler the user has favorited."""
    favorites = FavRecycler.query.filter(FavRecycler.user_id == user_id).all()
    return [favorite.location_id for favorite in favorites]
def user_id_if_match(email, password):
    """Return the user_id if the email/password pair matches, else None.

    PERF/BUGFIX: the original issued the same email lookup three times;
    now a single query is reused.
    NOTE(review): passwords are compared in plain text — they should be
    hashed (e.g. werkzeug.security) before storing/comparing.
    """
    user = get_user_by_email(email)
    if user and user.password == password:
        return user.user_id
def create_comment(user_id, name, location_id, comment):
    """Create, persist and return a new Comment."""
    new_comment = Comment(user_id=user_id, name=name,
                          location_id=location_id, comment=comment)
    db.session.add(new_comment)
    db.session.commit()
    return new_comment
def get_recycler_comments(location_id):
    """Return every comment attached to the recycler at location_id."""
    query = Comment.query.filter(Comment.location_id == location_id)
    return query.all()
def fav_a_recycler(user_id, location_id):
    """Create, persist and return a new FavRecycler for the user."""
    favorite = FavRecycler(user_id=user_id, location_id=location_id)
    db.session.add(favorite)
    db.session.commit()
    return favorite
if __name__ == '__main__':
    # When run directly, connect to the database so the CRUD helpers above
    # can be exercised from an interactive session.
    from server import app
    connect_to_db(app)
|
from string import ascii_letters, digits
from random import choice
def random_str(str_len=32):
    """Return a random alphanumeric string of length ``str_len``.

    Fixes: no longer shadows the builtin ``str`` and replaces the quadratic
    ``+=`` loop with a single join.
    NOTE: ``random.choice`` is not cryptographically secure; use the
    ``secrets`` module if these strings guard anything sensitive.
    """
    alphabet = ascii_letters + digits
    return ''.join(choice(alphabet) for _ in range(str_len))
|
#!/usr/bin/env python
import cgi
form = cgi.FieldStorage()
import sqlite3
db = sqlite3.connect('todo.db')
id = form.getvalue('id')
sql = "delete from story where id = %s" % id
cursor = db.cursor()
cursor.execute(sql)
db.commit()
db.close()
print "Content-type: text/html\n"
print "<meta http-equiv='refresh' content='0;URL=http://localhost:8000/cgi-bin/list.py'>"
|
class Solution:
    def sumOfLeftLeaves(self, root: TreeNode) -> int:
        """Return the sum of the values of all left leaves in the tree."""
        self.x = 0  # running total, kept on the instance as in the original

        def visit(node):
            if not node:
                return
            left = node.left
            # a "left leaf" is a left child with no children of its own
            if left and not left.left and not left.right:
                self.x += left.val
            visit(left)
            visit(node.right)

        visit(root)
        return self.x
#apollonean_gasket.py
"""
-----------------------------------------------------------------------------------------------------
Generates a visualization of a region of the Apollonean Gasket by printing spheres in space in Maya
-----------------------------------------------------------------------------------------------------
One function named run()
Parameters:
max_Iteration - maximum number of cubes to print
size - the size of our printing canvas
Script modified by Vlasis Gogousis [vgogousis@gmail.com]
MA3D o 2017
Original script source:
FB36, 2012. Active State [online].
Available from: http://code.activestate.com/recipes/578016-apollonian-gasket-fractal-using-ifs/
[Accessed 13 February 2017]
"""
#******** IMPORT MODULES ********#
import maya.cmds as cmds
import random, math, numpy
#******** RUN APOLLONEAN GASKET REGION VISUALIZATION ********#
def run(max_Iteration, size):
    """
    Generates a visualization of a region of the Apollonean Gasket by printing spheres in space in Maya

    Parameters:
    max_Iteration - maximum number of spheres to print
    size - the size of our printing canvas
    """
    # Initialize scene
    cmds.file(new=True, force=True)
    cmds.lookThru('top')
    cmds.grid(toggle=False)
    # Setup window for progress bar
    window = cmds.window()
    cmds.columnLayout()
    progressControl = cmds.progressBar(maxValue=max_Iteration, width=300)
    cmds.showWindow(window)
    # Initialize fractal variables
    s = math.sqrt(3.0)

    def f(z):
        # base transform of the iterated function system
        return 3.0 / (1.0 + s - z) - (1.0 + s) / (2.0 + s)

    # IMPROVED: the three IFS maps are plain callables instead of source
    # strings fed to eval() — same mathematics, no eval.
    ifs = [
        lambda z: f(z),
        lambda z: f(z) * complex(-1.0, s) / 2.0,
        lambda z: f(z) * complex(-1.0, -s) / 2.0,
    ]
    # region of the complex plane mapped onto the canvas
    xa = -0.6
    xb = 0.9
    ya = -0.75
    yb = 0.75
    z = complex(0.0, 0.0)
    # Create shader to paint spheres with
    shader = cmds.shadingNode("blinn", asShader=True, name="shader" + str(1))
    attr = shader + ".color"
    cmds.setAttr(attr, 1, 1, 1)
    # Draw max_Iteration number of spheres
    for i in range(max_Iteration):
        # apply one of the three IFS maps, chosen at random, then map z
        # onto integer canvas coordinates kx, ky
        z = ifs[random.randint(0, 2)](z)
        kx = int((z.real - xa) / (xb - xa) * (size - 1))
        ky = int((z.imag - ya) / (yb - ya) * (size - 1))
        # Update progress bar
        cmds.progressBar(progressControl, edit=True, step=1)
        # If kx and ky are within our drawing canvas draw sphere:
        if kx >= 0 and kx < size and ky >= 0 and ky < size:
            cmds.polySphere(n="sp" + str(i))
            cmds.move(kx, 0, ky)
            cmds.scale(0.5, 0.5, 0.5)
            cmds.hyperShade(assign="shader" + str(1))
            # Update viewport
            cmds.viewFit('top', all=True)
            cmds.dolly('top', os=1.5)
            cmds.refresh()
    # Update progress bar and viewport, then hide the progress window
    cmds.progressBar(progressControl, edit=True, step=1)
    cmds.refresh()
    cmds.toggleWindowVisibility(window)
import gen
from language import *
from macropy.experimental.pattern import macros, _matching, switch, patterns, LiteralMatcher, TupleMatcher, PatternMatchException, NameMatcher, ListMatcher, PatternVarConflict, ClassMatcher, WildcardMatcher
from itertools import *
def compile2SHA(ha):
    """Compile the hybrid automaton *ha* into its SHA form via gen.preprocess."""
    sha = gen.preprocess(ha)
    return sha
def getShaEdges(sha):
    """Return the edges of *sha* with both endpoint location names prefixed
    by the automaton name ('<ha-name>_<loc-name>')."""
    # destructure the automaton via macropy pattern matching
    with patterns:
        Ha(n, ss, sss, e, gvs, igvs) << sha
    rets = [None]*len(e)
    for i, s in enumerate(e):
        with patterns:
            Edge(l1, l2, guard, updateList, eventList) << s
        rets[i] = Edge(n+'_'+l1, n+'_'+l2,
                       guard, updateList, eventList)
    return rets
def getShaLocations(sha):
    """Return the locations of *sha* renamed to '<ha-name>_<loc-name>'."""
    # destructure the automaton via macropy pattern matching
    with patterns:
        Ha(n, ss, sss, e, gvs, igvs) << sha
    rets = [None]*len(ss)
    for i, s in enumerate(ss):
        with patterns:
            Loc(name, odeList, combinatorList, invariant) << s
        rets[i] = Loc(n+'_'+name, odeList, combinatorList, invariant)
    return rets
def compose(haList):
    """Parallel-compose a list of hybrid automata into one SHA.

    Locations and edges of the product automaton are built as the cartesian
    product of the components' locations/edges; extra edges are synthesized
    for the interleavings. Relies on macropy pattern matching throughout.
    NOTE: Python 2 code (dict.iterkeys, list-returning filter).
    """
    # First generate the SHA
    shas = ([compile2SHA(r) for r in haList])
    name = []
    newgvs = []
    newigvs = []
    nsss = []
    for sha in shas:
        with patterns:
            Ha(n, ss, sss, e, gvs, igvs) << sha
        # accumulate global/internal variables and component names
        newgvs += gvs
        newigvs += igvs
        name += [n]
        with patterns:
            Loc(lname, odeList, combinatorList, invariant) << sss
        nsss += [n+'_'+lname]
    ha_name = '_'.join(name)
    init_loc_name = '_'+'_'.join(nsss)
    init_loc = None
    # get states: product of the (renamed) locations of all components
    states = [getShaLocations(s) for s in shas]
    pset = list(product(*states))
    newLoc = [None]*len(pset)
    newLocNames = [None]*len(pset)
    for ii, i in enumerate(pset):
        newLocname = ''
        newLocodeList = []
        newLoccList = []
        newLocinvsdict = {}
        for loc in i:
            with patterns:
                Loc(name, odeList, cList, invs) << loc
            newLocname += '_'+name
            newLocodeList += odeList
            newLoccList += cList
            # merge invariant dicts; on a key clash with differing values,
            # the clashing entries are appended
            for k in invs.iterkeys():
                if k in newLocinvsdict.keys():
                    if invs[k] != newLocinvsdict[k]:
                        for jk in invs[k]:
                            newLocinvsdict[k].append(jk)
                else:
                    newLocinvsdict.update(invs)
        newLocNames[ii] = newLocname
        newLoc[ii] = Loc(newLocname, newLocodeList,
                         newLoccList, newLocinvsdict)
        if newLocname == init_loc_name:
            # this product location is the combined initial location
            init_loc = newLoc[ii]
    # Now build the edges (synchronous product of component edges)
    edges = [getShaEdges(s) for s in shas]
    pset = list(product(*edges))
    newEdges = [None]*len(pset)
    for ii, i in enumerate(pset):
        nl1 = ''
        nl2 = ''
        nguards = {}
        nupdateList = []
        neventList = []
        for edge in i:
            with patterns:
                Edge(l1, l2, guards, updateList, eventList) << edge
            nl1 += '_'+l1
            nl2 += '_'+l2
            nupdateList += updateList
            neventList += eventList
            # Dictionaries need to be handeled correctly
            for k in guards.iterkeys():
                if k in nguards.keys() and guards[k] != nguards[k]:
                    for jk in guards[k]:
                        nguards[k].append(jk)
                else:
                    nguards.update(guards)
        newEdges[ii] = Edge(nl1, nl2, nguards, nupdateList, neventList)
    # Making the xtra edges (interleaved moves)
    names = []
    same_edge_names = []
    for edges in pset:
        for edge in edges:
            # NOTE(review): this Edge pattern binds only 4 fields whereas
            # Edge is matched with 5 elsewhere — confirm against the Edge
            # definition in `language`.
            with patterns:
                Edge(l1, l2, u, r) << edge
            names += [l1, l2]
            same_edge_names += [(l1, l2)]
    names = set(names)
    same_edge_names = set(same_edge_names)
    # candidate name tuples that do not pair a source with its destination
    name_permutations = permutations(names, len(pset[0]))
    res = []
    for i in name_permutations:
        there = False
        for (x, y) in same_edge_names:
            if x in i and y in i and x != y:
                there = True
                break
        if not there:
            res += [i]
    # Now make the init_state and dest_state
    xtraEdges = []
    for edges in pset:
        counter = 0
        for edge in edges:
            if counter > 0:
                break
            counter += 1
            oedges = filter(lambda x: x != edge, edges)
            with patterns:
                Edge(l1, l2, myg, myu, mye) << edge
            init_name = [l1]
            dest_name = [l2]
            all_others = [l1, l2]
            for oe in oedges:
                # NOTE(review): 4-field Edge pattern again — confirm arity.
                with patterns:
                    Edge(on1, on2, ou, ur) << oe
                init_name += [on1]
                dest_name += [on2]
                all_others += [on1, on2]
            # Get all the permutations that contain me or my
            # destination
            pps = []
            for pp in res:
                there = True
                for p in pp:
                    if p not in all_others:
                        there = False
                        break
                if there:
                    pps += [pp]
            # Remove all permutation elements that are the same as
            # the init_name
            pps = filter(lambda x: sorted(x) != sorted(tuple(init_name)),
                         pps)
            pps = filter(lambda x: sorted(x) != sorted(tuple(dest_name)),
                         pps)
            # Finally remove all the names that are not in the
            # states! They are just extras!!
            pps = filter(lambda x: '_'+'_'.join(x) in newLocNames, pps)
            # Now we can finally build these xtraEdges
            nl1 = '_' + '_'.join(init_name)
            for pp in pps:
                nl2 = ''
                nguards = {}
                nupdateList = []
                neventList = []
                for p in pp:
                    nl2 += '_'+p
                    # This means the combined destination state
                    # contains edges destination state
                    if p == l2:
                        nupdateList += myu
                        # These are the updates from other
                        # locations!
                        for oe in oedges:
                            with patterns:
                                Edge(ol1, ol2, og, ou, oev) << oe
                            for uu in ou:
                                with switch(uu):
                                    if Update.Update2(x):
                                        nupdateList += [uu]
                        neventList += mye
                        for k in myg.iterkeys():
                            if k in nguards.keys():
                                if myg[k] != nguards[k]:
                                    for jm in myg[k]:
                                        nguards[k].append(jm)
                            else:
                                nguards.update(myg)
                    else:
                        for oe in oedges:
                            with patterns:
                                Edge(ol1, ol2, oguards, oupdateList,
                                     oeventList) << oe
                            if ol2 == p:
                                nupdateList += oupdateList
                                neventList += oeventList
                                for k in oguards.iterkeys():
                                    if k in nguards.keys():
                                        if oguards[k] != nguards[k]:
                                            for jo in oguards:
                                                nguards[k].append(jo)
                                    else:
                                        nguards.update(oguards)
                            else:
                                for uu in oupdateList:
                                    with switch(uu):
                                        if Update.Update2(x):
                                            nupdateList += [uu]
                # These are the edges updates!
                for uu in myu:
                    with switch(uu):
                        if Update.Update2(x):
                            nupdateList += [uu]
                xtraEdges += [Edge(nl1, nl2,
                                   nguards, nupdateList, neventList)]
    # Now make the name of the Ha to be returned
    return Ha(ha_name, newLoc, init_loc,
              newEdges+xtraEdges, newgvs, newigvs)
|
#coding=utf-8
"""
Created on Tue Apr 9 12:33:43 2019
@author: murillon2003_00
"""
import utils
import operations
import bank_account_variables as acc
from file import load_bank_data
def main():
    """Run one interaction cycle: load data, authenticate, run one operation."""
    load_bank_data()
    print(acc.money_slips)
    print(acc.accounts_list)
    utils.header()
    account = operations.auth_account()
    if not account:
        print("Conta inválida!")
        return
    utils.clear()
    utils.header()
    chosen_option = operations.get_menu_options_typed(account)
    operations.do_operation(chosen_option, account)
if __name__ == '__main__':
    # Main loop: run one interaction cycle, wait for ENTER, clear the
    # screen, and start over (exit with Ctrl+C).
    while True:
        main()
        input('Pressione <ENTER> para continuar...')
        utils.clear()
|
import pytest
from model_objects import Product, SpecialOfferType, ProductUnit
from shopping_cart import ShoppingCart
from teller import Teller
from tests.fake_catalog import FakeCatalog
def test_ten_percent_discount():
    """A 10% offer on one product must not affect the price of another."""
    catalog = FakeCatalog()
    toothbrush = Product("toothbrush", ProductUnit.EACH)
    catalog.add_product(toothbrush, 0.99)
    apples = Product("apples", ProductUnit.KILO)
    catalog.add_product(apples, 1.99)

    basket = ShoppingCart()
    basket.add_item_quantity(apples, 2.5)

    till = Teller(catalog)
    till.add_special_offer(SpecialOfferType.TEN_PERCENT_DISCOUNT, toothbrush, 10.0)
    receipt = till.checks_out_articles_from(basket)

    # 2.5 kg x 1.99 = 4.975 — the toothbrush discount does not apply
    assert 4.975 == pytest.approx(receipt.total_price(), 0.01)
|
## Requires Python v3 and pandas (pip install pandas)
## This script takes the newcastle membership csv and attempts
## to reduce the file size as much as possible through aggregation and lookups
## Two lookup files to provide library names and dates are also created.
import csv
import os
import re
from datetime import datetime
import pandas
MEMBERDATA = '..\\data\\dashboard_newcastle_members.csv'
def read_member_data():
    """Load the Newcastle membership CSV into a pandas DataFrame.

    Fixes: the previous version opened the file handle without ever closing
    it and needlessly wrapped the read_csv result in a second DataFrame
    (read_csv already returns one; index=None was a no-op).
    """
    path = os.path.join(os.path.dirname(__file__), MEMBERDATA)
    return pandas.read_csv(path)
def run():
    """Collect the distinct values used to build the lookup files.

    NOTE(review): the computed unique values are currently unused (the
    aggregation/output step described in the file header is not implemented
    here), and `times_added` re-reads 'Date Added' — presumably the
    'Time Added' column was intended; confirm against the CSV schema.
    """
    members = read_member_data()
    postcodes = members['Postcode'].unique()
    libraries = members['Library Registered At'].unique()
    dates_added = members['Date Added'].unique()
    times_added = members['Date Added'].unique()

# executed on import: run the reduction immediately
run()
# coding:utf-8
import abc
import iconfig
class Command(object):
    """
    Base class for commands.
    """
    sub_cmd_list = None  # optional list of sub-commands, set by subclasses

    def __init__(self, cmd, args):
        self.cmd = cmd    # command name
        self.args = args  # list of argument strings

    @abc.abstractmethod
    def execute(self):
        # subclasses must implement; message kept verbatim
        # ("command not implemented yet")
        raise Exception(u"指令尚未实现")

    @staticmethod
    def real_cmd(cmd, raise_err=True, valid=True, top_cmd=None):
        """Resolve an alias to the real command name and optionally validate.

        When valid=True: if top_cmd is not given, cmd is treated as a
        top-level command; otherwise cmd is validated as a sub-command of
        top_cmd.

        :param str top_cmd: optional parent command
        :param cmd: command (or alias) to resolve
        :param raise_err: raise on an invalid command instead of returning None
        :param valid: whether to validate the resolved command
        :return: the resolved command name, or None
        """
        config = iconfig.read_config('system')
        alias = config['alias']
        # CONSISTENCY FIX: use `in` instead of the Python-2-only
        # dict.has_key() (the top_cmd lookup below already used `in`).
        cmd = alias[cmd] if cmd in alias else cmd
        if valid:
            error = False
            if top_cmd:
                top_cmd = alias[top_cmd] if top_cmd in alias else top_cmd
                cls = config['cmd_cls'][top_cmd] if top_cmd in config['cmd_cls'] else None
                if not cls:
                    error = True
                else:
                    # NOTE(review): eval of a "<module>.<Class>" path taken
                    # from config — consider importlib + getattr instead of
                    # eval if the config is not fully trusted.
                    if cmd not in dir(eval('%s.%s' % (cls.lower(), cls))):
                        error = True
            elif cmd not in config['cmd_cls']:
                error = True
            if error:
                if raise_err:
                    raise Exception(u'无效指令')
                else:
                    return None
        return cmd

    def __str__(self):
        return self.cmd + ' ' + ' '.join(self.args)
|
# URL routing for the project (old-style Django `patterns('')` — this module
# targets Django <= 1.5, where django.conf.urls.defaults still exists).
from django.conf.urls.defaults import patterns, include, url
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.conf import settings

# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()

urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'test_skryaga.views.home', name='home'),
    # url(r'^test_skryaga/', include('test_skryaga.foo.urls')),

    # Uncomment the admin/doc line below to enable admin documentation:
    url(r'^admin/doc/', include('django.contrib.admindocs.urls')),

    # Uncomment the next line to enable the admin:
    url(r'^admin/', include(admin.site.urls)),

    # NOTE(review): the r'^' prefix before 'main/' delegates everything to
    # contact.urls first; 'main/' is only reached when contact.urls has no
    # matching pattern — confirm contact.urls has no catch-all of its own.
    url(r'^', include('test_skryaga.contact.urls')),
    url(r'^main/', include('test_skryaga.main.urls')),
)

# Serve static files through Django itself only during development.
if settings.DEBUG:
    urlpatterns += staticfiles_urlpatterns()
|
import logging
import os.path
import numpy as np
from poap.controller import BasicWorkerThread, ThreadController
from pySOT.experimental_design import SymmetricLatinHypercube
from pySOT.optimization_problems import Ackley
from pySOT.strategy import SRBFStrategy
from pySOT.surrogate import CubicKernel, LinearTail, RBFInterpolant
def test_example_simple():
    """Run a small asynchronous SRBF optimization of the 10-d Ackley function."""
    # Start each run with a fresh log file.
    if not os.path.exists("./logfiles"):
        os.makedirs("logfiles")
    log_path = "./logfiles/example_simple.log"
    if os.path.exists(log_path):
        os.remove(log_path)
    logging.basicConfig(filename=log_path, level=logging.INFO)

    num_threads = 2
    max_evals = 50

    # Problem, RBF surrogate, and symmetric Latin hypercube design.
    ackley = Ackley(dim=10)
    rbf = RBFInterpolant(
        dim=ackley.dim,
        lb=ackley.lb,
        ub=ackley.ub,
        kernel=CubicKernel(),
        tail=LinearTail(ackley.dim),
    )
    slhd = SymmetricLatinHypercube(dim=ackley.dim, num_pts=2 * (ackley.dim + 1))

    # Asynchronous SRBF strategy driven by a thread controller.
    controller = ThreadController()
    controller.strategy = SRBFStrategy(
        max_evals=max_evals, opt_prob=ackley, exp_design=slhd, surrogate=rbf, asynchronous=True
    )

    # Each worker thread evaluates the objective function.
    for _ in range(num_threads):
        controller.launch_worker(BasicWorkerThread(controller, ackley.eval))

    result = controller.run()
    print("Best value found: {0}".format(result.value))
    print(
        "Best solution found: {0}\n".format(
            np.array_str(result.params[0], max_line_width=np.inf, precision=5, suppress_small=True)
        )
    )


if __name__ == "__main__":
    test_example_simple()
DEAD_VALUE = '.'
LIVE_VALUE = '*'


class Cell(object):
    """One board cell: a current value plus a staged next-generation value."""

    def __init__(self, x, y, value):
        self.x = x
        self.y = y
        self.value = value
        self.nextValue = None

    def _set(self, value, nextloop):
        # Stage the value for the next generation, or apply it immediately.
        if nextloop:
            self.nextValue = value
        else:
            self.value = value

    def live(self, nextloop=False):
        """Make the cell alive (now, or staged for the next generation)."""
        self._set(LIVE_VALUE, nextloop)

    def die(self, nextloop=False):
        """Make the cell dead (now, or staged for the next generation)."""
        self._set(DEAD_VALUE, nextloop)

    def isdie(self):
        """True when the cell is currently dead."""
        return self.value == DEAD_VALUE

    def islive(self):
        """True when the cell is currently alive."""
        return self.value == LIVE_VALUE

    def update(self):
        """Promote the staged value (if any) to the current value."""
        if self.nextValue is None:
            return
        self.value = self.nextValue
        self.nextValue = None

    def __str__(self):
        return self.value
class Game(object):
    """A width x height Game of Life board of Cell objects."""

    # Offsets of the eight neighbouring positions.
    _NEIGHBOUR_OFFSETS = ((-1, -1), (0, -1), (1, -1),
                          (-1, 0), (1, 0),
                          (-1, 1), (0, 1), (1, 1))

    def __init__(self, width, height):
        self.width = width
        self.height = height
        self.grid = {}
        for x in range(self.width):
            for y in range(self.height):
                self.grid[(x, y)] = Cell(x, y, DEAD_VALUE)

    def get(self, x, y):
        """Return the Cell at (x, y), or None for positions off the board."""
        return self.grid.get((x, y), None)

    def update(self):
        """Advance the board one generation (classic B3/S23 rules)."""
        # .items() replaces the Python-2-only .iteritems(), which raises
        # AttributeError on Python 3 (and works identically on Python 2).
        for pos, cell in self.grid.items():
            count = self.count_neighbout(*pos)
            if count < 2 or count > 3:
                cell.die(nextloop=True)
            elif count == 3 and cell.isdie():
                cell.live(nextloop=True)
            # count == 2, or a live cell with count == 3, keeps its state.
        for pos, cell in self.grid.items():
            cell.update()

    def count_neighbout(self, x, y):
        """Count live cells among the eight neighbours of (x, y)."""
        # One loop over the offsets replaces eight copy-pasted if blocks.
        count = 0
        for dx, dy in self._NEIGHBOUR_OFFSETS:
            neighbour = self.get(x + dx, y + dy)
            if neighbour is not None and neighbour.islive():
                count += 1
        return count

    def draw(self):
        """Write the board to stdout, one row per line.

        NOTE(review): row 0 and column 0 are skipped (ranges start at 1),
        matching main()'s randrange(1, 8) seeding — confirm intended.
        """
        import sys
        for y in range(1, self.height):
            for x in range(1, self.width):
                sys.stdout.write(str(self.get(x, y)))
            sys.stdout.write('\n')
def main():
    # Seed an 8x8 board with up to 10 random live cells (duplicates possible),
    # then advance one generation per keypress, forever.
    game = Game(8, 8)
    import random
    for i in range(10):
        game.get(random.randrange(1, 8), random.randrange(1, 8)).live()
    while True:
        game.draw()
        # raw_input => this file is Python 2; the return value is unused,
        # the call just blocks until Enter is pressed.
        c = raw_input()
        game.update()
if __name__ == '__main__':
    main()
|
"""
Contains business logic tasks for this order of the task factory.
Each task should be wrapped inside a task closure that accepts a **kargs parameter
used for task initialization.
"""
def make_task_dict():
    """Return the name -> task-closure mapping defined in this module.

    No tasks are registered yet, so a fresh empty dictionary is returned
    on every call.
    """
    return {}
def get_task(task_name, params):
    """Look up *task_name* in the module's task dictionary and initialize it.

    The stored closure is called with *params*; a KeyError propagates when
    the task is not registered.
    """
    return make_task_dict()[task_name](params)
|
import os
import cv2
import dlib
import numpy as np
import matplotlib as mat
# Fingerprint-enhancement pipeline: brightness/contrast, CLAHE + histogram
# equalization, then adaptive thresholding with smoothing in between.
image = cv2.imread("Test/finger.png")
#contrast and brightness to 0.8 and 25
brightness = 25
contrast = 0.8
# Widen to int16 so the adjustment can overflow 0..255 before clipping.
img = np.int16(image)
img = img * (contrast/127+1) - contrast + brightness
img = np.clip(img, 0, 255)
img = np.uint8(img)
#Gray
gray_image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Local contrast enhancement (CLAHE) followed by global equalization.
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
cImg = clahe.apply(gray_image)
equ = cv2.equalizeHist(cImg)
#Normalized
norm_image = cv2.normalize(equ, None, 0, 255 , cv2.NORM_MINMAX)
#adaptive (gaussian c) threshold for block size of 15 and constant 2
thresholdImg = cv2.adaptiveThreshold(norm_image, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 15, 2)
#Gaussian Blur
thImg = cv2.blur(thresholdImg, (5,5))
thImg2 = cv2.medianBlur(thImg,5)
# Threshold again after smoothing (one result per blur variant).
smoothImg = cv2.adaptiveThreshold(thImg2, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 15, 2)
smoothImg2 = cv2.adaptiveThreshold(thImg, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 15, 2)
cv2.imwrite('gray_image.png',gray_image)
cv2.imshow('og_image',img)
cv2.imshow('gray_image',smoothImg2)
cv2.imshow('test',smoothImg)
cv2.waitKey(0) # Waits forever for user to press any key
cv2.destroyAllWindows()
|
from Books import Books
from Users import Users
from UserController import UserController
from BookController import BookController
from LoanController import LoanController
from Loans import Loans
from Database import Database
# Wire up the data layer (Database-backed Books/Users/Loans) and the three
# controllers the menu functions below dispatch to.
print("Welcome to the library\n")
database = Database()
books = Books(database)
users = Users(database)
loans = Loans(books, users, database)
userController = UserController(books, users, loans)
bookController = BookController(books, users, loans)
loanController = LoanController(books, users, loans)
def top_menu():
    """Show the main menu once and dispatch the chosen action.

    Unknown choices are silently ignored (the caller loops forever).
    """
    print("Option 1: List all users")
    print("Option 2: List all books")
    print("Option 3: Search for a user")
    print("Option 4: Search for a book")
    print("Option 5: Borrow a book")
    print("Option 6: Return a book")
    print("Option 7: Check my loans")
    print("Option 8: Add a user")
    print("Option 9: Remove a user")
    print("Option 10: Add a book")
    print("Option 11: Remove a book")
    print("Option 12: List books on loan")
    print("Option 13: List books not on loan")
    print("Option 14: List users with loans")
    choice = input("Select an option: ")
    # Dispatch table instead of a 14-branch if/elif chain.
    actions = {
        "1": userController.print_users,
        "2": bookController.print_books,
        "3": search_user_menu,
        "4": search_book_menu,
        "5": borrow_menu,
        "6": return_menu,
        "7": check_loans,
        "8": add_a_user,
        "9": remove_a_user,
        "10": add_a_book,
        "11": remove_a_book,
        "12": loanController.print_books_loaned,
        "13": loanController.print_books_not_loaned,
        "14": loanController.print_users_with_loans,
    }
    action = actions.get(choice)
    if action is not None:
        action()
def search_user_menu():
    """Prompt for a user-search mode (name or code) and run the search."""
    print("Option 1: Search by first name")
    print("Option 2: Search by last name")
    print("Option 3: Search by code")
    choice = input("Select an option: ")
    if choice == "1":
        userController.usersearch(1, input("Please enter a first name: "))
    elif choice == "2":
        userController.usersearch(2, input("Please enter a last name: "))
    elif choice == "3":
        userController.usersearch(3, int(input("Please enter a code: ")))
def search_book_menu():
    """Prompt for a book-search mode (title, author, or code) and run it."""
    print("Option 1: Search by title")
    print("Option 2: Search by author")
    print("Option 3: Search by code")
    choice = input("Select an option: ")
    if choice == "1":
        bookController.booksearch(1, input("Please enter a book title: "))
    elif choice == "2":
        bookController.booksearch(2, input("Please enter an author: "))
    elif choice == "3":
        bookController.booksearch(3, int(input("Please enter a code: ")))
def borrow_menu():
    # Record a loan of one book to one user (codes are integer IDs).
    user = int(input("Please enter your user code: "))
    book = int(input("Please enter the book code of the book you want to borrow: "))
    loanController.borrow(book, user)
def return_menu():
    # Close out the loan on a single book.
    book = int(input("Please enter the book code of the book you want to return: "))
    loanController.return_book(book)
def check_loans():
    # Show every loan held by the given user.
    user = int(input("Please enter your user code: "))
    loanController.user_loans(user)
def add_a_user():
    # Register a new user by name.
    first_name = input("Please enter a first name: ")
    last_name = input("Please enter a last name: ")
    userController.add_user(first_name, last_name)
def remove_a_user():
    # Delete a user by integer code.
    user = int(input("Please enter the user code of the user you want to remove: "))
    userController.remove_user(user)
def add_a_book():
    # Register a new book by title and author.
    title = input("Please enter the title: ")
    author = input("Please enter the author: ")
    bookController.add_book(title, author)
def remove_a_book():
    # Delete a book by integer code.
    book = int(input("Please enter the book code of the book you want to remove: "))
    bookController.remove_book(book)
# Main REPL: re-show the menu forever (exit with Ctrl+C / EOF).
while True:
    top_menu()
"""Tuple demo: a tuple is like a list, but written with parentheses and
immutable — its items cannot be reassigned."""
my_tuple = ('param1', 'param2', 'param3', 'param4')
# ^ this is a tuple
try:
    # Item assignment on a tuple raises TypeError.
    my_tuple[1] = 'parametr'
    print(my_tuple)
except TypeError:
    # Narrowed from a bare `except:`, which would also have hidden
    # unrelated errors (KeyboardInterrupt, NameError, ...).
    print('Заменять элементы нельзя!')
# Rebinding the NAME to a brand-new tuple is always allowed.
my_tuple = ('new_param1', 'new_param2', 'new_param3', 'new_param4')
print('Я заменил кортеж!', my_tuple)
from django.shortcuts import render, redirect, render_to_response
from django.http import HttpResponse, JsonResponse
from django.template.loader import render_to_string
from .models import Rate
from .forms import PostRateForm
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from dateutil.relativedelta import relativedelta
import dateutil.parser
from account.models import MyUser, RateReader
from countrycity.models import Liner
from django.contrib import messages
from account.models import MyUserProfile
from django.utils import timezone
import re
def rateSearchedList(request):
    """List/search freight rates visible to the logged-in user.

    Serves the initial search-result page and several GET "handlers"
    (AJAX pagination variants and a modify-cancel refresh) that return
    JSON fragments. Anonymous users are redirected to the login page.
    """
    if request.user.is_authenticated:
        MOBILE_AGENT_RE = re.compile(r'.*(iphone|mobile|androidtouch)',re.IGNORECASE)
        if MOBILE_AGENT_RE.match(request.META['HTTP_USER_AGENT']):
            is_mobile = True
        else:
            is_mobile = False
        # "Showers" are users who share their rates with the logged-in user
        showers = RateReader.objects.filter(reader=request.user).distinct()
        # Union each shower's rates (plus the user's own) into one queryset
        rates = Rate.objects.none()
        for shower in showers:
            userinstance = MyUser.objects.get(email=shower.shower.email)
            rates = rates | Rate.objects.filter(inputperson=userinstance)
        rates = rates | Rate.objects.filter(inputperson=request.user)
        if request.GET.get('pk'): # a modify was attempted and then cancelled
            pk = request.GET.get('pk')
            try:
                modifiedrate = rates.get(pk=pk)
            except:
                # Best-effort: fall back to an empty queryset when the pk
                # is missing or not visible to this user.
                modifiedrate = Rate.objects.none()
            html = render_to_string('rateform_modify_ajax_done.html', {'modifiedrate': modifiedrate, 'is_mobile':is_mobile},)
            result = {'html': html}
            return JsonResponse(result)
        # Resolve profile display names back to MyUser instances.
        if request.GET.getlist('inputperson'):
            inputpersons = request.GET.getlist('inputperson')
            inputperson_list = []
            for inputperson in inputpersons:
                x = MyUser.objects.filter(profile__profile_name=inputperson)
                for y in x:
                    inputperson_list.append(y)
        else:
            inputperson_list = []
        account_list = request.GET.getlist('account')
        # Resolve liner labels to liner names.
        if request.GET.getlist('liner'):
            liners = request.GET.getlist('liner')
            liner_list = []
            for liner in liners:
                x = Liner.objects.filter(label=liner).values_list('name', flat=True)
                for y in x:
                    liner_list.append(y)
        else:
            liner_list = []
        pol_list = request.GET.getlist('pol')
        pod_list = request.GET.getlist('pod')
        sf = request.GET.get('search_from')
        st = request.GET.get('search_to')
        # Initial declarations before first reference
        searchvalue_ip = []
        searchvalue_ac = []
        searchvalue_ln = []
        searchvalue_pl = []
        searchvalue_pd = []
        searchvalue_st = []
        filter_args = {}
        # '-' is the template's "no filter" sentinel for each facet.
        if inputperson_list != [] and inputperson_list != ['-']:
            filter_args['inputperson__in'] = inputperson_list
            searchvalue_ip = request.GET.getlist('inputperson')
        if account_list != [] and account_list != ['-']:
            filter_args['account__in'] = account_list
            searchvalue_ac = request.GET.getlist('account')
        if liner_list != [] and liner_list != ['-']:
            filter_args['liner__in'] = liner_list
            searchvalue_ln = request.GET.getlist('liner')
        if pol_list != [] and pol_list != ['-']:
            filter_args['pol__in'] = pol_list
            searchvalue_pl = request.GET.getlist('pol')
        if pod_list != [] and pod_list != ['-']:
            filter_args['pod__in'] = pod_list
            searchvalue_pd = request.GET.getlist('pod')
        if sf:
            searchvalue_sf = dateutil.parser.parse(sf).date()
            filter_args['effectiveDate__gte'] = searchvalue_sf
        else:
            searchvalue_sf = timezone.now().replace(day=1) + relativedelta(months=-1) # first day of the previous month
            filter_args['effectiveDate__gte'] = searchvalue_sf
        if st:
            searchvalue_st = dateutil.parser.parse(st).date()
            filter_args['effectiveDate__lte'] = searchvalue_st
        # else:
        #     searchvalue_st = timezone.now().replace(day=1) + relativedelta(months=+1, days=-1) # last day of the current month
        filtered_ordered_rates = rates.filter(**filter_args).order_by('-id').exclude(deleted=1)
        loginuser = request.user
        page = request.GET.get('page', 1)
        paginator = Paginator(filtered_ordered_rates, 20)
        try:
            rates_paginated = paginator.page(page)
        except PageNotAnInteger:
            rates_paginated = paginator.page(1)
        except EmptyPage:
            rates_paginated = paginator.page(paginator.num_pages)
        try:
            profile = MyUserProfile.objects.get(owner=request.user)
        except:
            # Templates treat a missing profile as False.
            profile = False
        # Distinct facet values for the filter drop-downs.
        inputperson_unique = filtered_ordered_rates.order_by('inputperson__profile__profile_name').values('inputperson__profile__profile_name').distinct()
        account_unique = filtered_ordered_rates.order_by('account').values('account').distinct()
        liner_filtered = filtered_ordered_rates.order_by('liner').values('liner').distinct()
        liner_args = {}
        liner_temp = []
        for liner in liner_filtered:
            liner_temp.append(liner['liner'])
        liner_args['name__in'] = liner_temp
        liner_unique = Liner.objects.filter(**liner_args).order_by('label').values('label')
        pol_unique = filtered_ordered_rates.order_by('pol').values('pol').distinct()
        pod_unique = filtered_ordered_rates.order_by('pod').values('pod').distinct()
        context = {
            'rates_paginated': rates_paginated,
            'inputperson_unique': inputperson_unique,
            'account_unique': account_unique,
            'liner_unique': liner_unique,
            'pol_unique': pol_unique,
            'pod_unique': pod_unique,
            'loginuser': loginuser,
            'searchvalue_ip': searchvalue_ip,
            'searchvalue_ac': searchvalue_ac,
            'searchvalue_ln': searchvalue_ln,
            'searchvalue_pl': searchvalue_pl,
            'searchvalue_pd': searchvalue_pd,
            'searchvalue_sf': searchvalue_sf,
            'searchvalue_st': searchvalue_st,
            'profile': profile,
            'is_mobile': is_mobile,
        }
        # First page of a non-AJAX request renders the full page; every
        # other combination returns a JSON fragment for infinite scroll.
        if (not request.GET.get('page') or request.GET.get('page') == '1') and not request.GET.get('handler') == 'search_ajax_wide' and not request.GET.get('handler') == 'search_ajax_narrow':
            return render(request, 'searchresult.html', context)
        else:
            # The four branches below differ only in template / handler name.
            if request.GET.get('handler') == 'search_ajax_narrow':
                html = render_to_string('searchresult_more_narrow.html', context)
                if int(page) < paginator.num_pages:
                    next_page = int(page) + 1
                else:
                    next_page = 'last_page'
                result = {
                    'html': html,
                    'page': next_page,
                    'searchvalue_ip': searchvalue_ip,
                    'searchvalue_ac': searchvalue_ac,
                    'searchvalue_ln': searchvalue_ln,
                    'searchvalue_pl': searchvalue_pl,
                    'searchvalue_pd': searchvalue_pd,
                    'searchvalue_sf': searchvalue_sf,
                    'searchvalue_st': searchvalue_st,
                    'is_mobile': is_mobile,
                }
                return JsonResponse(result)
            elif request.GET.get('handler') == 'search_ajax_wide':
                html = render_to_string('searchresult_more.html', context)
                if int(page) < paginator.num_pages:
                    next_page = int(page) + 1
                else:
                    next_page = 'last_page'
                result = {
                    'html': html,
                    'page': next_page,
                    'searchvalue_ip': searchvalue_ip,
                    'searchvalue_ac': searchvalue_ac,
                    'searchvalue_ln': searchvalue_ln,
                    'searchvalue_pl': searchvalue_pl,
                    'searchvalue_pd': searchvalue_pd,
                    'searchvalue_sf': searchvalue_sf,
                    'searchvalue_st': searchvalue_st,
                    'is_mobile': is_mobile,
                }
                return JsonResponse(result)
            elif request.GET.get('handler') == 'narrow':
                html = render_to_string('searchresult_more_narrow.html', context)
                if int(page) < paginator.num_pages:
                    next_page = int(page) + 1
                else:
                    next_page = 'last_page'
                result = {
                    'html': html,
                    'page': next_page,
                    'searchvalue_ip': searchvalue_ip,
                    'searchvalue_ac': searchvalue_ac,
                    'searchvalue_ln': searchvalue_ln,
                    'searchvalue_pl': searchvalue_pl,
                    'searchvalue_pd': searchvalue_pd,
                    'searchvalue_sf': searchvalue_sf,
                    'searchvalue_st': searchvalue_st,
                    'is_mobile': is_mobile,
                }
                return JsonResponse(result)
            else:
                html = render_to_string('searchresult_more.html', context)
                if int(page) < paginator.num_pages:
                    next_page = int(page) + 1
                else:
                    next_page = 'last_page'
                result = {
                    'html':html,
                    'page':next_page,
                    'searchvalue_ip':searchvalue_ip,
                    'searchvalue_ac':searchvalue_ac,
                    'searchvalue_ln': searchvalue_ln,
                    'searchvalue_pl': searchvalue_pl,
                    'searchvalue_pd': searchvalue_pd,
                    'searchvalue_sf': searchvalue_sf,
                    'searchvalue_st': searchvalue_st,
                    'is_mobile': is_mobile,
                }
                return JsonResponse(result)
    else:
        return redirect('login')
def rateInput(request):
    """Create Rate rows (AJAX form).

    GET renders the input form; POST saves one Rate per
    (liner x pol x pod) combination selected in the form and returns a
    JSON fragment listing the rows just created. Anonymous users are
    redirected to the login page.
    """
    if request.user.is_authenticated:
        MOBILE_AGENT_RE = re.compile(r'.*(iphone|mobile|androidtouch)', re.IGNORECASE)
        if MOBILE_AGENT_RE.match(request.META['HTTP_USER_AGENT']):
            is_mobile = True
        else:
            is_mobile = False
        if request.method == "POST":
            form = PostRateForm(request.POST)
            if form.is_valid():
                liner_list = request.POST.getlist('liner')
                pol_list = request.POST.getlist('pol')
                pod_list = request.POST.getlist('pod')
                saved_post_id = []
                # Cartesian product: one Rate row per combination.
                for liner in liner_list:
                    for pol in pol_list:
                        for pod in pod_list:
                            # Re-bind the form so each iteration gets a
                            # fresh unsaved instance.
                            form = PostRateForm(request.POST)
                            post = form.save(commit=False)
                            post.account = post.account.upper()
                            post.liner = liner
                            post.pol = pol
                            post.pod = pod
                            post.inputperson = request.user
                            post.deleted = 0
                            post.save()
                            saved_post_id.append(post.id)
                just_inputed_rates = Rate.objects.filter(pk__in=saved_post_id)
                html = render_to_string('rateform_input_ajax_done.html', {'just_inputed_rates': just_inputed_rates, 'is_mobile':is_mobile, })
                result = {'html': html, 'message':'운임 저장 완료!',}
                return JsonResponse(result)
            else:
                messages.add_message(request, messages.WARNING, '운임 저장 실패!')
                return redirect('rateSearch')
        else:
            form = PostRateForm()
            # Defaults: effective date = last day of the current month,
            # offered date = now.
            ed = timezone.now().replace(day=1) + relativedelta(months=+1, days=-1)
            od = timezone.now()
            ip = request.user.nickname
            try:
                profile = MyUserProfile.objects.get(owner=request.user)
            except:
                # Templates treat a missing profile as False.
                profile = False
            return render(request, 'rateform_input_ajax.html', {'form':form, 'ed':ed, 'od':od, 'ip':ip, 'profile':profile, 'is_mobile':is_mobile,})
    else:
        return redirect('login')
def rateModify(request, pk, str):
    """Edit an existing Rate (AJAX); only the original input person may modify.

    NOTE(review): the third URL parameter is named `str`, shadowing the
    builtin — kept as-is for URLconf compatibility.
    """
    MOBILE_AGENT_RE = re.compile(r'.*(iphone|mobile|androidtouch)', re.IGNORECASE)
    if MOBILE_AGENT_RE.match(request.META['HTTP_USER_AGENT']):
        is_mobile = True
    else:
        is_mobile = False
    if request.method == "POST":
        previousrate = Rate.objects.get(pk=pk)
        form = PostRateForm(request.POST, instance=previousrate)
        loginuser = request.user.email
        # Before saving, check the logged-in user is the original author
        if form.is_valid() and previousrate.inputperson.email == loginuser:
            post = form.save(commit=False)
            post.account = post.account.upper()
            post.save()
            modifiedrate = Rate.objects.get(pk=pk)
            html = render_to_string('rateform_modify_ajax_done.html', {'modifiedrate':modifiedrate, 'is_mobile': is_mobile,})
            result = {'html':html, 'message':'수정 성공!',}
            return JsonResponse(result)
        else:
            # NOTE(review): this branch runs when the form is INVALID or the
            # user is not the author, yet it still returns the success
            # message ('수정 성공!') with the unmodified row — confirm this
            # is intended.
            modifiedrate = Rate.objects.get(pk=pk)
            html = render_to_string('rateform_modify_ajax_done.html', {'modifiedrate':modifiedrate, })
            result = {'html':html, 'message':'수정 성공!',}
            return JsonResponse(result)
    else:
        previousrate = Rate.objects.get(pk=pk)
        loginuser = request.user.email
        if request.user.is_authenticated and previousrate.inputperson.email == loginuser:
            previousrate = Rate.objects.get(pk=pk)
            form = PostRateForm(instance=previousrate)
            # Unpack the current field values to pre-fill the modify form.
            ac = getattr(previousrate, 'account')
            ln = getattr(previousrate, 'liner')
            pl = getattr(previousrate, 'pol')
            pd = getattr(previousrate, 'pod')
            br20 = getattr(previousrate, 'buying20')
            sl20 = getattr(previousrate, 'selling20')
            br40 = getattr(previousrate, 'buying40')
            sl40 = getattr(previousrate, 'selling40')
            br4H = getattr(previousrate, 'buying4H')
            sl4H = getattr(previousrate, 'selling4H')
            lft = getattr(previousrate, 'loadingFT')
            dft = getattr(previousrate, 'dischargingFT')
            ed = getattr(previousrate, 'effectiveDate')
            od = getattr(previousrate, 'offeredDate')
            rmk = getattr(previousrate, 'remark')
            ip = request.user
            html = render_to_string('rateform_modify_ajax.html', {
                'pk' : pk,
                'form': form,
                'ac': ac,
                'ln': ln,
                'pl': pl,
                'pd': pd,
                'br20': br20,
                'sl20': sl20,
                'br40': br40,
                'sl40': sl40,
                'br4H': br4H,
                'sl4H': sl4H,
                'lft': lft,
                'dft': dft,
                'ed': ed,
                'od': od,
                'rmk': rmk,
                'ip': ip,
                'is_mobile': is_mobile,
            })
            result = {'html':html}
            return JsonResponse(result)
        # Logged in, but not the original input person
        elif request.user.is_authenticated and previousrate.inputperson.email != loginuser:
            result = {'not_inputperson': '입력자만 수정할 수 있습니다.', 'message':'입력자만 수정할 수 있습니다.'}
            return JsonResponse(result)
        else:
            return redirect('login')
def rateDelete(request, pk, str):
    """Soft-delete a Rate (sets deleted=1); only the original author may delete.

    NOTE(review): parameter `str` shadows the builtin — kept for URLconf
    compatibility.
    """
    if request.user.is_authenticated:
        currentrate = Rate.objects.get(pk=pk)
        loginuser = request.user.email
        if currentrate.inputperson.email == loginuser:
            # NOTE(review): the form is instantiated without POST data and
            # then saved — unusual; presumably relies on the instance's
            # existing values. Confirm this behaves as intended.
            form = PostRateForm(instance=currentrate)
            post = form.save(commit=False)
            post.inputperson = request.user
            post.deleted = 1
            post.save()
            result = {'pk':post.id, 'message':'삭제 완료!'}
            return JsonResponse(result)
        else:
            result = {'message':'입력자만 삭제할 수 있습니다.'}
            return JsonResponse(result)
    else:
        return redirect('login')
def rateDuplicate(request, pk):
    """Clone an existing Rate with refreshed dates; author-only (AJAX)."""
    if request.user.is_authenticated:
        MOBILE_AGENT_RE = re.compile(r'.*(iphone|mobile|androidtouch)', re.IGNORECASE)
        if MOBILE_AGENT_RE.match(request.META['HTTP_USER_AGENT']):
            is_mobile = True
        else:
            is_mobile = False
        previousrate = Rate.objects.get(pk=pk)
        loginuser = request.user.email
        # Before saving, check the logged-in user is the original author
        if previousrate.inputperson.email == loginuser:
            # Clearing pk and saving inserts a copy as a new row;
            # the copy gets fresh effective/offered dates.
            previousrate.pk = None
            previousrate.effectiveDate = timezone.now().replace(day=1) + relativedelta(months=+1, days=-1)
            previousrate.offeredDate = timezone.now()
            previousrate.save()
            just_inputed_rates = Rate.objects.filter(pk=previousrate.id)
            html = render_to_string('rateform_input_ajax_done.html', {'just_inputed_rates': just_inputed_rates, 'is_mobile':is_mobile, })
            result = {'html': html, 'message':'운임 복제 완료!',}
            return JsonResponse(result)
        else:
            result = {'message':'입력자만 복제할 수 있습니다.'}
            return JsonResponse(result)
    else:
        return redirect('login')
def main(request):
    """Render the static landing page."""
    return render(request, 'main.html')
def rates_json(request):
    """Render the rates download page for an authenticated user.

    The original body also built a list of per-rate dicts (`id_text` /
    `results`) that was never returned nor passed to the template; that
    dead code has been removed — the template only receives the queryset.
    """
    if request.user.is_authenticated:
        rates = Rate.objects.all()
        return render(request, 'download.html', {'rates':rates})
    else:
        return redirect('login')
def rateCharts(request):
    """Render the charts page for a logged-in user; anonymous users go to login."""
    if request.user.is_authenticated:
        loginuser = request.user
        try:
            profile = MyUserProfile.objects.get(owner=request.user)
        except:
            # Templates treat a missing profile as False.
            profile = False
        MOBILE_AGENT_RE = re.compile(r'.*(iphone|mobile|androidtouch)', re.IGNORECASE)
        if MOBILE_AGENT_RE.match(request.META['HTTP_USER_AGENT']):
            is_mobile = True
        else:
            is_mobile = False
        return render(request, 'charts.html', {'is_mobile':is_mobile, 'loginuser':loginuser, 'profile':profile, })
    else:
        return redirect('login')
from re import findall
def main():
    """Print lowercase letters flanked by exactly three capitals on each side in 3.txt."""
    # A lowercase letter with exactly three capitals on each side
    # (the [^A-Z] guards reject a 4th capital on either side).
    pattern = '[^A-Z][A-Z]{3}([a-z])[A-Z]{3}[^A-Z]'
    # `with` guarantees the handle is closed; the original leaked it
    # via open(...).read().
    with open('3.txt', 'r') as fh:
        message = fh.read()
    answer = findall(pattern, message)
    print(answer)
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python3
import requests
import json
import logging
import argparse
import time
import sys
import datetime
def sendTriggerQuery2Server(deviceState = 1):
    """POST the device state to the IFTTT trigger endpoint.

    Returns True on HTTP 200, False otherwise (the error is logged at
    debug level).
    """
    logger = logging.getLogger(__name__)
    basicTriggerFieldName = "which_light_is_turned_on"
    payload = {
        "triggerFields" : {
            # Use the caller-supplied state; the original hard-coded 1,
            # which made the deviceState parameter dead (default unchanged,
            # so existing no-argument callers behave identically).
            basicTriggerFieldName : deviceState,
        }
    }
    requestUrl = "http://129.79.242.194:8081/ifttt/v1/triggers/light_turned_on.php"
    headers = {
        # NOTE(review): hard-coded API key — consider moving to config.
        "IFTTT-CHANNEL-KEY" : "ARQP1psdWjdMG4HENv4zWbWhnUzg4nyQ1nnbLYGPY3jO-YS6L11Y_mW3jUa0dTd0",
    }
    response = requests.post(requestUrl, headers = headers, json=payload)
    print(response.text)
    if response.status_code != 200:
        logger.debug("request error with status %d and msg %s", response.status_code, response.text)
        return False
    else:
        return True
#sync device events to the server side
def sendEvent2Server(deviceId, deviceState):
    """Sync one device event to the server; True iff it answered HTTP 200."""
    log = logging.getLogger(__name__)
    event_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")
    body = {
        "userId" : 1,
        "deviceId" : deviceId,
        "deviceState" : deviceState,
        "eventTime" : event_time,
    }
    response = requests.post(
        "http://129.79.242.194:8081/ifttt/v1/updateState.php",
        headers={"Self-Channel-Key" : "ARQP1psdWjdMG4HENv4zWbWhnUzg4nyQ1nnbLYGPY3jO-YS6L11Y_mW3jUa0dTd"},
        json=body,
    )
    print(response.text)
    if response.status_code != 200:
        log.debug("request error with status %d and msg %s", response.status_code, response.text)
        return False
    return True
#get actions from the server side
def getActionFromServer(deviceId, deviceState):
    # NOTE(review): DEAD CODE — this definition is shadowed by a later
    # `def getActionFromServer(...)` in this module, so it can never be
    # called; its body also posts to updateState.php, duplicating
    # sendEvent2Server. Consider deleting or renaming it.
    logger = logging.getLogger(__name__)
    currTimeStr = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")
    payload = {
        "userId" : 1,
        "deviceId" : deviceId,
        "deviceState" : deviceState,
        "eventTime" : currTimeStr,
    }
    #payload = json.dumps(payload)
    requestUrl = "http://129.79.242.194:8081/ifttt/v1/updateState.php"
    headers = {
        "Self-Channel-Key" : "ARQP1psdWjdMG4HENv4zWbWhnUzg4nyQ1nnbLYGPY3jO-YS6L11Y_mW3jUa0dTd",
    }
    response = requests.post(requestUrl, headers = headers, json=payload)
    print(response.text)
    if response.status_code != 200:
        logger.debug("request error with status %d and msg %s", response.status_code, response.text)
        return False
    else:
        return True
def getLogFromServer(startTime = None, endTime = None, type = None, startId = 0, numLimit = 10):
    """Fetch up to *numLimit* log rows from the server.

    Returns the decoded "data" payload, or None on a non-200 response.
    NOTE: the `type` parameter name shadows the builtin but is kept for
    backward compatibility with existing keyword callers.
    """
    logger = logging.getLogger(__name__)
    # (an unused timestamp computation was removed here)
    payload = {
        "startTime" : startTime,
        "endTime" : endTime,
        "type" : type,
        "startId" : startId,
        "numLimit" : numLimit,
    }
    requestUrl = "http://129.79.242.194:8081/ifttt/v1/getLog.php"
    headers = {
        "Self-Channel-Key" : "ARQP1psdWjdMG4HENv4zWbWhnUzg4nyQ1nnbLYGPY3jO-YS6L11Y_mW3jUa0dTd",
    }
    response = requests.post(requestUrl, headers = headers, json=payload)
    if response.status_code != 200:
        logger.debug("request error with status %d and msg %s", response.status_code, response.text)
        return None
    else:
        return json.loads(response.text)["data"]
def insertLog2Server(logType, logMessage, logTime = None):
    """Insert one log row on the server; the timestamp defaults to now.

    Returns True iff the server answered HTTP 200.
    """
    log = logging.getLogger(__name__)
    if logTime is None:
        logTime = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")
    body = {
        "logType" : logType,
        "logMessage" : logMessage,
        "logTime" : logTime,
    }
    response = requests.post(
        "http://129.79.242.194:8081/ifttt/v1/addLog.php",
        headers={"Self-Channel-Key" : "ARQP1psdWjdMG4HENv4zWbWhnUzg4nyQ1nnbLYGPY3jO-YS6L11Y_mW3jUa0dTd"},
        json=body,
    )
    if response.status_code != 200:
        log.debug("request error with status %d and msg %s", response.status_code, response.text)
        return False
    print(response.text)
    return True
def getActionFromServer(startTime = None, endTime = None, deviceId = None, deviceState = None, startId = 0, numLimit = 10, isFinished = 0):
    """Fetch pending actions matching the given filters from the server.

    Returns a (possibly empty) list of action objects; None on HTTP error.
    NOTE(review): this SHADOWS the earlier `getActionFromServer(deviceId,
    deviceState)` defined above in this module — callers always get this
    definition.
    """
    logger = logging.getLogger(__name__)
    # NOTE(review): currTimeStr is computed but never used.
    currTimeStr = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")
    payload = {
        "startTime" : startTime,
        "endTime" : endTime,
        "deviceId" : deviceId,
        "deviceState" : deviceState,
        "startId" : startId,
        "numLimit" : numLimit,
        "isFinished" : isFinished,
    }
    #payload = json.dumps(payload)
    requestUrl = "http://129.79.242.194:8081/ifttt/v1/getAction.php"
    headers = {
        "Self-Channel-Key" : "ARQP1psdWjdMG4HENv4zWbWhnUzg4nyQ1nnbLYGPY3jO-YS6L11Y_mW3jUa0dTd",
    }
    response = requests.post(requestUrl, headers = headers, json=payload)
    if response.status_code != 200:
        logger.debug("request error with status %d and msg %s", response.status_code, response.text)
        return None
    else:
        #print(response.text)
        actionListResult = json.loads(response.text)
        actionList = actionListResult["data"] if "data" in actionListResult else []
        return actionList
def updateActionResult2Server(actionIdList, actionStateList):
    """Report execution results for a batch of actions.

    actionIdList and actionStateList are paired element-wise; extra
    elements in the longer list are ignored (zip semantics).
    Returns True iff the server answered HTTP 200.
    """
    logger = logging.getLogger(__name__)
    # (an unused timestamp computation was removed here)
    payload = [
        {"actionId" : actionId, "actionState" : actionState}
        for actionId, actionState in zip(actionIdList, actionStateList)
    ]
    requestUrl = "http://129.79.242.194:8081/ifttt/v1/updateActionResult.php"
    headers = {
        "Self-Channel-Key" : "ARQP1psdWjdMG4HENv4zWbWhnUzg4nyQ1nnbLYGPY3jO-YS6L11Y_mW3jUa0dTd",
    }
    response = requests.post(requestUrl, headers = headers, json=payload)
    if response.status_code != 200:
        logger.debug("request error with status %d and msg %s", response.status_code, response.text)
        return False
    else:
        print(response.text)
        return True
def realtimeApi(triggerIdList):
    """Notify IFTTT's realtime endpoint that the given triggers have new data.

    Returns True iff IFTTT answered HTTP 200.  An unused local
    (defaultTriggerId) was removed from the original body.
    """
    currTimeStr = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")
    print(currTimeStr)
    logger = logging.getLogger(__name__)
    iftttChannelKey = "ARQP1psdWjdMG4HENv4zWbWhnUzg4nyQ1nnbLYGPY3jO-YS6L11Y_mW3jUa0dTd0"
    url = "http://realtime.ifttt.com/v1/notifications"
    payload = {
        "data" : [{"trigger_identity" : triggerId} for triggerId in triggerIdList],
    }
    headers = {
        "IFTTT-Channel-Key" : iftttChannelKey,
    }
    response = requests.post(url, json = payload, headers = headers)
    if response.status_code != 200:
        logger.debug("request error with status %d and msg %s", response.status_code, response.text)
        return False
    else:
        return True
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from pathlib import Path
from qbstyles import mpl_style
DR = Path(__file__).parent


def main():
    """Animate 10 random line plots and save the result as an animated GIF."""
    mpl_style(dark=True)
    fig = plt.figure()
    frames = []
    for _ in range(10):
        samples = np.random.randn(100)   # 100 random values per frame
        frames.append(plt.plot(samples)) # each plot's artists form one frame
    # Show the frames at 100 ms intervals.  Keeping the `ani` reference is
    # mandatory — without it the animation is garbage-collected.
    ani = animation.ArtistAnimation(fig, frames, interval=100)
    ani.save(
        str(DR / 'anime.gif'),
        writer='pillow'
    )
    plt.show()


if __name__ == '__main__':
    main()
|
# Read two integers and print the five basic arithmetic results.
a = int(input("Please input the first number: "))
b = int(input("Please input the second number: "))
print(a + b)
print(a - b)
print(a * b)
print(a / b)  # true division; raises ZeroDivisionError when b == 0
print(a % b)
# File: exercise0502.py
# Author: Kaiching Chang
# Date: July, 2014
|
# -*- coding: utf-8 -*-
from ciscoconfparse import CiscoConfParse
def check_for_list(obj):
    """Return the first element when *obj* is a list, otherwise *obj* itself.

    Raises IndexError for an empty list (unchanged from the original).
    """
    # isinstance is the idiomatic type test (and also accepts list subclasses).
    if isinstance(obj, list):
        return obj[0]
    return obj
# Parse the Cisco config and pull out the "crypto map CRYPTO" entry.
# (Python 2 file: note the print statements below.)
config = CiscoConfParse("cisco_ipsec.txt")
crypto = check_for_list(config.find_objects("^crypto map CRYPTO"))
# Print output
print crypto.text
for child in crypto.children:
    print child.text
|
##############################################
# #
# #
# #
# #
# #
# #
# #
# #
# ###############
# #
# #
# #
# #
# #
# #
# #
###############
#name - age dictionary program
#start by making a dictionary with names and ages
# Seed data: name -> age; mutated in place by the add/remove helpers below.
name_age = {"John Ashton": 25, "Ashley Andrade": 20, "Lilly petal": 35, "Nathan Heath": 19}
#create a function to print out the dictionary
def print_dict():
    """Print every name/age pair in ``name_age`` as ``name --- age``."""
    for name in name_age:
        print(name, "---", name_age[name])
#create a function to add to the dictionary
def add_dict():
    """Prompt for a name and age and add the pair to ``name_age``.

    On a non-integer age, print a message and leave the dictionary
    unchanged.
    """
    try:
        name = input("Type in a name: ")
        age = int(input("Type in their age: "))
    except ValueError:
        print("Opps! Looks like you typed something incorrect!")
    else:
        # Bug fix: store the entry only when both inputs parsed. Previously
        # the assignment ran unconditionally, raising NameError on `age`
        # whenever the int() conversion failed.
        name_age[name] = age
#create a function to remove from the dictionary
def remove_dict():
#see if name exists in dict
try:
existing = input("Enter a name you'd like to remove: ")
print("searching...")
for key,value in name_age.items():
if existing not in name_age:
print("Looks like the name is not in this list!")
else:
print("Name found!")
option = int(input("Would you like to delete? (1=yes, 2=no): "))
if option == 1:
del name_age[existing]
break
elif option == 2:
continue
except ValueError:
print("Opps! Looks like you typed something incorrect!")
#create a function that prints the updated dictionary
def print_updated():
    """Print the (possibly modified) contents of ``name_age``."""
    for entry in name_age.items():
        print(entry[0], "----", entry[1])
# Drive the program: show the dictionary, add an entry, remove one,
# then show the updated contents.
print_dict()
add_dict()
remove_dict()
print_updated()
|
# -*- coding: utf-8 -*-
from collections import Counter
class Solution:
    """LeetCode 1399: count how many digit-sum groups share the largest size."""

    def countLargestGroup(self, n: int) -> int:
        """Group 1..n by digit sum; return how many groups have maximal size."""
        tally = Counter(self.sumOfDigits(value) for value in range(1, n + 1))
        sizes = tally.values()
        biggest = max(sizes)
        return sum(1 for size in sizes if size == biggest)

    def sumOfDigits(self, n: int) -> int:
        """Return the sum of the decimal digits of a non-negative integer."""
        total = 0
        while n:
            total += n % 10
            n //= 10
        return total
if __name__ == "__main__":
    # Sanity checks using the problem statement's examples.
    solution = Solution()
    assert 4 == solution.countLargestGroup(13)
    assert 2 == solution.countLargestGroup(2)
    assert 6 == solution.countLargestGroup(15)
    assert 5 == solution.countLargestGroup(24)
|
import datetime
import json
import ntpath
import time
import os
import math
from watchdog.observers import Observer
from watchdog.events import (FileModifiedEvent, PatternMatchingEventHandler)
observer = Observer()
# checks when file is modified
class Handler(PatternMatchingEventHandler):
    """Watchdog handler: rebuild stats.txt whenever a save file changes."""
    def on_modified(self, event: FileModifiedEvent):
        # Any modification inside the watched saves folder triggers a full rebuild.
        update_file()
        print('file modified')
# Path to the Minecraft saves directory for the current Windows user.
saves_folder = ntpath.expandvars(r'%APPDATA%\.minecraft\saves')
# Create the output file up front if it does not exist yet.
if not os.path.exists('stats.txt'):
    stats_file = open('stats.txt',"w+")
    stats_file.close()
def update_file():
    """Rebuild stats.txt from the most recently modified world's stats JSON.

    Finds the newest world under ``saves_folder``, loads its per-player stats
    JSON, aggregates playtime / distance / kill / mining counters, and writes
    a human-readable summary to stats.txt in the working directory.
    """
    saves = []
    latest_world = None
    # gets last modified world: [name, mtime, full path]
    for i in os.listdir(saves_folder):
        full_dir = saves_folder + '\\' + i
        saves.append(i)
        last_mod = datetime.datetime.fromtimestamp(os.path.getmtime(full_dir))
        if latest_world == None:
            latest_world = [i, last_mod, full_dir]
        elif latest_world[1] < last_mod:
            latest_world = [i,last_mod, full_dir]
    # Per-category stat dictionaries filled from the stats JSON below.
    stats_general = {}
    stats_custom = {}
    stats_killed = {}
    stats_mined = {}
    stats_picked_up = {}
    stats_used = {}
    stats_killed_by = {}
    # only runs if stats folder exists, useful when generating world for the first time
    if os.path.isdir(latest_world[2] + '\stats'):
        # define json stats file location & load
        # NOTE(review): assumes exactly one player stats file in the folder —
        # only the first directory entry is read; confirm for multiplayer worlds.
        stats_folder = ntpath.expandvars(latest_world[2] + '\stats')
        stats_file = open(stats_folder + '\\' + os.listdir(stats_folder)[0])
        stats = json.load(stats_file)
        # convert playtime in ticks to hours minutes and seconds (00:00:00)
        world_playtime_seconds = stats['stats']['minecraft:custom']['minecraft:play_one_minute']
        world_playtime_seconds /= 20
        ty_res = time.gmtime(world_playtime_seconds)
        world_playtime = time.strftime("%H:%M:%S",ty_res)
        # update general stats category
        stats_general.update({'world_name':latest_world[0]})
        stats_general.update({'playtime':str(world_playtime)})
        custom_dir = stats['stats']['minecraft:custom']
        total_distance = 0.0
        # Accumulate every movement statistic (stored in centimetres) into
        # total_distance, converting cm -> blocks (metres).
        for i in custom_dir:
            if i == 'minecraft:killed_by':
                stats_custom.update({'killed_by':custom_dir[i]})
            if i == 'minecraft:deaths':
                stats_custom.update({'deaths':custom_dir[i]})
            if i == 'minecraft:crouch_one_cm':
                total_distance += custom_dir[i] / 100
                print(i + ' ' + str(custom_dir[i]))
            if i == 'minecraft:sprint_one_cm':
                total_distance += custom_dir[i] / 100
                print(i + ' ' + str(custom_dir[i]))
            if i == 'minecraft:walk_one_cm':
                total_distance += custom_dir[i] / 100
                print(i + ' ' + str(custom_dir[i]))
            if i == 'minecraft:walk_under_water_one_cm':
                total_distance += custom_dir[i] / 100
                print(i + ' ' + str(custom_dir[i]))
            if i == 'minecraft:walk_on_water_one_cm':
                total_distance += custom_dir[i] / 100
                print(i + ' ' + str(custom_dir[i]))
            if i == 'minecraft:swim_one_cm':
                total_distance += custom_dir[i] / 100
                print(i + ' ' + str(custom_dir[i]))
            if i == 'minecraft:fall_one_cm':
                total_distance += custom_dir[i] / 100
                print(i + ' ' + str(custom_dir[i]))
            if i == 'minecraft:fly_one_cm':
                total_distance += custom_dir[i] / 100
                print(i + ' ' + str(custom_dir[i]))
        total_distance = round(total_distance,1)
        # Strip the 10-char 'minecraft:' prefix and underscores to build
        # display names for every category.
        for i in stats['stats']:
            if i == 'minecraft:custom':
                for x in stats['stats'][i]:
                    newx = x[10:]
                    newx = newx.replace("_"," ")
                    stats_custom.update({newx:stats['stats'][i][x]})
            if i == 'minecraft:killed':
                for x in stats['stats'][i]:
                    newx = x[10:]
                    newx = newx.replace("_"," ")
                    stats_killed.update({newx:stats['stats'][i][x]})
            if i == 'minecraft:mined':
                for x in stats['stats'][i]:
                    newx = x[10:]
                    newx = newx.replace("_"," ")
                    stats_mined.update({newx:stats['stats'][i][x]})
            if i == 'minecraft:picked_up':
                for x in stats['stats'][i]:
                    newx = x[10:]
                    newx = newx.replace("_"," ")
                    stats_picked_up.update({newx:stats['stats'][i][x]})
            if i == 'minecraft:used':
                for x in stats['stats'][i]:
                    newx = x[10:]
                    newx = newx.replace("_"," ")
                    stats_used.update({newx:stats['stats'][i][x]})
            if i == 'minecraft:killed_by':
                for x in stats['stats'][i]:
                    newx = x[10:]
                    newx = newx.replace("_", " ")
                    stats_killed_by.update({newx: stats['stats'][i][x]})
        # write to text file
        with open('stats.txt', 'w') as f:
            f.write('world name: ' + stats_general['world_name'] + '\n')
            f.write('playtime: ' + stats_general['playtime'] + '\n')
            f.write('\n')
            # Find the mob that killed the player most often.
            max_killed_by = [0,0]
            for i in stats_killed_by:
                if max_killed_by[1] < stats_killed_by[i]:
                    max_killed_by = [i,stats_killed_by[i]]
            # Write the death count, singular/plural, with top killer if any.
            for i in stats_custom:
                if i == 'deaths':
                    if stats_custom[i] < 2:
                        if max_killed_by[0] != 0:
                            f.write(str(stats_custom[i]) + ' death, ' + str(max_killed_by[0]) + ' (' + str(max_killed_by[1]) + ')' '\n')
                        else:
                            f.write(str(stats_custom[i]) + ' death' + '\n')
                    else:
                        if max_killed_by[0] != 0:
                            f.write(str(stats_custom[i]) + ' deaths, ' + str(max_killed_by[0]) + ' (' + str(max_killed_by[1]) + ')' '\n')
                        else:
                            f.write(str(stats_custom[i]) + ' deaths' + '\n')
            f.write('distance travelled: ' + str(total_distance) + 'b' + '\n')
            # Most-killed mob, only written when at least one kill exists.
            max_killed = [0,0]
            for i in stats_killed:
                if max_killed[1] < stats_killed[i]:
                    max_killed = [i,stats_killed[i]]
            if max_killed[1] > 0:
                f.write('most killed mob: ' + max_killed[0] + ' (' + str(max_killed[1]) + ')')
                f.write('\n' * 1)
            # Most-mined block.
            max_mined = [0,0]
            for i in stats_mined:
                if max_mined[1] < stats_mined[i]:
                    max_mined = [i,stats_mined[i]]
            if max_mined[1] > 0:
                f.write('most mined block: ' + max_mined[0] + ' (' + str(max_mined[1]) + ')')
                f.write('\n' * 1)
            # Most-used item.
            max_used = [0,0]
            for i in stats_used:
                if max_used[1] < stats_used[i]:
                    max_used = [i,stats_used[i]]
            if max_used[1] > 0:
                f.write('most used item: ' + max_used[0] + ' (' + str(max_used[1]) + ')')
                f.write('\n' * 1)
# Watch the saves folder and rebuild stats.txt on every modification event.
observer.schedule(event_handler=Handler('*'), path=saves_folder)
observer.daemon = False
observer.start()
'''Tic Tac Toe Game for 2 players or player vs bot'''
import random
def get_input(cor_h):
    '''Get the coordinates of a player's move, validating that it is allowed.

    Expected input - 'x,y' with both values in range 1 to 3 (board is 3x3).
    Validation checks that:
    - the input parses as comma-separated integers
    - the input corresponds to valid rows & columns
    - the move wasn't already made (not present in cor_h)
    Returns (cor, cor_h): the chosen move and the updated move history.
    '''
    valid = [1, 2, 3]
    while True:
        # Inner loop: repeat until the input parses as integers.
        while True:
            cor = input('Make a move (x,y) ')
            try:
                cor = [int(e) for e in cor.split(',')]
                break
            except ValueError:
                print('Wrong input, choose again')
        # Accept only a fresh, in-range [row, col] pair; otherwise re-prompt.
        if len(cor) == 2:
            if cor[0] in valid and cor[1] in valid:
                if cor not in cor_h:
                    cor_h.append(cor)
                    break
        print('Wrong move, choose again')
    return cor, cor_h
def bot_input(cor_h):
    """Pick a random free cell for the bot.

    Draws random [row, col] pairs in 1..3 until one is found that is not in
    cor_h (the history of moves), records it there, and returns it together
    with the updated history. Some smarter logic may be added in the future.
    """
    while True:
        move = [random.randint(1, 3), random.randint(1, 3)]
        if move not in cor_h:
            break
    cor_h.append(move)
    return move, cor_h
def update_board(cor, player, board):
    """Place the mark for *player* ('x' for player 1, 'o' otherwise) at *cor*.

    cor is a 1-based [row, col] pair; the board is mutated in place and
    returned.
    """
    mark = 'x' if player == 1 else 'o'
    row, col = cor
    board[row - 1][col - 1] = mark
    return board
def print_board(board):
    ''' Simplest possible solution to print a board-like table:
    one row list per line. '''
    print('', board[0], '\n', board[1], '\n', board[2])
def draw_board(board):
    ''' 'Draws' the board in the console as ASCII cells; empty cells
    (value 0) are rendered as spaces. '''
    size = range(len(board))
    # Horizontal separator, one ' ___' segment per column.
    top = ''.join([' ___' for e in size])
    for i in size:
        print(top)
        side = ''
        for j in size:
            elem = str(board[i][j])
            if elem == '0':
                elem = ' '
            side = side + '| ' + elem + ' '
        side = side + '|'
        print(side)
    print(top)
def check_winner(board):
    ''' Check the board to determine if there is a winner.
    Prints the outcome ('1 wins!', '2 wins!', 'Draw!' or a continue
    message) and returns True when the game should continue, False when
    it is over.
    '''
    # Collect all eight winning lines: 3 rows, 3 columns, 2 diagonals.
    lines = list(board)
    for col in range(3):
        lines.append([board[0][col], board[1][col], board[2][col]])
    lines.append([board[0][2], board[1][1], board[2][0]])
    lines.append([board[0][0], board[1][1], board[2][2]])

    def owned_by(mark, other):
        # A line is won when it holds only `mark` (no empties, no opponent).
        return any(mark in line and 0 not in line and other not in line
                   for line in lines)

    if owned_by('x', 'o'):
        print('1 wins!')
        return False
    if owned_by('o', 'x'):
        print('2 wins!')
        return False
    if all(0 not in line for line in lines):
        print('Draw!')
        return False
    print('No winner - game continues!')
    return True
def players():
    ''' Ask for input to determine game mode - PvP or PvBot.
    Returns a single value:
    0 - 2 player game
    1 - 1 player game, bot starts
    2 - 1 player game, player starts
    '''
    bot = 0
    while True:
        try:
            players_qty = int(input('How many players? '))
            if players_qty == 2:
                break
            if players_qty == 1:
                print('You will play vs computer!')
                # Inner loop: ask who starts until a valid y/n answer is given.
                while True:
                    start = (input('Do you want to start? y/n? '))
                    if start == 'y':
                        print('You wil play as Player 1!')
                        bot = 2
                        break
                    if start == 'n':
                        bot = 1
                        print('You wil play as Player 2!')
                        break
                    print('Incorrect input')
                break
        except ValueError:
            print('Please specify correct number of players: 1 or 2')
    return bot
def swap(to_swap):
    '''Return 2 for odd input and 1 for even input.

    Used to alternate the active player id between 1 and 2 each turn.
    (Doc fix: the original docstring claimed a 0/1 toggle, which does not
    match the implementation — ``to_swap % 2 + 1`` yields 1 or 2.)
    '''
    return to_swap % 2 + 1
def game(game_mode):
    ''' Game loop. Accepts a single argument to specify the game mode:
    0 - 2 player game
    1 - 1 player game, bot starts
    2 - 1 player game, player starts
    '''
    # Intro - welcoming message and empty board.
    print(' ')
    if game_mode == 0:
        print('Welcome in Tic Tac Toe for 2 players!')
    else:
        print('Welcome in Tic Tac Toe for 1 player!')
    board = [[0, 0, 0],
             [0, 0, 0],
             [0, 0, 0]
             ]
    player = 0
    cor_h = []
    cont = True
    draw_board(board)
    # Step in game. Iterates between players based on 'player' value;
    # in single-player mode the bot moves when it's its turn (game_mode
    # equals the current player id).
    while cont:
        player = swap(player)
        print('Player', player)
        if game_mode == 0:
            cors = get_input(cor_h)
        elif game_mode == player:
            cors = bot_input(cor_h)
        else:
            cors = get_input(cor_h)
        cor = cors[0]
        cor_h = cors[1]
        board = update_board(cor, player, board)
        draw_board(board)
        cont = check_winner(board)
def tic_tac_toe():
    '''Main entry point: choose the game mode, then run the game.'''
    game(players())
if __name__ == '__main__':
    tic_tac_toe()
|
"""
Rename 1kpilot gene alignements as family.msa
"""
import os
import sys
import shutil
def convert(raw_data_dir, output_ali_dir):
os.mkdir(output_ali_dir)
for f in os.listdir(raw_data_dir):
family = f.split(".")[1]
src = os.path.join(raw_data_dir, f)
dest = os.path.join(output_ali_dir, family + ".msa")
shutil.copyfile(src, dest)
if (__name__ == "__main__"):
    # Require exactly two positional arguments: source and destination dirs.
    if (len(sys.argv) != 3):
        print("Syntax python " + os.path.basename(__file__) + " raw_data_dir output_ali_dir")
        sys.exit(1)
    convert(sys.argv[1], sys.argv[2])
|
"""
Copyright 1999 Illinois Institute of Technology
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL ILLINOIS INSTITUTE OF TECHNOLOGY BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Except as contained in this notice, the name of Illinois Institute
of Technology shall not be used in advertising or otherwise to promote
the sale, use or other dealings in this Software without prior written
authorization from Illinois Institute of Technology.
"""
import fabio
import numpy as np
try:
from ..utils.file_manager import fullPath, ifHdfReadConvertless
from ..utils.image_processor import *
except: # for coverage
from utils.file_manager import fullPath, ifHdfReadConvertless
from utils.image_processor import *
class XRayViewer:
    """
    Holds a single X-ray image (loaded via fabio, or taken from a pre-loaded
    HDF5 file list) and provides rotation with recropping.
    (Doc fix: the original docstring described "Quadrant Folding processing",
    apparently copied from the QuadrantFolder class.)
    """
    def __init__(self, img_path, img_name, file_list=None, extension=''):
        """
        Initial values for the XRayViewer object
        :param img_path: directory path of input image
        :param img_name: image file name
        :param file_list: pair (names, images) used for '.hdf5'/'.h5' input —
            TODO confirm exact structure against callers
        :param extension: file extension; '.hdf5'/'.h5' selects file_list input
        """
        self.img_name = img_name
        if extension in ('.hdf5', '.h5'):
            # Look the image up by name in the pre-loaded HDF5 list; falls
            # back to index 0 when the name is not found.
            index = next((i for i, item in enumerate(file_list[0]) if item == img_name), 0)
            self.orig_img = file_list[1][index]
        else:
            self.orig_img = fabio.open(fullPath(img_path, img_name)).data
        self.orig_img = ifHdfReadConvertless(img_name, self.orig_img)
        self.orig_img = self.orig_img.astype("float32")
        # Image center before any manual adjustment (set elsewhere).
        self.orig_image_center = None
        # Processing history.
        self.hist = []
        # Crop offsets from the last rotation (see getRotatedImage).
        self.dl, self.db = 0, 0
    def getRotatedImage(self, angle, center):
        """
        Get the original input image rotated by *angle* around *center*,
        cropped back to the original image size.
        """
        img = np.array(self.orig_img, dtype="float32")
        b, l = img.shape
        rotImg, _, _ = rotateImage(img, center, angle)
        # Cropping off the surrounding part since we had already expanded the image to maximum possible extent in centerize image
        bnew, lnew = rotImg.shape
        db, dl = (bnew - b)//2, (lnew-l)//2
        final_rotImg = rotImg[db:bnew-db, dl:lnew-dl]
        self.dl, self.db = dl, db # storing the cropped off section to recalculate coordinates when manual center is given
        return final_rotImg
|
1. 分数出现以下几种情况时应如何处理,目前尚未列出:
(一共有R、I、A、S、E、C)
(假设R=I>A>S>E>C)
需要显示的结果是什么,以及如何判断得出该结果的
(假设R=I=A>S>E>C)
需要显示的结果是什么,以及如何判断得出该结果的
(假设R=I=A=S>E>C)
需要显示的结果是什么,以及如何判断得出该结果的
(假设R=I=A=S=E>C)
需要显示的结果是什么,以及如何判断得出该结果的
(假设R=I=A=S=E=C)
需要显示的结果是什么,以及如何判断得出该结果的
(假设R>I>A>S>E>C)
需要显示的结果是什么,以及如何判断得出该结果的
(假设R>I=A=S=E=C)
需要显示的结果是什么,以及如何判断得出该结果的
(假设R>I>A=S=E=C)
需要显示的结果是什么,以及如何判断得出该结果的
|
#!/usr/bin/env python
# This file downloads and sets up all the library dependencies for the game,
# including Lua, SFML (Window toolkit), and Box2D (physics engine).
import urllib2
import zipfile
import os
import subprocess
import errno
import shutil
import sys
import platform
import tarfile
msbuild = 'C:\\Windows\\Microsoft.NET\\Framework\\v4.0.30319\\msbuild.exe'
downloads = [
'http://files.luaforge.net/releases/luasocket/luasocket/luasocket-2.0.2/luasocket-2.0.2.tar.gz',
'https://box2d.googlecode.com/files/Box2D_v2.2.1.zip',
'http://luajit.org/download/LuaJIT-2.0.2.zip',
'http://www.sfml-dev.org/download/sfml/2.0/SFML-2.0-windows-vc11-32bits.zip',
'http://prdownloads.sourceforge.net/scons/scons-local-2.3.0.zip',
'https://raw.github.com/skaslev/gl3w/master/gl3w_gen.py',
'http://downloads.sourceforge.net/project/glew/glew/1.9.0/glew-1.9.0.zip',
]
deps = 'deps'
def unzip(stream):
    # Unzips a stream into the 'deps' folder.
    # NOTE: Python 2 syntax ('except OSError, e') — this script targets py2.
    zf = zipfile.ZipFile(stream)
    for name in zf.namelist():
        (dirname, filename) = os.path.split(name)
        try:
            path = os.path.join(deps, dirname)
            os.makedirs(path)
        except OSError, e:
            # Directory already existing is fine; re-raise anything else.
            if e.errno == errno.EEXIST and os.path.isdir(path):
                pass
            else:
                raise
        if filename:
            # Entry is a file (not a bare directory): write its bytes out.
            fd = open(os.path.join(deps, name), 'wb')
            fd.write(zf.read(name))
            fd.close()
def untar(stream):
    """Extract a gzipped tarball read from *stream* into the 'deps' folder."""
    archive = tarfile.open(fileobj=stream, mode='r:gz')
    archive.extractall(deps)
def download():
    # Download and install library dependencies into the 'deps' folder.
    if not os.path.exists(deps):
        os.mkdir(deps)
    for dl in downloads:
        (_, name) = os.path.split(dl)
        filename = os.path.join(deps, name)
        print filename
        (_, ext) = os.path.splitext(dl)
        # Skip archives already downloaded on a previous run.
        if os.path.exists(filename):
            continue
        stream = urllib2.urlopen(dl)
        fd = open(filename, 'wb')
        fd.write(stream.read())
        fd.close()
        # Re-open and extract according to the archive type.
        fd = open(filename, 'rb')
        if ext == '.zip':
            unzip(fd)
        elif ext == '.gz':
            untar(fd)
def box2d():
    # Build the Box2D library (MSBuild on Windows, Xcode on macOS).
    if os.name == 'nt':
        proj = os.path.join(deps, 'Box2D_v2.2.1\\build\\vs2010\\Box2D.vcxproj')
        args = (msbuild, proj, '/p:PlatformToolset=v110', '/p:Configuration=Release')
        subprocess.check_call(args)
    elif 'Darwin' in platform.platform():
        proj = os.path.join(deps, 'Box2D_v2.2.1/Build/xcode4/')
        subprocess.check_call('xcodebuild', cwd=proj)
    else:
        # `assert not <non-empty string>` always fails — deliberately aborts
        # with the message visible in the traceback.
        assert not 'error: platform not supported'
def luajit():
    # Build the LuaJIT library (Windows only) and copy the DLL to the cwd.
    if os.name == 'nt':
        cwd = os.path.join(deps, 'LuaJIT-2.0.2', 'src')
        exe = os.path.join(os.getcwd(), cwd, 'msvcbuild.bat')
        subprocess.check_call(exe, cwd=cwd, shell=True)
        shutil.copy(os.path.join(deps, 'LuaJIT-2.0.2\\src\\lua51.dll'), 'lua51.dll')
    else:
        # Always-failing assert used as an abort-with-message (see box2d()).
        assert not 'error: platform not supported'
def sfml():
    """Set up/install SFML — intentionally a no-op (prebuilt binaries are used)."""
    pass
def luasocket():
    # Build the LuaSocket library (Windows only) and install its Lua files
    # under the 'scripts' directory.
    if os.name == 'nt':
        # Make the LuaJIT headers visible to the MSVC build.
        include = os.path.join(os.getcwd(), deps, 'LuaJIT-2.0.2', 'src')
        os.environ['INCLUDE'] = '%s;%s' % (os.environ['INCLUDE'], include)
        print os.environ['INCLUDE']
        cwd = os.path.join(deps, 'luasocket-2.0.2')
        proj = os.path.join(cwd, 'socket.vcxproj')
        # Upgrade the legacy VC project once, stripping the stale lua5.1 lib
        # reference from the generated project file.
        if not os.path.exists(proj):
            subprocess.check_call(('vcupgrade', 'socket.vcproj'), cwd=cwd)
            projtext = open(proj, 'r').read()
            fd = open(proj, 'w')
            fd.write(projtext.replace('<Library Include="..\..\lib\lua5.1.dll.lib" />', ''))
            fd.close()
        subprocess.check_call((
            msbuild,
            proj,
            '/p:UseEnv=true',
            '/p:PlatformToolset=v110',
            '/p:ConfigurationType=StaticLibrary',
            '/p:Configuration=Release'))
        # Copy the pure-Lua modules into scripts/ and scripts/socket/.
        socket = os.path.join('scripts', 'socket')
        if not os.path.exists(socket):
            os.makedirs(socket)
        files = ['http.lua', 'url.lua']
        for fn in files:
            shutil.copy(os.path.join(cwd, 'src', fn), os.path.join(socket, fn))
        files = ['socket.lua', 'ltn12.lua']
        for fn in files:
            shutil.copy(os.path.join(cwd, 'src', fn), os.path.join('scripts', fn))
        # Create empty placeholder modules (the C cores are linked statically).
        fd = open(os.path.join('scripts', 'socket', 'core.lua'), 'w')
        fd.close()
        fd = open(os.path.join('scripts', 'mime.lua'), 'w')
        fd.close()
    else:
        # Always-failing assert used as an abort-with-message (see box2d()).
        assert not 'error: platform not supported'
# Entry point: on Windows build everything from source; elsewhere assume
# Homebrew (macOS) provides prebuilt packages.
if os.name == 'nt':
    download()
    luajit()
    box2d()
    # sfml() # Prebuilt
    #luasocket()
else:
    # NOTE(review): this rebinds the module-level `deps` directory name to a
    # package list — harmless here since nothing else runs afterwards.
    deps = [ 'sfml', 'box2d', 'luajit', 'glew' ]
    subprocess.check_call(['brew', 'install']+deps)
|
from elasticsearch import Elasticsearch, RequestsHttpConnection, serializer, compat, exceptions, helpers
from datetime import timedelta, datetime
import utils
# Default Elasticsearch connection settings and index/type names.
_es_host = '10.200.102.23'
_es_index = 'mimic'
_doc_type = 'eprdoc'
_concept_type = 'ctx_concept'
_patient_type = 'patient'
# Module-level singleton managed by SemEHRES.get_instance*.
_es_instance = None
# Number of hits fetched per page when paging through patient results.
_page_size = 200
class SemEHRES(object):
    """Thin wrapper around an Elasticsearch index holding SemEHR documents,
    annotated concepts and patient records.

    NOTE: Python 2 code (print statement in summary_patients_by_concepts).
    """
    def __init__(self, es_host, es_index, doc_type, concept_type, patient_type):
        self._host = es_host
        self._es_instance = Elasticsearch([self._host])
        self._index = es_index
        self._doc_type = doc_type
        self._patient_type = patient_type
        self._concept_type = concept_type
        self._customise_settings = None
    def search_patient(self, q):
        """Return all patient hits matching *q*, paging through the index.

        An empty query string matches all patients.
        """
        patients = []
        need_next_query = True
        offset = 0
        while need_next_query:
            query = {"match": {"_all": q}} if len(q) > 0 else {"match_all": {}}
            results = self._es_instance.search(self._index, self._patient_type, {"query": query,
                                                                                 "from": offset,
                                                                                 "size": _page_size})
            total = results['hits']['total']
            for p in results['hits']['hits']:
                patients.append(p)
            # Stop once every hit has been fetched.
            offset += len(results['hits']['hits'])
            if offset >= total:
                need_next_query = False
        return patients
    def get_contexted_concepts(self, concept):
        """Map contexted-concept ids matching *concept* to a context label.

        Labels, in precedence order: 'Other' (experiencer), 'historical' /
        'hypothetical' (temporality), 'Negated' (negation), else 'positive'.
        """
        results = self._es_instance.search(self._index, self._concept_type, {"query": {"match": {"_all": concept}},
                                                                             "size": 2000
                                                                             })
        cc_to_ctx = {}
        for cc in results['hits']['hits']:
            d = cc['_source']
            cid = cc['_id']
            if d['experiencer'] == 'Other':
                cc_to_ctx[cid] = 'Other'
            elif d['temporality'] == 'historical':
                cc_to_ctx[cid] = 'historical'
            elif d['temporality'] == 'hypothetical':
                cc_to_ctx[cid] = 'hypothetical'
            elif d['negation'] == 'Negated':
                cc_to_ctx[cid] = 'Negated'
            else:
                cc_to_ctx[cid] = 'positive'
        return cc_to_ctx
    def summary_patients_by_concepts(self, concepts,
                                     filter_func=None, args=[], patient_filters=None,
                                     data_collection_func=None):
        """Per-patient counts of annotations for *concepts*, grouped by context.

        Returns (results, valid_docs): one summary dict per patient and the
        list of document ids contributing at least one counted annotation.
        NOTE(review): `args=[]` is a mutable default argument — safe only
        because it is never mutated here.
        """
        cc_to_ctx = {}
        for t in concepts:
            cc_to_ctx.update(self.get_contexted_concepts(t))
        print len(cc_to_ctx)
        patients = self.search_patient(' '.join(concepts))
        results = []
        valid_docs = set()
        for p in patients:
            if patient_filters is not None and p['_id'] not in patient_filters:
                continue
            sp = {'id': p['_id'], 'all': 0}
            for ann in p['_source']['anns']:
                if ann['contexted_concept'] in cc_to_ctx:
                    if filter_func is not None:
                        # do filter, if filter function returns false, skip it
                        if not filter_func(*tuple(args + [ann, p])):
                            continue
                    valid_docs.add(ann['appearances'][0]['eprid'])
                    t = cc_to_ctx[ann['contexted_concept']]
                    sp[t] = 1 if t not in sp else sp[t] + 1
                    sp['all'] += 1
                    if data_collection_func is not None:
                        data_collection_func(*tuple(args + [ann, sp, t]))
            results.append(sp)
        return results, list(valid_docs)
    def get_doc_detail(self, doc_id, doc_type=None):
        """Return the _source of a document by id, or None if unavailable."""
        doc_type = self._doc_type if doc_type is None else doc_type
        try:
            es_doc = self._es_instance.get(self._index, doc_id, doc_type=doc_type)
            if es_doc is not None:
                return es_doc['_source']
            else:
                return None
        except Exception:
            # Best-effort lookup: any ES error is reported as "not found".
            return None
    def search(self, entity, q, offset=0, size=10, include_fields=None):
        """Run a single match query; return (total_hits, hits)."""
        query = {"query": {"match": {"_all": q}},
                 "from": offset,
                 "size": size}
        if include_fields is not None:
            query['_source'] = {
                "includes": include_fields
            }
        results = self._es_instance.search(self._index, entity, query)
        return results['hits']['total'], results['hits']['hits']
    def scroll(self, q, entity, size=100, include_fields=None, q_obj=None):
        """Return a scan/scroll iterator over all hits for the query.

        q_obj, when given, overrides the simple match query built from q.
        """
        query = {"query": {"match": {"_all": q}},
                 "size": size}
        if q_obj is not None:
            query = {
                "query": q_obj,
                "size": size
            }
        if include_fields is not None:
            query['_source'] = {
                "includes": include_fields
            }
        return helpers.scan(self._es_instance, query,
                            size=size, scroll='10m', index=self._index, doc_type=entity)
    def index_med_profile(self, doc_type, data, patient_id):
        """Index a medical-profile document keyed by patient id."""
        self._es_instance.index(index=self._index, doc_type=doc_type, body=data, id=str(patient_id), timeout='30s')
    @staticmethod
    def get_instance():
        """Return the module-level singleton, built from the default settings."""
        global _es_instance
        if _es_instance is None:
            _es_instance = SemEHRES(_es_host, _es_index, _doc_type, _concept_type, _patient_type)
        return _es_instance
    @staticmethod
    def get_instance_by_setting(es_host, es_index, es_doc_type, es_concept_type, es_patient_type):
        """Return the singleton, creating it from explicit settings on first call.

        NOTE(review): later calls ignore the arguments once the singleton exists.
        """
        global _es_instance
        if _es_instance is None:
            _es_instance = SemEHRES(es_host, es_index, es_doc_type, es_concept_type, es_patient_type)
        return _es_instance
|
# Import DQoc HTML from lp:ubuntu-ui-toolkit
import os
import simplejson
from django.core.files import File
from django.core.files.storage import get_storage_class
from ..models import *
from . import Importer
__all__ = (
'CordovaImporter',
)
# Maps a Cordova plugin classpath to the documentation section it belongs in.
# Commented-out entries are plugins deliberately excluded from the import.
SECTIONS = {
    'org.apache.cordova.battery-status': 'Device and Sensors',
    'org.apache.cordova.camera': 'Device and Sensors',
    # 'org.apache.cordova.contacts': 'Platform Services',
    'org.apache.cordova.device': 'Device and Sensors',
    'org.apache.cordova.device-motion': 'Device and Sensors',
    'org.apache.cordova.dialogs': 'Graphical Interface',
    # 'org.apache.cordova.geolocation': 'Platform Services',
    'org.apache.cordova.globalization': 'Language Types',
    'org.apache.cordova.inappbrowser': 'Platform Services',
    'org.apache.cordova.media': 'Multimedia',
    'org.apache.cordova.media-capture': 'Device and Sensors',
    'org.apache.cordova.network-information': 'Platform Services',
    'org.apache.cordova.splashscreen': 'Graphical Interface',
    'org.apache.cordova.vibration': 'Device and Sensors',
}
class CordovaImporter(Importer):
    """Imports Cordova plugin HTML documentation into the Django models.

    NOTE: Python 2 code (print statements, `unicode`, `except Exception, e`).
    """
    SOURCE_FORMAT = "cordova"
    def __init__(self, *args, **kwargs):
        super(CordovaImporter, self).__init__(*args, **kwargs)
        # Path to the JSON index file; docs live alongside it.
        self.source = self.options.get('index')
        self.DOC_ROOT = os.path.dirname(self.source)
        # Line-parser state: inside a "Supported Platforms" list / Quirks section.
        self.in_supported_platforms = False
        self.in_quirks = False
    def parse_line(self, line, source_filename, element_fullname):
        """Filter one doc line, dropping Supported Platforms and Quirks sections."""
        if '<h3>Supported Platforms</h3>\n' in line:
            self.in_supported_platforms = True
            return ''
        if self.in_supported_platforms:
            if '</ul>' in line:
                self.in_supported_platforms = False
                # NOTE(review): str.replace returns a new string; this result
                # is discarded, so the closing </ul> is left in the line.
                line.replace('</ul>', '', 1)
            else:
                return ''
        if 'Quirks</h3>\n' in line or 'Quirk</h3>\n' in line:
            self.in_quirks = True
            return ''
        if self.in_quirks:
            # A new heading ends the Quirks section.
            if '<h3>' in line or '<h2>' in line or '<h1>' in line:
                self.in_quirks = False
            else:
                return ''
        clean_line = super(CordovaImporter, self).parse_line(line, source_filename, element_fullname)
        return clean_line
    def parse_filename(self, filename):
        # Not used for Cordova docs.
        pass
    def parse_href(self, href):
        # Not used for Cordova docs.
        pass
    def run(self):
        """Load the JSON index and import every known plugin's documentation."""
        if not os.path.exists(self.source):
            print "Source index not found"
            exit(1)
        datafile = open(self.source)
        tree = simplejson.load(datafile)
        datafile.close()
        self.PRIMARY_NAMESPACE = 'org.apache.cordova'
        # Map document filenames to QML class names
        for jsclass in tree:
            classname = self.parse_classname(jsclass.get('term'))
            if classname.startswith(self.PRIMARY_NAMESPACE):
                self.class_map[jsclass.get('url')] = classname
            else:
                pass#self.page_map[jsclass.get('url')] = classname
        # Import YUI class documentation
        for jsclass in tree:
            ns_name = None
            fullname = None
            classname = None
            classpath = self.parse_classname(jsclass.get('term'))
            # Remove module name part of the classname
            if classpath.startswith(self.PRIMARY_NAMESPACE):
                fullname = classpath[len(self.PRIMARY_NAMESPACE)+1:].title()
                if '.' in fullname:
                    classname = fullname.split('.')[-1]
                else:
                    classname = fullname
                ns_name = classpath
            # Only plugins with a known section are imported.
            if classpath not in SECTIONS:
                continue
            self.section, section_created = Section.objects.get_or_create(name=SECTIONS[classpath], topic_version=self.version)
            if ns_name is not None:
                namespace, created = Namespace.objects.get_or_create(name=ns_name, platform_section=self.section)
            else:
                namespace = None
            self.import_module(namespace, classname, jsclass.get('url'))
    def import_module(self, namespace, classname, source_doc):
        """Read one plugin's HTML doc, clean it and store it as an Element."""
        doc_file = os.path.join(self.DOC_ROOT, source_doc)
        doc_handle = open(doc_file)
        doc_data = doc_handle.readlines()
        doc_handle.close()
        if isinstance(namespace.name, unicode):
            fullname = unicode.encode(namespace.name, 'ascii')
        else:
            fullname = namespace.name
        element, created = Element.objects.get_or_create(name=classname, fullname=fullname, section=self.section, namespace=namespace)
        if self.verbosity >= 1:
            print 'Element: ' + element.fullname
        # Locate the slice of lines holding the actual documentation body.
        doc_start = 2
        doc_end = -2
        for i, line in enumerate(doc_data):
            if '<h1><a name="%s">' % fullname in line:
                doc_start = i+2
            if '<!-- Functionality and Syntax Highlighting -->' in line:
                doc_end = i-3
        if self.verbosity >= 3:
            print "Doc range: %s:%s" % (doc_start, doc_end)
        try:
            # Change the content of the docs
            cleaned_data = ''
            for line in doc_data[doc_start:doc_end]:
                if line == '' or line == '\n':
                    continue
                line = self.parse_line(line, source_doc, fullname)
                if isinstance(line, unicode):
                    line = line.encode('ascii', 'replace')
                cleaned_data += line
            element.data = cleaned_data
        except Exception, e:
            print "Parsing content failed:"
            print e
            # Drops into the debugger on parse failure (left in deliberately?).
            import pdb; pdb.set_trace()
        element.source_file = os.path.basename(doc_file)
        element.source_format = "cordova"
        element.save()
    def import_page(self, pagehref, pagename, pagetitle, ns_name):
        """Read one standalone HTML page, clean it and store it as a Page."""
        if pagename.endswith('.html'):
            pagename = pagename[:-5]
        if ns_name is not None:
            namespace, created = Namespace.objects.get_or_create(name=ns_name, platform_section=self.section)
        else:
            namespace = None
        if namespace is not None:
            fullname = namespace.name + '.' + pagename
        else:
            fullname = pagename
        page, created = Page.objects.get_or_create(slug=pagename, fullname=fullname, title=pagetitle, section=self.section, namespace=namespace)
        if self.verbosity >= 1:
            print 'Page: ' + page.slug
        doc_file = os.path.join(self.DOC_ROOT, pagehref)
        doc_handle = open(doc_file)
        doc_data = doc_handle.readlines()
        doc_handle.close()
        # Locate the slice of lines holding the page body.
        doc_start = 2
        doc_end = -2
        for i, line in enumerate(doc_data):
            if '<div id="main" class="yui3-u">' in line:
                doc_start = i+2
            if '<script src="../assets/vendor/prettify/prettify-min.js"></script>' in line:
                doc_end = i-4
        if self.verbosity >= 3:
            print "Doc range: %s:%s" % (doc_start, doc_end)
        try:
            # Change the content of the docs
            cleaned_data = ''
            for line in doc_data[doc_start:doc_end]:
                if line == '' or line == '\n':
                    continue
                if '<h1 class="title">' in line:
                    continue
                line = self.parse_line(line, pagehref, fullname)
                if isinstance(line, unicode):
                    line = line.encode('ascii', 'replace')
                cleaned_data += line
            page.data = cleaned_data
        except Exception, e:
            print "Parsing content failed:"
            print e
            #continue
            #import pdb; pdb.set_trace()
        page.source_file = os.path.basename(doc_file)
        page.source_format = "cordova"
        # NOTE(review): `page_order_index` is not defined anywhere in this
        # method or module — this line raises NameError if ever reached.
        page.order_index = page_order_index
        page.save()
|
""" A container for all information about the field: geometry and labels, as well as convenient API. """
from .base import Field
|
# Simple marker script: confirms this step ran when executed.
print("LETTER A HAS BEEN SUCCESSFULLY EXECUTED")
#
from item_project import *
from map_project import rooms
# Player inventory, holding item ids collected during the game.
inventory = []
#For example: item_id
# Start game at the reception
current_room = rooms["Reception"]
#====================================
# Player status
energy_min = 0
#Minimum energy of player
energy_max = 100
#Maximum energy of player
project_process = 0
#Original project progress
project_process_max = 100
#Maximum project progress
|
web: python ml_backend.py |
#!/usr/bin/env python3
# Marcos del Cueto
# Import libraries
import math
import matplotlib.pyplot as plt
from matplotlib.ticker import (MultipleLocator)
import numpy as np
# Initialize lists
list_x = []
list_y = []
# Generate dataset as 10 points from x=5 to x=6.8, with y=exp(x)
for x in np.arange(5, 7, 0.2):
    y = math.exp(x)
    list_x.append(x)
    list_y.append(y)
    print("%.2f, %.2f" %(x, y))
# Transform lists to numpy arrays (x as a column vector for sklearn-style use)
list_x = np.array(list_x).reshape(-1, 1)
list_y = np.array(list_y)
# Create arrays with function y=exp(x) (dense grid for the smooth curve)
function_x = np.arange(4.9, 7.0, 0.01)
function_y = [math.exp(x) for x in function_x]
# Plot points in dataset plus dashed line with function
plt.plot(function_x,function_y,color='C0',linestyle='dashed',linewidth=1)
plt.scatter(list_x, list_y,color='C0')
# Set axis labels
plt.xlabel('$x$',fontsize=15)
plt.ylabel('$y$',fontsize=15)
# Set axis ticks and limits
plt.xticks(np.arange(5,7,0.2))
plt.xlim(4.92,6.88)
plt.ylim(100,1000)
# Set minor ticks
axes = plt.gca()
axes.xaxis.set_minor_locator(MultipleLocator(0.05))
# Save plot into Figure_1.png
file_name='Figure_1.png'
plt.savefig(file_name,format='png',dpi=600)
plt.close()
|
from django import forms
from django.contrib.auth.forms import UserCreationForm, UserChangeForm, AuthenticationForm
from .models import CustomUser
class CustomUserCreationForm(UserCreationForm):
    """Sign-up form for CustomUser exposing only username and email."""
    class Meta:
        model = CustomUser
        fields = ('username', 'email')
class CustomUserChangeForm(UserChangeForm):
    """Admin/profile edit form for CustomUser (username and email only)."""
    class Meta:
        model = CustomUser
        fields = ('username', 'email')
class CustomLoginForm(AuthenticationForm):
    """Login form with Bootstrap-styled username/password widgets."""

    # Empty labels: the placeholder text serves as the visible prompt.
    username = forms.CharField(label='', widget=forms.TextInput(
        attrs={
            'class': 'form-control',
            'placeholder': 'Enter your username',
        })
    )
    password = forms.CharField(label='', widget=forms.PasswordInput(
        attrs={
            'class': 'form-control',
            'placeholder': 'Type your password'
        }
    ))
|
###############################################################################
# Team List retrieval for FIRST Robotics FRC using The Blue Alliance API
#
##############
# INVOCATION #
##############
# The program can be invoked from the command line with two arguments.
# The first argument is the year of the event,
# the second is the event code,
# the third is the name of a file to which the results will be exported.
# example: python MatchScheduleViaTBI.py 2016 cada ds.txt
#
# The program can also be invoked without any arguments. In this case, the user
# will be prompted to input the commands directly.
#
##########
# OUTPUT #
##########
# The program exports the results into a given file with one team per line, tab delimited.
###############################################################################
import urllib2 # to get the page HTML code
#import urllib # to encode url data
import json # to parse JSON response
import sys # to get the arguments
from time import clock # for determining how long it took
START_TIME = clock()
filePath = ''
def showUsage():
    """Print command-line usage information for this script.

    FIX: the original usage text named two different scripts
    (MatchScheduleViaTBA.py on one line, TeamListViaTBI.py on another);
    it is unified here to a single name.
    """
    print('MatchScheduleViaTBA.py')
    print('Usage: python MatchScheduleViaTBA.py [ <YEAR> , <EVENT>, <FILE> ]')
    print('where: ')
    print('    <YEAR> is the numeric year of the event.')
    print('    <EVENT> is the eventCode of the event.')
    print('    <FILE> is the name of a file to which the results will be saved.')
    print('If no arguments are provided, the user will be prompted.')
# The application requires two arguments: a URL and a file location.
if (len(sys.argv) != 4 and len(sys.argv) != 1): # first is the script location
# Bad number of arguments.
showUsage()
exit(1)
elif len(sys.argv) is 1: # No arguments; prompt
print('Match Schedule Formatter for FRC')
print('Input arguments:')
year = raw_input('Event Year = ')
event = raw_input('Event Code = ')
filePath = raw_input('Export file location = ')
else:
year = sys.argv[1]
event = sys.argv[2]
filePath = sys.argv[3]
print('Program begun...')
# Now, we have to write the data to a file.
matchfile = open(filePath, 'w') # open for write
print('File opened successfully...')
url = 'http://www.thebluealliance.com/api/v2/event/'
url = url + year
url = url + event
url = url + "/matches"
req = urllib2.Request(url)
req.add_header('X-TBA-App-Id', 'frc100:scouting-system:v01')
req.add_header('User-Agent', "Mozilla/5.0 (X11; U; Linux i686) Gecko/20071127 Firefox/2.0.0.11")
print (url)
print (req.header_items())
try:
print('Fetching team data from The Blue Alliance...')
response = urllib2.urlopen(req)
print('Parsing returned data from The Blue alliance...')
thepage = response.read() # interpret the response
matchList = json.loads(thepage) # load the JSON data
matches = {}
for match in matchList:
if match['comp_level'] == 'qm':
matchId = match['match_number']
alliances = match['alliances']
blue = alliances['blue']
blueteams = blue['teams']
red = alliances['red']
redteams = red['teams']
rawMatchData = [str(matchId)]
rawMatchData.extend(redteams)
rawMatchData.extend(blueteams)
matchStr = ",".join(rawMatchData)
matchStr = matchStr.replace("frc", "")
matches[matchId] = matchStr
for k in sorted(matches.keys()):
print matches[k]
matchfile.write(matches[k])
matchfile.write('\n')
except urllib2.HTTPError as e:
print e
matchfile.close()
print('Data written successfully...')
print('Done.')
|
from django.db import models
from django.contrib.auth.models import User
"""
class Platos:
id Plato
str descriprcion
class Menu:
id menu
id plato
date fecha
class pedido plato seleccionado
id pedido
id plato seleccionado
class pedido customizacion
id pedido
id customizacion
class pedido
id pedido
id menu
date fecha
"""
class Menu(models.Model):
    """A menu for a single day; at most one menu per calendar date."""
    # unique=True enforces one Menu row per day.
    date = models.DateField(unique=True)
class Options(models.Model):
    """One selectable dish ("option") belonging to a menu."""
    menu = models.ForeignKey(
        Menu, on_delete=models.CASCADE, related_name='options')
    # Position/identifier of this option within its menu (client-assigned,
    # matched by Order.option_selected below).
    option_id = models.IntegerField()
    text = models.TextField(null=False)
class Order(models.Model):
    """A user's order against a given day's menu."""
    user = models.ForeignKey(
        User, on_delete=models.CASCADE, related_name='orders')
    menu = models.ForeignKey(
        Menu, on_delete=models.CASCADE, related_name='orders')
    # References Options.option_id of the chosen dish (plain int, not an FK).
    option_selected = models.IntegerField()
    # Free-text customizations; optional.
    customizations = models.TextField(null=True)
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
class C1(object):
    """Demo of private name mangling: __x is stored as _C1__x."""

    def meth1(self):
        # Mangled to self._C1__x, so it cannot clash with C2's __x.
        self.__x = 88

    def meth2(self):
        print(self.__x)
class C2(object):
    """Second demo class: its __x mangles to _C2__x, distinct from C1's."""

    def metha(self):
        # Mangled to self._C2__x.
        self.__x = 99

    def methb(self):
        print(self.__x)
class C3(C1, C2):
    """Inherits both; the mangled names _C1__x and _C2__x never collide."""
    pass
# Demonstration: each base class writes its own mangled attribute, so both
# values coexist on one instance (visible in __dict__) and both print calls
# succeed.
a = C3()
a.meth1()
a.metha()
print(a.__dict__)
a.meth2()
a.methb()
|
#!/usr/bin/python3
import os
import zipfile
import sys
import glob
import gzip
import time
from subprocess import call
from operator import itemgetter
# The task name comes from the command line; every output file is named
# after it (<task>.load.log, <task>.tables.log, ...).
task_name = sys.argv[1]
# Repository root = parent of this script's directory.
root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
logs_dir = os.path.join(root, "logs")
asterixdb = os.path.join(root, "asterixdb", "opt", "local")
# Heuristic: presence of "0_*" rtree component files indicates the leveled
# merge policy, which changes how load stats are aggregated below.
is_level = len(glob.glob(os.path.join(asterixdb, "data", "red", "storage", "partition_0", "Level_Spatial", "Spatial_Table", "0",
                                      "rtreeidx", "0_*"))) > 0
# Marker files AsterixDB keeps on disk while a flush/merge is in progress;
# wait_io() polls these before touching the logs.
io_flags = (
    os.path.join(asterixdb, "data", "red", "storage", "partition_0", "Level_Spatial", "Spatial_Table", "0",
                 "Spatial_Table", "is_flushing"),
    os.path.join(asterixdb, "data", "red", "storage", "partition_0", "Level_Spatial", "Spatial_Table", "0",
                 "Spatial_Table", "is_merging"),
    os.path.join(asterixdb, "data", "red", "storage", "partition_0", "Level_Spatial", "Spatial_Table", "0",
                 "rtreeidx", "is_flushing"),
    os.path.join(asterixdb, "data", "red", "storage", "partition_0", "Level_Spatial", "Spatial_Table", "0",
                 "rtreeidx", "is_merging")
)
def wait_io():
    """Block until AsterixDB finishes any pending flush/merge I/O.

    Polls the io_flags marker files every 10 seconds; any file present
    means a flush or merge is still running.
    """

    def busy():
        return any(os.path.isfile(flag) for flag in io_flags)

    while busy():
        print("I/O pending...")
        time.sleep(10)
def write_err(msg):
    """Append *msg* (newline-terminated) to this task's .err log file.

    msg: message text; a trailing newline is added if missing.
    """
    # FIX: use a context manager so the handle is closed even if the
    # write raises (the original opened/closed manually).
    with open(os.path.join(logs_dir, task_name + ".err"), "a") as err_log:
        err_log.write(msg if msg.endswith("\n") else msg + "\n")
def parseline(line, kw):
    """Return the tab-separated fields that follow *kw* in *line*.

    Returns [] when *kw* does not occur in *line*. The character
    immediately after *kw* (a separator) is skipped.
    """
    if kw not in line:
        return []
    start = line.index(kw) + len(kw) + 1
    return line[start:].replace("\n", "").split("\t")
def count_levels(components):
    """Count distinct level numbers among component names "<level>_<id>"."""
    return len({int(name.split("_")[0]) for name in components})
def extract_logs():
    """Parse AsterixDB nc-red logs into per-task load/tables/read log files.

    Pass 1 scans every node-controller log (plain or gzipped), skipping
    multi-line ERROR blocks, and splits [FLUSH]/[MERGE]/[COMPONENTS]/[SEARCH]
    records for the rtree index into three temp files. The temp files are
    then sorted by timestamp via external `sort`. Pass 2 aggregates flush
    and merge statistics into <task>.load.log, tracking either the number
    of levels (leveled policy) or the stack size (stacking policy).
    """
    with open(os.path.join(logs_dir, task_name + ".load.tmp1"), "w") as loadtmpf, \
            open(os.path.join(logs_dir, task_name + ".tables.tmp"), "w") as tablestmpf, \
            open(os.path.join(logs_dir, task_name + ".read.tmp"), "w") as readtmpf:
        for logp in glob.glob(os.path.join(asterixdb, "logs", "nc-red*")):
            with (open(logp, "r") if logp.endswith(".log") else gzip.open(logp, "rt")) as logf:
                is_err = False
                for line in logf:
                    # Track whether we are inside a multi-line ERROR block;
                    # lines are skipped until a WARN/INFO line ends it.
                    if is_err:
                        if " WARN " in line or " INFO " in line or "\tWARN\t" in line or "\tINFO\t" in line:
                            is_err = False
                    else:
                        if " ERROR " in line or "\tERROR\t" in line:
                            is_err = True
                    if not is_err:
                        if "[FLUSH]\trtreeidx" in line:
                            parts = parseline(line, "[FLUSH]\trtreeidx")
                            if len(parts) != 7:
                                continue
                            timestamp = parts[0]
                            new_component = parts[6]
                            c_name = new_component.split(":")[0].replace("_", "\t")
                            loadtmpf.write(timestamp + "\tF\t" + "\t".join(parts[1:]) + "\n")
                            tablestmpf.write(c_name + "\t" + new_component.replace(":", "\t") + "\n")
                        elif "[MERGE]\trtreeidx" in line:
                            parts = parseline(line, "[MERGE]\trtreeidx")
                            if len(parts) != 8:
                                continue
                            timestamp = parts[0]
                            new_components = parts[7].split(";")
                            loadtmpf.write(timestamp + "\tM\t" + "\t".join(parts[1:]) + "\n")
                            for new_component in new_components:
                                c_name = new_component.split(":")[0].replace("_", "\t")
                                tablestmpf.write(c_name + "\t" + new_component.replace(":", "\t") + "\n")
                        elif "[COMPONENTS]\trtreeidx" in line:
                            parts = parseline(line, "[COMPONENTS]\trtreeidx")
                            if len(parts) != 2:
                                continue
                            loadtmpf.write(parts[0] + "\tC\t" + parts[1] + "\n")
                        elif "[SEARCH]\trtreeidx" in line:
                            parts = parseline(line, "[SEARCH]\trtreeidx")
                            if len(parts) != 8:
                                continue
                            readtmpf.write("\t".join(parts) + "\n")
                        else:
                            continue
            logf.close()
        loadtmpf.close()
        tablestmpf.close()
        readtmpf.close()
    # Sort each temp file by timestamp (column 1) and drop the sort key(s).
    call("sort -n -k1,1 \"{0}.tmp1\" | cut -f2- > \"{0}.tmp2\""
         .format(os.path.join(logs_dir, task_name + ".load")), shell=True)
    try:
        os.remove(os.path.join(logs_dir, task_name + ".load.tmp1"))
    except:
        pass
    call("sort -n -k1,1 -k2,2 \"{0}.tmp\" | cut -f3- > \"{0}.log\""
         .format(os.path.join(logs_dir, task_name + ".tables")), shell=True)
    try:
        os.remove(os.path.join(logs_dir, task_name + ".tables.tmp"))
    except:
        pass
    call("sort -n -k1,1 \"{0}.tmp\" | cut -f2- > \"{0}.log\""
         .format(os.path.join(logs_dir, task_name + ".read")), shell=True)
    try:
        os.remove(os.path.join(logs_dir, task_name + ".read.tmp"))
    except:
        pass
    # Pass 2: aggregate per-flush statistics. Each F record opens a new row;
    # M and C records update the row addressed by their flush counter.
    total_flushed = []
    total_merged = []
    tmp_space = []
    if is_level:
        # Leveled merge policy: additionally track the number of levels.
        num_levels = []
        with open(os.path.join(logs_dir, task_name + ".load.tmp2"), "r") as inf:
            for line in inf:
                parts = line.replace("\n", "").split("\t")
                if len(parts) < 2:
                    continue
                if parts[0] == "F":
                    flushed = int(parts[3])
                    merged = int(parts[4])
                    total_flushed.append(flushed)
                    total_merged.append(merged)
                    tmp_space.append(0)
                    # Carry the previous level count forward (1 for the first).
                    if len(num_levels) == 0:
                        num_levels.append(1)
                    else:
                        num_levels.append(num_levels[-1])
                elif parts[0] == "M":
                    f_cnt = int(parts[1])
                    merged = int(parts[4])
                    new_components = parts[7].split(";")
                    new_size = 0
                    for c in new_components:
                        new_size += int(c.split(":")[1])
                    total_merged[f_cnt - 1] = merged
                    # Track the largest merge output seen for this flush.
                    tmp_space[f_cnt - 1] = max(tmp_space[f_cnt - 1], new_size)
                elif parts[0] == "C":
                    components = parts[1].split(";")
                    f_cnt = int(components[0].split("_")[1])
                    if f_cnt > len(num_levels):
                        num_levels.append(count_levels(components))
                    else:
                        num_levels[f_cnt - 1] = count_levels(components)
                else:
                    continue
            inf.close()
        try:
            os.remove(os.path.join(logs_dir, task_name + ".load.tmp2"))
        except:
            pass
        with open(os.path.join(logs_dir, task_name + ".load.log"), "w") as outf:
            for i in range(len(total_merged)):
                outf.write("{0}\t{1}\t{2}\t{3}\n".format(total_flushed[i], total_merged[i], num_levels[i], tmp_space[i]))
            outf.close()
    else:
        # Stacking merge policy: track the component stack size instead.
        stack_size = []
        with open(os.path.join(logs_dir, task_name + ".load.tmp2"), "r") as inf:
            for line in inf:
                parts = line.replace("\n", "").split("\t")
                if len(parts) < 2:
                    continue
                if parts[0] == "F":
                    flushed = int(parts[3])
                    merged = int(parts[4])
                    total_flushed.append(flushed)
                    total_merged.append(merged)
                    tmp_space.append(0)
                    # Each flush adds one component to the stack.
                    if len(stack_size) == 0:
                        stack_size.append(1)
                    else:
                        stack_size.append(stack_size[-1] + 1)
                elif parts[0] == "M":
                    f_cnt = int(parts[1])
                    merged = int(parts[4])
                    new_components = parts[7].split(";")
                    new_size = 0
                    for c in new_components:
                        new_size += int(c.split(":")[1])
                    total_merged[f_cnt - 1] = merged
                    tmp_space[f_cnt - 1] = max(tmp_space[f_cnt - 1], new_size)
                elif parts[0] == "C":
                    components = parts[1].split(";")
                    f_cnt = int(components[0].split("_")[1])
                    if f_cnt > len(stack_size):
                        stack_size.append(len(components))
                    else:
                        stack_size[f_cnt - 1] = len(components)
                else:
                    continue
            inf.close()
        try:
            os.remove(os.path.join(logs_dir, task_name + ".load.tmp2"))
        except:
            pass
        with open(os.path.join(logs_dir, task_name + ".load.log"), "w") as outf:
            for i in range(len(total_merged)):
                outf.write(
                    "{0}\t{1}\t{2}\t{3}\n".format(total_flushed[i], total_merged[i], stack_size[i], tmp_space[i]))
            outf.close()
def zip_logs():
    """Bundle this task's log files into <task>.zip, then delete the originals.

    Only files that exist and are non-empty are archived; all listed files
    that exist are removed afterwards regardless of whether they were added.
    """
    in_files = [
        os.path.join(logs_dir, task_name + ".tables.log"),
        os.path.join(logs_dir, task_name + ".load.log"),
        os.path.join(logs_dir, task_name + ".read.log"),
        os.path.join(logs_dir, task_name + ".err")
    ]
    with zipfile.ZipFile(os.path.join(logs_dir, task_name + ".zip"), "w") as z:
        for f in in_files:
            if os.path.isfile(f) and os.path.getsize(f) > 0:
                # Third positional arg is compress_type.
                z.write(f, os.path.basename(f), zipfile.ZIP_DEFLATED)
        # FIX: removed the redundant z.close() that was inside the
        # with-block; the context manager already closes the archive.
    for f in in_files:
        if os.path.isfile(f):
            os.remove(f)
# Main sequence: wait until AsterixDB is idle, reset this task's error log,
# then extract and archive the logs.
wait_io()
try:
    os.remove(os.path.join(logs_dir, task_name + ".err"))
except:
    pass
extract_logs()
zip_logs()
|
'''
Created on Nov 11, 2016
@author: micro
'''
import wpilib
class Test_Run (wpilib.IterativeRobot):
    """Minimal test robot: four CAN Talon controllers plus a 2-motor drive."""

    def robotInit(self):
        # BUG FIX: the original assigned CANTalon(1)..CANTalon(4) to the
        # same attribute `self.motor`, discarding the first three
        # controllers. Keep all four, and leave `self.motor` pointing at
        # CANTalon(4) — the object that survived in the original code.
        self.motors = [wpilib.CANTalon(channel) for channel in (1, 2, 3, 4)]
        self.motor = self.motors[-1]
        self.motor.set(1)
        self.robot_drive = wpilib.RobotDrive(0, 1)
# coding: utf-8
from stiffsquare import nambu_square_coexistence_vers2 as periodize_nambu
from stiffsquare import stiffness_square
import numpy as np
#=========== DEBUT de definition de la self-fictive, juste pour demo, PAS IMPORTANT ======================
# =========== START: definition of a mock self-energy, demo only, NOT IMPORTANT ===========
beta = 40.0  # inverse temperature for the Matsubara frequencies below
# First 20 fermionic Matsubara frequencies (2n+1)*pi/beta, complex dtype.
znvec_tmp = np.array([(2.0*n + 1.0)*np.pi/beta for n in range(20)], dtype=complex)
# Mock cluster self-energy: one 8x8 matrix per frequency.
sEvec_c_tmp = np.zeros((20, 8, 8), dtype=complex)
MM = 0.0517  # magnitude placed on the diagonal blocks
pp = 0.005
dd = 0.03
for (ii, zz) in enumerate(znvec_tmp):
    # Gup block (upper-left 4x4 diagonal, +/- MM pattern)
    sEvec_c_tmp[ii, 0, 0] = sEvec_c_tmp[ii, 3, 3] = -MM
    sEvec_c_tmp[ii, 1, 1] = sEvec_c_tmp[ii, 2, 2] = MM
    # Gdown* block (lower-right 4x4 diagonal)
    sEvec_c_tmp[ii, 4, 4] = sEvec_c_tmp[ii, 6, 6] = -MM
    sEvec_c_tmp[ii, 5, 5] = sEvec_c_tmp[ii, 7, 7] = MM
    # F: off-diagonal (anomalous) 4x4 block, decaying as 1/z^2
    FF = 1.0/(zz*zz)*np.array([
        [0.0, dd + pp, -dd + pp, 0.0],
        [dd - pp, 0.0, 0.0, -dd + pp],
        [-dd - pp, 0.0, 0.0, dd + pp],
        [0.0, -dd - pp, dd - pp, 0.0]
    ], dtype=complex)
    sEvec_c_tmp[ii, :4:, 4::] = FF
    # FDag: conjugate transpose of F in the opposite off-diagonal block
    sEvec_c_tmp[ii, 4::, :4:] = np.conjugate(np.transpose(FF))
# =========== END: mock self-energy definition, demo only ===========
(znvec, sEvec_c) = (znvec_tmp, sEvec_c_tmp)  # (qcm.sE_cluster, or the small discretization loop above)
modelSC = periodize_nambu.ModelNambu(t=1.0, tp=0.40, tpp=0.0, mu=2.9246671954980012, z_vec=1.0j*znvec, sEvec_c=sEvec_c)  # z_vec must be complex (1j * znvec)
stiffness_square.stiffness(modelSC)
|
'''
AuthHandler encapsulates the logic to authenticate users on the server-side.
'''
import base64
import json
import threading
import time
import urllib
import urllib2
from base64 import encodestring
from codalab.common import PermissionError
class User(object):
    '''
    A registered user: a display name paired with a unique (int) identifier.
    '''

    def __init__(self, name, unique_id):
        # Stored as plain attributes; no validation is performed here.
        self.name = name
        self.unique_id = unique_id
class MockAuthHandler(object):
    '''
    Mock authentication handler for running a server without real
    authentication. Holds a fixed user list; the first user is the
    default current user, and no passwords are checked.
    '''

    def __init__(self, users):
        self.users = users
        self._user = users[0]

    def generate_token(self, grant_type, username, key):
        '''
        Return mock token information for *username*, or None if there is
        no user with that name. The key/password is ignored.
        '''
        for user in self.users:
            if user.name == username:
                self._user = user
                return {
                    'token_type': 'Bearer',
                    'access_token': '__mock_token__',
                    'expires_in': 3600 * 24 * 365,
                    'refresh_token': '__mock_token__',
                }
        return None

    def validate_token(self, token):
        '''
        Always returns True. The specified token is ignored.
        '''
        return True

    def get_users(self, key_type, keys):
        '''
        Resolve user names (key_type='names') or user IDs (key_type='ids')
        to User objects. Returns a dict mapping each input key to the first
        matching user, or None when no user matches.
        '''
        if key_type == 'names':
            attr = 'name'
        elif key_type == 'ids':
            attr = 'unique_id'
        else:
            raise ValueError('Invalid key_type')
        resolved = {}
        for key in keys:
            hits = [user for user in self.users if getattr(user, attr) == key]
            resolved[key] = hits[0] if hits else None
        return resolved

    def current_user(self):
        return self._user
class OAuthHandler(threading.local):
    '''
    Handles user authentication with a remote OAuth authorization server.
    Inherits from threading.local, which makes all instance attributes thread-local.
    When an OAuthHandler instance is used from a new thread, __init__ will be called
    again, and from thereon all attributes may be different between threads.
    https://hg.python.org/cpython/file/2.7/Lib/_threading_local.py
    '''

    def __init__(self, address, app_id, app_key):
        '''
        address: the address of the OAuth authorization server
            (e.g. https://www.codalab.org).
        app_id: OAuth application identifier.
        app_key: OAuth application key.
        '''
        self._address = address
        self._app_id = app_id
        self._app_key = app_key
        # Minimal credential lengths checked before contacting the server.
        self.min_username_length = 1
        self.min_key_length = 4
        # Cached state: the validated end user, plus this app's own token
        # and its expiry time (refreshed lazily by _generate_app_token).
        self._user = None
        self._access_token = None
        self._expires_at = 0.0

    def _get_token_url(self):
        return "{0}/clients/token/".format(self._address)

    def _get_validation_url(self):
        return "{0}/clients/validation/".format(self._address)

    def _get_user_info_url(self):
        return "{0}/clients/info/".format(self._address)

    def _generate_new_token(self, username, password):
        '''
        Get OAuth2 token using Resource Owner Password Credentials Grant.
        Returns the token dict, or None on HTTP 400 (bad credentials).
        '''
        appname = 'cli_client_{0}'.format(username)
        headers = {'Authorization': 'Basic {0}'.format(encodestring('%s:' % appname).replace('\n', ''))}
        data = [('grant_type', 'password'),
                ('username', username),
                ('password', password)]
        request = urllib2.Request(self._get_token_url(), urllib.urlencode(data, True), headers)
        try:
            response = urllib2.urlopen(request)
            token_info = json.load(response)
            return token_info
        except urllib2.HTTPError as e:
            if e.code == 400:
                return None
            raise e

    def _refresh_token(self, username, refresh_token):
        '''
        Refresh OAuth2 token. Returns the new token dict, or None on 400.
        '''
        appname = 'cli_client_{0}'.format(username)
        headers = {'Authorization': 'Basic {0}'.format(encodestring('%s:' % appname).replace('\n', ''))}
        data = [('grant_type', 'refresh_token'),
                ('refresh_token', refresh_token)]
        request = urllib2.Request(self._get_token_url(), urllib.urlencode(data, True), headers)
        try:
            response = urllib2.urlopen(request)
            token_info = json.load(response)
            return token_info
        except urllib2.URLError as e:
            # NOTE(review): this catches URLError but reads e.code, which
            # only HTTPError defines — a non-HTTP URLError here would raise
            # AttributeError. Confirm whether HTTPError was intended (as in
            # _generate_new_token above).
            if e.code == 400:
                return None
            raise e

    def generate_token(self, grant_type, username, key):
        '''
        Generate OAuth access token from username/password or from a refresh token.
        If the grant succeeds, the method returns a dictionary of the form:
        { 'token_type': 'Bearer',
          'access_token': <token>,
          'expires_in': <span in seconds>,
          'refresh_token': <token> }
        If the grant fails because of invalid credentials, None is returned.
        '''
        if grant_type == 'credentials':
            if len(username) < self.min_username_length or len(key) < self.min_key_length:
                raise PermissionError("Invalid username or password.")
            return self._generate_new_token(username, key)
        if grant_type == 'refresh_token':
            return self._refresh_token(username, key)
        raise ValueError("Bad request: grant_type is not valid.")

    def _generate_app_token(self):
        '''
        Helper to authenticate this app with the OAuth authorization server.
        Caches the app token and its expiry timestamp on self.
        '''
        app_sig = '%s:%s' % (self._app_id, self._app_key)
        headers = {'Authorization': 'Basic {0}'.format(encodestring(app_sig).replace('\n', ''))}
        data = [('grant_type', 'client_credentials'),
                ('scope', 'token-validation')]
        request = urllib2.Request(self._get_token_url(), urllib.urlencode(data, True), headers)
        response = urllib2.urlopen(request)
        token_info = json.load(response)
        self._access_token = token_info['access_token']
        self._expires_at = time.time() + float(token_info['expires_in'])

    def validate_token(self, token):
        '''
        Validate OAuth authorization information.
        token: The token to validate. This value may be None to indicate that no
            Authorization header was specified. In such case this method will
            return true and set the current user to None.
        Returns True if the request is authorized to proceed. The current_user
        property of this class provides the user associated with the token.
        '''
        self._user = None
        if token is None:
            return True
        if len(token) <= 0:
            return False
        # Lazily (re)acquire this app's own token before asking the server
        # to validate the end user's token.
        if self._access_token is None or self._expires_at < time.time():
            self._generate_app_token()
        headers = {'Authorization': 'Bearer {0}'.format(self._access_token)}
        data = [('token', token)]
        request = urllib2.Request(self._get_validation_url(), urllib.urlencode(data, True), headers)
        response = urllib2.urlopen(request)
        result = json.load(response)
        # The server reports its status inside the JSON body.
        status_code = result['code'] if 'code' in result else 500
        if status_code == 200:
            self._user = User(result['user']['name'], str(result['user']['id']))
            return True
        elif status_code == 403 or status_code == 404:
            return False  # 'User credentials are not valid'
        else:
            return False  # 'The token translation failed.'

    def get_users(self, key_type, keys):
        '''
        Resolves user names (key_type='names') or user IDs (key_type='ids') to
        corresponding User objects.
        key_type: The type of input keys: names or ids.
        keys: The set of names/ids to resolve.
        Returns a dictionary where keys are keys input to this method and
        values are either a User object or None if the key does not have
        a matching user (either the user does not exist or exists but is
        not active).
        '''
        if key_type not in ('names', 'ids'):
            raise ValueError('Invalid key_type')
        if self._access_token is None or self._expires_at < time.time():
            self._generate_app_token()
        headers = {'Authorization': 'Bearer {0}'.format(self._access_token)}
        request = urllib2.Request(self._get_user_info_url(),
                                  urllib.urlencode([(key_type, keys)], True),
                                  headers)
        response = urllib2.urlopen(request)
        result = json.load(response)
        status_code = result['code'] if 'code' in result else 500
        # NOTE: on a non-200 status this returns None rather than a dict.
        user_dict = None
        if status_code == 200:
            user_dict = {}
            key_type_key = 'name' if key_type == 'names' else 'id'
            for user in result['users']:
                key = str(user[key_type_key])
                # Inactive users (or entries missing 'active') resolve to None.
                if 'active' in user and user['active'] == True:
                    user_dict[key] = User(user['name'], user['id'])
                else:
                    user_dict[key] = None
        return user_dict

    def current_user(self):
        '''
        Returns the current user as set by validate_token.
        '''
        return self._user
class LocalUserFetcher(object):
    '''
    Base class for handlers that return users from the local database.
    '''

    def __init__(self, model):
        '''
        model: BundleModel instance
        '''
        self._model = model

    def get_users(self, key_type, keys):
        '''
        Resolve user names (key_type='names') or user IDs (key_type='ids')
        to User objects. Returns a dict mapping each input key to a User,
        or to None when no matching (active) user exists.
        '''
        # TODO(klopyrev): Once we've deprecated the OAuth handler that talks
        # to the Django server, we can migrate callers of this method to
        # the BundleModel version.
        if key_type == 'ids':
            user_ids, usernames = keys, None
        elif key_type == 'names':
            user_ids, usernames = None, keys
        else:
            raise ValueError('Invalid key_type')
        records = self._model.get_users(user_ids, usernames)
        user_dict = {}
        for record in records:
            resolved_key = record.user_id if key_type == 'ids' else record.user_name
            user_dict[resolved_key] = User(record.user_name, record.user_id)
        # Keys the database did not return resolve to None.
        for key in keys:
            user_dict.setdefault(key, None)
        return user_dict
class RestOAuthHandler(threading.local, LocalUserFetcher):
    '''
    Handles user authentication with the REST bundle service server. Fetches
    other user records from the local database.
    Inherits from threading.local, which makes all instance attributes thread-local.
    When an OAuthHandler instance is used from a new thread, __init__ will be called
    again, and from thereon all attributes may be different between threads.
    https://hg.python.org/cpython/file/2.7/Lib/_threading_local.py
    '''

    def __init__(self, address, model):
        '''
        address: the address of the server
        model: BundleModel instance
        '''
        super(RestOAuthHandler, self).__init__(model)
        self._address = address
        self._user = None

    def generate_token(self, grant_type, username, key):
        '''
        Generate OAuth access token from username/password or from a refresh token.
        If the grant succeeds, the method returns a dictionary of the form:
        { 'token_type': 'Bearer',
          'access_token': <token>,
          'expires_in': <span in seconds>,
          'refresh_token': <token> }
        If the grant fails because of invalid credentials, None is returned.
        '''
        if grant_type == 'credentials':
            # BUG FIX: the original had a second, unreachable `return
            # self._generate_token_from_credentials(username, key)` after
            # this return — a method that does not exist on this class or
            # its bases. The dead statement has been removed.
            return self._make_token_request({
                'grant_type': 'password',
                'username': username,
                'password': key})
        if grant_type == 'refresh_token':
            return self._make_token_request({
                'grant_type': 'refresh_token',
                'refresh_token': key})
        raise ValueError("Bad request: grant_type is not valid.")

    def _make_token_request(self, data):
        # POST the grant to the token endpoint; HTTP 401 means the
        # credentials were rejected, which callers see as None.
        headers = {
            'Content-Type': 'application/x-www-form-urlencoded',
            'Authorization': 'Basic ' + base64.b64encode('codalab_cli_client:'),
            'X-Requested-With': 'XMLHttpRequest'}
        request = urllib2.Request(
            self._address + '/rest/oauth2/token',
            headers=headers,
            data=urllib.urlencode(data))
        try:
            response = urllib2.urlopen(request)
            result = json.load(response)
            return result
        except urllib2.HTTPError as e:
            if e.code == 401:
                return None
            raise

    def validate_token(self, token):
        '''
        Validate OAuth authorization information.
        token: The token to validate. This value may be None to indicate that no
            Authorization header was specified. In such case this method will
            return true and set the current user to None.
        Returns True if the request is authorized to proceed. The current_user
        property of this class provides the user associated with the token.
        '''
        self._user = None
        if token is None:
            return True
        request = urllib2.Request(
            self._address + '/rest/oauth2/validate',
            headers={'Authorization': 'Bearer ' + token,
                     'X-Requested-With': 'XMLHttpRequest'})
        try:
            response = urllib2.urlopen(request)
            result = json.load(response)
            self._user = User(result['user_name'], result['user_id'])
            return True
        except urllib2.HTTPError as e:
            if e.code == 401:
                return False
            raise

    def current_user(self):
        '''
        Returns the current user as set by validate_token.
        '''
        return self._user
class LocalUserAuthHandler(LocalUserFetcher):
    '''
    Auth handler bound to a fixed user supplied at construction time.
    Other user records are fetched from the local database.
    '''

    def __init__(self, user, model):
        '''
        user: User to use, a codalab.object.user instance (may be None)
        model: BundleModel instance
        '''
        super(LocalUserAuthHandler, self).__init__(model)
        self._user = None if user is None else User(user.user_name, user.user_id)

    def current_user(self):
        '''
        Returns the current user (the one given at construction, or None).
        '''
        return self._user
|
import pygame
import time
import random

pygame.init()

# NOTE(review): `list` shadows the builtin and appears unused below — verify.
list = [0, 0]

# Window dimensions.
display_width = 800
display_height = 600

# Color palette (RGB).
black = (0, 0, 0)
white = (255, 255, 255)
red = (200, 0, 0)
green = (0, 200, 0)
bright_red = (255, 0, 0)
bright_green = (0, 255, 0)
block_color = (53, 115, 255)

gameDisplay = pygame.display.set_mode((display_width, display_height))
pygame.display.set_caption('Bink Legend Of')
clock = pygame.time.Clock()

# NOTE(review): absolute Windows paths — these will fail on other machines.
BinkImg = pygame.image.load('D:/Pictures/link.png')
gameIcon = pygame.image.load('D:/Pictures/link.png')
pygame.display.set_icon(gameIcon)

# Pause flag toggled by the P/Escape keys in game_loop.
pause = False
# crash = True
def main_game():
    """Sketch of the intended game flow (not called anywhere in this file)."""
    # NOTE(review): GameStatus is not defined in this file — calling
    # main_game() as written raises NameError. Presumably it comes from a
    # module that was never imported; confirm before wiring this in.
    game_status = GameStatus()
    # ...
    # when player gets hurt
    game_status.reduce_health()
    # ...
def game_over():
    """Inform the player that the game has ended."""
    message = "game over, sorry"
    print(message)
def things(thingx, thingy, thingw, thingh, color):
    """Draw a solid rectangle (an obstacle/block) on the game display."""
    pygame.draw.rect(gameDisplay, color, [thingx, thingy, thingw, thingh])
def Bink(x, y):
    """Blit the player sprite at window coordinates (x, y)."""
    gameDisplay.blit(BinkImg, (x, y))
def text_objects(text, font):
    """Render *text* in black with *font*; return (surface, bounding rect)."""
    textSurface = font.render(text, True, black)
    return textSurface, textSurface.get_rect()
# NOTE(review): this module-level loop runs at import time, BEFORE
# `button`, `game_loop` and `quitgame` are defined below, so the button(...)
# calls raise NameError and everything after this loop is unreachable as
# written. It looks like a "play again" screen that belongs inside a
# function — kept byte-identical pending a restructure.
while True:
    for event in pygame.event.get():
        # print(event)
        if event.type == pygame.QUIT:
            pygame.quit()
            quit()
    # gameDisplay.fill(white)
    button("Play Again", 150, 450, 100, 50, green, bright_green, game_loop)
    button("Quit", 550, 450, 100, 50, red, bright_red, quitgame)
    pygame.display.update()
    clock.tick(15)
def button(msg, x, y, w, h, ic, ac, action=None):
    """Draw a labelled button and trigger *action* on click.

    msg: button label text.
    x, y, w, h: button rectangle.
    ic, ac: inactive and active (hover) fill colors.
    action: zero-argument callable invoked while hovered and clicked.
    """
    mouse = pygame.mouse.get_pos()
    click = pygame.mouse.get_pressed()
    print(click)  # debug: mouse-button state each frame
    if x + w > mouse[0] > x and y + h > mouse[1] > y:
        pygame.draw.rect(gameDisplay, ac, (x, y, w, h))
        # FIX: `is not None` instead of `!= None` — identity is the idiom.
        if click[0] == 1 and action is not None:
            action()
    else:
        pygame.draw.rect(gameDisplay, ic, (x, y, w, h))
    smallText = pygame.font.SysFont("comicsansms", 20)
    textSurf, textRect = text_objects(msg, smallText)
    textRect.center = ((x + (w / 2)), (y + (h / 2)))
    gameDisplay.blit(textSurf, textRect)
def quitgame():
    """Shut down pygame and exit the interpreter."""
    pygame.quit()
    quit()
def unpause():
    """Leave the pause screen by clearing the module-level pause flag."""
    global pause
    pause = False
def paused():
    """Display the pause screen and block until unpaused or quit."""
    largeText = pygame.font.SysFont("comicsansms", 115)
    TextSurf, TextRect = text_objects("Paused", largeText)
    # BUG FIX: the original also did `text = font.render("Paused")`, which
    # raises TypeError (render requires antialias and color arguments) and
    # whose result was never used; the dead `font`/`text` pair is removed.
    TextRect.center = ((display_width / 2), (display_height / 2))
    gameDisplay.blit(TextSurf, TextRect)
    while pause:
        for event in pygame.event.get():
            # print(event)
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()
        # gameDisplay.fill(white)
        button("Continue Your Misery", 150, 300, 200, 50, green, bright_green, unpause)
        button("End Your Life", 500, 300, 200, 50, red, bright_red, quitgame)
        pygame.display.update()
        clock.tick(15)
def game_intro():
    """Title screen: loops until Start (runs game_loop) or Give Up (quits)."""
    intro = True
    while intro:
        for event in pygame.event.get():
            # print(event)
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()
        gameDisplay.fill(white)
        largeText = pygame.font.SysFont("comicsansms", 110)
        TextSurf, TextRect = text_objects("Bink Legend Of", largeText)
        TextRect.center = ((display_width / 2), (display_height / 2))
        gameDisplay.blit(TextSurf, TextRect)
        button("Start Your Torture!", 150, 450, 200, 50, green, bright_green, game_loop)
        button("Give Up", 500, 450, 200, 50, red, bright_red, quitgame)
        pygame.display.update()
        clock.tick(8)
def game_loop():
    """Main gameplay loop: WASD moves the sprite, P/Escape pauses,
    closing the window quits."""
    global pause
    # Starting position: roughly centered horizontally, near the bottom.
    x = (display_width * 0.45)
    y = (display_height * 0.8)
    # Per-frame velocity set on KEYDOWN, cleared on KEYUP.
    x_change = 0
    ychange = 0
    gameExit = False
    while not gameExit:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_a:
                    x_change = -5
                if event.key == pygame.K_d:
                    x_change = 5
                if event.key == pygame.K_p:
                    pause = True
                    gameDisplay.fill(black)
                    paused()
                if event.key == pygame.K_ESCAPE:
                    pause = True
                    gameDisplay.fill(black)
                    paused()
                if event.key == pygame.K_w:
                    ychange = -5
                if event.key == pygame.K_s:
                    ychange = 5
            if event.type == pygame.KEYUP:
                if event.key == pygame.K_a or event.key == pygame.K_d:
                    x_change = 0
                if event.key == pygame.K_w or event.key == pygame.K_s:
                    ychange = 0
        x += x_change
        y += ychange
        gameDisplay.fill(white)
        Bink(x, y)
        pygame.display.update()
        # NOTE(review): tick(25000) effectively uncaps the frame rate.
        clock.tick(25000)
# Entry point: show the intro menu, then run the game until quit.
game_intro()
game_loop()
pygame.quit()
quit()
|
from django.db import models
from django.contrib.auth.models import User
# Post model
class Post (models.Model):
    content = models.TextField('內文')
    # Author: one-to-many User -> Post (a user can write many posts);
    # PROTECT keeps posts from being deleted with their author.
    creator = models.ForeignKey(User,
                                on_delete=models.PROTECT,
                                verbose_name='建立者',
                                related_name='posts')
    # Likes: many-to-many User <-> Post (creates a join "like" table).
    likes = models.ManyToManyField(User,
                                   related_name='liked_posts',
                                   blank= True)  # a post may have no likes
    create_at = models.DateTimeField('建立時間', auto_now_add=True)
    update_at = models.DateTimeField('更新時間', auto_now=True)

    def __str__(self):
        return '{}.Post create by {}'.format(
            self.id,
            self.creator.username,)
# Comment model
class Commit(models.Model):
    # Parent post; CASCADE so comments are deleted along with their post.
    post = models.ForeignKey(Post,
                             on_delete=models.CASCADE,
                             verbose_name='文章',
                             related_name='commits')
    content = models.TextField('內文')
    creator = models.ForeignKey(User,
                                on_delete=models.PROTECT,
                                verbose_name='建立者',
                                related_name='commits')
    likes = models.ManyToManyField(User,
                                   related_name='liked_commits',
                                   blank= True)  # a comment may have no likes
    create_at = models.DateTimeField('建立時間', auto_now_add=True)
    update_at = models.DateTimeField('更新時間', auto_now=True)

    def __str__(self):
        return 'Post create by {}'.format(self.creator.username)
|
import numpy as np
import pandas as pd
import torch as torch
class Preprocess:
    """Fixed-length sequence preparation: truncate or zero-pad feature
    matrices to exactly max_sequence_length rows."""

    def __init__(self, max_sequence_length, truncation_side):
        # truncation_side: 'head' keeps the LAST rows (truncates the head),
        # 'tail' keeps the FIRST rows (truncates the tail).
        self.__max_sequence_length = max_sequence_length
        self.__truncation_side = truncation_side

    def truncate(self, sequence: np.array) -> np.array:
        """Drop rows so that at most max_sequence_length remain."""
        limit = self.__max_sequence_length
        if self.__truncation_side == 'head':
            return sequence[(sequence.shape[0] - limit):, :]
        if self.__truncation_side == 'tail':
            return sequence[:limit, :]
        # Matches the original dict lookup's failure mode.
        raise KeyError(self.__truncation_side)

    def pad_sequence(self, sequence: np.array) -> np.array:
        """Prepend zero rows until the sequence has max_sequence_length rows."""
        deficit = self.__max_sequence_length - len(sequence)
        zeros = np.zeros((deficit, sequence.shape[1]))
        return np.append(zeros, sequence, axis=0)

    def transform(self, sequence: pd.DataFrame) -> np.array:
        """DataFrame -> torch tensor with exactly max_sequence_length rows."""
        values = sequence.values
        values = self.truncate(values)
        if len(values) < self.__max_sequence_length:
            values = self.pad_sequence(values)
        return torch.from_numpy(values)
|
# Build a point cloud from the aligned RealSense depth frame and convert it
# into a PCL cloud for the filtering/segmentation pipeline below.
pc=rs.pointcloud()
pc.map_to(aligned_depth_frame)
points = pc.calculate(aligned_depth_frame)
pcl_points=pcl.PointCloud()
point_to_pcl(pcl_points,points)
# Voxel-grid downsampling.
# NOTE(review): `plc_msg` is not defined in this chunk — presumably a typo for
# the cloud to downsample (e.g. `pcl_points`); confirm upstream.
vox = plc_msg.make_voxel_grid_filter()
LEAF_SIZE = 0.01
# Set the voxel (or leaf) size
vox.set_leaf_size(LEAF_SIZE, LEAF_SIZE, LEAF_SIZE)
downsampled = vox.filter()
# PassThrough Filter
# NOTE(review): `outliers_removed` is not defined in this chunk — presumably
# produced by do_statistical_outlier_filtering() earlier in the file; verify.
passthrough = outliers_removed.make_passthrough_filter()
# Assign axis and range to the passthrough filter object: keep 0.6 m - 1.1 m in z.
filter_axis = 'z'
passthrough.set_filter_field_name(filter_axis)
axis_min = 0.6
axis_max = 1.1
passthrough.set_filter_limits(axis_min, axis_max)
passed = passthrough.filter()
# Limiting on the Y axis too to avoid having the bins recognized as snacks
passthrough = passed.make_passthrough_filter()
# Assign axis and range to the passthrough filter object.
filter_axis = 'y'
passthrough.set_filter_field_name(filter_axis)
axis_min = -0.45
axis_max = +0.45
passthrough.set_filter_limits(axis_min, axis_max)
passed = passthrough.filter()
# RANSAC Plane Segmentation
seg = passed.make_segmenter()
seg.set_model_type(pcl.SACMODEL_PLANE)
seg.set_method_type(pcl.SAC_RANSAC)
# Inlier distance threshold tied to the voxel leaf size so the plane
# tolerance matches the downsampling resolution.
max_distance = LEAF_SIZE
seg.set_distance_threshold(max_distance)
inliers, coefficients = seg.segment()
# Extract inliers and outliers
# NOTE(review): `cloud_filtered` is not defined in this chunk — the extraction
# presumably should run on `passed`; confirm.
# Extract inliers - tabletop
cloud_table = cloud_filtered.extract(inliers, negative=False)
# Extract outliers - objects
cloud_objects = cloud_filtered.extract(inliers, negative=True)
def do_statistical_outlier_filtering(pcl_data, mean_k, tresh):
    '''
    Remove statistical outliers from a point cloud.

    :param pcl_data: point could data subscriber
    :param mean_k: number of neighboring points to analyze for any given point (10)
    :param tresh: Any point with a mean distance larger than global will be considered outlier (0.001)
    :return: Statistical outlier filtered point cloud data
    '''
    stat_filter = pcl_data.make_statistical_outlier_filter()
    stat_filter.set_mean_k(mean_k)
    stat_filter.set_std_dev_mul_thresh(tresh)
    return stat_filter.filter()
# Euclidean Clustering of the non-plane (object) points.
white_cloud = XYZRGB_to_XYZ(cloud_objects)
tree = white_cloud.make_kdtree()
# Create a cluster extraction object
ec = white_cloud.make_EuclideanClusterExtraction()
# Set tolerances for distance threshold
# as well as minimum and maximum cluster size (in points)
ec.set_ClusterTolerance(LEAF_SIZE*2)  # tolerance tied to the voxel resolution
ec.set_MinClusterSize(10)
ec.set_MaxClusterSize(2500)
# Search the k-d tree for clusters
ec.set_SearchMethod(tree)
# Extract indices for each of the discovered clusters
cluster_indices = ec.Extract()
|
#julia_set.py
"""
-----------------------------------------------------------------------------------------
Generates a visualization of the Julia set by printing cubes in space in Maya
-----------------------------------------------------------------------------------------
One function named run()
Parameters:
max_Iteration - maximum number of cubes to print
size - the size of our printing canvas
c - constant complex variable of the formula z = z*z + c, which affects the result
Script modified by Vlasis Gogousis [vgogousis@gmail.com]
MA3D o 2017
Original script source:
Burke, T., 2013. Batchloaf [online].
Available from: https://batchloaf.wordpress.com/2013/02/10/creating-julia-set-images-in-python/
[Accessed 13 February 2017]
"""
#******** IMPORT MODULES ********#
import maya.cmds as cmds
import numpy
#******** RUN JULIA SET VISUALIZATION ********#
def run(max_iteration, size, c):
    """
    Generates a visualization of the Julia set by printing cubes in space in Maya
    Parameters:
    max_Iteration - maximum number of cubes to print
    size - the size of our printing canvas
    c - constant complex variable of the formula z = z*z + c, which affects the result
    """
    # Initialize scene: discard any open scene, view from the top, hide the grid
    cmds.file(new = True, force = True)
    cmds.lookThru( 'top' )
    cmds.grid(toggle=False)
    # Setup window for progress bar (one step per grid cell -> size**2)
    window = cmds.window()
    cmds.columnLayout()
    progressControl = cmds.progressBar(maxValue=size**2, width=300)
    cmds.showWindow(window)
    # Create shades of grey to paint cubes with based on depth:
    # shader i gets intensity i/max_iteration, so later escape iterations are brighter
    for i in range(max_iteration+1):
        shader=cmds.shadingNode("blinn",asShader=True, name = "shader" + str(i))
        attr = shader + ".color"
        cmds.setAttr (attr, i/float(max_iteration), i/float(max_iteration), i/float(max_iteration))
    # Specify real and imaginary range of image
    re_min, re_max = -2.0, 2.0
    im_min, im_max = -2.0, 2.0
    # Cube scale so the size x size grid exactly tiles the sampled area
    scX = (abs(re_min) + abs(re_max))/size
    scZ = (abs(im_min) + abs(im_max))/size
    # Generate evenly spaced values over real and imaginary ranges
    real_range = numpy.arange(re_min, re_max, (re_max - re_min) / size)
    imag_range = numpy.arange(im_max, im_min, (im_min - im_max) / size)
    # Run through the grid of our canvas size (size X size)
    for im in imag_range:
        for re in real_range:
            # Initialize z (according to complex plane) and number of iterations
            z = complex(re, im)
            iteration = 0
            # While z is within our space boundaries and we have not exceeded our maximum iteration:
            while abs(z) < 10 and iteration < max_iteration:
                z = z*z + c
                iteration +=1
            # Draw appropriate cube in space, colored by its escape iteration
            cmds.polyCube(n="cube"+str(im)+str(re))
            cmds.move(im,0,re)
            cmds.scale(scX,0.1,scZ)
            cmds.hyperShade(assign="shader"+str(iteration))
            # Update progress bar and viewport
            cmds.progressBar(progressControl, edit=True, step=1)
            cmds.viewFit( 'top', all=True )
            cmds.dolly( 'top', os=1.5 )
            cmds.refresh()
    # Update progress bar and viewport one final time
    cmds.progressBar(progressControl, edit=True, step=1)
    cmds.refresh()
cmds.toggleWindowVisibility(window) |
# -*- coding: utf-8 -*-
"""Main Controller"""
from tg import expose, tmpl_context
from outages.lib.base import BaseController
from outages.decorators import with_moksha_socket
import moksha.utils
__all__ = ['RootController']
from moksha.api.widgets.live import LiveWidget
from tw2.polymaps import PolyMap
from tw2.dyntext import DynamicTextWidget
class DynamicTextLiveWidget(LiveWidget, DynamicTextWidget):
    """Live-updating text widget: each incoming message sets the element's
    text from the JSON payload's `text` field."""
    onmessage = "setDynamicText('${id}', json.text)"
    resources = LiveWidget.resources + DynamicTextWidget.resources
class OutageMapWidget(LiveWidget, PolyMap):
    """Live Polymaps widget that renders GeoJSON outage messages as they arrive."""
    # moksha topic this widget subscribes to
    topic="map_geojson"
    onmessage = "addGeoJsonToPolymap('${id}', json, null)"
    # initial viewport (lat/lon roughly over central New York State)
    center_latlon = {'lat': 43.105556, 'lon' : -76.611389}
    zoom = 6
    interact = True
    hash = True
    # NOTE(review): third-party API key hardcoded in source — move to configuration.
    cloudmade_api_key = "1a1b06b230af4efdbb989ea99e9841af"
    # To style the map tiles
    cloudmade_tileset = 'midnight-commander'
    resources = LiveWidget.resources + PolyMap.resources
class RootController(BaseController):
    """
    The root controller for the outages application.
    All the other controllers and WSGI applications should be mounted on this
    controller. For example::
        panel = ControlPanelController()
        another_app = AnotherWSGIApplication()
    Keep in mind that WSGI applications shouldn't be mounted directly: They
    must be wrapped around with :class:`tg.controllers.WSGIAppController`.
    """
    @expose('outages.templates.index')
    @with_moksha_socket
    def index(self):
        """Handle the front-page: the live outage map plus two live text
        counters fed by the stat_outages / stat_affected topics."""
        # NOTE(review): outage_map is passed as the widget class while the two
        # counters are instances — confirm the template expects a class here.
        return dict(outage_map = OutageMapWidget,
                    outage_count=DynamicTextLiveWidget(topic="stat_outages", id="outage_count"),
                    affected_count=DynamicTextLiveWidget(topic="stat_affected", id="affected_count"),
                    )
|
'''
Created on Jul 11, 2013
@author: christian
'''
import os
import mne
import numpy as np
from mne.io import Raw
from eelbrain import datasets, plot, testnd
from eelbrain.plot._base import Figure
from eelbrain.plot._utsnd import _ax_bfly_epoch
def test_plot_butterfly():
    "Test plot.Butterfly"
    ds = datasets.get_uts(utsnd=True)
    p = plot.Butterfly('utsnd', ds=ds, show=False)
    p.close()
    p = plot.Butterfly('utsnd', 'A%B', ds=ds, show=False)
    p.close()
    # other y-dim
    stc = datasets.get_mne_stc(True)
    # Fix: pass show=False like every other plot call in this module so the
    # test does not open (and block on) a GUI window.
    p = plot.Butterfly(stc, show=False)
    p.close()
    # _ax_bfly_epoch: draw one epoch directly onto a bare Figure axis,
    # then swap in a second epoch and redraw
    fig = Figure(1, show=False)
    ax = _ax_bfly_epoch(fig._axes[0], ds[0, 'utsnd'])
    fig.show()
    ax.set_data(ds[1, 'utsnd'])
    fig.draw()
def test_plot_array():
    "Test plot.Array"
    dataset = datasets.get_uts(utsnd=True)
    # with a condition model, then without
    fig = plot.Array('utsnd', 'A%B', ds=dataset, show=False)
    fig.close()
    fig = plot.Array('utsnd', ds=dataset, show=False)
    fig.close()
def test_plot_mne_evoked():
    "Test plotting evoked from the mne sample dataset"
    sample_evoked = datasets.get_mne_evoked()
    fig = plot.Array(sample_evoked, show=False)
    fig.close()
def test_plot_mne_epochs():
    "Test plotting epochs from the mne sample dataset"
    # find paths
    data_path = mne.datasets.sample.data_path()
    raw_path = os.path.join(data_path, 'MEG', 'sample',
                            'sample_audvis_filt-0-40_raw.fif')
    events_path = os.path.join(data_path, 'MEG', 'sample',
                               'sample_audvis_filt-0-40_raw-eve.fif')
    # read epochs, keeping only events with trigger codes 5 or 32
    raw = Raw(raw_path)
    events = mne.read_events(events_path)
    idx = np.logical_or(events[:, 2] == 5, events[:, 2] == 32)
    events = events[idx]
    # epoch from 100 ms before to 300 ms after each event
    epochs = mne.Epochs(raw, events, None, -0.1, 0.3)
    # grand average
    p = plot.Array(epochs, show=False)
    p.close()
    # with model (the trigger codes as the condition variable)
    p = plot.Array(epochs, events[:, 2], show=False)
    p.close()
def test_plot_results():
    "Test plotting test results"
    ds = datasets.get_uts(True)
    # ANOVA: without permutation samples, then with samples=2 and pmin
    res = testnd.anova('utsnd', 'A*B*rm', ds=ds, samples=0, pmin=0.05)
    p = plot.Array(res, show=False)
    p.close()
    res = testnd.anova('utsnd', 'A*B*rm', ds=ds, samples=2, pmin=0.05)
    p = plot.Array(res, show=False)
    p.close()
    # Correlation: plain, then with samples=10 and pmin
    res = testnd.corr('utsnd', 'Y', 'rm', ds=ds)
    p = plot.Array(res, show=False)
    p.close()
    res = testnd.corr('utsnd', 'Y', 'rm', ds=ds, samples=10, pmin=0.05)
    p = plot.Array(res, show=False)
    p.close()
|
import numpy as np
import matplotlib.pyplot as plt
import csv
import statistics
# Per-column sample lists for the ten speed measurements in speeddata.txt
# (whitespace-delimited, ten integer columns per row).
index_collumn = []
speed1_collumn = []
speed2_collumn = []
speed3_collumn = []
speed4_collumn = []
speed5_collumn = []
speed6_collumn = []
speed7_collumn = []
speed8_collumn = []
speed9_collumn = []
speed10_collumn = []
meanspeed_collumn = []
with open('speeddata.txt') as csvfile:
    csvfilereader = csv.reader(csvfile, delimiter=' ')
    for line in csvfilereader:
        # index_collumn.append(int(line[0]))
        speed1_collumn.append(int(line[0]))
        speed2_collumn.append(int(line[1]))
        speed3_collumn.append(int(line[2]))
        speed4_collumn.append(int(line[3]))
        speed5_collumn.append(int(line[4]))
        speed6_collumn.append(int(line[5]))
        speed7_collumn.append(int(line[6]))
        speed8_collumn.append(int(line[7]))
        speed9_collumn.append(int(line[8]))
        speed10_collumn.append(int(line[9]))
        # Median of all ten speed columns for this row.
        # Bug fix: the original append(...) call was missing its closing
        # parenthesis, which made the whole script a SyntaxError.
        meanspeed_collumn.append(statistics.median(
            [int(line[1]), int(line[2]), int(line[3]), int(line[4]),
             int(line[5]), int(line[6]), int(line[7]), int(line[8]),
             int(line[9]), int(line[0])]))
# print(size_collumn[:2])
index_collumn = range(len(speed1_collumn))
# Plot the first `figure_length` medians against the fitted linear model.
figure_length = 300
plt.plot(index_collumn[:figure_length], meanspeed_collumn[:figure_length], label="observed size")
# print(np.arange(int(min(index_collumn[:figure_length])), int(max(index_collumn[:figure_length]))+1, 20.0))
plt.xticks(np.arange(int(min(index_collumn[:figure_length])), int(max(index_collumn[:figure_length]))+1, 50.0))
plt.xlabel('Blocks Used')
plt.ylabel('Database Size')
plt.plot(index_collumn[:figure_length], (np.array(index_collumn[:figure_length])*323)+20480, label="(blocks*323)+20480")
plt.title("Database Size Scaling")
plt.legend()
plt.show(block=True)
|
import pandas as pd
from sklearn import neighbors, datasets
from numpy.random import permutation
import matplotlib.pyplot as plt
import numpy as np
import string
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import precision_recall_fscore_support
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from similarity.cosine import Cosine
import random
from flask import Flask
from flask_cors import CORS,cross_origin
from flask import request,make_response,jsonify
import itertools
# Flask application instance; CORS(app) allows cross-origin requests site-wide.
app = Flask(__name__)
CORS(app)
def build_preflight_response():
    """Build an empty CORS preflight response permitting any origin,
    header, and method."""
    response = make_response()
    for header_name, header_value in (
        ("Access-Control-Allow-Origin", "*"),
        ("Access-Control-Allow-Headers", "*"),
        ("Access-Control-Allow-Methods", "*"),
    ):
        response.headers.add(header_name, header_value)
    return response
def build_actual_response(response):
    """Attach the permissive CORS origin header to *response* and return it."""
    response.headers.add("Access-Control-Allow-Origin", "*")
    return response
def course_difficulty(level, df):
    """Return a copy of *df* restricted to rows whose 'Difficulty Level'
    equals *level*, with a fresh 0..n-1 'index' column appended."""
    subset = df.copy().loc[df['Difficulty Level'] == level]
    subset['index'] = np.arange(0, subset.shape[0])
    return subset
def remove_punctuations(text):
    """Return *text* coerced to str with every ASCII punctuation character removed."""
    # str.translate performs the whole removal in a single C-level pass.
    return str(text).translate(str.maketrans('', '', string.punctuation))
def recommender_system(skill,data):
    """Score each row of *data* against *skill* with the `similarity` package's
    Cosine measure.

    Mutates *data* in place: adds a 'cosine_sim' column comparing the given
    skill string with each row's 'new_column' text, and returns the same frame.
    # NOTE(review): Cosine(2) presumably builds 2-shingle profiles — confirm
    # against the `similarity` package documentation.
    """
    cosine = Cosine(2)
    data["skill"] = skill
    data["p0"] = data["skill"].apply(lambda s: cosine.get_profile(s))
    data["p1"] = data["new_column"].apply(lambda s: cosine.get_profile(s))
    data["cosine_sim"] = [cosine.similarity_profiles(p0,p1) for p0,p1 in zip(data["p0"],data["p1"])]
    # drop the intermediate profiles, keeping only the similarity score
    data.drop(["p0", "p1"], axis=1,inplace = True)
    return data
def medium_recommender_system(skill):
    """Score the module-level `medium` DataFrame against *skill* by cosine
    similarity of its 'Skills' column; adds a 'cosine_sim' column and returns
    the mutated frame.

    # NOTE(review): relies on a global `medium` DataFrame that is not defined
    # in this chunk — confirm it is loaded elsewhere in the module.
    """
    cosine = Cosine(2)
    medium["skill_provided"] = skill
    medium["p0"] = medium["skill_provided"].apply(lambda s: cosine.get_profile(s))
    medium["p1"] = medium["Skills"].apply(lambda s: cosine.get_profile(s))
    medium["cosine_sim"] = [cosine.similarity_profiles(p0,p1) for p0,p1 in zip(medium["p0"],medium["p1"])]
    medium.drop(["p0", "p1"], axis=1,inplace = True)
    return medium
@app.route('/', methods=['GET'])
def hello():
    """Root endpoint: simple liveness/identification response."""
    payload = {"response": "This is Sentiment Application"}
    return jsonify(payload)
@app.route('/predict',methods=['POST'])
@cross_origin()
def predict():
    """Recommend the top-5 courses matching the posted skill and level.

    Expects JSON {"skill": ..., "level": ...}; responds with a flat JSON list:
    five course names, then their URLs, then levels, then partner names.
    """
    if request.method == 'POST':
        user = request.get_json(force=True)
        skill = user['skill']
        level = user['level']
        # NOTE(review): the dataset URL embeds a GitHub access token — such
        # tokens expire and should not be committed; move to config/env.
        df = pd.read_csv("https://raw.githubusercontent.com/Ayush-Batra/Hackathon_/main/combine.csv?token=AR6BG7TH4CVTNV4UHXJEOZDBGJZUA",encoding='cp1252')
        data = course_difficulty(level,df)
        data['final'] = data['all_skill'] + data['Course Description'] + data['Course Name']
        data['new_column'] = data['final'].apply(remove_punctuations)
        # recommender_system mutates `data` in place (adds 'cosine_sim');
        # its return value is deliberately overwritten below
        final = recommender_system(skill,data)
        data = data.sort_values('cosine_sim', ascending = False)
        courses = list(data[0:5]['Course Name'])
        courses_links = list(data[0:5]['Course URL'])
        courses_levels = list(data[0:5]['Difficulty Level'])
        courses_orgs = list(data[0:5]['University / Industry Partner Name'])
        courses_desc = list(data[0:5]['Course Description'])  # NOTE(review): unused
        final = []
        one = list(itertools.chain(courses, courses_links))
        two = list(itertools.chain(courses_levels, courses_orgs))
        final = list(itertools.chain(one, two))
        return (jsonify(final))
# Run the Flask development server when executed directly.
if __name__ == "__main__":
    app.run(debug=True)
|
import math
def checkColFill(col, lst, centerRow=1):
    """Return True when the three vertically adjacent cells of column *col*
    (rows centerRow-1 .. centerRow+1) are all filled with 1."""
    return all(lst[row][col] == 1
               for row in (centerRow - 1, centerRow, centerRow + 1))
def noOfCols(x):
    """Number of board columns for a required area *x*: ceil(x/3) (three rows),
    but never fewer than 3 columns.

    Replaces the original assigned lambda (PEP 8 recommends def) and collapses
    the duplicated ceil() computation into a single max().
    """
    return max(math.ceil(x / 3), 3)
def do(lst):
    """Interactively fill the 3-row board column by column.

    Protocol: print "2 <col+1>" as the proposed cell, then read back
    "rowFilled colFilled" from stdin; a "0 0" reply ends the current loop.
    Every reported cell is recorded as 1 in *lst*.
    """
    for col in range(1, len(lst[0])-1):
        if col != len(lst[0])-2:
            # keep proposing until the previous column's three cells are filled
            while checkColFill(col-1, lst) == False:
                print('{} {}'.format(2, col+1), flush=True)
                rowFilled, colFilled = list(map(int, input().split(' ')))
                if rowFilled == 0 and colFilled == 0:
                    break
                else:
                    lst[rowFilled-1][colFilled-1] = 1
        else:
            # last playable column: loop until the judge reports "0 0"
            a, b = None, None
            while True:
                print('{} {}'.format(2, col+1), flush=True)
                a, b = list(map(int, input().split(' ')))
                if a == 0 and b == 0:
                    break
                else:
                    # Bug fix: this was the bare expression `lst[a-1][b-1]`
                    # (a no-op); it must record the filled cell exactly like
                    # the branch above.
                    lst[a-1][b-1] = 1
# One round per test case: read the case count, then for each case read the
# minimum area, size a 3-row board accordingly, and drive the interaction.
for i in range(0, int(input())):
    minArea = int(input())
    columns = noOfCols(minArea)
    lst = [[0]*columns]+[[0]*columns]+[[0]*columns]
    do(lst)
from .rank_and_suit_validator import RankAndSuitValidator
from .royal_straight_flush_validator import RoyalStraightFlushValidator
from .straight_flush_validator import StraightFlushValidator
from .four_of_a_kind_validator import FourOfAKindValidator
from .full_house_validator import FullHouseValidator
from .flush_validator import FlushValidator
from .straight_validator import StraightValidator
from .three_of_a_kind_validator import ThreeOfAKindValidator
from .two_pair_validator import TwoPairValidator
from .pair_validator import PairValidator
from .high_card_validator import HighCardValidator
from .no_cards_validator import NoCardsValidator |
from rest_framework import serializers
from contas.models import Contas, Deposito
class ContaSerializer(serializers.ModelSerializer):
    """Serializer for accounts: agency, account number, balance, and the
    model's last-movement accessor."""
    class Meta:
        model = Contas
        fields = ['agencia', 'conta','saldo', 'get_ultima_movimentacao']
class DepositoSerializer(serializers.ModelSerializer):
    """Serializer for deposits: id, target account, amount, plus the model's
    deposit-date and updated-balance accessors."""
    class Meta:
        model = Deposito
        fields = ['id', 'conta', 'valor', 'get_data_deposito', 'get_saldo_atualizado']
|
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 23 14:18:11 2019
@author: Ananthan
"""
import numpy as np
import math
import pandas as pd
import process as sp
# import re
import os
def t_n_d(file):
    """
    gets title and description from a file

    Splits the file's full text at the first comma: everything before it is
    the title; everything from the comma onward (comma included, preserving
    the original behavior) is the description.
    """
    contents = file.read()
    # not all files in cleaned output had a ," combination, so just look for the comma
    cut = contents.index(',')
    return contents[:cut], contents[cut:]
def make_dict_of_words(path, errorfile):
    """
    Build a word -> frequency dict over the descriptions of every file in *path*.

    :param path: directory of cleaned "title,description" files
    :param errorfile: optional open file; names of un-tokenizable files are logged to it
    :return: dict of words and freq in corpus
    """
    word_dict={}
    for filename in os.listdir(path):
        # Fix: context-managed open — the original leaked one file handle per file.
        with open(path+"/"+filename,'r', encoding="utf8") as file:
            title,des=t_n_d(file)
        try:
            des_ls = sp.tokenize_str(des)
        except Exception:
            # files the tokenizer cannot handle are thrown out entirely
            des_ls = []
            if errorfile is not None:
                errorfile.write(filename + " could not be added to the dictionary\n")
        for word in des_ls:
            if word not in word_dict:
                word_dict[word]=1
            else:
                word_dict[word]+=1
    print('made freqvec dict')
    return word_dict
def make_seq_freq_vec(seq_ls, words, words_to_index):
    """
    Build a log-scaled term-frequency vector for one token sequence.

    :param seq_ls: list of tokens (each must be a key of words_to_index)
    :param words: vocabulary list; fixes the vector length
    :param words_to_index: word -> position lookup for `words`
    :return: numpy vector with 1 + log(count) at each word's index (0 if absent)
    """
    vec=np.zeros(len(words))
    for word in seq_ls:
        vec[words_to_index[word]] += 1
    # Bug fix: the transform loop previously iterated range(len(seq_ls)),
    # which scaled the wrong vector entries and could index past the end of
    # vec when a sequence had more tokens than the vocabulary has words.
    # It must visit every vocabulary slot.
    for i in range(len(vec)):
        if vec[i] > 0:
            vec[i] = 1 + math.log(vec[i])
    return vec
def make_vec_df(path,w_dict, prefix, errorfile):
    """
    Build a DataFrame of log-scaled term-frequency vectors, one column per firm.

    :param path: directory of cleaned "title,description" files
    :param w_dict: corpus word->frequency dict; its keys fix the vocabulary
    :param prefix: output directory; the vocabulary is written to
        prefix + '/freqvec_dict.csv'
    :param errorfile: optional open log file (unused in this step)
    :return: DataFrame with one row per vocabulary word, one column per firm title
    """
    data=[]
    firms=[]
    words=list(w_dict.keys())
    # Fix: build the lookup with enumerate — the original used words.index()
    # per word, which is O(n^2) over the vocabulary.
    words_to_index = {word: i for i, word in enumerate(words)}
    word_dict_df=pd.DataFrame(words)
    word_dict_df.to_csv(prefix + '/freqvec_dict.csv')
    i=1
    for filename in sorted(os.listdir(path)):
        # Fix: context-managed open — the original never closed its handles.
        with open(path+"/"+filename,'r', encoding="utf8") as file:
            title,des=t_n_d(file)
        try:
            des_ls = sp.tokenize_str(des)
            vec = make_seq_freq_vec(des_ls,words, words_to_index)
            data.append(vec)
            firms.append(title)
        except Exception:
            # files that fail tokenization/vectorization are skipped
            pass
        i+=1
    data = np.array(data).T.tolist()
    df = pd.DataFrame(data, columns=firms)
    return df
def run_freq_vec(prefix="", errorfile=None):
    """Build the corpus dictionary and the frequency-vector CSV for the
    'test_data' directory; return the path of the written vector file."""
    vocabulary = make_dict_of_words('test_data', errorfile)
    vectors = make_vec_df('test_data', vocabulary, prefix, errorfile)
    vectors.to_csv(prefix + '/freqvec_vectors.csv')
    print('made freqvec vectors')
    return prefix + '/freqvec_vectors.csv'
"""
http://2018.igem.org/wiki/images/0/09/2018_InterLab_Plate_Reader_Protocol.pdf
"""
import json
from urllib.parse import quote
import sbol3
from tyto import OM
import labop
import uml
from labop.execution_engine import ExecutionEngine
from labop_convert.markdown.markdown_specialization import MarkdownSpecialization
# SBOL document that will hold the protocol and all of its materials.
doc = sbol3.Document()
sbol3.set_namespace("http://igem.org/engineering/")
#############################################
# Import the primitive libraries
print("Importing libraries")
labop.import_library("liquid_handling")
print("... Imported liquid handling")
labop.import_library("plate_handling")
# print('... Imported plate handling')
labop.import_library("spectrophotometry")
print("... Imported spectrophotometry")
labop.import_library("sample_arrays")
print("... Imported sample arrays")
labop.import_library("culturing")
#############################################
# create the materials to be provisioned
# NOTE(review): the strain, the medium, and the antibiotic below all reuse the
# same pubchem.substance:24901740 identifier — presumably placeholders; verify.
dh5alpha = sbol3.Component(
    "dh5alpha", "https://identifiers.org/pubchem.substance:24901740"
)
dh5alpha.name = "_E. coli_ DH5 alpha"
doc.add(dh5alpha)
lb_cam = sbol3.Component("lb_cam", "https://identifiers.org/pubchem.substance:24901740")
lb_cam.name = "LB Broth+chloramphenicol"
doc.add(lb_cam)
chloramphenicol = sbol3.Component(
    "chloramphenicol", "https://identifiers.org/pubchem.substance:24901740"
)
chloramphenicol.name = "chloramphenicol"
doc.add(chloramphenicol)
# Control plasmids and the six test devices, all plain DNA components;
# descriptions give the kit plate/well locations.
neg_control_plasmid = sbol3.Component("neg_control_plasmid", sbol3.SBO_DNA)
neg_control_plasmid.name = "Negative control"
neg_control_plasmid.description = "BBa_R0040 Kit Plate 7 Well 2D"
pos_control_plasmid = sbol3.Component("pos_control_plasmid", sbol3.SBO_DNA)
pos_control_plasmid.name = "Positive control"
pos_control_plasmid.description = "BBa_I20270 Kit Plate 7 Well 2B"
test_device1 = sbol3.Component("test_device1", sbol3.SBO_DNA)
test_device1.name = "Test Device 1"
test_device1.description = "BBa_J364000 Kit Plate 7 Well 2F"
test_device2 = sbol3.Component("test_device2", sbol3.SBO_DNA)
test_device2.name = "Test Device 2"
test_device2.description = "BBa_J364001 Kit Plate 7 Well 2H"
test_device3 = sbol3.Component("test_device3", sbol3.SBO_DNA)
test_device3.name = "Test Device 3"
test_device3.description = "BBa_J364002 Kit Plate 7 Well 2J"
test_device4 = sbol3.Component("test_device4", sbol3.SBO_DNA)
test_device4.name = "Test Device 4"
test_device4.description = "BBa_J364007 Kit Plate 7 Well 2L"
test_device5 = sbol3.Component("test_device5", sbol3.SBO_DNA)
test_device5.name = "Test Device 5"
test_device5.description = "BBa_J364008 Kit Plate 7 Well 2N"
test_device6 = sbol3.Component("test_device6", sbol3.SBO_DNA)
test_device6.name = "Test Device 6"
test_device6.description = "BBa_J364009 Kit Plate 7 Well 2P"
doc.add(neg_control_plasmid)
doc.add(pos_control_plasmid)
doc.add(test_device1)
doc.add(test_device2)
doc.add(test_device3)
doc.add(test_device4)
doc.add(test_device5)
doc.add(test_device6)
# The protocol object itself; all primitive steps below attach to it in order.
protocol = labop.Protocol("interlab")
protocol.name = "Cell measurement protocol"
protocol.description = """Prior to performing the cell measurements you should perform all three of the calibration measurements. Please do not proceed unless you have completed the three calibration protocols. Completion of the calibrations will ensure that you understand the measurement process and that you can take the cell measurements under the same conditions. For the sake of consistency and reproducibility, we are requiring all teams to use E. coli K-12 DH5-alpha. If you do not have access to this strain, you can request streaks of the transformed devices from another team near you, and this can count as a collaboration as long as it is appropriately documented on both teams' wikis. If you are absolutely unable to obtain the DH5-alpha strain, you may still participate in the InterLab study by contacting the Measurement Committee (measurement at igem dot org) to discuss your situation.
For all of these cell measurements, you must use the same plates and volumes that you used in your calibration protocol. You must also use the same settings (e.g., filters or excitation and emission wavelengths) that you used in your calibration measurements. If you do not use the same plates, volumes, and settings, the measurements will not be valid."""
doc.add(protocol)
# All eight DNA constructs transformed and measured by this protocol.
plasmids = [
    neg_control_plasmid,
    pos_control_plasmid,
    test_device1,
    test_device2,
    test_device3,
    test_device4,
    test_device5,
    test_device6,
]
# Day 1: Transformation
transformation = protocol.primitive_step(
    f"Transform", host=dh5alpha, dna=plasmids, selection_medium=lb_cam
)
# Day 2: Pick colonies and culture overnight — one culture tube per plasmid.
culture_container_day1 = protocol.primitive_step(
    "ContainerSet",
    quantity=uml.LiteralInteger(value=len(plasmids)),
    specification=labop.ContainerSpec(
        name=f"culture (day 1)",
        queryString="cont:CultureTube",
        prefixMap={"cont": "https://sift.net/container-ontology/container-ontology#"},
    ),
)
overnight_culture = protocol.primitive_step(
    "Culture",
    inoculum=transformation.output_pin("transformants"),
    growth_medium=lb_cam,
    volume=sbol3.Measure(5, OM.millilitre),  # Actually 5-10 ml in the written protocol
    duration=sbol3.Measure(16, OM.hour),  # Actually 16-18 hours
    orbital_shake_speed=sbol3.Measure(220, None),  # No unit for RPM or inverse minutes
    temperature=sbol3.Measure(37, OM.degree_Celsius),
    container=culture_container_day1.output_pin("samples"),
)
# Day 3 culture: back-dilute, take a baseline OD600 reading, then dilute
# down to the target optical density.
culture_container_day2 = protocol.primitive_step(
    "ContainerSet",
    quantity=uml.LiteralInteger(value=len(plasmids)),
    specification=labop.ContainerSpec(
        name=f"culture (day 2)",
        queryString="cont:CultureTube",
        prefixMap={"cont": "https://sift.net/container-ontology/container-ontology#"},
    ),
)
# 1:10 back-dilution of each overnight culture into fresh medium.
back_dilution = protocol.primitive_step(
    "Dilute",
    source=culture_container_day1.output_pin("samples"),
    destination=culture_container_day2.output_pin("samples"),
    diluent=lb_cam,
    amount=sbol3.Measure(5.0, OM.millilitre),
    dilution_factor=uml.LiteralInteger(value=10),
)
baseline_absorbance = protocol.primitive_step(
    "MeasureAbsorbance",
    samples=culture_container_day2.output_pin("samples"),
    wavelength=sbol3.Measure(600, OM.nanometer),
)
conical_tube = protocol.primitive_step(
    "ContainerSet",
    quantity=uml.LiteralInteger(value=len(plasmids)),
    specification=labop.ContainerSpec(
        name=f"culture (day 2), backdiluted",
        queryString="cont:ConicalTube",
        prefixMap={"cont": "https://sift.net/container-ontology/container-ontology#"},
    ),
)  # Should be opaque
dilution = protocol.primitive_step(
    "DiluteToTargetOD",
    source=culture_container_day2.output_pin("samples"),
    destination=conical_tube.output_pin("samples"),
    diluent=lb_cam,
    amount=sbol3.Measure(12, OM.millilitre),
    target_od=sbol3.Measure(0.2, None),
)  # Dilute to a target OD of 0.2, opaque container
# 0-hour timepoint: sample each culture into a microfuge tube and hold at 4 C.
# (The `transfer`/`hold` names are reused for the 6-hour timepoint below;
# each primitive_step call is registered with the protocol when created.)
microfuge_tube_0hrs = protocol.primitive_step(
    "ContainerSet",
    quantity=uml.LiteralInteger(value=len(plasmids)),
    specification=labop.ContainerSpec(
        name="absorbance timepoint (0 hrs)",
        queryString="cont:MicrofugeTube",
        prefixMap={"cont": "https://sift.net/container-ontology/container-ontology#"},
    ),
)
transfer = protocol.primitive_step(
    "Transfer",
    source=conical_tube.output_pin("samples"),
    destination=microfuge_tube_0hrs.output_pin("samples"),
    amount=sbol3.Measure(0.5, OM.milliliter),
)
hold = protocol.primitive_step(
    "Hold",
    location=microfuge_tube_0hrs.output_pin("samples"),
    temperature=sbol3.Measure(4, OM.degree_Celsius),
)
# Grow the remaining cultures for 6 hours at 37 C with shaking.
incubate = protocol.primitive_step(
    "Incubate",
    location=conical_tube.output_pin("samples"),
    duration=sbol3.Measure(6, OM.hour),
    temperature=sbol3.Measure(37, OM.degree_Celsius),
    shakingFrequency=sbol3.Measure(220, None),
)
# 6-hour timepoint: same sample-and-hold pattern as at 0 hours.
microfuge_tube_6hrs = protocol.primitive_step(
    "ContainerSet",
    quantity=uml.LiteralInteger(value=len(plasmids)),
    specification=labop.ContainerSpec(
        name=f"absorbance timepoint (6 hrs)",
        queryString="cont:MicrofugeTube",
        prefixMap={"cont": "https://sift.net/container-ontology/container-ontology#"},
    ),
)
transfer = protocol.primitive_step(
    "Transfer",
    source=conical_tube.output_pin("samples"),
    destination=microfuge_tube_6hrs.output_pin("samples"),
    amount=sbol3.Measure(0.5, OM.milliliter),
)
hold = protocol.primitive_step(
    "Hold",
    location=microfuge_tube_6hrs.output_pin("samples"),
    temperature=sbol3.Measure(4, OM.degree_Celsius),
)
# Transfer to Plate
plate = protocol.primitive_step(
    "EmptyContainer",
    specification=labop.ContainerSpec(
        name=f"measurement plate",
        queryString="cont:Plate96Well",
        prefixMap={"cont": "https://sift.net/container-ontology/container-ontology#"},
    ),
)
# Plate map: sample i goes into column i, rows A-D (four replicate wells),
# URL-quoted JSON as expected by TransferByMap below.
plan = labop.SampleData(
    values=quote(
        json.dumps(
            {
                1: "A1:D1",
                2: "A2:D2",
                3: "A3:D3",
                4: "A4:D4",
                5: "A5:D5",
                6: "A6:D6",
                7: "A7:D7",
                8: "A8:D8",
            }
        )
    )
)
# Distribute the 0-hour samples into the plate per the map above.
transfer = protocol.primitive_step(
    "TransferByMap",
    source=microfuge_tube_0hrs.output_pin("samples"),
    destination=plate.output_pin("samples"),
    amount=sbol3.Measure(100, OM.microliter),
    plan=plan,
)
# Media-only blank wells in column 9, rows A-D.
plate_blanks = protocol.primitive_step(
    "Transfer",
    source=[lb_cam],
    destination=plate.output_pin("samples"),
    coordinates="A9:D9",
    amount=sbol3.Measure(100, OM.microliter),
)
# Plate reads: OD600 absorbance and GFP-range fluorescence (485/530 nm).
measure_absorbance = protocol.primitive_step(
    "MeasureAbsorbance",
    samples=plate.output_pin("samples"),
    wavelength=sbol3.Measure(600, OM.nanometer),
)
measure_fluorescence = protocol.primitive_step(
    "MeasureFluorescence",
    samples=plate.output_pin("samples"),
    excitationWavelength=sbol3.Measure(485, OM.nanometer),
    emissionWavelength=sbol3.Measure(530, OM.nanometer),
    emissionBandpassWidth=sbol3.Measure(30, OM.nanometer),
)
# NOTE(review): all three outputs are declared under the same name
# "measurements" — confirm the consumers can distinguish them, or give each
# a distinct name (e.g. baseline/absorbance/fluorescence).
protocol.designate_output(
    "measurements",
    "http://bioprotocols.org/labop#SampleData",
    source=baseline_absorbance.output_pin("measurements"),
)
protocol.designate_output(
    "measurements",
    "http://bioprotocols.org/labop#SampleData",
    source=measure_absorbance.output_pin("measurements"),
)
protocol.designate_output(
    "measurements",
    "http://bioprotocols.org/labop#SampleData",
    source=measure_fluorescence.output_pin("measurements"),
)
# Execute the protocol with the Markdown specialization and write the
# rendered protocol text to example.md.
agent = sbol3.Agent("test_agent")
ee = ExecutionEngine(specializations=[MarkdownSpecialization("test_LUDOX_markdown.md")])
execution = ee.execute(protocol, agent, id="test_execution", parameter_values=[])
print(ee.specializations[0].markdown)
with open("example.md", "w", encoding="utf-8") as f:
    f.write(ee.specializations[0].markdown)
|
import os
from GAN import GAN
from utils import show_all_variables
from utils import check_folder
import tensorflow as tf
import argparse
import os
def str2bool(v):
    """Parse a command-line flag string into a bool.

    'true' (any case) and '1' are truthy; everything else is False.
    Bug fix: the original compared v.lower() against 'True' (capital T),
    which can never match a lower-cased string, so '--mode True' always
    parsed as False.
    """
    return v.lower() in ('true', '1')
"""parsing and configuration"""
def parse_args():
    """Build the command-line interface, parse argv, and validate the result
    via check_args()."""
    desc = "Tensorflow implementation of GAN collections"
    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument('--dataset', type=str, default='mnist', help='The name of dataset')
    parser.add_argument('--epoch', type=int, default=20, help='The number of epochs to run')
    parser.add_argument('--batch_size', type=int, default=64, help='The size of batch')
    parser.add_argument('--z_dim', type=int, default=62, help='Dimension of noise vector')
    parser.add_argument('--checkpoint_dir', type=str, default='checkpoint',
                        help='Directory name to save the checkpoints')
    parser.add_argument('--result_dir', type=str, default='results',
                        help='Directory name to save the generated images')
    parser.add_argument('--log_dir', type=str, default='logs',
                        help='Directory name to save training logs')
    parser.add_argument('--input_dir', type=str,
                        help='Directory name of input images')
    parser.add_argument('--label_path', type=str, default=None,
                        help='label path of train images')
    # str2bool is used because argparse's type=bool treats any non-empty
    # string (including "False") as True
    parser.add_argument('--mode', type=str2bool, default=True, help='train or test')
    # parser.add_argument('--mode', type=bool, default=True, help='train or test') it won't take effect(always True),add_argument() doesn't parse type bool!
    parser.add_argument('--polar_transform', type=str2bool, default=False, help='whether transform to polar coordinate')
    parser.add_argument('--gpu', type=str, default='0', help='gpu device used')
    return check_args(parser.parse_args())
"""checking arguments"""
def check_args(args):
    """Create the output directories and sanity-check the numeric arguments.

    NOTE(review): validation uses `assert`, which is stripped under
    `python -O`; explicit raises would be more robust.
    """
    # --checkpoint_dir
    check_folder(args.checkpoint_dir)
    # --result_dir
    check_folder(args.result_dir)
    # --log_dir
    check_folder(args.log_dir)
    # --epoch
    assert args.epoch >= 1, 'number of epochs must be larger than or equal to one'
    # --batch_size
    assert args.batch_size >= 1, 'batch size must be larger than or equal to one'
    # --z_dim
    assert args.z_dim >= 1, 'dimension of noise vector must be larger than or equal to one'
    return args
"""main"""
def main():
    """Entry point: parse/validate arguments, build the GAN, then train or
    (with --mode false) generate samples from a saved checkpoint."""
    # parse arguments
    args = parse_args()
    if args is None:
        exit()
    # set gpu device
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    # allow_growth avoids grabbing all GPU memory up front
    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth = True
    # open session
    with tf.Session(config=run_config) as sess:
        # declare instance for GAN
        gan = GAN(sess,
                  epoch=args.epoch,
                  batch_size=args.batch_size,
                  z_dim=args.z_dim,
                  dataset_name=args.dataset,
                  checkpoint_dir=args.checkpoint_dir,
                  result_dir=args.result_dir,
                  log_dir=args.log_dir,
                  input_dir=args.input_dir)
        # build graph
        gan.build_model()
        # show network architecture
        show_all_variables()
        print(args.mode)
        if args.mode:
            # launch the graph in a session
            gan.train()
            print(" [*] Training finished!")
            # visualize learned generator
            gan.visualize_results(args.epoch-1)
            print(" [*] Testing finished!")
        else:
            # test mode requires a previously trained checkpoint
            if not gan.load(args.checkpoint_dir)[0]:
                raise Exception("[!] Train a model first, then run test mode")
            print('generate samples.')
            gan.generate(image_num=10000)
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
# D. Jones - 9/1/15
"""BEAMS method for PS1 data"""
from __future__ import print_function
import numpy as np
# Header block for standard (non-BEAMS) FITRES output files.
# NOTE(review): NVAR says 30 but only 27 VARNAMES are listed (and
# `fitresvars`/`fitresfmt` below carry 27 fields) -- confirm whether any
# downstream SNANA-style parser relies on the NVAR count.
fitresheader = """# VERSION: PS1_PS1MD
# FITOPT: NONE
# ----------------------------------------
NVAR: 30
VARNAMES: CID IDSURVEY TYPE FIELD zHD zHDERR HOST_LOGMASS HOST_LOGMASS_ERR SNRMAX1 SNRMAX2 SNRMAX3 PKMJD PKMJDERR x1 x1ERR c cERR mB mBERR x0 x0ERR COV_x1_c COV_x1_x0 COV_c_x0 NDOF FITCHI2 FITPROB
# VERSION_SNANA = v10_39i
# VERSION_PHOTOMETRY = PS1_PS1MD
# TABLE NAME: FITRES
#
"""
# Header for BEAMS input files: the 27 standard columns plus the prior
# P(Ia) (PA), the low-mass probability (PL), and a spec-confirmation flag.
# NOTE(review): unlike `fitresheader`, rows written with `fitresfmtbeams`
# carry no 'SN:' prefix -- presumably intentional for the BEAMS reader;
# confirm against dobeams.
fitresheaderbeams = """# CID IDSURVEY TYPE FIELD zHD zHDERR HOST_LOGMASS HOST_LOGMASS_ERR SNRMAX1 SNRMAX2 SNRMAX3 PKMJD PKMJDERR x1 x1ERR c cERR mB mBERR x0 x0ERR COV_x1_c COV_x1_x0 COV_c_x0 NDOF FITCHI2 FITPROB PA PL SNSPEC
"""
# printf-style row format for BEAMS input files (30 fields, matching
# `fitresvarsbeams` one-to-one).
fitresfmtbeams = '%s %i %i %s %.5f %.5f %.4f %.4f %.4f %.4f %.4f %.3f %.3f %8.5e %8.5e %8.5e %8.5e %.4f %.4f %8.5e %8.5e %8.5e %8.5e %8.5e %i %.4f %.4f %.4f %.4f %i'
# Column order for BEAMS input rows; indexes into a txtobj's __dict__.
fitresvarsbeams = ["CID","IDSURVEY","TYPE","FIELD",
                   "zHD","zHDERR","HOST_LOGMASS",
                   "HOST_LOGMASS_ERR","SNRMAX1","SNRMAX2",
                   "SNRMAX3","PKMJD","PKMJDERR","x1","x1ERR",
                   "c","cERR","mB","mBERR","x0","x0ERR","COV_x1_c",
                   "COV_x1_x0","COV_c_x0","NDOF","FITCHI2","FITPROB",
                   "PA","PL","SNSPEC"]
# Column order for standard FITRES rows (27 fields).
fitresvars = ["CID","IDSURVEY","TYPE","FIELD",
              "zHD","zHDERR","HOST_LOGMASS",
              "HOST_LOGMASS_ERR","SNRMAX1","SNRMAX2",
              "SNRMAX3","PKMJD","PKMJDERR","x1","x1ERR",
              "c","cERR","mB","mBERR","x0","x0ERR","COV_x1_c",
              "COV_x1_x0","COV_c_x0","NDOF","FITCHI2","FITPROB"]
# printf-style row format for standard FITRES rows ('SN:' key prefix,
# 27 fields matching `fitresvars`).
fitresfmt = 'SN: %s %i %i %s %.5f %.5f %.4f %.4f %.4f %.4f %.4f %.3f %.3f %8.5e %8.5e %8.5e %8.5e %.4f %.4f %8.5e %8.5e %8.5e %8.5e %8.5e %i %.4f %.4f'
class snbeams:
    """Driver for running the BEAMS method (Kunz et al. 2006) on PS1 SN data."""

    def __init__(self):
        # Runtime flags; normally overwritten from the parsed options.
        self.clobber = False
        self.verbose = False
    def add_options(self, parser=None, usage=None, config=None):
        """Build (or extend) the optparse parser for the BEAMS driver.

        If `config` (a ConfigParser) is given, option defaults come from the
        config file sections; otherwise hard-coded defaults are used.
        Returns the parser.

        NOTE(review): --nthreads is added twice (general and emcee sections).
        That only works because the parser constructed here uses
        conflict_handler="resolve"; a caller-supplied `parser` without that
        handler would raise OptionConflictError.
        """
        import optparse
        # `== None` kept verbatim; `is None` would be the idiomatic test.
        if parser == None:
            parser = optparse.OptionParser(usage=usage, conflict_handler="resolve")
        # the basics
        parser.add_option('-v', '--verbose', action="count", dest="verbose",default=1)
        parser.add_option('--debug', default=False, action="store_true",
                          help='debug mode: more output and debug files')
        parser.add_option('--clobber', default=False, action="store_true",
                          help='clobber output image')
        if config:
            # Defaults drawn from the config file.
            parser.add_option('--piacol', default=config.get('inputdata','piacol'), type="string",
                              help='Column in FITRES file used as guess at P(Ia)')
            parser.add_option('--specconfcol', default=config.get('inputdata','specconfcol'), type="string",
                              help='Column in FITRES file indicating spec.-confirmed SNe with 1')
            # Light curve cut parameters
            parser.add_option(
                '--crange', default=list(map(float,config.get('lightcurve','crange').split(','))),type="float",
                help='Peculiar velocity error (default=%default)',nargs=2)
            parser.add_option(
                '--x1range', default=list(map(float,config.get('lightcurve','x1range').split(','))),type="float",
                help='Peculiar velocity error (default=%default)',nargs=2)
            parser.add_option('--x1cellipse',default=config.getboolean('lightcurve','x1cellipse'),
                              action="store_true",
                              help='Elliptical, not box, cut in x1 and c')
            parser.add_option(
                '--fitprobmin', default=config.get('lightcurve','fitprobmin'),type="float",
                help='Peculiar velocity error (default=%default)')
            parser.add_option(
                '--x1errmax', default=config.get('lightcurve','x1errmax'),type="float",
                help='Peculiar velocity error (default=%default)')
            parser.add_option(
                '--pkmjderrmax', default=config.get('lightcurve','pkmjderrmax'),type="float",
                help='Peculiar velocity error (default=%default)')
            parser.add_option('--cutwin',default=config.get('lightcurve','cutwin'),
                              type='string',action='append',
                              help='parameter range for specified variable',nargs=3)
            # SALT2 parameters and intrinsic dispersion
            parser.add_option('--salt2alpha', default=config.get('lightcurve','salt2alpha'),
                              type="float",
                              help='SALT2 alpha parameter from a spectroscopic sample (default=%default)')
            parser.add_option('--salt2alphaerr', default=config.get('lightcurve','salt2alphaerr'),
                              type="float",
                              help='nominal SALT2 alpha uncertainty from a spectroscopic sample (default=%default)')
            parser.add_option('--salt2beta', default=config.get('lightcurve','salt2beta'),
                              type="float",
                              help='nominal SALT2 beta parameter from a spec. sample (default=%default)')
            parser.add_option('--salt2betaerr', default=config.get('lightcurve','salt2betaerr'),
                              type="float",
                              help='nominal SALT2 beta uncertainty from a spec. sample (default=%default)')
            parser.add_option('--sigint', default=config.get('lightcurve','sigint'),
                              type="float",
                              help='nominal intrinsic dispersion, MCMC fits for this if not specified (default=%default)')
            # Mass options
            parser.add_option(
                '--masscorr', default=config.getboolean('mass','masscorr'),action="store_true",
                help='If true, perform mass correction (default=%default)')
            parser.add_option(
                '--masscorrfixed', default=config.getboolean('mass','masscorrfixed'),action="store_true",
                help='If true, perform fixed mass correction (default=%default)')
            parser.add_option(
                '--masscorrmag', default=config.get('mass','masscorrmag'),type="float",
                help="""mass corr. and uncertainty (default=%default)""")
            parser.add_option(
                '--masscorrmagerr', default=config.get('mass','masscorrmagerr'),type="float",
                help="""mass corr. and uncertainty (default=%default)""")
            parser.add_option(
                '--masscorrdivide', default=config.get('mass','masscorrdivide'),type="float",
                help="""location of low-mass/high-mass split (default=%default)""")
            parser.add_option('--nthreads', default=config.get('mcmc','nthreads'), type="int",
                              help='Number of threads for MCMC')
            parser.add_option('--zmin', default=config.get('lightcurve','zmin'), type="float",
                              help='minimum redshift')
            parser.add_option('--zmax', default=config.get('lightcurve','zmax'), type="float",
                              help='maximum redshift')
            parser.add_option('--nbins', default=config.get('mcmc','nbins'), type="int",
                              help='number of bins in log redshift space')
            parser.add_option('--equalbins', default=config.getboolean('mcmc','equalbins'), action="store_true",
                              help='if set, every bin contains the same number of SNe')
            parser.add_option('-f','--fitresfile', default=config.get('inputdata','fitresfile'), type="string",
                              help='fitres file with the SN Ia data')
            parser.add_option('-o','--outfile', default=config.get('inputdata','outfile'), type="string",
                              help='Output file with the derived parameters for each redshift bin')
            parser.add_option('--mcsubset', default=config.getboolean('bootstrap','mcsubset'), action="store_true",
                              help='generate a random subset of SNe from the fitres file')
            parser.add_option('--mcrandseed', default=config.get('bootstrap','mcrandseed'), type="int",
                              help='seed for np.random')
            parser.add_option('--subsetsize', default=config.get('bootstrap','subsetsize'), type="int",
                              help='number of SNe in each MC subset ')
            parser.add_option('--lowzsubsetsize', default=config.get('bootstrap','lowzsubsetsize'), type="int",
                              help='number of low-z SNe in each MC subset ')
            parser.add_option('--nmc', default=config.get('bootstrap','nmc'), type="int",
                              help='number of MC samples ')
            parser.add_option('--nmcstart', default=config.get('bootstrap','nmcstart'), type="int",
                              help='start at this MC sample')
            parser.add_option('--mclowz', default=config.get('bootstrap','mclowz'), type="string",
                              help='low-z SN file, to be appended to the MC sample')
            parser.add_option('--onlyIa', default=config.getboolean('sim','onlyIa'), action="store_true",
                              help='remove the TYPE != 1 SNe from the bunch')
            parser.add_option('--pcutval', default=config.get('sim','pcutval'),type="float",
                              help="""the traditional method - make a cut on probability and
then everything with P(Ia) > that cut is reset to P(Ia) = 1""")
            parser.add_option('--onlyCC', default=config.getboolean('sim','onlyCC'), action="store_true",
                              help='remove the TYPE = 1 SNe from the bunch')
            parser.add_option('--nobadzsim', default=config.getboolean('sim','nobadzsim'), action="store_true",
                              help='If working with simulated data, remove the bad redshifts')
            parser.add_option('--zminphot', default=config.get('inputdata','zminphot'), type='float',
                              help='set a minimum redshift for P(Ia) != 1 sample')
            parser.add_option('--specidsurvey', default=config.get('inputdata','specidsurvey'), type='string',
                              help='will fix P(Ia) at 1 for IDSURVEY = this value')
            parser.add_option('--photidsurvey', default=config.get('inputdata','photidsurvey'), type='float',
                              help='photometric survey ID, only necessary for zminphot')
            parser.add_option('--nspecsne', default=config.get('sim','nspecsne'), type='int',
                              help='a spectroscopic sample to help BEAMS (for sim SNe)')
            parser.add_option('--nsne', default=config.get('inputdata','nsne'), type='int',
                              help='maximum number of SNe to fit')
            # alternate functional models
            parser.add_option('--twogauss', default=config.getboolean('models','twogauss'), action="store_true",
                              help='two gaussians for pop. B')
            parser.add_option('--skewedgauss', default=config.getboolean('models','skewedgauss'), action="store_true",
                              help='skewed gaussian for pop. B')
            parser.add_option('--zCCdist', default=config.getboolean('models','zCCdist'), action="store_true",
                              help='fit for different CC parameters at each redshift control point')
            # emcee options
            parser.add_option('--nthreads', default=config.get('mcmc','nthreads'), type="int",
                              help='Number of threads for MCMC')
            parser.add_option('--nwalkers', default=config.get('mcmc','nwalkers'), type="int",
                              help='Number of walkers for MCMC')
            parser.add_option('--nsteps', default=config.get('mcmc','nsteps'), type="int",
                              help='Number of steps (per walker) for MCMC')
            parser.add_option('--ninit', default=config.get('mcmc','ninit'), type="int",
                              help="Number of steps before the samples wander away from the initial values and are 'burnt in'")
            parser.add_option('--ntemps', default=config.get('mcmc','ntemps'), type="int",
                              help="Number of temperatures for the sampler")
            parser.add_option('--minmethod', default=config.get('mcmc','minmethod'), type="string",
                              help="""minimization method for scipy.optimize. L-BFGS-B is probably the best, but slow.
SLSQP is faster. Try others if using unbounded parameters""")
            parser.add_option('--miniter', default=config.get('mcmc','miniter'), type="int",
                              help="""number of minimization iterations - uses basinhopping
algorithm for miniter > 1""")
            parser.add_option('--forceminsuccess', default=config.getboolean('mcmc','forceminsuccess'), action="store_true",
                              help="""if true, minimizer must be successful or code will crash.
Default is to let the MCMC try to find a minimum if minimizer fails""")
        else:
            # Hard-coded defaults (no config file supplied).
            parser.add_option('--piacol', default='FITPROB', type="string",
                              help='Column in FITRES file used as guess at P(Ia)')
            parser.add_option('--specconfcol', default=None, type="string",
                              help='Column in FITRES file indicating spec.-confirmed SNe with 1')
            # Light curve cut parameters
            parser.add_option(
                '--crange', default=(-0.3,0.3),type="float",
                help='Peculiar velocity error (default=%default)',nargs=2)
            parser.add_option(
                '--x1range', default=(-3.0,3.0),type="float",
                help='Peculiar velocity error (default=%default)',nargs=2)
            parser.add_option('--x1cellipse',default=False,action="store_true",
                              help='Circle cut in x1 and c')
            parser.add_option(
                '--fitprobmin', default=0.001,type="float",
                help='Peculiar velocity error (default=%default)')
            parser.add_option(
                '--x1errmax', default=1.0,type="float",
                help='Peculiar velocity error (default=%default)')
            parser.add_option(
                '--pkmjderrmax', default=2.0,type="float",
                help='Peculiar velocity error (default=%default)')
            parser.add_option('--cutwin',default=[],
                              type='string',action='append',
                              help='parameter range for specified variable',nargs=3)
            # SALT2 parameters and intrinsic dispersion
            parser.add_option('--salt2alpha', default=0.147, type="float",#0.147
                              help='SALT2 alpha parameter from a spectroscopic sample (default=%default)')
            parser.add_option('--salt2alphaerr', default=0.01, type="float",#0.01
                              help='nominal SALT2 alpha uncertainty from a spectroscopic sample (default=%default)')
            parser.add_option('--salt2beta', default=3.13, type="float",#3.13
                              help='nominal SALT2 beta parameter from a spec. sample (default=%default)')
            parser.add_option('--salt2betaerr', default=0.12, type="float",#0.12
                              help='nominal SALT2 beta uncertainty from a spec. sample (default=%default)')
            parser.add_option('--sigint', default=None, type="float",
                              help='nominal intrinsic dispersion, MCMC fits for this if not specified (default=%default)')
            # Mass options
            parser.add_option(
                '--masscorr', default=False,action="store_true",
                help='If true, perform mass correction (default=%default)')
            parser.add_option(
                '--masscorrfixed', default=False,action="store_true",
                help='If true, perform fixed mass correction (default=%default)')
            parser.add_option(
                '--masscorrmag', default=0.07,type="float",
                help="""mass corr. and uncertainty (default=%default)""")
            parser.add_option(
                '--masscorrmagerr', default=0.023,type="float",
                help="""mass corr. and uncertainty (default=%default)""")
            parser.add_option(
                '--masscorrdivide', default=10,type="float",
                help="""location of low-mass/high-mass split (default=%default)""")
            parser.add_option('--nthreads', default=8, type="int",
                              help='Number of threads for MCMC')
            parser.add_option('--zmin', default=0.009, type="float",
                              help='minimum redshift')
            parser.add_option('--zmax', default=0.70, type="float",
                              help='maximum redshift')
            parser.add_option('--nbins', default=25, type="int",
                              help='number of bins in log redshift space')
            parser.add_option('--equalbins', default=False, action="store_true",
                              help='if set, every bin contains the same number of SNe')
            parser.add_option('-f','--fitresfile', default='ps1_psnidprob.fitres', type="string",
                              help='fitres file with the SN Ia data')
            parser.add_option('-o','--outfile', default='beamsCosmo.out', type="string",
                              help='Output file with the derived parameters for each redshift bin')
            parser.add_option('--mcsubset', default=False, action="store_true",
                              help='generate a random subset of SNe from the fitres file')
            parser.add_option('--mcrandseed', default=None, type="int",
                              help='seed for np.random')
            parser.add_option('--subsetsize', default=105, type="int",
                              help='number of SNe in each MC subset ')
            parser.add_option('--lowzsubsetsize', default=250, type="int",
                              help='number of low-z SNe in each MC subset ')
            parser.add_option('--nmc', default=100, type="int",
                              help='number of MC samples ')
            parser.add_option('--nmcstart', default=1, type="int",
                              help='start at this MC sample')
            parser.add_option('--mclowz', default="", type="string",
                              help='low-z SN file, to be appended to the MC sample')
            parser.add_option('--onlyIa', default=False, action="store_true",
                              help='remove the TYPE != 1 SNe from the bunch')
            parser.add_option('--pcutval', default=None,type="float",
                              help="""the traditional method - make a cut on probability and
then everything with P(Ia) > that cut is reset to P(Ia) = 1""")
            parser.add_option('--onlyCC', default=False, action="store_true",
                              help='remove the TYPE = 1 SNe from the bunch')
            parser.add_option('--nobadzsim', default=False, action="store_true",
                              help='If working with simulated data, remove the bad redshifts')
            parser.add_option('--zminphot', default=0.08, type='float',
                              help='set a minimum redshift for P(Ia) != 1 sample')
            parser.add_option('--photidsurvey', default=15, type='float',
                              help='photometric survey ID, only necessary for zminphot')
            parser.add_option('--specidsurvey', default='53,5,50,61,62,63,64,65,66,150,151,152', type='string',
                              help='will fix P(Ia) at 1 for IDSURVEY = this value')
            parser.add_option('--nspecsne', default=0, type='int',
                              help='a spectroscopic sample to help BEAMS (for sim SNe)')
            parser.add_option('--nsne', default=0, type='int',
                              help='maximum number of SNe to fit')
            # alternate functional models
            parser.add_option('--twogauss', default=False, action="store_true",
                              help='two gaussians for pop. B')
            parser.add_option('--skewedgauss', default=False, action="store_true",
                              help='skewed gaussian for pop. B')
            parser.add_option('--zCCdist', default=False, action="store_true",
                              help='fit for different CC parameters at each redshift control point')
            # emcee options
            parser.add_option('--nthreads', default=8, type="int",
                              help='Number of threads for MCMC')
            parser.add_option('--nwalkers', default=200, type="int",
                              help='Number of walkers for MCMC')
            parser.add_option('--nsteps', default=3000, type="int",
                              help='Number of steps (per walker) for MCMC')
            parser.add_option('--ninit', default=1500, type="int",
                              help="Number of steps before the samples wander away from the initial values and are 'burnt in'")
            parser.add_option('--ntemps', default=0, type="int",
                              help="Number of temperatures for the sampler")
            parser.add_option('--minmethod', default='SLSQP', type="string",
                              help="""minimization method for scipy.optimize. L-BFGS-B is probably the best, but slow.
SLSQP is faster. Try others if using unbounded parameters""")
            parser.add_option('--miniter', default=1, type="int",
                              help="""number of minimization iterations - uses basinhopping
algorithm for miniter > 1""")
            parser.add_option('--forceminsuccess', default=False, action="store_true",
                              help="""if true, minimizer must be successful or code will crash.
Default is to let the MCMC try to find a minimum if minimizer fails""")
        # Options common to both branches.
        # NOTE(review): source indentation was lost; these are placed outside
        # the if/else because main() reads paramfile/mcmcparamfile/fix/... in
        # both config and no-config runs -- confirm against upstream.
        parser.add_option('-p','--paramfile', default='', type="string",
                          help='fitres file with the SN Ia data')
        parser.add_option('-m','--mcmcparamfile', default='mcmcparams.input', type="string",
                          help='file that describes the MCMC input parameters')
        parser.add_option('--fix',default=[],
                          type='string',action='append',
                          help='parameter range for specified variable')
        parser.add_option('--bounds',default=[],
                          type='string',action='append',
                          help='variable, lower bound, upper bound. Overrides MCMC parameter file.',nargs=3)
        parser.add_option('--guess',default=[],
                          type='string',action='append',
                          help='parameter guess for specified variable. Overrides MCMC parameter file',nargs=2)
        parser.add_option('--prior',default=[],
                          type='string',action='append',
                          help='parameter prior for specified variable. Overrides MCMC parameter file',nargs=3)
        parser.add_option('--bins',default=[],
                          type='string',action='append',
                          help='number of bins for specified variable. Overrides MCMC parameter file',nargs=2)
        parser.add_option('--use',default=[],
                          type='string',action='append',
                          help='use specified variable. Overrides MCMC parameter file',nargs=2)
        return(parser)
def main(self,fitres,mkcuts=False):
from txtobj import txtobj
from astropy.cosmology import Planck13 as cosmo
fr = txtobj(fitres,fitresheader=True)
if self.options.zmin < np.min(fr.zHD): self.options.zmin = np.min(fr.zHD)
if self.options.zmax > np.max(fr.zHD): self.options.zmax = np.max(fr.zHD)
from dobeams import salt2mu
fr.MU,fr.MUERR = salt2mu(x1=fr.x1,x1err=fr.x1ERR,c=fr.c,cerr=fr.cERR,mb=fr.mB,mberr=fr.mBERR,
cov_x1_c=fr.COV_x1_c,cov_x1_x0=fr.COV_x1_x0,cov_c_x0=fr.COV_c_x0,
alpha=self.options.salt2alpha,
beta=self.options.salt2beta,
x0=fr.x0,sigint=self.options.sigint,z=fr.zHD)
fr = self.mkfitrescuts(fr,mkcuts=mkcuts)
root = os.path.splitext(fitres)[0]
# Prior SN Ia probabilities
P_Ia = np.zeros(len(fr.CID))
for i in range(len(fr.CID)):
P_Ia[i] = fr.__dict__[self.options.piacol][i]
if self.options.specconfcol:
if fr.__dict__[self.options.specconfcol][i] == 1:
P_Ia[i] = 1
from dobeams import BEAMS
import configparser, sys
sys.argv = ['./doBEAMS.py']
beam = BEAMS()
parser = beam.add_options()
options, args = parser.parse_args(args=None,values=None)
options.paramfile = self.options.paramfile
if options.paramfile:
config = configparser.ConfigParser()
config.read(options.paramfile)
else: config=None
parser = beam.add_options(config=config)
options, args = parser.parse_args()
beam.options = options
# clumsy - send some options to the code
beam.options.twogauss = self.options.twogauss
beam.options.skewedgauss = self.options.skewedgauss
beam.options.zCCdist = self.options.zCCdist
beam.options.nthreads = self.options.nthreads
beam.options.nwalkers = self.options.nwalkers
beam.options.nsteps = self.options.nsteps
beam.options.mcmcparamfile = self.options.mcmcparamfile
beam.options.fix = self.options.fix
beam.options.bounds = self.options.bounds
beam.options.guess = self.options.guess
beam.options.prior = self.options.prior
beam.options.bins = self.options.bins
beam.options.use = self.options.use
beam.options.minmethod = self.options.minmethod
beam.options.forceminsuccess = self.options.forceminsuccess
beam.options.miniter = self.options.miniter
beam.options.ninit = self.options.ninit
beam.options.ntemps = self.options.ntemps
beam.options.debug = self.options.debug
beam.options.mcrandseed = self.options.mcrandseed
beam.options.salt2alpha = self.options.salt2alpha
beam.options.salt2beta = self.options.salt2beta
options.fitresfile = '%s.input'%root
if self.options.masscorr:
beam.options.plcol = 'PL'
import scipy.stats
#cols = np.where(fr.HOST_LOGMASS > 0)
#for k in fr.__dict__.keys():
# fr.__dict__[k] = fr.__dict__[k][cols]
fr.PL = np.zeros(len(fr.CID))
for i in range(len(fr.CID)):
if fr.HOST_LOGMASS_ERR[i] <= 0: fr.HOST_LOGMASS_ERR[i] = 1e-5
fr.PL[i] = scipy.stats.norm.cdf(self.options.masscorrdivide,fr.HOST_LOGMASS[i],fr.HOST_LOGMASS_ERR[i])
#P_Ia = P_Ia[cols]
if self.options.masscorrfixed: beam.options.lstepfixed = True
beam.options.zrange = (self.options.zmin,self.options.zmax)
beam.options.nbins = self.options.nbins
# make the BEAMS input file
fr.PA = fr.__dict__[self.options.piacol]
if not self.options.masscorr: fr.PL = np.zeros(len(fr.PA))
writefitres(fr,list(range(len(fr.PA))),'%s.input'%root,
fitresheader=fitresheaderbeams,
fitresfmt=fitresfmtbeams,
fitresvars=fitresvarsbeams)
beam.options.append = False
beam.options.clobber = self.options.clobber
beam.options.outfile = self.options.outfile
beam.options.equalbins = self.options.equalbins
beam.main(options.fitresfile)
bms = txtobj(self.options.outfile)
self.writeBinCorrFitres('%s.fitres'%self.options.outfile.split('.')[0],bms,fr=fr)
return
    def mkfitrescuts(self,fr,mkcuts=False):
        """Apply selection cuts to a txtobj of FITRES data, in place-ish.

        Every cut filters all arrays in fr.__dict__ by a np.where mask.
        Returns the filtered fr.
        """
        # Light curve cuts
        if mkcuts:
            # Inverse variance of the corrected magnitude; rows with
            # invvars <= 0 have unusable covariance and are dropped.
            sf = -2.5/(fr.x0*np.log(10.0))
            invvars = 1./(fr.mBERR**2.+ self.options.salt2alpha**2. * fr.x1ERR**2. + \
                          self.options.salt2beta**2. * fr.cERR**2. + 2.0 * self.options.salt2alpha * (fr.COV_x1_x0*sf) - \
                          2.0 * self.options.salt2beta * (fr.COV_c_x0*sf) - \
                          2.0 * self.options.salt2alpha*self.options.salt2beta * (fr.COV_x1_c) )
            if self.options.x1cellipse:
                # I'm just going to assume cmax = abs(cmin) and same for x1
                cols = np.where((fr.x1**2./self.options.x1range[0]**2. + fr.c**2./self.options.crange[0]**2. < 1) &
                                (fr.x1ERR < self.options.x1errmax) & (fr.PKMJDERR < self.options.pkmjderrmax*(1+fr.zHD)) &
                                (fr.FITPROB >= self.options.fitprobmin) &
                                (fr.zHD > self.options.zmin) & (fr.zHD < self.options.zmax) &
                                (fr.__dict__[self.options.piacol] >= 0) & (invvars > 0))
            else:
                cols = np.where((fr.x1 > self.options.x1range[0]) & (fr.x1 < self.options.x1range[1]) &
                                (fr.c > self.options.crange[0]) & (fr.c < self.options.crange[1]) &
                                (fr.x1ERR < self.options.x1errmax) & (fr.PKMJDERR < self.options.pkmjderrmax*(1+fr.zHD)) &
                                (fr.FITPROB >= self.options.fitprobmin) &
                                (fr.zHD > self.options.zmin) & (fr.zHD < self.options.zmax) &
                                (fr.__dict__[self.options.piacol] >= 0) & (invvars > 0))
            for k in list(fr.__dict__.keys()):
                fr.__dict__[k] = fr.__dict__[k][cols]
        else:
            # Minimal cuts: valid probability, positive inv. variance, z range.
            sf = -2.5/(fr.x0*np.log(10.0))
            invvars = 1./(fr.mBERR**2.+ self.options.salt2alpha**2. * fr.x1ERR**2. + \
                          self.options.salt2beta**2. * fr.cERR**2. + 2.0 * self.options.salt2alpha * (fr.COV_x1_x0*sf) - \
                          2.0 * self.options.salt2beta * (fr.COV_c_x0*sf) - \
                          2.0 * self.options.salt2alpha*self.options.salt2beta * (fr.COV_x1_c) )
            cols = np.where((fr.__dict__[self.options.piacol] >= 0) & (invvars > 0) &
                            (fr.zHD >= self.options.zmin) & (fr.zHD <= self.options.zmax))
            for k in list(fr.__dict__.keys()):
                fr.__dict__[k] = fr.__dict__[k][cols]
        # User-specified per-variable range cuts.
        if len(self.options.cutwin):
            cols = np.arange(len(fr.CID))
            for cutopt in self.options.cutwin:
                # NOTE: `min`/`max` shadow the builtins within this loop.
                i,min,max = cutopt[0],cutopt[1],cutopt[2]; min,max = float(min),float(max)
                if i not in fr.__dict__:
                    # NOTE(review): `self.options.histvar` and `fitresfile`
                    # are not defined anywhere in this method/class --
                    # reaching either branch below raises NameError.
                    if i not in self.options.histvar:
                        print(('Warning : key %s not in fitres file %s! Ignoring for this file...'%(i,fitresfile)))
                    else:
                        raise RuntimeError('Error : key %s not in fitres file %s!'%(i,fitresfile))
                else:
                    cols = cols[np.where((fr.__dict__[i][cols] >= min) & (fr.__dict__[i][cols] <= max))]
            for k in list(fr.__dict__.keys()):
                fr.__dict__[k] = fr.__dict__[k][cols]
        # create the SNSPEC field - these probabilities will be fixed at 1!!
        fr.SNSPEC = np.zeros(len(fr.CID))
        for s in self.options.specidsurvey.split(','):
            fr.SNSPEC[fr.IDSURVEY == float(s)] = 1
        # set a certain number of simulated SNe to be 'confirmed' SN Ia
        if self.options.nspecsne:
            from random import sample
            # Candidates: photometric-survey sims that are true Ia with a
            # good redshift match.
            cols = sample(list(range(len(fr.CID[(fr.IDSURVEY != self.options.specidsurvey) &
                                                (fr.SIM_TYPE_INDEX == 1) &
                                                (np.abs(fr.SIM_ZCMB - fr.zHD) < 0.01)]))),
                          self.options.nspecsne)
            fr.SNSPEC[np.where((fr.IDSURVEY != self.options.specidsurvey) &
                               (fr.SIM_TYPE_INDEX == 1) &
                               (np.abs(fr.SIM_ZCMB - fr.zHD) < 0.01))[0][cols]] = 1
            fr.__dict__[self.options.piacol][np.where((fr.IDSURVEY != self.options.specidsurvey) &
                                                      (fr.SIM_TYPE_INDEX == 1) &
                                                      (np.abs(fr.SIM_ZCMB - fr.zHD) < 0.01))[0][cols]] = 1
        # can get the Ia-only likelihood as a consistency check
        if self.options.onlyIa:
            cols = np.where((fr.SIM_TYPE_INDEX == 1) & (np.abs(fr.SIM_ZCMB - fr.zHD) < 0.01))
            for k in list(fr.__dict__.keys()):
                fr.__dict__[k] = fr.__dict__[k][cols]
        elif self.options.onlyCC:
            cols = np.where((fr.SIM_TYPE_INDEX != 1) | (np.abs(fr.SIM_ZCMB - fr.zHD) > 0.01))
            for k in list(fr.__dict__.keys()):
                fr.__dict__[k] = fr.__dict__[k][cols]
        elif self.options.piacol == 'PTRUE_Ia':
            # Hack - bad redshifts are called CC SNe when running 'true' probabilities
            cols = np.where(np.abs(fr.SIM_ZCMB - fr.zHD) > 0.01)
            fr.PTRUE_Ia[cols] = 0
        # reset everything with P(Ia) > pcutval to P(Ia) = 1 and remove everything else
        if self.options.pcutval:
            cols = np.where(fr.__dict__[self.options.piacol] >= self.options.pcutval)
            for k in list(fr.__dict__.keys()):
                fr.__dict__[k] = fr.__dict__[k][cols]
            fr.__dict__[self.options.piacol][:] = 1
        # all those low-z photometric SNe are probably CC SNe?
        if self.options.zminphot:
            print(('setting minimum redshift for the photometric sample to z = %.3f'%self.options.zminphot))
            cols = np.where(((fr.zHD >= self.options.zminphot) & (fr.IDSURVEY == self.options.photidsurvey)) |
                            (fr.IDSURVEY != self.options.photidsurvey))
            for k in list(fr.__dict__.keys()):
                fr.__dict__[k] = fr.__dict__[k][cols]
        # try getting rid of the bad redshifts
        if self.options.nobadzsim:
            cols = np.where((np.abs(fr.SIM_ZCMB - fr.zHD) < 0.01))
            for k in list(fr.__dict__.keys()):
                fr.__dict__[k] = fr.__dict__[k][cols]
        # try a random subset of the fulll fitres file
        if self.options.nsne and self.options.nsne < len(fr.CID):
            from random import sample
            cols = sample(list(range(len(fr.CID))),
                          self.options.nsne)
            for k in list(fr.__dict__.keys()):
                fr.__dict__[k] = fr.__dict__[k][cols]
        return(fr)
def writeBinCorrFitres(self,outfile,bms,skip=0,fr=None):
import os
from astropy.cosmology import Planck13 as cosmo
from txtobj import txtobj
fout = open(outfile,'w')
print(fitresheader, file=fout)
for i in range(self.options.nbins):
outvars = ()
for v in fitresvars:
if v == 'zHD':
outvars += (bms.zCMB[i],)
elif v == 'z':
outvars += (bms.zCMB[i],)
elif v == 'mB':
outvars += (bms.popAmean[i]-19.36,)
elif v == 'mBERR':
outvars += (bms.popAmean_err[i],)
else:
outvars += (0,)
print(fitresfmt%outvars, file=fout)
    def mcsamp(self,fitresfile,mciter,lowzfile,nsne,nlowzsne):
        """Draw one Monte-Carlo subset of SNe and write it as a FITRES file.

        fitresfile: input FITRES file; mciter: MC iteration index (used in
        the output filename); lowzfile: optional low-z FITRES file appended
        to the subset; nsne/nlowzsne: subset sizes.  Returns the path of
        the written file.
        """
        import os
        from txtobj import txtobj
        import numpy as np
        # NOTE: these locals shadow the module-level fitresheader/
        # fitresvars/fitresfmt; this variant carries the extra probability
        # and sim-truth columns.
        fitresheader = """# VERSION: PS1_PS1MD
# FITOPT: NONE
# ----------------------------------------
NVAR: 31
VARNAMES: CID IDSURVEY TYPE FIELD zHD zHDERR HOST_LOGMASS HOST_LOGMASS_ERR SNRMAX1 SNRMAX2 SNRMAX3 PKMJD PKMJDERR x1 x1ERR c cERR mB mBERR x0 x0ERR COV_x1_c COV_x1_x0 COV_c_x0 NDOF FITCHI2 FITPROB PBAYES_Ia PGAL_Ia PFITPROB_Ia PNN_Ia PTRUE_Ia PHALF_Ia SIM_TYPE_INDEX SIM_ZCMB
# VERSION_SNANA = v10_39i
# VERSION_PHOTOMETRY = PS1_PS1MD
# TABLE NAME: FITRES
#
"""
        fitresvars = ["CID","IDSURVEY","TYPE","FIELD",
                      "zHD","zHDERR","HOST_LOGMASS",
                      "HOST_LOGMASS_ERR","SNRMAX1","SNRMAX2",
                      "SNRMAX3","PKMJD","PKMJDERR","x1","x1ERR",
                      "c","cERR","mB","mBERR","x0","x0ERR","COV_x1_c",
                      "COV_x1_x0","COV_c_x0","NDOF","FITCHI2","FITPROB",
                      "PBAYES_Ia","PGAL_Ia","PFITPROB_Ia","PNN_Ia",
                      "PTRUE_Ia","PHALF_Ia","SIM_TYPE_INDEX","SIM_ZCMB"]
        fitresfmt = 'SN: %s %i %i %s %.5f %.5f %.4f %.4f %.4f %.4f %.4f %.3f %.3f %8.5e %8.5e %8.5e %8.5e %.4f %.4f %8.5e %8.5e %8.5e %8.5e %8.5e %i %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %i %.5f'
        # Output name encodes the outfile root and the MC iteration.
        name,ext = os.path.splitext(fitresfile)
        outname,outext = os.path.splitext(self.options.outfile)
        fitresoutfile = '%s_%s_mc%i%s'%(name,outname.split('/')[-1],mciter,ext)
        fr = txtobj(fitresfile,fitresheader=True)
        # Fill sim-truth columns with -99 sentinels when absent (real data).
        if 'PTRUE_Ia' not in fr.__dict__: fr.PTRUE_Ia = np.array([-99]*len(fr.CID))
        if 'SIM_TYPE_INDEX' not in fr.__dict__: fr.SIM_TYPE_INDEX = np.array([-99]*len(fr.CID))
        if 'SIM_ZCMB' not in fr.__dict__: fr.SIM_ZCMB = np.array([-99]*len(fr.CID))
        if lowzfile:
            frlowz = txtobj(lowzfile,fitresheader=True)
            # NOTE(review): placeholders below are sized with len(fr.CID),
            # not len(frlowz.CID) -- looks like a copy-paste bug if the two
            # files differ in length; confirm before fixing.
            if 'PTRUE_Ia' not in frlowz.__dict__: frlowz.PTRUE_Ia = np.array([-99]*len(fr.CID))
            if 'SIM_TYPE_INDEX' not in frlowz.__dict__: frlowz.SIM_TYPE_INDEX = np.array([-99]*len(fr.CID))
            if 'SIM_ZCMB' not in frlowz.__dict__: frlowz.SIM_ZCMB = np.array([-99]*len(fr.CID))
        # Light curve cuts
        sf = -2.5/(fr.x0*np.log(10.0))
        invvars = 1./(fr.mBERR**2.+ self.options.salt2alpha**2. * fr.x1ERR**2. + \
                      self.options.salt2beta**2. * fr.cERR**2. + 2.0 * self.options.salt2alpha * (fr.COV_x1_x0*sf) - \
                      2.0 * self.options.salt2beta * (fr.COV_c_x0*sf) - \
                      2.0 * self.options.salt2alpha*self.options.salt2beta * (fr.COV_x1_c) )
        if self.options.x1cellipse:
            # I'm just going to assume cmax = abs(cmin) and same for x1
            cols = np.where((fr.x1**2./self.options.x1range[0]**2. + fr.c**2./self.options.crange[0]**2. < 1) &
                            (fr.x1ERR < self.options.x1errmax) & (fr.PKMJDERR < self.options.pkmjderrmax*(1+fr.zHD)) &
                            (fr.FITPROB >= self.options.fitprobmin) &
                            (fr.zHD > self.options.zmin) & (fr.zHD < self.options.zmax) &
                            (fr.__dict__[self.options.piacol] >= 0) & (invvars > 0))
        else:
            cols = np.where((fr.x1 > self.options.x1range[0]) & (fr.x1 < self.options.x1range[1]) &
                            (fr.c > self.options.crange[0]) & (fr.c < self.options.crange[1]) &
                            (fr.x1ERR < self.options.x1errmax) & (fr.PKMJDERR < self.options.pkmjderrmax*(1+fr.zHD)) &
                            (fr.FITPROB >= self.options.fitprobmin) &
                            (fr.zHD > self.options.zmin) & (fr.zHD < self.options.zmax) &
                            (fr.__dict__[self.options.piacol] >= 0) & (invvars > 0))
        for k in list(fr.__dict__.keys()):
            fr.__dict__[k] = fr.__dict__[k][cols]
        import random
        if self.options.mcrandseed: random.seed(self.options.mcrandseed)
        try:
            cols = random.sample(list(range(len(fr.CID))),nsne)
            writefitres(fr,cols,
                        fitresoutfile,fitresheader=fitresheader,
                        fitresvars=fitresvars,fitresfmt=fitresfmt)
        except ValueError:
            # random.sample raises ValueError when nsne > available SNe;
            # fall back to writing only the low-z sample.
            print('Warning : crashed because not enough SNe! Making only a low-z file...')
            if lowzfile:
                writefitres(frlowz,random.sample(list(range(len(frlowz.CID))),nlowzsne),
                            fitresoutfile,append=False,fitresheader=fitresheader,
                            fitresvars=fitresvars,fitresfmt=fitresfmt)
            return(fitresoutfile)
        if lowzfile:
            try:
                writefitres(frlowz,random.sample(list(range(len(frlowz.CID))),nlowzsne),
                            fitresoutfile,append=True,fitresheader=fitresheader,
                            fitresvars=fitresvars,fitresfmt=fitresfmt)
            # NOTE(review): bare except -- presumably guarding the same
            # not-enough-SNe ValueError; then the whole low-z file is
            # appended with PHALF_Ia forced to 1.
            except:
                frlowz.PHALF_Ia = np.ones(len(frlowz.CID))
                writefitres(frlowz,list(range(len(frlowz.CID))),
                            fitresoutfile,append=True,fitresheader=fitresheader,
                            fitresvars=fitresvars,fitresfmt=fitresfmt)
        return(fitresoutfile)
def combwithlowz(highzroot,lowzroot,outroot):
    """Combine a high-z and a low-z BEAMS result set under one root name.

    Expects <root>.fitres, <root>.out and <root>.covmat for both inputs and
    writes the combined trio under `outroot`.  The low-z rows come first;
    the combined covariance matrix is block-diagonal (low-z block, then
    high-z block, zero cross-covariance).
    """
    import os
    # append the fitres files: copy the low-z file, then append the high-z
    # data rows (skipping its header lines).
    os.system('cp %s.fitres %s.fitres'%(lowzroot,outroot))
    with open('%s.fitres'%highzroot,'r') as fin, open('%s.fitres'%outroot,'a') as fout:
        for line in fin:
            if not line.startswith('#') and \
               not line.startswith('VARNAMES:') and \
               not line.startswith('NVAR:'):
                print(line.replace('\n',''), file=fout)
    # append the output files the same way.
    os.system('cp %s.out %s.out'%(lowzroot,outroot))
    with open('%s.out'%highzroot,'r') as fin, open('%s.out'%outroot,'a') as fout:
        for line in fin:
            if not line.startswith('#'):
                print(line.replace('\n',''), file=fout)
    # combine the covariance matrices.  Each .covmat file holds the matrix
    # size followed by the flattened matrix.
    covhighz = np.loadtxt('%s.covmat'%highzroot,unpack=True)
    covlowz = np.loadtxt('%s.covmat'%lowzroot,unpack=True)
    # BUGFIX: np.sqrt returns a float; reshape() and array indexing with
    # floats raise TypeError on modern numpy, so cast the side lengths.
    lenlowz = int(round(np.sqrt(len(covlowz)-1)))
    lenhighz = int(round(np.sqrt(len(covhighz)-1)))
    covhighz = covhighz[1:].reshape(lenhighz,lenhighz)
    covlowz = covlowz[1:].reshape(lenlowz,lenlowz)
    with open('%s.covmat'%outroot,'w') as fout:
        print('%i'%(len(covhighz)+len(covlowz)), file=fout)
        shape = len(covhighz)+len(covlowz)
        for i in range(shape):
            for j in range(shape):
                if j < lenlowz and i < lenlowz:
                    print('%8.5e'%covlowz[j,i], file=fout)
                elif j >= lenlowz and i >= lenlowz:
                    print('%8.5e'%covhighz[j-lenlowz,i-lenlowz], file=fout)
                else:
                    # off-diagonal blocks: no cross-covariance between samples
                    print('%8.5e'%0, file=fout)
def gauss(x, x0, sigma):
    """Gaussian PDF at *x* with mean *x0* and width *sigma*.

    Thin alias around normpdf(), kept for backward compatibility.
    """
    return normpdf(x, x0, sigma)
def normpdf(x, mu, sigma):
    """Return the normal probability density at *x* for mean *mu* and
    standard deviation *sigma* (the sign of *sigma* is ignored)."""
    width = np.abs(sigma)
    z = (x - mu) / width
    return np.exp(-0.5 * z * z) / (np.sqrt(2 * np.pi) * width)
def gausshist(x, sigma=1, peak=1., center=0):
    """Unnormalized Gaussian profile: height *peak* at *center*, width *sigma*."""
    arg = (x - center) ** 2. / (2. * sigma ** 2.)
    return peak * np.exp(-arg)
def salt2mu(x1=None,x1err=None,
            c=None,cerr=None,
            mb=None,mberr=None,
            cov_x1_c=None,cov_x1_x0=None,cov_c_x0=None,
            alpha=None,beta=None,
            alphaerr=None,betaerr=None,
            M=None,x0=None,sigint=None,z=None,peczerr=0.00083):
    """Convert SALT2 light-curve fit parameters to distance moduli.

    Applies the Tripp relation mu = mB + alpha*x1 - beta*c + 19.36 with full
    error propagation (via the 'uncertainties' package) over each SN's
    mB/x1/c covariance matrix, the intrinsic scatter 'sigint', and
    peculiar-velocity plus 0.055*z redshift-dependent terms.

    All array arguments must have the same length.  Returns the tuple
    (mu, muerr) of numpy arrays.  NOTE(review): M is accepted but unused;
    the absolute-magnitude offset is hard-coded as 19.36.
    """
    from uncertainties import ufloat, correlated_values, correlated_values_norm
    alphatmp,betatmp = alpha,beta
    # promote alpha/beta to values-with-uncertainty
    alpha,beta = ufloat(alpha,alphaerr),ufloat(beta,betaerr)

    # Jacobian factor d(mB)/d(x0): converts x0 covariances to mB covariances
    sf = -2.5/(x0*np.log(10.0))
    cov_mb_c = cov_c_x0*sf
    cov_mb_x1 = cov_x1_x0*sf

    # NOTE(review): 'invvars' is computed but never used below -- presumably
    # a leftover from a chi^2-weighted version of this routine.
    invvars = 1.0 / (mberr**2.+ alphatmp**2. * x1err**2. + betatmp**2. * cerr**2. + \
                     2.0 * alphatmp * (cov_x1_x0*sf) - 2.0 * betatmp * (cov_c_x0*sf) - \
                     2.0 * alphatmp*betatmp * (cov_x1_c) )

    mu_out,muerr_out = np.array([]),np.array([])
    for i in range(len(x1)):
        # per-SN covariance matrix of (mB, x1, c)
        covmat = np.array([[mberr[i]**2.,cov_mb_x1[i],cov_mb_c[i]],
                           [cov_mb_x1[i],x1err[i]**2.,cov_x1_c[i]],
                           [cov_mb_c[i],cov_x1_c[i],cerr[i]**2.]])
        mb_single,x1_single,c_single = correlated_values([mb[i],x1[i],c[i]],covmat)
        mu = mb_single + x1_single*alpha - beta*c_single + 19.36
        # add intrinsic scatter in quadrature when provided
        if sigint: mu = mu + ufloat(0,sigint)
        # peculiar-velocity error mapped to magnitudes, plus 0.055*z term
        zerr = peczerr*5.0/np.log(10)*(1.0+z[i])/(z[i]*(1.0+z[i]/2.0))
        mu = mu + ufloat(0,np.sqrt(zerr**2. + 0.055**2.*z[i]**2.))
        mu_out,muerr_out = np.append(mu_out,mu.n),np.append(muerr_out,mu.std_dev)
    return(mu_out,muerr_out)
def writefitres(fitresobj, cols, outfile, append=False, fitresheader=None,
                fitresvars=None, fitresfmt=None):
    """Write selected rows of a fitres-like object to *outfile*.

    fitresobj: object whose attributes (named in *fitresvars*) are
        indexable arrays of equal length.
    cols: row indices to write.
    append: open the file in append mode and skip the header when True.
    fitresheader: header text written once when creating a new file.
    fitresfmt: %-format string applied to each row's tuple of values.
    """
    fout = open(outfile, 'a' if append else 'w')
    if not append:
        print(fitresheader, file=fout)
    for row in cols:
        values = tuple(fitresobj.__dict__[name][row] for name in fitresvars)
        print(fitresfmt % values, file=fout)
    fout.close()
if __name__ == "__main__":

    usagestring="""BEAMS method (Kunz et al. 2006) for PS1 data.
Uses Bayesian methods to estimate the true distance moduli of SNe Ia and
a second "other" species. In this approach, I'll estimate this quantity in
rolling redshift bins at the location of each SN, using a nominal linear
fit at z > 0.1 and a cosmological fit to low-z spec data at z < 0.1.
Additional options are provided to doBEAMS.py with the parameter file.
USAGE: snbeams.py [options]
examples:
"""
    import os
    import optparse

    sne = snbeams()

    # first parse: discover whether a parameter file was supplied
    parser = sne.add_options(usage=usagestring)
    options, args = parser.parse_args()

    if options.paramfile:
        # BUGFIX: the module is 'configparser' in Python 3 ('ConfigParser'
        # was the Python 2 name; the rest of this file is Python 3).
        import configparser
        config = configparser.ConfigParser()
        config.read(options.paramfile)
    else:
        config = None

    # second parse: option defaults may now come from the config file
    parser = sne.add_options(usage=usagestring, config=config)
    options, args = parser.parse_args()

    sne.options = options
    sne.verbose = options.verbose
    sne.clobber = options.clobber

    from scipy.optimize import minimize
    import emcee
    from astropy.cosmology import Planck13 as cosmo

    if options.mcsubset:
        # run the fit over a series of Monte-Carlo subsamples, writing each
        # result to '<outfile>_mc<i><ext>'
        outfile_orig = options.outfile[:]
        for i in range(options.nmcstart, options.nmc + 1):
            frfile = sne.mcsamp(options.fitresfile, i, options.mclowz,
                                options.subsetsize, options.lowzsubsetsize)
            name, ext = os.path.splitext(outfile_orig)
            options.outfile = '%s_mc%i%s' % (name, i, ext)
            sne.main(frfile)
    else:
        sne.main(options.fitresfile)
|
#ASSIGNMENT13

#QUESTION:1 Name and handle the exception occured in the following program:
# a=3
# if a<4:
# a=a/(a-3)
# print(a)
#SOLUTION:
#in above code there is an indentation error in line if a<4: and print(a).after resolving the
#indentation error the code give the output which is ZeroDivisionError
# NOTE(review): this unhandled version intentionally raises ZeroDivisionError
# (a-3 == 0), so execution stops here when run as-is.
a=3
if a<4:
    a=a/(a-3)
print(a)
#OUTPUT:ZeroDivisionError: division by zero

# after handling: the same code wrapped in try/except so the script continues
try:
    a=3
    if a<4:
        a=a/(a-3)
    print(a)
except Exception:
    print("Exception occur")
#QUESTION:2 Name and handle the exception occurred in the following program:
# NOTE(review): this unhandled demo intentionally raises IndexError (valid
# indices of a 3-element list are 0..2).
l=[1,2,3]
print(l[3])
#SOLUTION:
#The output of the above code is IndexError: list index out of range
try:
    l=[1,2,3]
    print(l[3])
except Exception:
    print("Exception occur")
#QUESTION:3 Program to depict Raising Exception
# try:
#     raise NameError("Hi there")  # Raise Error
# except NameError:
#     print "An exception"
#     raise  # To determine whether the exception was raised or not
#SOLUTION:
# In above code there is an error in print-Missing parentheses in call to 'print'.
# after solving this error the code is:
# NOTE(review): the bare 'raise' deliberately re-raises the NameError after
# handling it, so the script terminates here with "NameError: Hi there".
try:
    raise NameError("Hi there")
except NameError:
    print("An exception")
    raise
# The Exception was raised and the output is
#     raise NameError("Hi there")  # Raise Error
# NameError: Hi there
#QUESTION:4 What will be the output of the following code:
# Function which returns a/b
# def AbyB(a , b):
# try:
# c = ((a+b) / (a-b))
# except ZeroDivisionError:
# print "a/b result in 0"
# else:
# print c
# Driver program to test above function
# AbyB(2.0, 3.0)
# AbyB(3.0, 3.0)
#SOLUTION:
# In above code there is an error in print statements which is of missing parentheses which is
# support in python2 not in python3.After removing the error from print statements the code is:
def AbyB(a , b):
    """Print (a+b)/(a-b); print a fallback message instead when a == b."""
    try:
        quotient = (a + b) / (a - b)
    except ZeroDivisionError:
        print("a/b result in 0")
    else:
        print(quotient)

# Driver calls: the first prints -5.0, the second hits the except branch.
AbyB(2.0, 3.0)
AbyB(3.0, 3.0)
# and the output is
# -5.0
# a/b result in 0
#QUESTION:5 Write a program to show and handle following exceptions:
#1. Import Error
#SOLUTION:
# NOTE(review): 'club' is a deliberately nonexistent module, so this
# unhandled import intentionally raises ImportError and stops the script.
import club
print("hello world")

#after handling
try:
    import club
    print("hello world")
except Exception:
    print("Exception occured")
#2. Value Error
#SOLUTION:
# BUGFIX: the original line was missing the closing parenthesis on int(...),
# which made this whole file a SyntaxError instead of demonstrating
# ValueError as intended.
n=int(input("Enter a number: "))
print("number is ",n)

# after handling: int() raises ValueError for non-numeric input
try:
    n=int(input("Enter a number: "))
    print("number is ",n)
except Exception:
    print("Exception occur for value error")
#3. Index Error
#SOLUTION:
# NOTE(review): unhandled demo; raises IndexError on purpose.
l=[1,2,3]
print(l[3])
#The output of the above code is IndexError: list index out of range

#after handling
try:
    l=[1,2,3]
    print(l[3])
except Exception:
    print("Exception occur")
#QUESTION:6 Create a user-defined exception AgeTooSmallError() that
# warns the user when they have entered age less than 18.
# The code must keep taking input till the user enters the appropriate age number(less than 18).
#SOLUTION:
class AgeTooSmallError(Exception):
    # Raised when the entered age is below 18.
    pass

a=1
# keep prompting until a valid age (>= 18) is entered
while True:
    print("you have to enter the age 18 or more than 18")
    try:
        a=int(input("enter the age:"))
        if a<18:
            raise AgeTooSmallError()
        print("Correct")
        break
    except Exception:
        # catches both AgeTooSmallError and ValueError from int()
        print("Incorrect Age")
|
import pygame, sys, time
from pygame.locals import *

pygame.init()

Display_Width = 800
Display_Height = 600
DW_Half = Display_Width / 2
DH_Half = Display_Height / 2
Display_Area = Display_Width * Display_Height

DS = pygame.display.set_mode((Display_Width, Display_Height))

x = 0
y = 0

def event_handler():
    """Drain the pygame event queue; quit on window close or ESC.

    Returns the drained events so the main loop can inspect them too
    (backward compatible: the original returned None and no caller used it).
    """
    events = pygame.event.get()
    for event in events:
        if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
            pygame.quit()
            sys.exit()
    return events

# Main loop.  BUGFIX: the original referenced the handler-local 'event'
# variable at module level (a NameError) and only called event_handler()
# after a 'w' key check, so quit events were never processed.  The handler
# now runs once per frame and key events are read from its return value.
while True:
    for event in event_handler():
        if event.type == KEYDOWN and event.key == K_w:
            pass  # 'w' pressed; the original had no action here either
    pygame.display.update()
    DS.fill([0, 0, 0])
|
# Electricity bill: a flat per-unit rate chosen by consumption slab.
unit=int(input("enter no.of units consumed="))
if(unit>=1 and unit<=50):
    Rate=unit*3
elif(unit>=51 and unit<=100):
    Rate=unit*6
# BUGFIX: this slab started at 100, overlapping the previous one (the
# overlap was unreachable but masked the intended 101-150 range).
elif(unit>=101 and unit<=150):
    Rate=unit*9
elif(unit>=151 and unit<=200):
    Rate=unit*12
else:
    # covers unit > 200 and also unit <= 0 input
    Rate=unit*15
print("unit=",unit)
print("Rate=",Rate)
|
import sys
import mysql.connector
import base64

# Health-check: try to open a connection to the local 'laravel' database and
# exit 0 on success, 2 on failure.  The credentials are stored base64-encoded
# -- that is obfuscation only, NOT encryption.
try:
    connection = mysql.connector.connect(
        host='localhost',
        database='laravel',
        # BUGFIX: b64decode returns bytes in Python 3; decode to str for the
        # connector's user/password arguments.
        user=base64.b64decode('cm9vdA==').decode('utf-8'),
        password=base64.b64decode('YWxlZl8xMjM0NQ==').decode('utf-8'))
    if connection.is_connected():
        # (typo 'sucessful' fixed)
        print("Connection to the MySQL database is successful")
        connection.close()
        sys.exit(0)
except Exception as e:
    print("Connection to the MySQL database failed")
    sys.exit(2)
|
import unirest
from core.models import SMSOutgoing
from core.models import Transaction
import functools
import urllib
import SMS_MESSAGES
import re
SMS_URL= 'http://shahz.pagekite.me/sendsms'
def resend_sms(sms):
    # Retry sending a previously failed SMSOutgoing record via the gateway;
    # the async callback flips sms.status to 1 on success.  (Python 2 module.)
    unirest.get(SMS_URL, params = {'phone':urllib.pathname2url(sms.reciever), 'text':sms.message}, callback = functools.partial(callback_resend, sms))
def callback_resend(sms, response):
    # Async callback for resend_sms: mark the SMS as sent only when the
    # gateway acknowledged with 'SENT!' in its response body.
    print response.body
    if 'SENT!' in response.body:
        sms.status = 1
        sms.save()
def send_sms(to, message, ad):
    # Fire-and-forget send through the SMS gateway; persistence of the
    # outgoing record happens in callback().
    # to phone number must be in URI. for some reason unirest does not decode it!
    unirest.get(SMS_URL, params = {'phone':urllib.pathname2url(to), 'text':message}, callback = functools.partial(callback, to, message, ad))
def callback(to, message,ad, response):
    # Async callback for send_sms: log the exchange and persist an
    # SMSOutgoing row; status 1 = sent ('SENT!' ack), 0 = failed.
    print 'response'
    print response.body
    print 'to'
    print to
    print 'message'
    print message
    status = 0
    if 'SENT!' in response.body:
        status = 1
    sms = SMSOutgoing(reciever=to, message=message,status = status, topup=None, type= 11, ad =ad)
    sms.save()
def parse_sms(sender, message, smsincoming):
    # Route an incoming SMS (Python 2 module).  Messages from the easypaisa
    # gateway number are parsed as payment notifications; any other non-3737
    # sender is treated as a customer replying with a secret code.
    # smsincoming: the stored incoming-SMS row, linked to Transactions.
    if sender == '+923404902633': # most import if condition to verify.
    # if 1 == 1: # remove this line when upar wali condition activated.
        message = message.lower()
        print message
        if 'easypaisa' in message: # this is actually a easypaisa msg
            if 'with easypaisa account' in message and 'cnic' not in message:
                # ACCOUNT TO ACCOUNT: extract amount, sender name (up to 3
                # words before 'with') and the 11-digit 03xx phone number
                msg_list = message.split(' ')
                index = msg_list.index('received')
                index += 2
                money = float(msg_list[index])
                index = msg_list.index('from')
                index += 1
                name= []
                name.append(msg_list[index])
                if msg_list[index+1] != 'with':
                    name.append(msg_list[index+1])
                if msg_list[index+2] != 'with':
                    name.append(msg_list[index+2])
                if msg_list[index+3] != 'with':
                    name.append(msg_list[index+3])
                index_of_num = message.find('03')
                number = message[index_of_num:index_of_num+12]
                print 'money'
                print money
                print 'name'
                print name
                print 'number'
                print number
            elif 'account' in message and 'cnic' not in message:
                print 'dsds'
                # CNIC TO WALLET
                pass
            elif 'account' not in message and 'cnic' in message:
                # CNIC TO CNIC: extract transaction id, amount and CNIC
                msg_list = message.split(' ')
                trx_id = int(msg_list[2].replace(".",""))
                print 'trx_id'
                print trx_id
                money = float(msg_list[msg_list.index('rs.')+1])
                print 'money'
                print money
                cnic = msg_list[msg_list.index('cnic')+1]
                print 'cnic'
                print cnic
                # LOOKUP CNIC TO topup.cnic
                n = Transaction.objects.filter(cnic=cnic, status=0).count()
                if n == 1:
                    # exactly one pending transaction for this CNIC
                    trc= Transaction.objects.get(cnic=cnic, status=0)
                    trc.money = money
                    trc.trx_id = trx_id
                    trc.sms = smsincoming
                    if trc.money >= trc.topup.money_paid:
                        # paid in full (status 1) or overpaid (status 6):
                        # ask both parties for the secret code
                        trc.status = 1
                        if trc.money > trc.topup.money_paid:
                            trc.status = 6
                        trc.save()
                        send_sms(trc.phone_number, SMS_MESSAGES.ask_secret_code_advertiser, trc.topup.ad)
                        send_sms(trc.topup.closed_by.phone_number, SMS_MESSAGES.ask_secret_code_agent, trc.topup.ad)
                    elif trc.money < trc.topup.money_paid:
                        # underpayment: record a separate status-4 transaction
                        Transaction(cnic= trc.cnic, status= 4, phone_number= trc.phone_number,
                                    trx_id=trx_id,topup=trc.topup,money=money,sms=smsincoming).save()
                else:
                    print 'mismatched payment'
                    # zero or multiple pending matches: log as status 5
                    Transaction(money=money, trx_id=trx_id,cnic=cnic,sms=smsincoming, status= 5).save()
                    # MAKE MISMATCHED PAYMENT here.
                    pass
            else:
                print 'some thing happened'
    elif sender != '3737' and 2 == 2:
        # customer reply: normalize '+92...' to the local '0...' form
        sender = '0'+sender[3:]
        print sender
        # NOTE(review): khoofia is an SRE match OBJECT (not the matched
        # string), and re.DEBUG dumps the compiled pattern to stdout --
        # confirm both are intended before trusting trc.secret_code's value.
        khoofia = re.search('(\[^0-9])*\d{5}(\[^0-9])*',message,re.DEBUG)
        n = Transaction.objects.filter(phone_number= sender, status=1).count()
        print n
        if n == 1 and khoofia is not None:
            # exactly one paid transaction awaiting its secret code
            trc= Transaction.objects.get(phone_number= sender, status=1)
            trc.secret_code = khoofia
            print 'khoofia'
            print trc.secret_code
            trc.status = 2
            trc.save()
            trc.topup.status = 5
            trc.topup.save()
            trc.topup.make_it_live()
        else:
            print 'Someone random is sending us texts LOL'
            # THIS IS A RANDOM NUMBER. SENDING US
            # TODO.
            pass
|
n = int(input("Digite um numero inteiro: "))

# Primality by trial division.
# BUGFIX: the original decided with 'n % divisor == 0' AFTER the loop, where
# divisor had been incremented one past the divisor that was found -- so
# composites like 6 and 12 (divisible by divisor+1 as well) printed "primo",
# and n = 1 printed "primo" because the loop never ran and 1 % 1 == 0.
# The 'condicao' flag already holds the answer for every n >= 2.
if n < 2:
    print("não primo")
else:
    divisor = 2
    condicao = True
    while divisor < n and condicao:
        if n % divisor == 0:
            condicao = False
        divisor = divisor + 1
    if condicao:
        print("primo")
    else:
        print("não primo")
|
"""Module containing the sender alias API of the v1 API."""
from flask import abort
from flask.views import MethodView
from .root import API_V1
from .models import SenderAlias
from ...db import DB
from ...db.models.sender_alias import SenderAlias as SenderAlias_DB
@API_V1.route("/sender_alias/")
class SenderAliasList(MethodView):
    """Root endpoint for all sender alias resources."""

    @API_V1.response(SenderAlias(many=True))
    def get(self):
        """Get all sender aliases"""
        return SenderAlias_DB.query.all()

    @API_V1.arguments(SenderAlias, description="The alias to add")
    @API_V1.response(SenderAlias, code=201)
    def post(self, new_data):
        """Add a new sender alias"""
        # new_data is the schema-deserialized request body (column values)
        item = SenderAlias_DB(**new_data)
        DB.session.add(item)
        DB.session.commit()
        return item
@API_V1.route("/sender_alias/create_many")
class SenderAliasCreateMany(MethodView):
    """Endpoint to create many aliases in one request."""

    @API_V1.arguments(SenderAlias(many=True), description="The aliases to add")
    @API_V1.response(SenderAlias(many=True), code=201)
    def post(self, new_data):
        """Add new sender aliases"""
        # stage every row first and commit once, so the batch is atomic
        items = []
        for data in new_data:
            item = SenderAlias_DB(**data)
            DB.session.add(item)
            items.append(item)
        DB.session.commit()
        return items
@API_V1.route("/sender_alias/replace")
class SenderAliasReplace(MethodView):
    """Endpoint to replace all sender aliases."""

    @API_V1.arguments(SenderAlias(many=True), description="The new list which should be set")
    @API_V1.response(code=204)
    def post(self, new_data):
        """Replace all sender aliases with the given list."""
        # delete + re-insert in a single transaction; commit only after the
        # new rows are staged so a failure leaves the old data intact
        SenderAlias_DB.query.delete()
        for data in new_data:
            item = SenderAlias_DB(**data)
            DB.session.add(item)
        DB.session.commit()
@API_V1.route("/sender_alias/<sender_alias_id>/")
class SenderAlias(MethodView):
    """Endpoint for a single sender alias resource.

    NOTE(review): this class name shadows the imported SenderAlias schema
    after definition.  The schema references in the decorators below still
    resolve correctly (they are evaluated while the class body runs, before
    the class name is bound), but the shadowing is fragile -- consider
    renaming one of the two.
    """

    @API_V1.doc(responses={'404': {'description': 'When requested sender alias is not found'}})
    @API_V1.response(SenderAlias())
    def get(self, sender_alias_id):
        """ Get a single sender alias """
        item = SenderAlias_DB.query.filter(SenderAlias_DB.id == sender_alias_id).first()
        if item is None:
            abort(404, "Requested sender alias not found.")
        return item

    @API_V1.arguments(SenderAlias, description="The new values for the alias")
    @API_V1.response(SenderAlias())
    def put(self, sender_alias_id, update_data):
        """ Update a single sender alias """
        item = SenderAlias_DB.query.filter(SenderAlias_DB.id == sender_alias_id).first()
        if item is None:
            abort(404, "Requested sender alias not found.")
        item.update(update_data)
        DB.session.commit()
        return item

    @API_V1.response(code=204)
    def delete(self, sender_alias_id):
        """ Delete a single sender alias """
        # deleting a missing alias is a no-op (still responds 204)
        item = SenderAlias_DB.query.filter(SenderAlias_DB.id == sender_alias_id).first()
        if item is None:
            return
        DB.session.delete(item)
        DB.session.commit()
|
def Create(Database, Cursor, table, dict, log=False):
    """Insert one row into *table* and commit.

    dict: column-name -> value mapping for the new row.
    log: when True, print the SQL statement before executing it.

    SECURITY: values are now passed as query parameters instead of being
    quoted into the SQL string, which was vulnerable to SQL injection and
    broke on values containing quotes.  Uses MySQL-style '%s' placeholders
    (this module's ResetId() is MySQL-specific).  *table* and the column
    names are still interpolated and must come from trusted code.
    """
    columns = list(dict.keys())
    values = list(dict.values())
    placeholders = ','.join(['%s'] * len(values))
    SQLStatement = f"INSERT INTO {table} ({','.join(columns)}) VALUES ({placeholders})"
    if log:
        print(SQLStatement)
    Cursor.execute(SQLStatement, values)
    Database.commit()
def Read(Database, Cursor, table, id='All', columns='All', log=False):
    """Fetch rows from *table* and return Cursor.fetchall()'s list.

    id: 'All' for every row, or an int primary-key value.
    columns: 'All' for every column, or a list of column names.
    log: when True, print the SQL statement before executing it.
    Raises ValueError when *id* is neither 'All' nor an int.

    (*Database* is unused but kept for signature consistency with the other
    CRUD helpers.  *table*/column names are interpolated into the SQL and
    must come from trusted code.)
    """
    col_sql = '*' if columns == 'All' else ','.join(columns)
    SQLStatement = f"SELECT {col_sql} FROM {table}"
    if id != 'All':
        if not isinstance(id, int):
            raise ValueError()
        SQLStatement += f" WHERE id={id}"
    if log:
        print(SQLStatement)
    Cursor.execute(SQLStatement)
    return Cursor.fetchall()
def Update(Database, Cursor, table, id, dict, log=False):
    """Update the row with primary key *id* in *table* and commit.

    dict: column-name -> new-value mapping.
    log: when True, print the SQL statement before executing it.
    Raises ValueError when *id* is not an int.

    SECURITY: values are now passed as query parameters (MySQL-style '%s'
    placeholders) instead of being quoted into the SQL string; *table* and
    the column names are still interpolated and must come from trusted code.
    """
    if not isinstance(id, int):
        raise ValueError()
    assignments = ', '.join(f"{column} = %s" for column in dict)
    SQLStatement = f"UPDATE {table} SET {assignments} WHERE id={id}"
    if log:
        print(SQLStatement)
    Cursor.execute(SQLStatement, list(dict.values()))
    Database.commit()
def Delete(Database, Cursor, table, id='All', log=False):
    """Delete rows from *table*, commit, then renumber ids via ResetId().

    id: 'All' (delete every row), a list of int ids, or a single int id.
    log: when True, print the SQL statement before executing it.
    Raises ValueError for any other *id* type.
    """
    if id == 'All':
        SQLStatement = f"DELETE FROM {table}"
    elif isinstance(id, list):
        # (the original also built an unused copy of this list -- removed)
        SQLStatement = f"DELETE FROM {table} WHERE id IN ({', '.join(str(i) for i in id)})"
    elif isinstance(id, int):
        SQLStatement = f"DELETE FROM {table} WHERE id={id}"
    else:
        raise ValueError()
    if log:
        print(SQLStatement)
    Cursor.execute(SQLStatement)
    Database.commit()
    # compact the id sequence after the deletion
    ResetId(Database=Database, Cursor=Cursor, table=table)
def ResetId(Database, Cursor, table):
    """Renumber *table*'s id column 1..N and reset AUTO_INCREMENT.

    MySQL-specific: relies on the session variable @autoid and the
    ALTER TABLE ... Auto_Increment syntax.
    """
    statements = (
        "set @autoid :=0",
        f"update {table} set id = @autoid := (@autoid+1)",
        f"alter table {table} Auto_Increment = 1",
    )
    for statement in statements:
        Cursor.execute(statement)
    Database.commit()
|
# *_* coding=utf8 *_*
"""
Description: install script for the Unreal Proxy.
Author: Tang Wanwan
"""
import setuptools

setuptools.setup(
    name="unreal",
    version="2013.8",
    author="Tang",
    description="Tang Wanwan's Unreal proxy for bad things.",
    # BUGFIX: 'requirements' is not a setup() keyword (it was silently
    # ignored) and the two packages were joined in one string; use
    # install_requires with one string per dependency.
    install_requires=["eventlet", "tornado"],
    packages=setuptools.find_packages(exclude=['tests', 'bin']),
    scripts=['bin/unreal-server'],
)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
PROBLEMA:
Projete (design) uma animação de um semáforo.
Seu programa deve mostrar um semáforo que é vermelho, depois verde,
depois amarelo, depois vermelho, etc. Para este programa, a definição
de dados do estado mutável do mundo deve ser uma enumeração.
Para fazer as luzes mudarem a uma velocidade razoável, você pode usar
a opção 'frequencia' do big_bang. Por exemplo, 'big_bang(inic, frequencia=1, ...)' fará o
big-bang esperar 1 segundo antes de chamar a função no 'quando_tick'.
Lembre-se de seguir a receita de Como Projetar Mundos e as demais!
Não esqueça também de fazer a análise de domínio antes de começar a
trabalhar no código.
Dica1: a parte da definição de dados já foi feita em aula! Apenas revise-a e utilize no projeto.
Dica2: o ideal é não utilizar a imagem como o valor do tipo de dado semáforo, mas apenas como a
representação gráfica (como no caso do gato, que é apenas um número).
Imagens de exemplo na pasta (vc pode colocar outras imagens se preferir)
'''
from htdp_pt_br.universe import *
LARGURA = 215
ALTURA = 470
FREQUENCIA = 1
tela = criar_tela_base(LARGURA,ALTURA)
'''
CorSemaforo é uma dessas:
- "verde"
- "amarelo"
- "vermelho"
interp. a cor do semaforo
'''
# estado inicial
cor = "vermelho"
VERMELHO = carregar_imagem('semaforo1.png')
AMARELO = carregar_imagem('semaforo2.png')
VERDE = carregar_imagem('semaforo3.png')
ERRO_COR = "Erro: cor invalida"
# cor -> cor
def proxima_cor(cor):
    """Return the CorSemaforo following *cor* in the traffic-light cycle
    (verde -> amarelo -> vermelho -> verde); ERRO_COR for unknown values.

    :param cor: CorSemaforo
    :return: CorSemaforo (next state)
    """
    ciclo = {"verde": "amarelo", "amarelo": "vermelho", "vermelho": "verde"}
    if cor in ciclo:
        return ciclo[cor]
    return ERRO_COR
def parar(tecla):
    """Return True when *tecla* is the ESC key (stops the animation).

    :param tecla: key code
    :return: bool
    """
    return tecla == pg.K_ESCAPE
def desenha(cor):
    '''
    Render the traffic-light image matching the current state.

    :param cor: CorSemaforo ("verde" | "amarelo" | "vermelho")
    :return: Imagem (the shared 'tela' surface with the matching lamp drawn)

    NOTE(review): falls through and returns None for any other value --
    callers appear to rely on cor always being one of the three states.
    '''
    if cor == "verde":
        colocar_imagem(VERDE, tela, LARGURA / 2, ALTURA // 2)
        return tela
    elif cor == "amarelo":
        colocar_imagem(AMARELO, tela, LARGURA / 2, ALTURA // 2)
        return tela
    elif cor == "vermelho":
        colocar_imagem(VERMELHO, tela, LARGURA / 2, ALTURA // 2)
        return tela
|
#!/usr/bin/env python3
# Shift every cue timestamp in a WebVTT subtitle file backwards by a fixed
# number of seconds.  Usage: script.py <input> <output> <seconds>
from datetime import datetime, timedelta
import sys

input_file = sys.argv[1]
output_file = sys.argv[2]
TIME_JUMP_IN_SECONDS = int(sys.argv[3])

with open(input_file) as src, open(output_file, 'w') as dest:
    shift = timedelta(seconds=TIME_JUMP_IN_SECONDS)
    for raw in src:
        raw = raw.strip()
        if "-->" not in raw:
            # non-timing line (cue text, headers, blanks): copy unchanged
            print(raw, file=dest)
            continue
        # timing line: shift both endpoints; [:-3] trims microseconds to ms
        start_text, end_text = raw.split(' --> ')
        start = datetime.strptime(start_text, '%H:%M:%S.%f') - shift
        end = datetime.strptime(end_text, '%H:%M:%S.%f') - shift
        print('{} --> {}'.format(start.strftime('%H:%M:%S.%f')[:-3],
                                 end.strftime('%H:%M:%S.%f')[:-3]), file=dest)
|
import unittest
from value_objects import EntityMixin
from value_objects.util.testing import assert_unequal_objects_but_equal_strings, assert_unequal_objects_and_strings
# ============================================================================
# Person
# ============================================================================
class Person( EntityMixin ):
    # Minimal test fixture.  NOTE(review): the constructor deliberately
    # ignores its arguments -- presumably EntityMixin captures them via
    # metaclass/__init_subclass__ magic; confirm against
    # value_objects.EntityMixin before relying on this.
    def __init__( self, name, age ):
        pass
# ============================================================================
# test
# ============================================================================
class EntityMixinTestCase( unittest.TestCase ):
    def test_entity_mixin_example( self ):
        # Entities compare by identity: two equal-looking instances print
        # the same but are never equal; once their state diverges, their
        # string forms diverge too.
        bob1 = Person( name = 'bob', age = 40 )
        bob2 = Person( name = 'bob', age = 40 )

        # double check that we have different values but they print the same
        assert_unequal_objects_but_equal_strings( bob1, bob2 )

        # mutate
        bob2.age = 41

        # now they shouldn't print the same
        assert_unequal_objects_and_strings( bob1, bob2 )
|
# Collect the file names containing the date stamp "150905".
# NOTE(review): 'filelist' is not defined in this snippet -- presumably it
# is bound elsewhere (interactive session or truncated script); verify
# before running.
x= "150905"
result = [i for i in filelist if x in i]
# _*_ coding:UTF-8_*_
from bs4 import BeautifulSoup
from multiprocessing import Process, Queue, Event
import requests, random, codecs, time, re, os
DIR_PATH = 'G:\\crawl\\zhihu\\content'
PAGE_QUEUE = Queue(25)
IMG_QUEUE = Queue(80)
class DerivedProcess(Process):
    '''
    multiprocessing.Process subclass that runs an arbitrary callable and
    stores its return value in self.result for later retrieval.
    '''

    #args is a tuple, not a non-keyword variable-length argument
    def __init__(self, func, args, name = ''):
        Process.__init__(self)
        self.func = func
        self.args = args
        self.name = name

    def get_result(self):
        # BUGFIX: was 'slef.result', a NameError on every call
        return self.result

    def run(self):
        # BUGFIX: apply() no longer exists in Python 3; argument unpacking
        # is the portable (Py2 and Py3) equivalent
        self.result = self.func(*self.args)
class GoDownloadPage(object):
    '''
    Used to download pages, and parse page information.

    Runs three cooperating processes (page download -> answer parsing ->
    image fetching) connected by PAGE_QUEUE and IMG_QUEUE.  Python 2 code
    (print statements, `except X, e` syntax).
    '''
    # desktop Chrome UA + referer so zhihu serves the normal collection page
    user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) \
AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.110 Safari/537.36'
    headers = {
        'User-Agent' : user_agent,
        'Referer' : 'https://www.zhihu.com/collection/19923662'
    }

    def __init__(self):
        self.base_url = 'https://www.zhihu.com/collection/19923662'
        # sentinel markers passed through the queues
        self.end_sentinel = 'queue_end'       # no more work
        self.change_author = 'next_author'    # advance to the next author dir
        self.stroage_engine = storageEngine() # (sic) filesystem writer
        self.anonymous_mark = {}  # question id -> count of anonymous answers
        self.author_path_q = Queue()  # target dirs for parseImgs

    #Create three processes, download the page, parse the page, store the information
    def startCrawl(self):
        #Create the event object that will be used to signal startup
        # NOTE(review): all three names are bound to the SAME Event object,
        # so each wait() below unblocks as soon as ANY stage calls set() --
        # confirm whether three separate Events were intended.
        self.download_url_evt = self.parse_details_evt = self.parse_imgs_evt = Event()
        download_url = DerivedProcess(self.downloadUrl, (self.base_url, PAGE_QUEUE))
        parse_details = DerivedProcess(self.parsePages, (PAGE_QUEUE, IMG_QUEUE))
        parse_imgs = DerivedProcess(self.parseImgs, (IMG_QUEUE, ))
        download_url.daemon = parse_details.daemon = parse_imgs.daemon = True
        download_url.start()
        parse_details.start()
        parse_imgs.start()
        #Wait for the process to start
        self.download_url_evt.wait()
        print 'downloadUrl is running'
        self.parse_details_evt.wait()
        print 'parseDetails is running'
        self.parse_imgs_evt.wait()
        print 'ParseImgs is running'
        download_url.join()
        parse_details.join()
        parse_imgs.join()

    def downloadUrl(self, url, page_out_q, headers = headers):
        # Stage 1: fetch every page of the collection, pushing parsed
        # BeautifulSoup documents onto page_out_q; ends with end_sentinel.
        print 'downloadUrl starting'
        self.download_url_evt.set()
        while url:
            content = requests.get(url, headers = headers).content
            current_dom_soup = BeautifulSoup(content)
            page_out_q.put(current_dom_soup)
            page_list = current_dom_soup.find('div', attrs = {'class' : 'zm-invite-pager'})
            #Find next page url
            next_page = page_list.find_all('span')[-1].find('a')
            if next_page:
                #Next page url
                url = self.base_url + '/' + next_page.get('href')
            else:
                url = None
        #Put the sentinel on the queue to indicate completion
        page_out_q.put(self.end_sentinel)

    def parsePages(self, page_in_q, img_out_q):
        # Stage 2: walk each page's question blocks, write answer text to
        # disk and forward image-URL lists to img_out_q.
        print 'parseDetails starting'
        self.parse_details_evt.set()
        while True:
            #Get current document soup
            soup = page_in_q.get()
            #Check for termination
            if soup == self.end_sentinel:
                page_in_q.put(self.end_sentinel)
                break
            question_list = soup.find_all('div', attrs = {'class' : 'zm-item'})
            for question_info in question_list:
                question_title = question_info.find('a', attrs = {'target' : '_blank'}).getText()
                #Replace the special characters in the title to create the folder
                question_title = re.sub(u'[<\ /:*:"?|?.>]', '', question_title)
                #Create a question catalog
                question_path = self.stroage_engine.mkDir(question_title)
                self.parseAnswers(question_info, question_title, img_out_q)
                #Change author path
                img_out_q.put(self.change_author)
        # Put the sentinel on the queue to indicate completion
        self.author_path_q.put(self.end_sentinel)
        img_out_q.put(self.end_sentinel)

    def parseAnswers(self, tag, dir_path, img_out_q):
        # Extract one answer's author, vote count, signature, body text and
        # embedded image URLs; text is saved here, images go to img_out_q.
        answer_content = {}
        try:
            author = tag.find('a', attrs = {'class' : 'author-link'}).getText()
            answer_file_path = dir_path + '\\' + author
        except AttributeError, e:
            # no author link: anonymous answer; disambiguate with a counter
            author = u'匿名用户'
            bg_signature = ''
            #update anonymous mark
            question_id = tag.find('a', attrs = {'target' : '_blank'}).get('href').split('/')[-1]
            if self.anonymous_mark.get(question_id, None):
                self.anonymous_mark[question_id] += 1
            else:
                self.anonymous_mark[question_id] = 1
            answer_file_path = dir_path + '\\' + author + '-' + str(self.anonymous_mark[question_id])
        else:
            try:
                bg_signature = tag.find('span', attrs = {'class' : 'bio'}).getText().strip()
            except AttributeError, e:
                bg_signature = ''
        finally:
            answer_vote = tag.find('a', attrs = {'class' : 'zm-item-vote-count'}).getText()
            answer_text = tag.find('textarea', attrs = {'class' : 'content'}).getText()
            answer_content['vote'] = u'{vote_count}赞同'.format(vote_count = answer_vote.strip())
            answer_content['author'] = author.strip()
            answer_content['signature'] = bg_signature.strip()
            #Create a answer catalog
            answer_file_path = self.stroage_engine.mkDir(answer_file_path)
            self.author_path_q.put(answer_file_path)
            #Separate the image from the text
            pattern = re.compile(r'(?<=data-original=").*?\.[jp][pn]g')
            img_urls = re.findall(pattern, answer_text)
            img_out_q.put(img_urls)
            answer_text = re.sub(u'</p>', '\n', answer_text)
            answer_text = re.sub(u'<.*?>', '', answer_text)
            answer_content['text'] = answer_text.strip()
            self.stroage_engine.saveAnswers(answer_content, answer_file_path, author)

    def parseImgs(self, img_in_q):
        # Stage 3: download every queued image URL into the author directory
        # currently at the head of author_path_q.
        print 'parseImgs starting'
        self.parse_imgs_evt.set()
        img_dir_path = self.author_path_q.get()
        while True:
            #Get image url
            img_urls = img_in_q.get()
            #Check for author path
            if img_urls == self.change_author:
                img_dir_path = self.author_path_q.get()
                if img_dir_path == self.end_sentinel:
                    break
                else:
                    continue
            #Check for termination
            if img_urls == self.end_sentinel:
                img_in_q.put(self.end_sentinel)
                break
            img_count = 1
            for img_url in img_urls:
                extension = self.stroage_engine.getExtension(img_url)
                img_path_name = img_dir_path + '\\' + str(img_count) + '.' + extension
                img_info = requests.get(img_url, verify = False).content
                try:
                    self.stroage_engine.saveImg(img_info, img_path_name)
                except ConnectionError, e:
                    print img_url
                    print e
                    img_count += 1
                    continue
                finally:
                    print img_count
                    img_count += 1
            print img_dir_path,
            print 'Done.'
class storageEngine:
    """
    Persists parsed page data under DIR_PATH: question/answer directories,
    answer text files and downloaded images.
    """
    def __init__(self):
        self.path = DIR_PATH
        if not self.path.endswith('\\'):
            self.path = self.path + '\\'
        # BUGFIX: was 'elif not self.path.exists(self.path)' -- str has no
        # .exists() (AttributeError) and the 'elif' meant the root directory
        # was never created whenever the separator branch above ran.
        if not os.path.exists(self.path):
            os.makedirs(self.path)

    def mkDir(self, path):
        """Create (if needed) and return the directory self.path + path."""
        path = path.strip()
        dir_path = self.path + path
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)
        return dir_path

    def saveAnswers(self, content_dict, dir_path, name):
        """Write one answer (author, signature, votes, text) to a UTF-8 file."""
        file_name = dir_path + '\\' + name + '.txt'
        with codecs.open(file_name, 'w+', encoding = 'utf-8') as answer_file:
            answer_file.write(u'{author}, {signature}\n{vote}\n{answer}\n'.format(
                author = content_dict['author'],
                signature = content_dict['signature'],
                vote = content_dict['vote'],
                answer = content_dict['text']))

    def saveImg(self, content, img_path_name):
        """Write raw image bytes to img_path_name."""
        with open(img_path_name, 'wb') as img_f:
            img_f.write(content)

    def getExtension(self, url):
        """Return the file extension (text after the last '.') of *url*."""
        return url.split('.')[-1]
|
import random

# Test-case generator for subtask 02_bonus/05: N weighted-random digits.
N = 5000
B = 926
C = 937

# relative weights for drawing 0, 1 and 2
chance_0 = 1
chance_1 = 1
chance_2 = 2

choices = [0] * chance_0 + [1] * chance_1 + [2] * chance_2

def gen_num(i):
    """Return one random digit 0/1/2 with the weights above (*i* unused)."""
    return random.choice(choices)

with open("../subtasks/02_bonus/05.in", 'w') as fout:
    fout.write("{} {} {}\n".format(N, B, C))
    fout.write(' '.join(str(gen_num(i)) for i in range(N)))
    fout.write("\n")
|
import tarefas
import escalonador
from random import randint  # NOTE(review): imported but unused

# The list of tasks handed to the scheduler.
listaTeste = []

# Create the four tasks we need to schedule.
tarefa1 = tarefas.Tarefa()
tarefa2 = tarefas.Tarefa()
tarefa3 = tarefas.Tarefa()
tarefa4 = tarefas.Tarefa()

# Assign their values, deliberately making some intervals collide.
tarefa1.nome = "Divisão"
tarefa1.inicio = 1
tarefa1.fim = 3

tarefa2.nome = "Multiplicação"
tarefa2.inicio = 3
tarefa2.fim = 5

tarefa3.nome = "Raíz quadrada"
tarefa3.inicio = 6
tarefa3.fim = 9

tarefa4.nome = "Outra conta"
tarefa4.inicio = 2
tarefa4.fim = 6

# Add all of them to the list (intentionally out of start-time order).
listaTeste.append(tarefa1)
listaTeste.append(tarefa4)
listaTeste.append(tarefa2)
listaTeste.append(tarefa3)

# Run the scheduler so the whole process is actually executed.
escalonador.escalonar(listaTeste)
from kivy.app import App
from kivy.uix.button import Button
from kivy.uix.textinput import TextInput
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.label import Label
from kivy.core.text import LabelBase
#font file needs to be in the folder
LabelBase.register(name="Dodger", fn_regular= "dodger3condital.ttf")
LabelBase.register(name="Roboto", fn_regular= "RobotoSlab-Regular.ttf")
import serial
import threading
import serial.tools.list_ports
import pyglet
import multiprocessing
from apscheduler.schedulers.background import BackgroundScheduler
import os
SERIAL_PORT = ""
def port_listB():
    # Worker for the 'list ports' button: enumerate connected serial devices
    # and display them in label2 (a module global bound by
    # SerialTestWindow.port_list before this thread starts).
    try:
        comlist = serial.tools.list_ports.comports()
        connected = []
        for element in comlist:
            connected.append(element.device)
        print("Connected COM ports: " + str(connected))
        label2.text = "Connected COM ports: " + str(connected)
    except:
        label2.text = 'serial problem'
        print('serial problem')
# display names for the 12 gamepad buttons, indexed like joystick.buttons
button = [" X "," A "," B "," Y "," Lb "," Rb "," Lt "," Rt "," Back "," Start "," Hat_1 "," Hat_2 "]

def joystick():
    # Worker thread: poll the first pyglet joystick forever and mirror its
    # axes / hats / buttons into label5 (full state) and label10 (the last
    # non-neutral input, later forwarded to the serial queue by checkQueue).
    try:
        while True:
            joysticks = pyglet.input.get_joysticks()
            joystick = joysticks[0]
            joystick.open()
            #print(joystick.event_types)
            #joystick.register_event_type('on_joyaxis_motion',)
            #for k, v in vars(joystick).items():
            #print (k, v)
            # only report an axis when it has left its neutral (0) position
            if not int(joystick.x) == 0:
                label10.text = 'x'+str(joystick.x)
            if not int(joystick.y) == 0:
                label10.text = 'y'+str(joystick.y)
            if not int(joystick.z) == 0:
                label10.text = 'z'+str(joystick.z)
            if not int(joystick.hat_x) == 0:
                label10.text = 'hx'+str(joystick.hat_x)
            if not int(joystick.hat_y) == 0:
                label10.text = 'hy'+str(joystick.hat_y)
            for i in range(len(joystick.buttons)):
                if not (joystick.buttons[i]) == False:
                    label10.text = str(button[i])
            #Don't like this but it's the only way I got it to behave
            #For the mac the joystick needs to be in D
            #x and y on the hats are discrete
            #green light on the controller needs to be on and solid
            #if the joystick connection fails the program has to be restarted
            label5.text = "Direction Pad x "+str(joystick.x)+" y "+str(joystick.y)+"\n"+"Hats x "+str(joystick.hat_x)+" y "+str(joystick.hat_y)+" z "+str(joystick.z)+"\n"+"Buttons " + str(button[0]) + str(joystick.buttons[0]) + str(button[1]) + str(joystick.buttons[1])+ str(button[2]) + str(joystick.buttons[2])+ str(button[3]) + str(joystick.buttons[3])+ str(button[4]) + str(joystick.buttons[4])+ str(button[5]) + str(joystick.buttons[5])+"\n"+ str(button[6]) + str(joystick.buttons[6])+ str(button[7]) + str(joystick.buttons[7])+ str(button[8]) + str(joystick.buttons[8])+ str(button[9]) + str(joystick.buttons[9])+ str(button[10]) + str(joystick.buttons[10])+ str(button[11]) + str(joystick.buttons[11])
            #writing to the arduino will have to be handled with the apscheduler
    except:
        print('Joystick Failed')
        label5.text = 'Joystick Failed'
def port_contB():
    # Worker for the 'connect' button: spawn the SerialProcess that bridges
    # the two multiprocessing queues to the serial port named in SERIAL_PORT.
    # NOTE(review): the queues are passed as (output_queue, input_queue)
    # while SerialProcess.__init__ names them (input_queue, output_queue) --
    # confirm which orientation is intended.
    if SERIAL_PORT != "":
        try:
            global input_queue
            global output_queue
            global sp
            input_queue = multiprocessing.Queue()
            output_queue = multiprocessing.Queue()
            sp = SerialProcess(output_queue, input_queue)
            sp.daemon = True
            sp.start()
            label3.text = 'Connected'
            print('connected')
        except:
            label3.text = 'Serial Problem'
            print('serial problem')
            print(SERIAL_PORT)
    else:
        label3.text = 'Input Serialport Address'
        print('Input Serialport Address')
def arduino(self):
    """Start a background scheduler that polls the serial queue every second."""
    global sched
    # BUG FIX: the keyword was misspelled 'dameon', so the daemon flag was
    # never applied and the scheduler thread could keep the process alive.
    sched = BackgroundScheduler(daemon=True)
    sched.add_job(self.checkQueue, 'interval', seconds=1)
    try:
        sched.start()
        label6.text = "Arduino Streaming"
    except Exception:
        label6.text = "Streaming Failed"
        print("Streaming Failed")
class SerialTestWindow(FloatLayout):
    """Main kivy widget: wires the UI buttons to serial/joystick worker threads."""

    def term(self):
        # Kill the serial worker process, then hard-exit the whole app.
        sp.terminate()
        os._exit(-1)

    def send_command(self, value):
        # Writing to the arduino is handled by the apscheduler job instead.
        pass

    def checkQueue(self):
        # Drain one item from the serial input queue into label l9 and echo
        # the current joystick state (label10) back out to the arduino.
        if not input_queue.empty():
            global label9
            label9 = self.ids['l9']
            label9.text = str(input_queue.get())
            output_queue.put(label10.text)

    def writeQueue(x):
        # Writing is handled by the apscheduler job.
        # NOTE(review): declared without `self`; never called -- confirm.
        pass

    def port_list(self):
        global label2
        label2 = self.ids['l2']
        t1 = threading.Thread(target=port_listB)
        t1.daemon = True
        t1.start()

    def port_cont(self):
        global label3
        label3 = self.ids['l3']
        t2 = threading.Thread(target=port_contB)
        t2.daemon = True
        t2.start()

    def got_port(self):
        global SERIAL_PORT
        SERIAL_PORT = self.ids['l4'].text

    def got_joystick(self):
        global label5
        global label10
        label5 = self.ids['l5']
        label10 = self.ids['l10']
        t3 = threading.Thread(target=joystick)
        t3.daemon = True
        t3.start()

    def got_data(self):
        global label6
        label6 = self.ids['l6']
        # BUG FIX: the original used target=arduino(self), which *called*
        # arduino() synchronously on this thread and handed its None return
        # value to Thread.  Pass the callable and its argument instead.
        t4 = threading.Thread(target=arduino, args=(self,))
        t4.daemon = True
        t4.start()
class SerialProcess(multiprocessing.Process):
    """Worker process shuttling data between a serial port and two queues.

    NOTE(review): the caller instantiates this as
    SerialProcess(output_queue, input_queue), i.e. with the queues swapped
    relative to the parameter names here -- confirm which direction each
    queue is meant to flow.
    """

    def __init__(self, input_queue, output_queue):
        try:
            multiprocessing.Process.__init__(self)
            self.input_queue = input_queue
            self.output_queue = output_queue
            # 115200 baud, 1 s read timeout; SERIAL_PORT is a module global.
            self.sp = serial.Serial(SERIAL_PORT, 115200, timeout=1)
        except Exception:
            # Narrowed from a bare except: report but keep the object usable
            # enough for close() not to be the only escape.
            label3.text = 'serial problem'
            print('serial problem')
            print(SERIAL_PORT)

    def close(self):
        self.sp.close()

    def writeSerial(self, data1):
        try:
            return self.sp.write(data1)
        except Exception:
            pass

    def readSerial(self):
        try:
            return self.sp.readline()
        except Exception:
            pass

    def run(self):
        # Busy loop: forward queued commands out, queue up incoming lines.
        self.sp.flushInput()
        while True:
            if not self.input_queue.empty():
                try:
                    self.writeSerial(self.input_queue.get())
                except Exception:
                    pass
            if self.sp.inWaiting() > 0:
                try:
                    self.output_queue.put(self.readSerial())
                except Exception:
                    pass
class Serial_test_6_7_19App(App):
    """Kivy application entry point; the root widget is a SerialTestWindow."""

    def build(self):
        root = SerialTestWindow()
        return root
# Run the kivy app only when executed as a script.
if __name__ == '__main__':
    Serial_test_6_7_19App().run()
|
import boto3
def create_sns(sns_name, region_name='ap-south-1'):
    """Create an SNS topic and subscribe an HTTP endpoint to it.

    Args:
        sns_name: name of the topic to create (create_topic is idempotent).
        region_name: AWS region to create the topic in (generalized from the
            previously hard-coded 'ap-south-1', kept as the default).

    Returns:
        The ARN of the created topic (previously discarded).
    """
    conn = boto3.client('sns', region_name=region_name)
    # create topic
    response = conn.create_topic(Name=sns_name)
    # get arn
    topic_arn = response['TopicArn']
    # FIXME: 'specify' is a placeholder -- supply a real HTTP endpoint URL,
    # otherwise the Subscribe call fails with InvalidParameter.
    conn.subscribe(
        TopicArn=topic_arn,
        Protocol='HTTP',
        Endpoint='specify',
    )
    return topic_arn
create_sns('sns1')
|
# Import APM package
from apm import *

# Define server and application name
s = 'http://byu.apmonitor.com'
a = 'drill'

# Clear prior application and load the model file
apm(s, a, 'clear all')
apm_load(s, a, 'drilling.apm')

# Global solver settings
apm_option(s, a, 'apm.solver', 1)
apm_option(s, a, 'apm.max_iter', 200)

# Adjustable parameters (fixed values): Ro_a_1, then friction/density pairs
# for annulus segments 2..10, then the exponent n.  The loop replaces the
# 19 hand-written apm_info lines while preserving the exact order.
apm_info(s, a, 'FV', 'Ro_a_1')
for seg in range(2, 11):
    apm_info(s, a, 'FV', 'f_a_%d' % seg)
    apm_info(s, a, 'FV', 'Ro_a_%d' % seg)
apm_info(s, a, 'FV', 'n')

# Define MVs
for mv in ('z_choke', 'q_p', 'wob_sp', 'rpm'):
    apm_info(s, a, 'MV', mv)

# Define CVs
for cv in ('p_c', 'p_a_1', 'rop'):
    apm_info(s, a, 'CV', cv)

# Start with a steady state solution
apm_option(s, a, 'apm.imode', 1)
output = apm(s, a, 'solve')
print('Steady State Solution --------------')
print(output)

# Load data file
csv_load(s, a, 'drilling.csv')
apm_option(s, a, 'apm.csv_read', 1)
print('Dynamic Optimization Initialization --------------')

# Solve the dynamic-optimization initialization problem (imode 7)
apm_option(s, a, 'apm.imode', 7)
apm_option(s, a, 'apm.nodes', 2)
apm_option(s, a, 'apm.coldstart', 0)
output = apm(s, a, 'solve')
apm_get(s, a, 'results.csv')
print(output)
# Manipulated variable tuning
# MV: q_p (pump flow)
apm_option(s, a, 'q_p.status', 1)
apm_option(s, a, 'q_p.fstatus', 0)
apm_option(s, a, 'q_p.dmax', 0.1)      # rate of change limit
apm_option(s, a, 'q_p.dcost', 50000)   # movement penalty
apm_option(s, a, 'q_p.lower', 0.01)    # lower limit
apm_option(s, a, 'q_p.upper', 1.5)     # upper limit

# MV: z_choke (choke valve opening)
apm_option(s, a, 'Z_choke.status', 1)
apm_option(s, a, 'Z_choke.fstatus', 0)
apm_option(s, a, 'Z_choke.dmax', 0.1)
apm_option(s, a, 'Z_choke.dcost', 10000)
apm_option(s, a, 'Z_choke.lower', 0)
apm_option(s, a, 'Z_choke.upper', 1)

# MV: rpm (drill string rotation)
apm_option(s, a, 'RPM.status', 1)
apm_option(s, a, 'RPM.fstatus', 0)
apm_option(s, a, 'RPM.dmax', 10)
apm_option(s, a, 'RPM.dcost', 0.6)
apm_option(s, a, 'RPM.lower', 50)
apm_option(s, a, 'RPM.upper', 500)

# CV target dead-bands: (name, sphi, splo, wsphi, wsplo, status).  The loop
# replaces the 15 hand-written apm_option lines with one data table.
cv_targets = [
    ('p_c', 35.0, 30.0, 10.0, 10.0, 0),
    ('p_a_1', 315.0, 300.0, 10.0, 10.0, 1),
    ('rop', 100.0 * 0.304 / 3600.0, 95.0 * 0.304 / 3600.0, 10000.0, 10000.0, 1),
]
for name, sphi, splo, wsphi, wsplo, status in cv_targets:
    apm_option(s, a, name + '.sphi', sphi)
    apm_option(s, a, name + '.splo', splo)
    apm_option(s, a, name + '.wsphi', wsphi)
    apm_option(s, a, name + '.wsplo', wsplo)
    apm_option(s, a, name + '.status', status)

print('Dynamic Optimization --------------')
# Load in the new CSV file and solve the dynamic optimization problem
apm(s, a, 'clear csv')
csv_load(s, a, 'results.csv')
apm_option(s, a, 'apm.reqctrlmode', 3)
apm_option(s, a, 'apm.timeshift', 0)
apm_option(s, a, 'apm.imode', 6)
apm_option(s, a, 'apm.coldstart', 0)
output = apm(s, a, 'solve')
print(output)

# Retrieve the solution and plot choke vs bit pressure over time
y = apm_sol(s, a)
import matplotlib.pyplot as plt
plt.figure(1)
plt.plot(y['time'], y['p_c'], 'r-')
plt.plot(y['time'], y['p_a_1'], 'b--')
plt.legend(('Choke Pressure', 'Bit Pressure'))
plt.xlabel('Time (sec)')
plt.ylabel('Pressure (bar)')
plt.savefig('plt_Pressure.png')
plt.show()
|
#!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Mesh Structural Comparisons
=============================
::
In [4]: run mesh.py
INFO:__main__:base /usr/local/env/geant4/geometry/export/DayaBay_VGDX_20140414-1300/g4_00.96ff965744a2f6b78c24e33c80d3a4cd.dae/GMergedMesh/1
[[ 720 362 3199 3155]
[ 672 338 3200 3199]
[ 960 482 3201 3200]
[ 480 242 3202 3200]
[ 96 50 3203 3200]]
INFO:__main__:base /tmp/GMergedMesh/baseGeometry
[[ 720 362 3199 3155]
[ 672 338 3200 3199]
[ 960 482 3201 3200]
[ 480 242 3202 3200]
[ 96 50 3203 3200]]
WARNING:__main__:NO PATH /tmp/GMergedMesh/modifyGeometry/iidentity.npy
INFO:__main__:base /tmp/GMergedMesh/modifyGeometry
[[ 720 362 3199 3155]
[ 672 338 3200 3199]
[ 960 482 3201 3200]
[ 480 242 3202 3200]
[ 96 50 3203 3200]
[ 12 24 0 4294967295]]
WARNING:__main__:NO PATH /usr/local/env/geant4/geometry/export/dpib/cfg4.f7ba6061a8e024189e641c86eb847ee4.dae/GMergedMesh/0/aiidentity.npy
WARNING:__main__:NO PATH /usr/local/env/geant4/geometry/export/dpib/cfg4.f7ba6061a8e024189e641c86eb847ee4.dae/GMergedMesh/0/iidentity.npy
WARNING:__main__:NO PATH /usr/local/env/geant4/geometry/export/dpib/cfg4.f7ba6061a8e024189e641c86eb847ee4.dae/GMergedMesh/0/itransforms.npy
INFO:__main__:base /usr/local/env/geant4/geometry/export/dpib/cfg4.f7ba6061a8e024189e641c86eb847ee4.dae/GMergedMesh/0
[[ 0 0 0 4294967295]
[ 720 362 1 0]
[ 720 362 2 1]
[ 960 482 3 2]
[ 576 288 4 2]
[ 0 0 5 2]]
"""
import os, logging
import numpy as np
from opticks.ana.base import opticks_environment
from opticks.ana.mergedmesh import MergedMesh
import matplotlib.pyplot as plt
log = logging.getLogger(__name__)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
opticks_environment()
DPIB_ALL = os.path.expandvars("$IDPATH_DPIB_ALL");
DPIB_PMT = os.path.expandvars("$IDPATH_DPIB_PMT");
bases = ["$IDPATH/GMergedMesh/1",
"/tmp/GMergedMesh/baseGeometry",
"/tmp/GMergedMesh/modifyGeometry",
os.path.join(DPIB_ALL,"GMergedMesh/0"),
os.path.join(DPIB_PMT,"GMergedMesh/0"),
]
for base in bases:
base = os.path.expandvars(base)
if not os.path.exists(base):continue
mm = MergedMesh(base=base)
if base.find(DPIB_ALL)>-1 or base.find(DPIB_PMT)>0:
mm.node_offset = 1
else:
mm.node_offset = 0
pass
log.info("base %s " % base)
print "nodeinfo\n", mm.nodeinfo.view(np.int32)
nv = mm.nodeinfo[:,1].sum()
print "nvert %5d v.shape %s " % (nv, repr(mm.vertices.shape))
#print "ce\n", mm.center_extent
print "itransforms\n",mm.itransforms
print "iidentity\n",mm.iidentity
print "aiidentity\n", mm.aiidentity
|
from flask import Flask
import os
from blueprints.movies import movies
from model.movies import Movies
app = Flask(__name__)
app.movies = Movies()


@app.route('/')
def hello_world():
    """Health-check route used to verify the delivery pipeline."""
    return 'Hello continuous delivery'


app.register_blueprint(movies, url_prefix='/movies')

if __name__ == '__main__':
    # BUG FIX: os.getenv returns a *string*; werkzeug expects an int port.
    # Keep the original behaviour of falling back to Flask's default when
    # PORT is unset (port=None).
    port = os.getenv('PORT')
    app.run(host='0.0.0.0', port=int(port) if port else None)
|
from django import forms
from django.db.models import fields
from django.forms import DateInput
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from .models import Empleado, Equipo, Ticket
# Model form for Empleado (employee)
class EmpleadoForm(forms.ModelForm):
    class Meta:
        model = Empleado
        fields = '__all__'
        # Render the email field with a proper <input type="email">.
        widgets = {'email': forms.EmailInput()}
# Model form for Equipo (equipment)
class EquipoForm(forms.ModelForm):
    class Meta:
        model = Equipo
        fields = ['numero_de_serie',
                  'modelo',
                  'marca',
                  'tipo_equipo',
                  'fecha_adquisicion',
                  'fecha_puesta_en_marcha',
                  'proveedor_nombre',
                  'proveedor_tlf',
                  'planta']
        # Use DateInput (the widget meant for date fields) instead of
        # NumberInput with a type override; both render <input type="date">,
        # but DateInput also formats initial date values correctly.  Note
        # django.forms.DateInput was already imported at the top of the file
        # but never used.
        widgets = {
            'fecha_adquisicion': forms.DateInput(attrs={'type': 'date'}),
            'fecha_puesta_en_marcha': forms.DateInput(attrs={'type': 'date'}),
        }
# Model form for Ticket, with fixed choice sets for urgency/type/state
class TicketForm(forms.ModelForm):
    URGENCIA = (
        ("Alta", "Alta"),
        ("Media", "Media"),
        ("Baja", "Baja"),
    )
    TIPO = (
        ("Tipo1", "Tipo1"),
        ("Tipo2", "Tipo2"),
        ("Tipo3", "Tipo3"),
    )
    ESTADO = (
        ("Estado1", "Estado1"),
        ("Estado2", "Estado2"),
        ("Estado3", "Estado3"),
    )
    nivel_urgencia = forms.ChoiceField(choices=URGENCIA)
    tipo_ticket = forms.ChoiceField(choices=TIPO)
    estado_ticket = forms.ChoiceField(choices=ESTADO)

    class Meta:
        model = Ticket
        fields = '__all__'
        # DateInput renders <input type="date"> and, unlike the previous
        # NumberInput hack, formats initial date values correctly.
        widgets = {
            'fecha_apertura': forms.DateInput(attrs={'type': 'date'}),
            'fecha_resolucion': forms.DateInput(attrs={'type': 'date'}),
        }
class UserRegisterForm(UserCreationForm):
    """Registration form that blanks out Django's default help texts."""
    username = forms.CharField(max_length=100)
    password1 = forms.CharField(widget=forms.PasswordInput, label='Contraseña')
    password2 = forms.CharField(widget=forms.PasswordInput, label='Confirmar Contraseña')

    class Meta:
        model = User
        fields = ['username', 'email', 'password1', 'password2']
        # Suppress the built-in help text for every listed field.
        help_texts = dict.fromkeys(fields, "")
class LoginForm(forms.Form):
    """Simple username/password login form."""
    # Usuario
    usuario = forms.CharField(max_length=100)
    # Contraseña: use the dedicated PasswordInput widget instead of a
    # TextInput with a manual type="password" attribute -- identical markup,
    # and PasswordInput additionally avoids echoing the submitted value
    # back into a re-rendered form.
    contrasena = forms.CharField(widget=forms.PasswordInput())
import numpy as np
import scipy as sp
import matplotlib.pyplot as py
import random as rd
import math as m
# Instance-generation and solver parameters.
# NOTE: the original declared these with module-level `global` statements,
# which are no-ops at module scope and have been removed.
ylim = 200        # |y| bound for random customer coordinates
xlim = 200        # |x| bound for random customer coordinates
clim = 30         # maximum random demand per customer
nb_cust = 10      # default number of customers
kNN = 5           # neighbourhood size for the k-nearest-neighbour lists
Capacity = 175    # vehicle capacity
# Sentinel returned by eval_cand/best_cand when no feasible move exists.
Error = (0, (0, 0), ([[0], 0], [[0], 0]))
# Hand-built test instance: depot at index 0 plus 21 customers.
inst_test1 = [
    (0, 0), (38, 127), (78, -181), (139, -128), (-50, -148), (-60, 83),
    (-30, -14), (7, -65), (-11, 32), (-113, -161), (-166, -11), (-110, -134),
    (-75, 95), (9, 45), (-137, 58), (69, 175), (-121, 154), (70, 20),
    (-68, -73), (-165, 180), (59, 82), (-89, 60),
]
# Three initial routes: [visit order (starting at the depot), current load].
r1_test1 = [[0, 3, 6, 9, 12, 15, 18, 21], 128]
r2_test1 = [[0, 1, 4, 7, 10, 13, 16, 19], 147]
r3_test1 = [[0, 2, 5, 8, 11, 14, 17, 20], 129]
# Demand per node (index 0 is the depot, which demands nothing).
demand1 = [0, 25, 19, 26, 19, 22, 10, 7, 8, 14, 19, 10, 11, 24, 24, 27, 30,
           29, 18, 23, 17, 22]
# Edge used to seed the ejection-chain demo at the bottom of the file.
edge1 = (8, 11)
# Creation of a random instance
def create_instance(n):
    """Build a random instance with n customers split round-robin over 3 routes.

    Returns (inst, route1, route2, route3, demand) where inst[0] is the depot,
    inst[1:] the customers, each route starts at depot index 0, and demand is
    indexed by node id (demand[0] == 0 for the depot, matching demand1).
    """
    inst = [(0, 0)]
    # BUG FIX: add a zero depot entry so demand aligns with node indices,
    # consistent with the hand-built demand1 above.
    demand = [0]
    route1, route2, route3 = [0], [0], [0]
    for i in range(n):
        inst.append((rd.randint(-xlim, xlim), rd.randint(-ylim, ylim)))
        demand.append(rd.randint(0, clim))
        # BUG FIX: customer i is stored at inst index i + 1; the original
        # appended the loop index i, which inserted the depot (0) into
        # route1 a second time and left the last customer unrouted.
        cust = i + 1
        if i % 3 == 0:
            route1.append(cust)
        elif i % 3 == 1:
            route2.append(cust)
        else:
            route3.append(cust)
    return inst, route1, route2, route3, demand
# Plot the instance: depot in blue, customers in red.
def print_instance(inst):
    depot, customers = inst[0], inst[1:]
    py.plot(depot[0], depot[1], color='blue', marker='o')
    for cx, cy in customers:
        py.plot(cx, cy, color='red', marker='o')
def print_route(route, inst, c):
    """Plot one route as a closed polyline labelled 'route c'."""
    xs = [inst[node][0] for node in route]
    ys = [inst[node][1] for node in route]
    # Close the tour by returning to the first node (the depot).
    xs.append(inst[route[0]][0])
    ys.append(inst[route[0]][1])
    py.plot(xs, ys, label="route " + str(c))
def print_routes(routes, inst):
    """Plot every route, numbering them from 1."""
    for idx, route in enumerate(routes, start=1):
        print_route(route[0], inst, idx)
def print_current_sol(routes, inst):
    """Overlay the customer dots and every route of the current solution."""
    print_instance(inst)
    print_routes(routes, inst)
# Compute the cost of a solution
def distance(p1, p2):
    """Euclidean distance between two points."""
    dx = p2[0] - p1[0]
    dy = p2[1] - p1[1]
    return m.sqrt(dx * dx + dy * dy)


def cost_sol(routes, inst):
    """Total length of all routes, each closed back to its first node."""
    total = 0
    for route in routes:
        stops = route[0]
        for pos in range(len(stops) - 1):
            total += distance(inst[stops[pos]], inst[stops[pos + 1]])
        total += distance(inst[stops[-1]], inst[stops[0]])
    return total
# Compute the kNN for each node
def voisins(k, inst):
    """Return, for each node, the indices of its k nearest other nodes.

    Ties are broken by node index (the sort key is [distance, index]).
    """
    neighbours = []
    for i in range(len(inst)):
        ranked = sorted([distance(inst[i], inst[j]), j]
                        for j in range(len(inst)) if j != i)
        neighbours.append([pair[1] for pair in ranked][:k])
    return neighbours
# Find the route which contains customer i
def find_route(i, routes):
    """Return the route whose visit list contains node i (None if absent)."""
    for route in routes:
        if i in route[0]:
            return route
# Return the nearest route of the edge given. Can return -1 if nothing found.
def another_route(a, voisins, routes, demand):
    """Find a's nearest neighbour whose route can absorb a.

    Returns ((route_of_a, candidate_route), neighbour), or
    ((route_of_a, route_of_a), -1) when no capacity-feasible route exists.
    """
    home = find_route(a, routes)
    for neighbour in voisins[a]:
        candidate = find_route(neighbour, routes)
        if candidate != home and candidate[1] + demand[a] <= Capacity:
            return ((home, candidate), neighbour)
    return (home, home), -1
# Compute the saving of the new edge
def saving(i, ri, j, rj, inst):
    """Cost saved by moving customer ri[i] into route rj after position j.

    ri and rj are visit lists (node indices).  A trailing 0 (the depot) is
    temporarily appended to both lists so that i+1 / j+1 stay in range when
    i or j is the last position; both sentinels are popped before returning.
    """
    ri.append(0)
    rj.append(0)
    # Edges removed around ri[i] in its current route...
    s = distance(inst[ri[i]], inst[ri[i+1]])
    s += distance(inst[ri[i]], inst[ri[i-1]])
    # ...minus the edge that reconnects ri[i]'s old neighbours...
    s -= distance(inst[ri[i+1]], inst[ri[i-1]])
    # ...plus the rj edge being opened, minus the two new insertion edges.
    s += distance(inst[rj[j]], inst[rj[j+1]])
    s -= distance(inst[ri[i]], inst[rj[j]])
    s -= distance(inst[ri[i]], inst[rj[j+1]])
    ri.pop()
    rj.pop()
    return s
# Evaluate a possible next edge.
def eval_cand(edge, voisins, routes, inst, demand):
    """Score relocating one endpoint of edge; returns Error when infeasible."""
    (a, b) = edge
    # Relocate b unless it is the depot, in which case relocate a.
    moving = b if b != 0 else a
    (r1, r2), v = another_route(moving, voisins, routes, demand)
    if v < 0:
        return Error
    i_v, i = r2[0].index(v), r1[0].index(moving)
    return (saving(i, r1[0], i_v, r2[0], inst), (i, i_v), (r1, r2))
# Return the best relocation over the points p != np of the route,
# i.e. the point to relocate and the neighbour considered.
def best_cand(route, np, voisins, routes, inst, demand):
    # NOTE(review): the parameter name `np` shadows the module's numpy alias
    # inside this function; numpy is not used here, so behaviour is intact.
    scores = []
    for p in route:
        if p != np:
            pos = route.index(p)
            scores.append(eval_cand((route[pos - 1], p),
                                    voisins, routes, inst, demand))
    scores.sort()
    return scores[-1]
def ejection_chain(l, edge, voisins, routes, inst, demand):
    """Run an ejection chain of up to l relocations starting from edge.

    Mutates routes in place and returns either the mutated list or, when the
    chain's cumulative saving is negative, a pristine snapshot.
    NOTE(review): the caller's list has already been mutated at that point;
    only callers using the return value see the revert.
    """
    import copy

    S = 0  # cumulative saving of the chain (positive = improvement)
    # BUG FIX: np.copy on a ragged list of [visits, load] pairs builds an
    # object array whose elements still reference the original inner lists,
    # so the "initial" snapshot was corrupted by the in-place edits below
    # (and an ndarray, not a list, was returned).  copy.deepcopy takes a
    # genuine snapshot.
    initial_routes = copy.deepcopy(routes)
    s, I, R = eval_cand(edge, voisins, routes, inst, demand)
    if (s, I, R) == Error:
        return routes
    S += s
    relocated_cust = R[0][0][I[0]]
    # Apply the relocation: insert into the target route, fix both loads.
    R[1][0].insert(I[1] + 1, relocated_cust)
    R[1][1] += demand[relocated_cust]
    R[0][1] -= demand[relocated_cust]
    R[0][0].remove(relocated_cust)
    for _ in range(l - 1):
        curr_route = R[1][0]
        s, I, R = best_cand(curr_route, relocated_cust,
                            voisins, routes, inst, demand)
        if (s, I, R) == Error:
            return routes
        S += s
        relocated_cust = R[0][0][I[0]]
        R[1][0].insert(I[1] + 1, relocated_cust)
        R[1][1] += demand[relocated_cust]
        R[0][1] -= demand[relocated_cust]
        R[0][0].remove(relocated_cust)
    if S < 0:
        # The chain made things worse overall: hand back the snapshot.
        return initial_routes
    return routes
# Test execution: plot the initial solution with the chosen edge highlighted,
# run one ejection chain, then plot the resulting solution.
routes = [r1_test1, r2_test1, r3_test1]
print_current_sol(routes, inst_test1)
py.plot([inst_test1[edge1[0]][0], inst_test1[edge1[1]][0]],
        [inst_test1[edge1[0]][1], inst_test1[edge1[1]][1]],
        color='black', label='chosen')
py.title("Test de l'opérateur ejection_chain")
py.legend()
py.show()

v = voisins(kNN, inst_test1)
new_routes = ejection_chain(15, edge1, v, routes, inst_test1, demand1)
print_current_sol(new_routes, inst_test1)
py.show()
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
u"""
This example produces two arrows whose scale stays fixed with respect to the
distance from the camera (i.e. as you zoom in and out). Standard spheres are
drawn for comparison.
"""
import sys
import os.path
from PyQt4 import QtCore, QtGui
QtCore.Signal = QtCore.pyqtSignal
import vtk
from vtk.qt4.QVTKRenderWindowInteractor import QVTKRenderWindowInteractor
class VTKFrame(QtGui.QFrame):
    """Qt frame embedding a VTK render window.

    Builds two glyph pipelines: arrows whose on-screen size is held constant
    while zooming (via vtkDistanceToCamera scaling) and ordinary spheres,
    drawn for comparison, that scale with the camera distance.
    """

    def __init__(self, parent = None):
        super(VTKFrame, self).__init__(parent)
        # Embed the VTK render window in this frame with no margins.
        self.vtkWidget = QVTKRenderWindowInteractor(self)
        vl = QtGui.QVBoxLayout(self)
        vl.addWidget(self.vtkWidget)
        vl.setContentsMargins(0, 0, 0, 0)

        self.ren = vtk.vtkRenderer()
        self.ren.SetBackground(0.1, 0.2, 0.4)
        self.vtkWidget.GetRenderWindow().AddRenderer(self.ren)
        self.iren = self.vtkWidget.GetRenderWindow().GetInteractor()

        # Create a set of points for the fixed-size arrows
        fixedPointSource = vtk.vtkPointSource()
        fixedPointSource.SetNumberOfPoints(2)

        # Calculate the distance to the camera of each point
        distanceToCamera = vtk.vtkDistanceToCamera()
        distanceToCamera.SetInputConnection(fixedPointSource.GetOutputPort())
        distanceToCamera.SetScreenSize(100.0)

        # Glyph each point with an arrow
        arrow = vtk.vtkArrowSource()
        fixedGlyph = vtk.vtkGlyph3D()
        fixedGlyph.SetInputConnection(distanceToCamera.GetOutputPort())
        fixedGlyph.SetSourceConnection(arrow.GetOutputPort())

        # Scale each arrow by the "DistanceToCamera" point scalar so its
        # apparent size stays constant as the camera zooms.
        fixedGlyph.SetScaleModeToScaleByScalar()
        fixedGlyph.SetInputArrayToProcess(0, 0, 0, vtk.vtkDataObject.FIELD_ASSOCIATION_POINTS,
                                          "DistanceToCamera")

        # Create a mapper and actor for the arrows
        fixedMapper = vtk.vtkPolyDataMapper()
        fixedMapper.SetInputConnection(fixedGlyph.GetOutputPort())
        fixedMapper.SetScalarVisibility(False)
        fixedActor = vtk.vtkActor()
        fixedActor.SetMapper(fixedMapper)
        fixedActor.GetProperty().SetColor(0, 1, 1)

        # ............................................................
        # Comparison pipeline: spheres that get bigger when zooming in.
        pointSource = vtk.vtkPointSource()
        pointSource.SetNumberOfPoints(4)

        # Glyph each point with a sphere
        sphere = vtk.vtkSphereSource()
        glyph = vtk.vtkGlyph3D()
        glyph.SetInputConnection(pointSource.GetOutputPort())
        glyph.SetSourceConnection(sphere.GetOutputPort())
        glyph.SetScaleFactor(0.1)

        # Create a mapper and actor for the spheres
        mapper = vtk.vtkPolyDataMapper()
        mapper.SetInputConnection(glyph.GetOutputPort())
        mapper.SetScalarVisibility(False)
        actor = vtk.vtkActor()
        actor.SetMapper(mapper)
        actor.GetProperty().SetColor(0, 1, 1)

        # The distance filter needs the renderer to query the active camera.
        distanceToCamera.SetRenderer(self.ren)

        # Add the actors to the scene
        self.ren.AddActor(fixedActor)
        self.ren.AddActor(actor)
        self.ren.ResetCamera()

        self._initialized = False

    def showEvent(self, evt):
        # Initialize the interactor once, the first time the frame is shown.
        if not self._initialized:
            self.iren.Initialize()
            #self.startTimer(30)
            self._initialized = True

    def timerEvent(self, evt):
        # Only fires if the startTimer call above is re-enabled: slowly
        # orbits the camera and re-renders.
        self.ren.GetActiveCamera().Azimuth(1)
        self.vtkWidget.GetRenderWindow().Render()
class MainPage(QtGui.QMainWindow):
    """Top-level window hosting a single VTKFrame."""

    def __init__(self, parent=None):
        super(MainPage, self).__init__(parent)
        self.setCentralWidget(VTKFrame())
        self.setWindowTitle("Distance to Camera")

    def categories(self):
        """Demo-browser metadata: category list."""
        return ['Demo']

    def mainClasses(self):
        """Demo-browser metadata: the VTK classes this example demonstrates."""
        return ['vtkPointSource', 'vtkDistanceToCamera', 'vtkArrowSource', 'vtkGlyph3D']
if __name__ == '__main__':
    # Standard Qt bootstrap: create the app, show the window, enter the loop.
    app = QtGui.QApplication(sys.argv)
    window = MainPage()
    window.show()
    sys.exit(app.exec_())
|
# Print the Fibonacci numbers below 1000 (1 1 2 3 5 ...).
# The tuple assignment sets a to the old b and b to their sum in one step.
a, b = 0, 1
while b < 1000:
    # BUG FIX (cosmetic): the original carried trailing commas from a
    # Python 2 port; in Python 3, `print(b),` builds a throwaway tuple.
    print(b)
    a, b = b, a + b
|
# ToDo : update_record_model not implemented
# NOTE : Moved config into pui
import configuration
import wizard
import utils
import gtk , gobject
try :
import hildon
except :
hildon = False
import time
delay_quit_interval = 10
def delete_event(widget, event, data=None):
    """Window-delete handler: quit now, or poll until the GPS locator is idle.

    The config holder may arrive either as `data` (normal wiring) or as
    `event` (alternate wiring); whichever is present supplies the locator.
    """
    source = data if data else event
    locator = source.config.db.locator or None
    if locator and locator.timeout_handler:
        # GPS still busy: retry the quit every delay_quit_interval seconds.
        gobject.timeout_add(delay_quit_interval * 1000, mainloop_exit, locator)
    else:
        gtk.main_quit()
def mainloop_exit(locator):
    """gobject timeout callback: True reschedules, False quits for good."""
    if locator.timeout_handler:
        # Locator still busy -- keep the timeout alive.
        return True
    gtk.main_quit()
    return False
def destroy_event(widget, event, data=None):
    """Generic response handler that simply destroys the emitting widget."""
    widget.destroy()
def get_store_and_iter ( model , view , iter , storeiter , config ) :
    """Unwrap the view's sort model to reach the underlying child store.

    Returns (store, storeiter) where storeiter addresses the same row as
    `iter` but in the child store, or None when no row is given.
    """
    sortable = view.get_model()
    store = sortable.get_model()
    if model and iter :
        if storeiter :
            # A non-None storeiter argument is explicitly unsupported.
            raise Exception ("Not implemented")
        # NOTE(review): storeiter is necessarily None here, yet it is passed
        # as the first argument to convert_iter_to_child_iter -- verify this
        # matches the pygtk signature in use; the extra argument looks odd.
        storeiter = model.convert_iter_to_child_iter( storeiter , iter )
    else :
        storeiter = None
    return store , storeiter
def ui_update_row_data ( store , iter , config , date, km, trip, fill, consum, price, priceperlitre, service, oil, tires, notes , id , visible ) :
    """Write one record's fields into the tree-store row at `iter`.

    Negative numeric values (and a None date) act as "leave this column
    unchanged" sentinels, allowing partial updates.  SI values are converted
    to the user's units via the config object.
    """
    if date :
        userdate = utils.convdate( config.dateformat , None , date )
        store.set( iter, configuration.column_dict['DAY'], userdate)
    if not km < 0.0 : store.set( iter, configuration.column_dict['KM'], config.SIlength2user(km) )
    if not trip < 0.0 : store.set( iter, configuration.column_dict['TRIP'], config.SIlength2user(trip) )
    if not fill < 0.0 : store.set( iter, configuration.column_dict['FILL'], config.SIvolume2user(fill) )
    if not consum < 0.0 : store.set( iter, configuration.column_dict['CONSUM'], config.SIconsumption2user(consum) )
    if not price < 0.0 : store.set( iter, configuration.column_dict['PRICE'], price )
    if price > 0 and trip > 0 : store.set( iter, configuration.column_dict['PRICEPERTRIP'], price/config.SIlength2user(trip))
    if not priceperlitre < 0.0 : store.set( iter, configuration.column_dict['PRICEPERLITRE'], config.SIppl2user(priceperlitre) )
    if not service < 0.0 : store.set( iter, configuration.column_dict['SERVICE'], service )
    if not oil < 0.0 : store.set( iter, configuration.column_dict['OIL'], oil )
    if not tires < 0.0 : store.set( iter, configuration.column_dict['TIRES'], tires )
    # CO2 emission is currently hard-wired to 0.0 (calculation disabled).
    if not consum < 0.0 : store.set( iter, configuration.column_dict['CO2EMISSION'], 0.0) #JP# config.SIemission2user(calc_co2_emission(consum,currentcar)) )
    if not notes : notes = ""
    store.set( iter, configuration.column_dict['NOTES'], notes)
    store.set( iter, configuration.column_dict['ID'], id )
    store.set( iter, configuration.column_dict['VISIBLE'], visible)
def ui_find_iter(store, id):
    """Return the iter of the row whose ID column equals id (None if absent)."""
    row = store.get_iter_first()
    while row:
        if store.get(row, configuration.column_dict['ID'])[0] == id:
            return row
        row = store.iter_next(row)
    return row
def settings_response ( widget , event , editwin , pui ) :
    """Settings-dialog response handler: copy changed widget values to config.

    NOTE(review): indentation was reconstructed from a whitespace-stripped
    source; confirm the font-size chain is nested under the changed-check.
    """
    view , config = pui.view , pui.config
    if not config.db.is_open() :
        widget.destroy()
        return
    # NOTE ?? : response from hildon wizard is an unexpected value
    if event == gtk.RESPONSE_ACCEPT or event == 2 :
        # Only touch config fields whose widget value actually changed.
        if editwin.widgets["mainviewfontsize"].get_active() != pui.config.fontsize :
            pui.config.fontsize = editwin.widgets["mainviewfontsize"].get_active()
            # Map the combo index 1..4 onto the font-size helpers.
            if pui.config.fontsize == 1 :
                fontsize_x_small ( None , pui )
            elif pui.config.fontsize == 2 :
                fontsize_small ( None, pui )
            elif pui.config.fontsize == 3 :
                fontsize_medium ( None , pui )
            elif pui.config.fontsize == 4 :
                fontsize_large ( None , pui )
        if editwin.widgets["current_unit"].get_active() != pui.config.units :
            pui.config.units = editwin.widgets["current_unit"].get_active()
        if editwin.widgets["currency"].get_text() != pui.config.currency :
            pui.config.currency = editwin.widgets["currency"].get_text()
        if editwin.widgets["gps_timeout"].get_text() != pui.config.gps_timeout :
            pui.config.gps_timeout = int( editwin.widgets["gps_timeout"].get_text() )
        pui.view.update( pui )
        widget.destroy()
    elif event == gtk.RESPONSE_REJECT :
        widget.destroy()
def edit_record_response ( widget , event , editwin , pui ) :
    """Edit-dialog response handler: persist edits to the selected record.

    Recomputes fuel-consumption figures, updates the affected "full fill"
    record when the not-full flag changed, writes the record back via
    config.db.update_record and refreshes the tree view rows.

    NOTE(review): indentation was reconstructed from a whitespace-stripped
    source; confirm the branch nesting against the original file.
    """
    consum = 0.0
    view , config = pui.view , pui.config
    if not config.db.is_open() :
        widget.destroy()
        return
    # NOTE ?? : response from hildon wizard is an unexpected value
    if event == gtk.RESPONSE_ACCEPT : # or event == 2 :
        selection = pui.view.get_selection()
        model , iter = selection.get_selected()
        if iter :
            id = model.get( iter , configuration.column_dict['ID'] )[0]
            # Convert widget values from the user's units/format to SI.
            date = editwin.entrydate.get_datestring()
            date = utils.date2sqlite( config.dateformat , date )
            km = config.user2SIlength( editwin.entrykm.get_text() )
            trip = config.user2SIlength( editwin.entrytrip.get_text() )
            fill = config.user2SIvolume( editwin.entryfill.get_text() )
            price = editwin.entryprice.get_text()
            if editwin.entryservice :
                service = editwin.entryservice.get_text()
                oil = editwin.entryoil.get_text()
                tires = editwin.entrytires.get_text()
                notes = editwin.entrynotes.get_text()
            else :
                service = oil = tires = 0.0
                notes = ""
            if fill and trip :
                oldnotfull = False
                # Well need to obtain the unmodified data to be excluded from
                # the new consumption calculations
                query = config.db.ppStmtOneRecord % id
                row = config.db.get_row( query )
                if row :
                    oldfill = row[3]
                    oldtrip = row[2]
                    oldconsum = row[9]
                    # "not full" is encoded as fill > 0 with consum == 0.
                    oldnotfull = oldfill>0.0 and abs(oldconsum)<1e-5
                notfull = editwin.buttonnotfull.get_active()
                if notfull :
                    # Partial fill: fold it into the next full record.
                    fullid , fullfill , fullkm = config.db.find_next_full( km )
                    if fullid :
                        if not oldnotfull :
                            oldfill = 0.0
                            oldtrip = 0.0
                        fullconsum = (fullfill+fill-oldfill)/(fullkm+trip-oldtrip)*100
                        # Update now the full record consum.
                        # NOTE(review): values are numeric, but building SQL
                        # with % formatting is fragile -- prefer parameters.
                        query = "UPDATE record set consum=%s WHERE id=%s" % ( fullconsum , fullid )
                        config.db.db.execute( query )
                else :
                    if oldnotfull :
                        # The record used to be partial: rebalance the next
                        # full record without this fill.
                        fullid , fullfill , fullkm = config.db.find_next_full( km )
                        if fullid :
                            fullconsum = (fullfill-oldfill)/(fullkm-oldtrip)*100
                            # Update now the full record consum
                            query = "UPDATE record set consum=%s WHERE id=%s" % ( fullconsum , fullid )
                            config.db.db.execute( query )
                    # Find if there are any not full fills before this record
                    fullfill , fullkm = config.db.find_prev_full( km )
                    if oldnotfull :
                        oldfill = 0.0
                        oldtrip = 0.0
                    consum = (fullfill+fill)/(fullkm+trip)*100
            if config.db.is_open() :
                if fill > 0 :
                    # NOTE(review): price comes from get_text() -- confirm it
                    # is converted to a number before this division.
                    priceperlitre = price / fill
                else :
                    priceperlitre = 0.0
                recordid = config.db.update_record(id, date, km, trip, fill, consum, price, priceperlitre, service, oil, tires, notes)
                if recordid == id :
                    store , storeiter = get_store_and_iter(model, view, iter, None , config)
                    ui_update_row_data(store, storeiter, config, date, km, trip, fill, consum, price, priceperlitre, service, oil, tires, notes, recordid, True)
                    if fill and trip :
                        # Update the data for the full fill
                        if notfull or notfull!=oldnotfull : # not enough to test notfull, but when?
                            if fullid :
                                fullstore , storeiter = get_store_and_iter(None, view, None, None, config)
                                fullstoreiter = ui_find_iter( fullstore , fullid )
                                if fullstoreiter :
                                    ui_update_row_data(fullstore, fullstoreiter, config , None, -1.0, -1.0, -1.0, fullconsum, -1.0, -1.0, -1.0, -1.0, -1.0, None, fullid, True)
            pui.update_totalkm()
        widget.destroy()
    elif event == gtk.RESPONSE_REJECT :
        widget.destroy()
def add_record_response ( widget , event , editwin , pui ) :
    """Add-dialog response handler: insert a newly entered record.

    Computes the consumption for the new fill (or folds a partial fill into
    the next full record), inserts via config.db.add_record and appends a
    row to the tree view.

    NOTE(review): indentation was reconstructed from a whitespace-stripped
    source; confirm the branch nesting against the original file.
    """
    consum = 0.0
    view , config = pui.view , pui.config
    if not config.db.is_open() :
        widget.destroy()
        return
    # NOTE : response from hildon wizard is an unexpected value
    if event == gtk.RESPONSE_ACCEPT or event == 2 :
        if config.changed :
            update_car_changed(pui);
        # Convert widget values from the user's units/format to SI.
        date = editwin.entrydate.get_datestring()
        date = utils.date2sqlite( config.dateformat , date )
        km = config.user2SIlength( editwin.entrykm.get_text() )
        trip = config.user2SIlength( editwin.entrytrip.get_text() )
        fill = config.user2SIvolume( editwin.entryfill.get_text() )
        price = editwin.entryprice.get_text()
        if editwin.entryservice :
            service = editwin.entryservice.get_text()
            oil = editwin.entryoil.get_text()
            tires = editwin.entrytires.get_text()
            notes = editwin.entrynotes.get_text()
        else :
            service = oil = tires = 0.0
            notes = ""
        if fill and trip :
            if editwin.buttonnotfull.get_active() :
                # Partial fill: fold it into the next full record.
                fullid , fullfill , fullkm = config.db.find_next_full( km )
                if fullid :
                    fullconsum = (fullfill+fill)/(fullkm+trip)*100
                    # Update now the full record consum and tree view also.
                    # NOTE(review): numeric values, but parameterized SQL
                    # would be safer than % formatting.
                    query = "UPDATE record SET consum=%s WHERE id=%s" % ( fullconsum , fullid )
                    config.db.execute( query )
                    store , storeiter = get_store_and_iter(None, view, None, None, config)
                    storeiter = ui_find_iter( store , fullid )
                    if storeiter :
                        ui_update_row_data(store, storeiter, config , None, -1.0, -1.0, -1.0, fullconsum, -1.0, -1.0, -1.0, -1.0, -1.0, None, fullid, True)
            else :
                # Find if there are any not full fills before this record
                fullfill , fullkm = config.db.find_prev_full( km )
                consum = (fullfill+fill)/(fullkm+trip)*100
        # This is verified also within add_record method
        if config.db.is_open() :
            if fill > 0 :
                # NOTE(review): price comes from get_text() -- confirm it is
                # numeric before this division.
                priceperlitre = price / fill
            else :
                priceperlitre = 0.0
            recordid = config.db.add_record(date, km, trip, fill, consum, price, priceperlitre, service, oil, tires, notes)
            if recordid : # record succesfully inserted
                store , storeiter = get_store_and_iter(None, view, None, None, config)
                storeiter = store.append()
                ui_update_row_data(store, storeiter, config, date, km, trip, fill, consum, price, priceperlitre, service, oil, tires, notes, recordid, True)
        pui.update_totalkm()
        widget.destroy()
    elif event == gtk.RESPONSE_REJECT :
        widget.destroy()
def about(action, pui):
    """Show the About dialog (modal on the main window)."""
    dialog = gtk.MessageDialog(pui,
                               gtk.DIALOG_DESTROY_WITH_PARENT,
                               gtk.MESSAGE_INFO,
                               gtk.BUTTONS_OK,
                               "PyFuelpad 1.1")
    dialog.set_title("About PyFuelpad ...")
    dialog.format_secondary_text("(C) Javier Palacios Bermejo 2010,2020\nLicense : GPLv2")
    # Any response just closes the dialog.
    dialog.connect("response", lambda w, r: dialog.destroy())
    dialog.show_all()
def settings(action, pui):
    """Open the settings dialog and hook up its response handler."""
    header = ("Settings",)
    editwin = wizard.FuelpadSettingsEdit(pui.config)
    dialog = gtk.Dialog(header[0],
                        pui,
                        gtk.DIALOG_MODAL,
                        (gtk.STOCK_OK, gtk.RESPONSE_ACCEPT))
    dialog.vbox.pack_start(editwin, True, True, 0)
    editwin.show()
    dialog.connect("response", settings_response, editwin, pui)
    dialog.show_all()
def editrecord ( action , pui ) :
    """Open an edit dialog for the currently selected record.

    Shows an informational dialog instead when no record is selected or
    when the database is not open.
    """
    header = ( "Edit a record" , )
    if pui.config.db.is_open() :
        selection = pui.view.get_selection()
        model , iter = selection.get_selected()
        if iter :
            dialog = gtk.Dialog( header[0],
                                 pui,
                                 gtk.DIALOG_MODAL,
                                 ( gtk.STOCK_OK, gtk.RESPONSE_ACCEPT,
                                   gtk.STOCK_CANCEL, gtk.RESPONSE_REJECT
                                 )
                                 )
            # Column 0 of the model holds the record id.
            editwin = wizard.FuelpadFullEdit( pui , model.get_value( iter , 0 ) )
            # FIXME : notfull toggle needs to be manually worked
            for colid,widget in editwin.widgets.iteritems() :
                # Pre-fill each edit widget from the selected row.
                if 'set_title' in dir(widget) :
                    widget.set_title( "%s" % model.get_value( iter , colid ) )
                else :
                    widget.set_text( "%s" % model.get_value( iter , colid ) )
            dialog.vbox.pack_start(editwin , True, True, 0)
            editwin.show()
            dialog.connect( "response", edit_record_response, editwin , pui )
        else :
            dialog = gtk.Dialog( header[0],
                                 pui ,
                                 gtk.DIALOG_MODAL ,
                                 ( gtk.STOCK_OK, gtk.RESPONSE_REJECT )
                                 )
            label = gtk.Label( "Select a record first" )
            dialog.vbox.pack_start( label, True, True, 5)
            label.show()
            dialog.connect( "response", destroy_event , None )
    else :
        dialog = gtk.Dialog( header[0],
                             pui ,
                             gtk.DIALOG_MODAL ,
                             ( gtk.STOCK_OK, gtk.RESPONSE_REJECT )
                             )
        # BUG fix: the original created a "Select a record first" label here
        # and immediately overwrote it; only the database-error label applies.
        label = gtk.Label( "Can't access database - editing records not possible" )
        dialog.vbox.pack_start( label, True, True, 5)
        label.show()
        dialog.connect( "response", destroy_event , None )
    dialog.show()
# http://wiki.maemo.org/PyMaemo/UI_tutorial/Windows_and_dialogs#Using_GtkDialogs_in_Hildon_applications
def newrecord ( action, pui , allowreduced=False ) :
    """Open the dialog (or Hildon wizard) for adding a new fuel record."""
    title = "Add a new record"
    if not pui.config.db.is_open() :
        # Database unavailable: just inform the user.
        dialog = gtk.Dialog( title,
                             pui ,
                             gtk.DIALOG_MODAL ,
                             ( gtk.STOCK_OK, gtk.RESPONSE_REJECT )
                             )
        label = gtk.Label( "Can't access database - adding records not possible" )
        dialog.vbox.pack_start( label, True, True, 5)
        label.show()
        dialog.connect( "response", destroy_event , None )
        dialog.show()
        return
    reduced = pui.config.reducedinput and allowreduced
    if reduced :
        editwin = wizard.FuelpadEdit( pui.config , 1 )
    else :
        editwin = wizard.FuelpadFullEdit( pui , False )
    if hildon and reduced :
        dialog = hildon.WizardDialog( pui , title , editwin )
    else :
        dialog = gtk.Dialog( title,
                             pui,
                             gtk.DIALOG_MODAL,
                             ( gtk.STOCK_OK, gtk.RESPONSE_ACCEPT,
                               gtk.STOCK_CANCEL, gtk.RESPONSE_REJECT
                             )
                             )
    dialog.vbox.pack_start(editwin , True, True, 0)
    editwin.show_all()
    dialog.connect( "response", add_record_response, editwin , pui )
    dialog.show()
def recordactivated ( view , path , col=None ) :
    """Treat two quick taps on the same row as a double-click and open the
    record editor; otherwise remember the tapped row and time."""
    now = time.time()
    if path[0] != view.taprow :
        # First tap on this row: arm the double-tap detection.
        view.taprow = path[0]
    else :
        # GTK reports the double-click window in milliseconds.
        window = gtk.settings_get_default().get_property("gtk-double-click-time") / 1000.0
        if now - view.taptime < window :
            iter = view.get_model().get_iter(path)
            if iter :
                editrecord( None , view.get_toplevel() )
            view.taprow = -1
    view.taptime = now
def deleterecord ( action, pui ) :
    """Delete the selected record and, when it was a non-full fill, refresh
    the consumption of the following full-fill record."""
    km = fill = trip = consum = 0.0
    selection = pui.view.get_selection()
    model , iter = selection.get_selected()
    if iter :
        id = model.get( iter , configuration.column_dict['ID'] )[0]
        # Ask for confirmation and remove events
        query = pui.config.db.ppStmtOneRecord % id
        row = pui.config.db.get_row( query )
        if row :
            km = row[1]
            fill = row[3]
            trip = row[2]
            consum = row[9]
        if pui.config.db.delete_record( id ) :
            # Remove from window
            store , storeiter = get_store_and_iter(model, pui.view, iter, None , pui.config)
            store.remove( storeiter )
            if fill :
                # consum ~ 0 marks a non-full fill: the next full record
                # absorbed it, so its consumption must be recomputed.
                if abs(consum) < 1e-5 :
                    # Find next full record
                    # BUG fix: the original referenced undefined globals
                    # `config` and `view` (NameError at runtime); use
                    # pui.config / pui.view as everywhere else in this file.
                    fullid , fullfill , fullkm = pui.config.db.find_next_full( km )
                    if fullid :
                        fullconsum = fullfill/fullkm*100
                        # Update now the full record consum
                        query = "UPDATE record SET consum=%s WHERE id=%s" % ( fullconsum , fullid )
                        # BUG fix: was config.db.db.execute (doubled .db).
                        pui.config.db.execute( query )
                        # Update the data for the full fill
                        fullstore , fullstoreiter = get_store_and_iter(None, pui.view, None, None, pui.config)
                        fullstoreiter = ui_find_iter( fullstore , fullid )
                        if fullstoreiter :
                            ui_update_row_data(fullstore, fullstoreiter, pui.config , None, -1.0, -1.0, -1.0, fullconsum, -1.0, -1.0, -1.0, -1.0, -1.0, None, fullid, True)
            pui.update_totalkm()
    # Delete the corresponding alarmevent
# Actions for carcombo item done
def update_car_changed ( pui ) :
    """Persist the configuration and refresh the record view after the
    active car changed."""
    pui.config.save()
    pui.view.update( pui )
def car_apply_cb ( widget , window ) :
    # "Apply" callback for the car selector: propagate the change and
    # re-sync toolbar visibility.
    update_car_changed( window )
    # Update the next event estimates
    #window.alarmview = create_alarmview_and_model( window )
    #window.warn = update_next_event( window.alarmview.get_model() )
    #update_reminder_toolbutton (window, window.warn);
    window.toolbar_show_hide()
# BUG : Under font change, labels are not rescaled
# Font scaling done
def update_font_scale ( view , fontsize ) :
    # Rescale the cell renderers of every font-scalable column to the new
    # font size; the view is hidden during the update to avoid flicker.
    view.hide()
    for info in configuration.column_info :
        if info[6] :  # column_info[6] flags the column as scalable
            col = view.get_column( info[0] )
            # NOTE(review): `attrs` is never used below -- presumably
            # font_attrs() mutates the column header widget as a side
            # effect; confirm before removing this call.
            attrs = configuration.font_attrs( fontsize , col.get_widget() )
            for renderer in col.get_cell_renderers() :
                renderer.set_property( "scale" , configuration.fontscalefactors[fontsize] )
    view.show()
def fontsize_x_small ( action , pui ) :
    """Menu handler: switch the record view to the extra-small font."""
    pui.config.fontsize = configuration.XSMALL
    update_font_scale( pui.view , pui.config.fontsize )
    # NOTE(review): only the x-small handler refreshes the record model;
    # confirm whether the other sizes should do the same.
    update_record_model( pui )
def fontsize_small ( action, pui ) :
    """Menu handler: switch the record view to the small font."""
    pui.config.fontsize = configuration.SMALL
    update_font_scale( pui.view , pui.config.fontsize )
def fontsize_medium ( action , pui ) :
    """Menu handler: switch the record view to the medium font."""
    pui.config.fontsize = configuration.MEDIUM
    update_font_scale( pui.view , pui.config.fontsize )
def fontsize_large ( action , pui ) :
    """Menu handler: switch the record view to the large font."""
    pui.config.fontsize = configuration.LARGE
    update_font_scale( pui.view , pui.config.fontsize )
# Toolbars toggles done
def main_fullscreen ( action , pui ) :
    """Toggle fullscreen mode and sync the menu item with the new state."""
    main_window_fullscreen(pui)
    pui.main_menu_item_fullscreen.set_active( pui.mainfullscreen )
def main_toolbar ( action , pui ) :
    """Toggle visibility of the main toolbar."""
    pui.config.main_toolbar_visible = not pui.config.main_toolbar_visible
    pui.toolbar_show_hide()
def secondary_toolbar ( action , pui ) :
    """Toggle visibility of the secondary toolbar."""
    pui.config.secondary_toolbar_visible = not pui.config.secondary_toolbar_visible
    pui.toolbar_show_hide()
|
import json
from django.views.generic import View
from django.http import HttpResponse
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.db.models import Q
from django.core.serializers.json import DjangoJSONEncoder
from django.conf import settings
from apps.master_file import models
class AjaxProductList(View):
    """Server-side DataTables endpoint listing the products of one warehouse."""

    def get(self, request):
        products = self._datatables(request)
        return HttpResponse(json.dumps(products, cls=DjangoJSONEncoder), content_type='application/json')

    def _datatables(self, request):
        """Build the DataTables response dict (draw/recordsTotal/recordsFiltered/data)."""
        datatables = request.GET
        main_cat = datatables.get("main_cat")
        draw = int(datatables.get('draw'))      # request sequence counter, echoed back verbatim
        start = int(datatables.get('start'))    # index of the first row of the requested page
        length = int(datatables.get('length'))  # page size
        search = datatables.get('search[value]')
        records_total = models.Product.objects.filter(warehouse_id=main_cat).count()
        records_filtered = records_total
        products = models.Product.objects.filter(warehouse_id=main_cat).order_by('category', 'code')
        if search:
            # NOTE(review): the search branch queries all products and drops
            # the warehouse filter -- confirm whether that is intended.
            products = models.Product.objects.filter(
                Q(warehouse__id__icontains=search) |
                Q(category__id__icontains=search) |
                Q(code__icontains=search) |
                Q(description__icontains=search) |
                Q(profile__profile_id__icontains=search) |
                Q(wood_type__wood_type_id__icontains=search) |
                Q(unit__unit__icontains=search)
            )
            records_total = products.count()
            records_filtered = records_total
        paginator = Paginator(products, length)
        # BUG fix: the page number must come from start/length; `draw` is a
        # DataTables request counter and only equals the page by coincidence
        # on the first request.
        page_number = start // length + 1 if length > 0 else 1
        try:
            object_list = paginator.page(page_number).object_list
        except PageNotAnInteger:
            object_list = paginator.page(1).object_list
        except EmptyPage:
            object_list = paginator.page(paginator.num_pages).object_list
        data = [
            {
                'warehouse': prod.warehouse.description,
                'category': prod.category.description,
                'code': prod.code,
                'description': prod.description,
                'profile': prod.profile.profile_sym,
                'color': prod.color_id,
                'wood_type': prod.wood_type.sym,
                'unit': prod.unit.unit,
                'min_qty': prod.min_qty,
                'max_qty': prod.max_qty
            } for prod in object_list
        ]
        return {
            'draw': draw,
            'recordsTotal': records_total,
            'recordsFiltered': records_filtered,
            'data': data
        }
class AjaxColorList(View):
    """Server-side DataTables endpoint listing all colors."""

    def get(self, request):
        colors = self._datatables(request)
        return HttpResponse(json.dumps(colors, cls=DjangoJSONEncoder), content_type='application/json')

    def _datatables(self, request):
        """Build the DataTables response dict (draw/recordsTotal/recordsFiltered/data)."""
        datatables = request.GET
        draw = int(datatables.get('draw'))      # request sequence counter, echoed back verbatim
        start = int(datatables.get('start'))    # index of the first row of the requested page
        length = int(datatables.get('length'))  # page size
        search = datatables.get('search[value]')
        records_total = models.Color.objects.all().count()
        records_filtered = records_total
        colors = models.Color.objects.all().order_by('color_group', 'color_id')
        if search:
            colors = models.Color.objects.filter(
                Q(color_group__icontains=search) |
                Q(color_id__icontains=search) |
                Q(description__icontains=search) |
                Q(sort_group__icontains=search) |
                Q(sort_group_note__icontains=search) |
                Q(wood_type__icontains=search) |
                Q(gloss__icontains=search)
            )
            records_total = colors.count()
            records_filtered = records_total
        paginator = Paginator(colors, length)
        # BUG fix: derive the page number from start/length; `draw` is a
        # DataTables request counter, not a page index.
        page_number = start // length + 1 if length > 0 else 1
        try:
            object_list = paginator.page(page_number).object_list
        except PageNotAnInteger:
            object_list = paginator.page(1).object_list
        except EmptyPage:
            object_list = paginator.page(paginator.num_pages).object_list
        data = [
            {
                'color_group': color.color_group,
                'color_id': color.color_id,
                'description': color.description,
                'wood_type': color.wood_type_id,
                'sort_group': color.sort_group_id,
                'sort_group_note': color.sort_group_note,
                'gloss': color.gloss,
                'printed': color.printed,
                'distressed': color.distressed,
                'distressed_remark': color.distressed_remark,
                'phun_hot': color.phun_hot,
                'emboss': color.emboss,
                'scratch': color.scratch,
                'glazed': color.glazed,
                'danh_bui': color.danh_bui,
                'remark': color.remark
            } for color in object_list
        ]
        return {
            'draw': draw,
            'recordsTotal': records_total,
            'recordsFiltered': records_filtered,
            'data': data
        }
|
#
# GRPC Server for NK Shapelet Classifier
#
# Uses GRPC service config in protos/grapevine.proto
#
from flask import Flask, request
import time
import pandas as pd
import numpy as np
import configparser
from Sloth.classify import Shapelets
from Sloth.preprocess import events_to_rates
from tslearn.preprocessing import TimeSeriesScalerMinMax
import grpc
import logging
import grapevine_pb2
import grapevine_pb2_grpc
from concurrent import futures
# GLOBALS
_ONE_DAY_IN_SECONDS = 60 * 60 * 24  # sleep quantum for the serve() keep-alive loop
DEBUG = True # boolean to specify whether or not print DEBUG information
restapp = Flask(__name__)  # small REST app exposing the /healthcheck probe
#-----
class NKShapeletClassifier(grapevine_pb2_grpc.ClassifierServicer):
    """gRPC Classifier servicer that labels an event stream with a pretrained
    shapelet model.

    Each Classify() call appends the message timestamp to an in-memory
    series, converts the series to a rate function and classifies it.
    NOTE(review): a single instance serves every connection, so the series
    mixes events from all clients -- confirm this is intended.
    Relies on module globals (CATEGORIES, MODEL_OBJECT, P_THRESHOLD,
    DOMAIN_OBJECT) set in the __main__ block.
    """
    def __init__(self):
        # Rolling window of event timestamps (seconds).
        self.series = []
        # set shapelet HPs
        self.EPOCHS = 100 # dummy, not training here
        self.LENGTH = 0.05
        self.NUM_SHAPELET_LENGTHS = 12
        self.NUM_SHAPELETS = 0.25
        self.LEARNING_RATE = .01
        self.WEIGHT_REGULARIZER = .001
        # set rate function HPs
        self.SERIES_LENGTH = 240 * 60 # series length in seconds
        self.MIN_POINTS = 5
        self.NUM_BINS = 300
        self.FILTER_BANDWIDTH = 2
        # instantiate shapelet clf and model object using deployed weights
        self.clf = Shapelets(self.EPOCHS, self.LENGTH, self.NUM_SHAPELET_LENGTHS, self.NUM_SHAPELETS, self.LEARNING_RATE, self.WEIGHT_REGULARIZER)
        self.model = self.clf.generate_model(int(self.NUM_BINS), len(CATEGORIES.split(',')))
        print('Load weights...')
        self.model.load_weights("deployed_checkpoints/" + MODEL_OBJECT)
        # Freeze the predict function so it can be called from gRPC worker threads.
        self.model._make_predict_function()
        print('Weights loaded...')
        self.clf.encode(CATEGORIES.split(','))

    # Main classify function
    def Classify(self, request, context):
        """Classify the event stream after appending request.createdAt.

        Returns a grapevine_pb2.Classification; prediction stays 'false'
        with confidence 0.0 when there is no timestamp or not enough points.
        """
        # init classifier result object
        result = grapevine_pb2.Classification(
            domain=DOMAIN_OBJECT,
            prediction='false',
            confidence=0.0,
            model="NK_shapelet_classifer",
            version="0.0.2",
            meta=grapevine_pb2.Meta(),
        )
        # get text from input message
        input_time = request.createdAt
        # exception case
        if input_time is None:
            return result
        # Shapelet NK_shapelet_classifier prediction code
        start_time = time.time()
        # add new timestamp to time series
        self.series.append(input_time)
        # delete old timestamps if necessary: keep the window span within
        # SERIES_LENGTH seconds by dropping the oldest points.
        while max(self.series) - min(self.series) > (self.SERIES_LENGTH):
            print('Deleting point {} from series'.format(self.series.index(min(self.series))))
            del self.series[self.series.index(min(self.series))]
        # check if >= min_points exist in the series
        if len(self.series) < self.MIN_POINTS:
            print('There are not enough points in this series to make a shapelet classification.')
            print('This series has {} points, but at least {} are needed for classification'.format(len(self.series), self.MIN_POINTS))
            return result
        # transform series to rate function, scale, and make prediction
        max_time = min(self.series) + self.SERIES_LENGTH
        series_values, _ = events_to_rates(self.series, filter_bandwidth = self.FILTER_BANDWIDTH, max_time = max_time, min_time = min(self.series), num_bins = self.NUM_BINS, density = True)
        # Shape expected by the model: (1, num_bins, 1).
        series_values = series_values.reshape((1, len(series_values), 1))
        series_values = TimeSeriesScalerMinMax().fit_transform(series_values)
        y_probs = self.model.predict(series_values)
        print(y_probs)
        pred, confidence = self.clf.decode(y_probs,P_THRESHOLD)
        print("Classification result is (class / confidence): {} / {}".format(pred, confidence))
        elapsed_time = time.time()-start_time
        print("Total time for classification is : %.2f sec" % elapsed_time)
        if pred and confidence: # empty edge case
            result.prediction = pred[0]
            result.confidence = confidence[0]
        return result
#-----
def serve():
    """Start the gRPC classifier server plus the Flask health endpoint and
    block until interrupted."""
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    grapevine_pb2_grpc.add_ClassifierServicer_to_server(NKShapeletClassifier(), server)
    server.add_insecure_port('[::]:' + GRPC_PORT)
    server.start()
    # BUG fix: restapp.run() blocks forever, so in the original the sleep
    # loop and the KeyboardInterrupt -> server.stop(0) path were
    # unreachable.  Run Flask in a daemon thread so the main thread keeps
    # the gRPC server alive and can shut it down cleanly on Ctrl-C.
    import threading
    threading.Thread(target=restapp.run, daemon=True).start()
    try:
        while True:
            time.sleep(_ONE_DAY_IN_SECONDS)
    except KeyboardInterrupt:
        server.stop(0)
@restapp.route("/healthcheck")
def health():
return "HEALTHY"
# Entry point: read config.ini, publish its values as module globals for
# NKShapeletClassifier / serve(), then start serving.
# NOTE(review): the `global` statements below are no-ops at module scope --
# plain assignment already creates module globals; kept for byte-fidelity.
if __name__ == '__main__':
    logging.basicConfig() # purpose?
    config = configparser.ConfigParser()
    config.read('config.ini')
    # Checkpoint file name under deployed_checkpoints/.
    modelName = config['DEFAULT']['modelName']
    print("using model " + modelName + " ...")
    global MODEL_OBJECT
    MODEL_OBJECT = modelName
    # Comma-separated class labels.
    categories = config['DEFAULT']['categories']
    print("using categories " + categories + " ...")
    global CATEGORIES
    CATEGORIES = categories
    # Decision threshold passed to clf.decode().
    p_threshold = config['DEFAULT']['p_threshold']
    print("using p_threshold " + p_threshold + " ...")
    global P_THRESHOLD
    P_THRESHOLD = float(p_threshold)
    # Domain string echoed in every Classification result.
    domain = config['DEFAULT']['domain']
    print("using domain " + domain + " ...")
    global DOMAIN_OBJECT
    DOMAIN_OBJECT = domain
    # TCP port for the gRPC listener.
    port_config = config['DEFAULT']['port_config']
    print("using port " + port_config + " ...")
    global GRPC_PORT
    GRPC_PORT = port_config
    serve()
|
"""Write a function (with helper functions if needed) called to Excel that takes
an excel column value (A,B,C,D,...,AA,AB,AC,..., AAA...) and returns a corresponding
integer value (A=1,B=2,..., AA=26).
"""
import unittest
# Offset so that ord(ch) - A maps 'A'..'Z' to 1..26.
A = ord('A') - 1
# ord('Z'); note Z - A == 26, the radix used by col2int.
Z = ord('Z')
def col2int(col):
    """Return the 1-based index of an Excel column label (A=1, ..., AA=27).

    Treats the label as a bijective base-26 numeral, accumulated in
    Horner form left-to-right.
    """
    base = ord('A') - 1  # 'A' maps to 1
    total = 0
    for ch in col:
        total = total * 26 + (ord(ch) - base)
    return total
class Col2intTest(unittest.TestCase):
    """Spot checks for col2int over one-, two- and three-letter columns."""

    def test_function(self):
        # (column label, expected 1-based index); the duplicated 'BBB'
        # assertion from the original was removed.
        cases = [
            ('A', 1), ('B', 2), ('C', 3), ('Z', 26),
            ('AA', 27), ('AB', 28), ('AC', 29), ('AZ', 52),
            ('BA', 53), ('BB', 54), ('BC', 55),
            ('AAA', 703), ('AAB', 704), ('ABA', 729), ('ABB', 730),
            ('BBB', 1406), ('BET', 1502),
        ]
        for col, expected in cases:
            self.assertEqual(col2int(col), expected)
# Run the unit tests when executed as a script.
if __name__ == '__main__':
    unittest.main()
|
import numpy as np
import mxnet as mx
import cv2, time
from collections import namedtuple
def load_inception_model():
    """Load the pretrained Inception-BN checkpoint and its class names.

    Returns (model, synsets): a CPU-bound mxnet Module ready for inference
    and the list of class-name strings read from ./synset.txt.
    """
    with open('./synset.txt', 'r') as f:
        synsets = [l.rstrip() for l in f]
    sym, arg_params, aux_params = mx.model.load_checkpoint('Inception-BN', 0)
    model = mx.mod.Module(symbol=sym, context=mx.cpu())
    # Fixed input: a single 3-channel 224x224 image, inference only.
    model.bind(for_training=False, data_shapes=[('data', (1,3,224,224))])
    model.set_params(arg_params, aux_params)
    return model,synsets
def load_image(filename):
    """Read an image file and return it as a (1, 3, 224, 224) RGB array."""
    rgb = cv2.cvtColor(cv2.imread(filename), cv2.COLOR_BGR2RGB)
    rgb = cv2.resize(rgb, (224, 224))
    # HWC -> CHW (equivalent to the two swapaxes calls), then add batch axis.
    chw = rgb.transpose(2, 0, 1)
    return chw[np.newaxis, :]
def get_top_categories(prob, synsets, n=5):
    """Return the n most probable (probability, class-name) pairs, best first.

    Also prints each pair, matching the original diagnostic output.
    """
    ranked = np.argsort(prob)[::-1][:n]
    top = []
    for idx in ranked:
        print('probability=%f, class=%s' %(prob[idx], synsets[idx]))
        top.append((prob[idx], synsets[idx]))
    return top
def get_top1_message(topN):
    """Build a human-readable sentence for the best classification in topN.

    topN is a list of (probability, 'synset_id class name') pairs as
    returned by get_top_categories; only the first entry is used.
    """
    prob, label = topN[0]
    # Probability as a truncated integer percentage.
    percent = str(int(prob * 100))
    # Drop the leading synset id: "n02099601 golden retriever" -> "golden retriever".
    item = ' '.join(label.split(' ')[1:])
    return "I'm " + percent + "% sure that this is a " + item + ". "
def predict(image, model):
    """Run a forward pass on a preprocessed image batch and return the
    squeezed probability vector; prints the elapsed time."""
    Batch = namedtuple('Batch', ['data'])
    started = time.time()
    model.forward(Batch([mx.nd.array(image)]))
    probabilities = np.squeeze(model.get_outputs()[0].asnumpy())
    print("forward pass in "+str(time.time() - started))
    return probabilities
|
from app import db
class Plugin(db.Model):
    """ORM model describing an installed plugin."""
    __table_args__ = {"extend_existing": True}
    # Surrogate primary key.
    pid = db.Column(db.Integer, primary_key=True, autoincrement=True, nullable=False)
    # Short machine-readable identifier (unique by convention, not enforced here).
    identifier = db.Column(db.String(16), nullable=False)
    name = db.Column(db.String(32), nullable=False)
    version = db.Column(db.String(16), nullable=False)
    author = db.Column(db.String(20), nullable=True)
    def __init__(self, identifier: str, name: str, version: str, author: str):
        self.identifier = identifier
        self.name = name
        self.version = version
        self.author = author
    def __repr__(self):
        return "<Plugin {}>".format(self.identifier)
class User(db.Model):
    """ORM model for a user account, implementing the flask-login user
    interface (is_authenticated / is_active / is_anonymous / get_id)."""
    __table_args__ = {"extend_existing": True}
    # Surrogate primary key.
    uid = db.Column(db.Integer, autoincrement=True, nullable=False, primary_key=True)
    name = db.Column(db.String, nullable=False)
    # Battle.net battletag, realm and character metadata.
    bnet_tag = db.Column(db.String)
    realm = db.Column(db.String)
    avatar = db.Column(db.String)
    char_class = db.Column(db.Integer)
    level = db.Column(db.Integer)
    guild = db.Column(db.String)
    # OAuth access token for the external API.
    oauth_token = db.Column(db.String)
    def __init__(self, name: str, bnet_tag: str, realm: str, avatar: str, char_class: int, level: int, guild: str,
                 oauth_token: str):
        self.name = name
        self.bnet_tag = bnet_tag
        self.realm = realm
        self.avatar = avatar
        self.char_class = char_class
        self.level = level
        self.guild = guild
        self.oauth_token = oauth_token
    @property
    def is_authenticated(self):
        # All persisted users count as authenticated (flask-login contract).
        return True
    @property
    def is_active(self):
        return True
    @property
    def is_anonymous(self):
        return False
    def get_id(self):
        # flask-login requires a unicode id.
        return str(self.uid)
    @classmethod
    def get(cls, uid: int):
        """Return the user with this uid, or None if it does not exist.

        NOTE(review): the len(u) > 1 branch is unreachable for a primary-key
        lookup; kept as defensive code.
        """
        u = User.query.filter_by(uid=uid).all()
        if len(u) == 0 or len(u) > 1:
            return None
        else:
            return u[0]
    def __repr__(self):
        return "<User {}>".format(self.name)
|
import numpy as np
import os, sys, csv, random
def read_labels(labels_filename):
    """Load class names from a semicolon-separated file (first field per line).

    Returns a dict with the names list plus two closures:
    'vector_by_name' (name -> one-hot row) and 'name_by_vector'
    (vector -> name of the argmax component).
    """
    names = []
    with open(labels_filename, "rt") as f:
        for line in csv.reader(f, delimiter=";"):
            names.append(line[0].strip())
    print('labels:', names)
    onehot = np.eye(len(names))

    def vector_by_name(name):
        return onehot[names.index(name)]

    def name_by_vector(vector):
        return names[np.argmax(vector)]

    return {
        'name_by_vector': name_by_vector,
        'vector_by_name': vector_by_name,
        'names': names }
def read_all_data(train_data_file):
    """Read a semicolon-separated data file into (features, labels).

    Each row is: timestamp; feature...; label.  The timestamp is discarded,
    the middle fields become one feature list (as strings), and the last
    field is the label.
    """
    features = []
    labels = []
    with open(train_data_file, 'rt') as f:
        for row in csv.reader(f, delimiter=';'):
            # row[0] is a timestamp and is intentionally dropped.
            features.append(row[1:len(row) - 1])
            labels.append(row[-1])
    return features, labels
def shuffle_data(x, y):
    """Shuffle x and y in unison; returns two tuples, inputs left untouched."""
    paired = list(zip(x, y))
    random.shuffle(paired)
    x_shuffled, y_shuffled = zip(*paired)
    return x_shuffled, y_shuffled
class Dataset:
    """Shuffled mini-batch provider over a semicolon-separated dataset.

    Features are standardized with precomputed mean/std arrays loaded from
    .npy files; labels are one-hot encoded via the label file.
    """
    def __init__(self, train_data_file='wifiscan_1.csv', labels_filename='wifiscan_labels.csv', mean_path='mean.npy', std_path='std.npy'):
        self.mean = np.load(mean_path)
        self.std = np.load(std_path)
        self.x, self.y = read_all_data(train_data_file)
        u = read_labels(labels_filename)
        self.vector_by_name = u['vector_by_name']
        self.new_epoch()

    def get_num_features(self):
        """Number of features per example."""
        return len(self.x[0])

    def new_epoch(self):
        """Reshuffle the data and restart batching from the beginning."""
        self.x, self.y = shuffle_data(self.x, self.y)
        self.index = 0

    def get_batch(self, batch_size, forever=False):
        """Return (ok, x, y) for the next batch of up to batch_size examples.

        When the epoch is exhausted: with forever=True the data is
        reshuffled and batching continues; otherwise (False, 0, 0) is
        returned once, after which iteration restarts.
        """
        #print(" -- i=", self.index, len(self.x))
        if self.index == len(self.x):
            self.index = 0
            if forever:
                self.new_epoch()
            else:
                return False, 0, 0
        start = self.index
        end = min(len(self.x), start + batch_size)
        # BUG fix: np.float was removed in NumPy 1.24; the builtin float is
        # the documented replacement (alias for float64).
        x = (np.asarray( self.x[start:end], dtype=float ) - self.mean)/ (self.std + 1e-10)
        y = np.stack( [self.vector_by_name(name) for name in self.y[start:end]] )
        self.index = end
        return True, x, y
|
import tensorflow as tf
import config
from tensorflow.compat.v1 import placeholder, Variable, get_variable
from tensorflow.contrib import slim
def Input(input_shape):
    """Create a float32 TF1 placeholder with the given shape."""
    return placeholder(tf.float32, input_shape)
def Conv2D(inputs, kernel=3, output_channel=64, stride=1):
    """Square 2-D convolution: SAME padding, NHWC layout, no activation,
    Xavier-initialized weights."""
    return slim.conv2d(inputs, output_channel, [kernel, kernel],
        stride, 'SAME', data_format='NHWC', activation_fn=None,
        weights_initializer=tf.contrib.layers.xavier_initializer())
def PReLU(inputs, alpha=0.2):
    """Parametric ReLU with a fixed negative slope alpha.

    (x - |x|) / 2 equals min(x, 0), so the second term is alpha times the
    negative part of the input.
    """
    positive = tf.nn.relu(inputs)
    negative = alpha * (inputs - abs(inputs)) * 0.5
    return positive + negative
def LeakyReLU(inputs, alpha=0.2):
    """Leaky ReLU with negative slope alpha."""
    return tf.nn.leaky_relu(inputs, alpha)
def Sigmoid(inputs):
    """Element-wise sigmoid activation."""
    return tf.nn.sigmoid(inputs)
def BatchNormal(inputs, is_training=True):
    """Batch normalization (no scale gamma, fused kernel); moving averages
    are updated through the graph's UPDATE_OPS collection."""
    return slim.batch_norm(inputs, decay=0.9, epsilon=0.001,
        updates_collections=tf.compat.v1.GraphKeys.UPDATE_OPS,
        scale=False, fused=True, is_training=is_training)
def phaseShift(inputs, scale, shape_1, shape_2):
    """Periodic-shuffle helper for PixelShuffler: reshape to shape_1,
    interleave the two inner spatial axes, reshape to shape_2.

    NOTE(review): the `scale` parameter is unused here -- the scale is
    already baked into shape_1/shape_2 by the caller; kept for interface
    compatibility.
    """
    # Tackle the condition when the batch is None
    X = tf.reshape(inputs, shape_1)
    X = tf.transpose(X, [0, 1, 3, 2, 4])
    return tf.reshape(X, shape_2)
def PixelShuffler(inputs, scale=2):
    """Sub-pixel upsampling: rearrange an NHWC tensor with C = k*scale^2
    channels into (N, H*scale, W*scale, k) via per-channel periodic shuffle."""
    size = tf.shape(inputs)
    batch_size = size[0]
    h = size[1]
    w = size[2]
    # Channel count must be known statically to compute the split.
    c = inputs.get_shape().as_list()[-1]
    # Get the target channel size
    channel_target = c // (scale * scale)
    channel_factor = c // channel_target
    shape_1 = [batch_size, h, w, channel_factor // scale, channel_factor // scale]
    shape_2 = [batch_size, h * scale, w * scale, 1]
    # Reshape and transpose for periodic shuffling for each channel
    input_split = tf.split(inputs, channel_target, axis=3)
    output = tf.concat([phaseShift(x, scale, shape_1, shape_2) for x in input_split], axis=3)
    return output
def SubPixelConv2d(inputs, kernel=3, output_channel=256, stride=1):
    """Conv2D followed by 2x pixel shuffle and PReLU (SRGAN-style upsample)."""
    net = Conv2D(inputs, kernel, output_channel, stride)
    net = PixelShuffler(net, scale=2)
    net = PReLU(net)
    return net
def Flatten(inputs):
    """Flatten all dimensions except the batch axis."""
    return slim.flatten(inputs)
def Dense(inputs, output_size):
    """Fully connected layer with Xavier-initialized weights and no activation."""
    return tf.layers.dense(inputs, output_size, activation=None,
        kernel_initializer=tf.contrib.layers.xavier_initializer())
#!/usr/bin/env python
"""
Script for testing out basic functionality of Eads' adaboost implementation.
Software dependencies can be found at:
svn checkout http://svn.eadsware.com/tinyboost
svn checkout http://convert-xy.googlecode.com/svn/trunk/ convert-xy-read-only/
##### help(tiny_boost) :
X: A M by N array of N examples in M dimensions
ks: The feature indices of the weak hypotheses.
alphas: The alphas/weights of the weak hypotheses.
thresholds: The thresholds of the decision stumps of the weak hypotheses.
directions: The directions of the decision stumps of the weak hypotheses.
sum(alphas) :: total weights
preds / sum(alphas) :: normalized predictions, range: [-1..1]
"""
import sys, os
# Python 2 script: train AdaBoost on the ionosphere dataset and report
# per-example predictions against the training labels.
if __name__ == '__main__':
    import tiny_boost
    os.chdir('/home/pteluser/src/tinyboost')
    import numpy
    X = numpy.loadtxt("iono.txt",delimiter=",")
    Y=X[:,-1] ## Last column are the labels
    X=X[:,:-1] # Everything but the last column are the feature vectors
    # NOTE: the training function requires the feature vectors to be columns of the matrix. Each row is the entire dataset wrt to a single feature. X is M by N where M=number of features and N=number of training examples.
    # NOTE: The 3rd argument (100 in this example) is the number of rounds of boosting to run. If the labels are in the set {-1, 1}, then this is a traditional AdaBoost. If the labels are in the interval [-1, 1] then it uses an adaption of AdaBoost which Eads came up with.
    (k, alphas, thresholds, directions) = tiny_boost.adaboost_train(X.T, Y, 100)
    ##### Generate predictions using the trained classifier and the training set:
    preds = tiny_boost.adaboost_apply(X.T,k, alphas, thresholds, directions)
    # Normalize to [-1, 1]: sign gives the class, magnitude the confidence.
    alphas_sum = numpy.sum(alphas)
    normalized_preds = preds / alphas_sum
    for i, pred in enumerate(normalized_preds):
        confidence = numpy.abs(pred)
        prediction = numpy.sign(pred)
        orig_int_classif = int(Y[i])
        note = 'match'
        if prediction != orig_int_classif:
            note = 'misclassified'
        print "i=%4d conf=%lf pred=%2d orig=%2d %s" % (i, confidence, prediction, orig_int_classif, note)
    print
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.