code stringlengths 2 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int32 2 1.05M |
|---|---|---|---|---|---|
# Bench test (Python 2): power the cooling rig up, poll temperature sensors
# until Ctrl-C, then shut everything down in reverse order.
import RPi.GPIO as GPIO
import time
import utils
GPIO.setmode(GPIO.BOARD)  # pin numbers refer to the physical header layout
pwr = utils.PSU(13, 15)
pwr.on()
print "Power on"
fan = utils.Fan(32)
fan.setDC(90)  # duty cycle in percent
print "Fan set at 90%"
pump = utils.NMOS(11)
pump.on()
print "Pump on"
relay = utils.NMOS(16)
relay.on()
print "Peltier on"
# DS18B20 1-wire temperature probes, addressed by device id.
t_amb = utils.Therm('28-000004e08693')
t_c_b = utils.Therm('28-000004e0f7cc')
t_c_m = utils.Therm('28-000004e0840a')
t_c_t = utils.Therm('28-000004e08e26')
t_hs = utils.Therm('28-000004e0804f')
try:
    while(1):
        # Poll every sensor; store_temp() presumably records the reading -- TODO confirm in utils.
        t_amb.store_temp()
        t_c_b.store_temp()
        t_c_m.store_temp()
        t_c_t.store_temp()
        t_hs.store_temp()
        # print "Ambient temperature: " + str(t_amb.store_temp())
        # print "Down temperature in cooler: " + str(t_c_b.store_temp())
        # print "Middle temperature in cooler: " + str(t_c_m.store_temp())
        # print "Up temperture in cooler: " + str(t_c_t.store_temp())
        # print "Heatsink temperature: " + str(t_hs.store_temp())
except KeyboardInterrupt:
    # Ctrl-C: power down in reverse order of startup, then release the pins.
    print "Exiting gracefully"
    relay.off()
    print "Peltier off"
    pump.off()
    print "Pump off"
    pwr.off()
    print "Power off"
    GPIO.cleanup()
    print "Goodbye!"
| Wollert/beer | test_run.py | Python | mit | 1,166 |
from django.conf.urls import patterns, include, url
from django.contrib import admin
# Root URLconf: everything except /admin/ is delegated to the chatter.base app.
urlpatterns = patterns(
    '',
    # Examples:
    # url(r'^$', 'chatter.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    url(r'', include('chatter.base.urls')),
    url(r'^admin/', include(admin.site.urls)),
)
# The documentation for authentication views can be found at:
# https://docs.djangoproject.com/en/1.7/topics/auth/default/#module-django.contrib.auth.views
# Auth views are referenced by dotted string (pre-Django-1.10 `patterns` style).
urlpatterns += patterns(
    'django.contrib.auth.views',
    url(r'^login/$', 'login', name='login'),
    url(r'^logout/$', 'logout_then_login', name='logout'),
    url(r'^reset/$', 'password_reset', name='password_reset'),
    url(r'^reset/done/$', 'password_reset_done', name='password_reset_done'),
    url(
        r'^reset/confirm/'
        r'(?P<uidb64>[0-9A-Za-z_\-]+)/'
        r'(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
        'password_reset_confirm',
        # NOTE(review): this name uses a hyphen unlike its siblings'
        # underscores; reverse() callers depend on it, confirm before renaming.
        name='password_reset-confirm'),
    url(
        r'^reset/complete/$',
        'password_reset_complete',
        name='password_reset_complete'),
)
| scott-w/pyne-django-tutorial | chatter/chatter/urls.py | Python | mit | 1,113 |
from tkinter import *
from PIL import Image, ImageTk
from mandelbrot import *
from julia_set import *
class App(object):
    """Interactive fractal explorer.

    Drag a rubber-band rectangle on the canvas to zoom in; the slider sets
    the iteration count and re-renders on release.
    """

    def __init__(self, master):
        """Build the canvas, render the initial fractal, wire up events."""
        # Drag state: -1 means "no selection in progress". The original code
        # never initialized these, so a <B1-Motion> delivered before a press
        # raised AttributeError in motion().
        self.sx = self.sy = -1
        # CANVAS
        self.ulx, self.uly, self.drx, self.dry, self.def_width = default_settings()[
            :5]
        self.image = ImageTk.PhotoImage(make_fractal(*default_settings()))
        self.canvas = Canvas(master, width=self.image.width(),
                             height=self.image.height())
        self.canvas.grid(column=2, row=1)
        self.canvas.create_image(0, 0, image=self.image, anchor=NW)
        self.canvas.bind('<ButtonPress-1>', self.press)
        self.canvas.bind('<ButtonRelease-1>', self.release)
        self.canvas.bind('<B1-Motion>', self.motion)
        # ITERATIONS
        self.iterval = IntVar(value=50)
        self.iterslider = Scale(master, from_=0, to=2000, variable=self.iterval,
                                orient=HORIZONTAL, length=250)
        self.iterslider.grid(row=1, column=1)
        self.iterslider.bind('<ButtonRelease-1>', self.update_image)

    def press(self, event):
        """Remember where the selection rectangle starts."""
        self.sx, self.sy = event.x, event.y

    def release(self, event):
        """Map the selected pixel rectangle to fractal coordinates and zoom."""
        self.ex, self.ey = event.x, event.y
        if self.ex == self.sx or self.ey == self.sy:
            return  # zero-area selection: ignore
        self.sx, self.ex = sorted([self.ex, self.sx])
        self.sy, self.ey = sorted([self.ey, self.sy])
        sysw = self.drx - self.ulx  # viewport width in fractal coordinates
        sysh = self.uly - self.dry  # viewport height in fractal coordinates
        imw, imh = self.image.width(), self.image.height()
        oldx, oldy = self.ulx, self.dry
        # The y axis is flipped between screen and fractal coordinates.
        self.ulx = oldx + self.sx/imw*sysw
        self.uly = oldy + self.ey/imh*sysh
        self.drx = oldx + self.ex/imw*sysw
        self.dry = oldy + self.sy/imh*sysh
        self.update_image()

    def motion(self, event):
        """Redraw the rubber-band rectangle while the mouse is dragged."""
        if self.sx == -1:
            return  # no press seen yet
        ex, ey = event.x, event.y
        try:
            self.canvas.delete(self.rect)
        except AttributeError:
            # First drag of a selection: no rectangle exists yet.
            pass
        finally:
            self.rect = self.canvas.create_rectangle((self.sx, self.sy, ex, ey), fill='',
                                                     outline='white')

    def update_image(self, *args):
        """Recompute the fractal for the current viewport and display it."""
        img = make_fractal(self.ulx, self.uly, self.drx, self.dry, self.def_width,
                           self.iterval.get())
        self.image = ImageTk.PhotoImage(img)
        self.canvas.config(width=self.image.width(),
                           height=self.image.height())
        self.canvas.create_image(0, 0, image=self.image, anchor=NW)
# Launch the explorer window and hand control to the Tk event loop.
root = Tk()
root.wm_title("Fractal Explorer")
app = App(root)
root.mainloop()
| KrozekGimVic/2013-Fraktali | main.py | Python | mit | 2,613 |
import unittest
import tests
import tests.test_logic
import tests.test_graph
import tests.test_output
| sambayless/monosat | src/monosat/api/python/tests/__init__.py | Python | mit | 102 |
# Create your views here.
from django.http import HttpResponse
from django.shortcuts import render, get_object_or_404
from django.contrib.auth.models import User
from repository.models import PisiPackage
def show_user (request, name=None):
user = get_object_or_404 (User, username=name)
context = { 'user' : user }
packages = None
try:
packages = PisiPackage.objects.filter(known_user=user).order_by("-date_updated")
count = len(packages)
total_packages = len(PisiPackage.objects.all())
pct = float (float(count) / (total_packages)) * 100
packages = packages[:7]
context = { 'user': user, 'package_count': count, 'package_ratio': pct, 'packages': packages}
except Exception, e:
print e
pass
return render (request, "profiles/individ.html", context)
| SolusOS-discontinued/RepoHub | profiles/views.py | Python | mit | 785 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated migration: adds natural-unit circular-mean fields to
    NormalizedPhotoSet and links each PhotoCluster to its normalized set.
    Do not edit by hand beyond what `makemigrations` would produce."""

    dependencies = [
        ('photoplaces_web', '0003_photocluster_normalized_centers_dirty'),
    ]

    operations = [
        migrations.AddField(
            model_name='normalizedphotoset',
            name='hour_mean_natural',
            field=models.FloatField(null=True, blank=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='normalizedphotoset',
            name='month_mean_natural',
            field=models.FloatField(null=True, blank=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='photocluster',
            name='normalized_set',
            field=models.OneToOneField(related_name='+', null=True, blank=True, to='photoplaces_web.NormalizedPhotoSet'),
            preserve_default=True,
        ),
    ]
| joonamo/photoplaces | photoplaces/photoplaces_web/migrations/0004_auto_20141105_1236.py | Python | mit | 984 |
from app import app, grabber, merge, segment
from flask import render_template, request, url_for, jsonify
import cv2
import numpy as np
import os, re
def rm(dir, pattern):
    """Delete every file in *dir* whose name matches *pattern*.

    Note: *pattern* is a regular expression tested with re.search, not a
    shell glob, so 'dg*' means 'd followed by zero or more g'.
    """
    for entry in os.listdir(dir):
        if re.search(pattern, entry):
            os.remove(os.path.join(dir, entry))
@app.route('/')
@app.route('/index')
def home():
    """Serve the landing page."""
    return render_template('index.html')
@app.route('/grabber/', methods=['POST'])
def doGrabber():
    """Fetch satellite imagery for the posted lat/lon/zoom, run the 'smart'
    (neural net) and 'dumb' (raw image) segmentations, merge them, and
    return the generated image URLs plus building areas as JSON."""
    # clean up folders
    # NOTE(review): rm() applies these as regexes, so 'dg*' matches any name
    # containing 'd' -- confirm the intended glob semantics.
    rm('app/static/img', 'dg*')
    rm('app/ma_prediction_400','dg*')
    data = request.form
    lat = data['lat']
    lon = data['lon']
    zoom = data['zoom']
    with open('app/static/secrets.txt') as f: token = f.read()
    # get the location from digital globe
    g = grabber.Grabber('app/static/img', token,'png')
    # 'time' is a timestamp string used to key all generated filenames.
    time = g.grab(lat, lon, zoom)
    # 'smart' means that the image went through the neural net prediction script
    smart_contours = segment.predict(time,'app/ma_prediction_400/dg%s.png'%(time), 'app/static/img/nn_dg'+time+'.png')
    smart_areas = segment.get_areas(smart_contours.values())
    # 'dumb' means that the segmentation was on the original image
    dumb_contours = segment.dumb_contours('app/static/img/dg'+time+'.png','app/static/img/dumy_dg'+time+'.png')
    dumb_areas = segment.get_areas(dumb_contours.values())
    # uses 'smart' locations to pick out contours in the 'dumb' image
    buildings = merge.intersect(smart_contours, dumb_contours)
    merge.mkimage('app/static/img/dg'+time+'.png','app/static/img/merge_dg'+time+'.png', buildings)
    areas = segment.get_areas(buildings.values())
    url_nn = url_for('static', filename='img/nn_base_dg'+time+'.png')
    url_smart = url_for('static', filename='img/nn_dg'+time+'.png')
    url_dumb = url_for('static', filename='img/dumy_dg'+time+'.png')
    url_merge = url_for('static', filename='img/merge_dg'+time+'.png')
    # # for cameron
    # dumb_contours = segment.dumb_contours('app/static/img/dg'+time+'.png','app/static/img/dumy_dg'+time+'.png')
    # dumb_areas = segment.get_areas(dumb_contours.values())
    # areas = dumb_areas
    # url_nn = ''
    # url_smart = ''
    # url_merge = ''
    # url_dumb = url_for('static', filename='img/dumy_dg'+time+'.png')
    return jsonify(url_nn=url_nn, url_smart=url_smart, url_dumb=url_dumb, url_merge=url_merge,
                   areas=areas
                   )
| ncmatson/OSTE | app/views.py | Python | mit | 2,380 |
import logging
from vcftoolbox import Genotype
from puzzle.models import Genotype as puzzle_genotype
logger = logging.getLogger(__name__)
class GenotypeExtras(object):
    """Mixin with methods that deal with genotyping."""

    def _add_genotype_calls(self, variant_obj, variant_line, case_obj):
        """Attach one genotype call per case individual to the variant.

        Args:
            variant_obj (puzzle.models.Variant): variant to populate
            variant_line (str): raw tab-separated VCF line
            case_obj (puzzle.models.Case): case whose individuals are called
        """
        columns = variant_line.split('\t')
        # Columns 9+ hold per-sample calls; with 8 or fewer columns there
        # are no genotyped individuals to add.
        if len(columns) > 8:
            format_keys = columns[8].split(':')
            for ind in case_obj.individuals:
                call_fields = columns[9 + ind.ind_index].split(':')
                gt = Genotype(**dict(zip(format_keys, call_fields)))
                variant_obj.add_individual(puzzle_genotype(
                    sample_id = ind.ind_id,
                    genotype = gt.genotype,
                    case_id = case_obj.name,
                    phenotype = ind.phenotype,
                    ref_depth = gt.ref_depth,
                    alt_depth = gt.alt_depth,
                    genotype_quality = gt.genotype_quality,
                    depth = gt.depth_of_coverage,
                    supporting_evidence = gt.supporting_evidence,
                    pe_support = gt.pe_support,
                    sr_support = gt.sr_support,
                ))
| robinandeer/puzzle | puzzle/plugins/vcf/mixins/variant_extras/genotype.py | Python | mit | 1,708 |
from app.models import EMAIL_TYPES
from app.schema_validation.definitions import uuid, datetime, date
def email_types():
    """Return a JSON-schema fragment accepting any configured email type."""
    return {
        "type": "string",
        # Alternation over every known type name.
        "pattern": '|'.join(EMAIL_TYPES),
        "validationMessage": "is not an email type",
    }
# JSON Schema (draft-04) payload definitions for the email endpoints.

# Body of POST /email (create a new email).
post_create_email_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": "POST schema for creating email",
    "type": "object",
    "properties": {
        "event_id": uuid,
        "details": {"type": ["string", "null"]},
        "extra_txt": {"type": ["string", "null"]},
        "replace_all": {"type": "boolean"},
        "email_type": email_types(),
        "send_starts_at": date,
        "expires": date
    },
    "required": ["email_type"]
}
# Body of POST /email/preview (same as create, minus scheduling fields).
post_preview_email_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": "POST schema for preview email",
    "type": "object",
    "properties": {
        "event_id": uuid,
        "details": {"type": ["string", "null"]},
        "extra_txt": {"type": ["string", "null"]},
        "replace_all": {"type": "boolean"},
        "email_type": email_types()
    },
    "required": ["email_type"]
}
# Body of POST /email/<id> (update); every field optional.
post_update_email_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": "POST schema for updating email",
    "type": "object",
    "properties": {
        "event_id": uuid,
        "details": {"type": ["string", "null"]},
        "extra_txt": {"type": ["string", "null"]},
        "replace_all": {"type": "boolean"},
        "email_type": email_types(),
        "send_starts_at": date,
        "expires": date,
        "reject_reason": {"type": ["string", "null"]},
    },
}
# One legacy email record in an import payload (numeric ids arrive as strings).
post_import_email_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": "POST schema for importing emails",
    "type": "object",
    "properties": {
        "id": {"format": "number", "type": "string"},
        "eventdetails": {"type": "string"},
        "extratxt": {"type": "string"},
        "replaceAll": {"type": "string"},
        "timestamp": datetime
    },
    "required": ["id", "timestamp"]
}
# Array wrapper around post_import_email_schema.
post_import_emails_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": "POST schema for importing emails",
    "type": "array",
    "items": {
        "type": "object",
        "$ref": "#/definitions/email"
    },
    "definitions": {
        "email": post_import_email_schema
    }
}
# One legacy email-to-mailing-list membership record.
post_import_email_member_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": "POST schema for importing emails",
    "type": "object",
    "properties": {
        "id": {"format": "number", "type": "string"},
        "mailinglistid": {"format": "number", "type": "string"},
        "timestamp": datetime
    },
    "required": ["id", "mailinglistid", "timestamp"]
}
# Array wrapper around post_import_email_member_schema.
post_import_email_members_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": "POST schema for importing emails members",
    "type": "array",
    "items": {
        "type": "object",
        "$ref": "#/definitions/email_member"
    },
    "definitions": {
        "email_member": post_import_email_member_schema
    }
}
# Body of POST /message (contact-form style message); all fields required.
post_send_message_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": "POST schema for send message",
    "type": "object",
    "properties": {
        "name": {"type": "string"},
        "email": {"format": "email", "type": "string"},
        "reason": {"type": "string"},
        "message": {"type": "string"}
    },
    "required": ["name", "email", "reason", "message"]
}
| NewAcropolis/api | app/routes/emails/schemas.py | Python | mit | 3,656 |
#!/usr/bin/env python
import dataproc
import itertools
import numpy as np
import random
import time
import sys
import tensorflow as tf
from collections import defaultdict
from ltrdnn import LTRDNN
# Command-line configuration via tf.flags; FLAGS is the parsed singleton.
flags = tf.flags
FLAGS = flags.FLAGS
# model related:
flags.DEFINE_integer('vocab_size', 1532783, 'vocabulary size')
flags.DEFINE_integer('emb_dim', 256, 'embedding dimension')
flags.DEFINE_integer('repr_dim', 256, 'sentence representing dimension')
flags.DEFINE_string('combiner', 'sum', 'how to combine words in a sentence')
# training related:
flags.DEFINE_string('train_file', '', 'training data file')
flags.DEFINE_string('valid_file', '', 'validation data file')
flags.DEFINE_string('test_file', '', 'testing data file')
flags.DEFINE_integer('train_bs', 128, 'train batch size')
flags.DEFINE_integer('max_epoch', 1, 'max epoch')
flags.DEFINE_integer('max_iter', 1000, 'max iteration')
flags.DEFINE_float('eps', 1.0, 'zero-loss threshold epsilon in hinge loss')
flags.DEFINE_integer('eval_steps', 20, 'every how many steps to evaluate')
flags.DEFINE_string('model_ckpt_file', './model_ckpt/model.ckpt', 'model file')
flags.DEFINE_string('embedding_file', './words_embedding', 'embedding file')
# log related:
flags.DEFINE_string('log_path', './log', 'log path')
def load_embedding(embf, vocab_size, emb_size):
    """Load a pretrained embedding matrix from file.

    Each line of *embf* is "<word_idx> <v1> <v2> ...". Word indices absent
    from the file keep an all-zero row.

    Args:
        embf: path to the embedding text file.
        vocab_size: number of rows of the returned matrix.
        emb_size: embedding dimension (columns).
    Returns:
        numpy array of shape (vocab_size, emb_size).
    """
    emb = np.zeros((vocab_size, emb_size))
    # 'with' guarantees the file handle is closed (original leaked it).
    with open(embf) as f:
        for line in f:
            flds = line.rstrip(' \n').split(' ')
            word_idx = int(flds[0])
            # Materialize the values as a list: on Python 3, map() is lazy and
            # np.array(map_obj) yields a useless 0-d object array.
            emb[word_idx] = np.array([float(v) for v in flds[1:]])
    return emb
def inp_fn(data):
    """Extract training data.

    Each line: "<query>\t<n_pos>\t<pos...>\t<n_neg>\t<neg...>", every
    sentence being a space-separated list of word ids. One positive and one
    negative title are sampled per instance.

    @data   : list of lines from the training file.
    @return : (query, pos, neg) sparse feeds, each a tuple of
              (indices, values, dense_shape=[batch_size, seq_len]).
    """
    def _random_choose(l): return random.sample(l, 1)[0]
    sp_feed = defaultdict(list)
    batch_size = len(data)
    seq_len = 0
    for i, inst in enumerate(data):
        flds = inst.split('\t')
        # Materialize word-id lists eagerly: map() objects have no len() on
        # Python 3, which broke the seq_len computation below.
        query = [int(w) for w in flds[0].split(' ')]
        pos_title_num = int(flds[1])
        pos_titles = flds[2:2 + pos_title_num]
        # flds[2 + pos_title_num] is the negative count; the titles follow it.
        neg_titles = flds[2 + pos_title_num + 1:]
        pos_title = [int(w) for w in _random_choose(pos_titles).split(' ')]
        neg_title = [int(w) for w in _random_choose(neg_titles).split(' ')]
        seq_len = max(seq_len, len(query), len(pos_title), len(neg_title))
        for j, word_id in enumerate(query):
            sp_feed['qry_idx'].append([i, j])
            sp_feed['qry_val'].append(word_id)
        for j, word_id in enumerate(pos_title):
            sp_feed['pos_idx'].append([i, j])
            sp_feed['pos_val'].append(word_id)
        for j, word_id in enumerate(neg_title):
            sp_feed['neg_idx'].append([i, j])
            sp_feed['neg_val'].append(word_id)
    return (sp_feed['qry_idx'], sp_feed['qry_val'], [batch_size, seq_len]), \
        (sp_feed['pos_idx'], sp_feed['pos_val'], [batch_size, seq_len]), \
        (sp_feed['neg_idx'], sp_feed['neg_val'], [batch_size, seq_len])
def eval_fn(inst):
    """Extract evaluating data.

    @inst   : one line of the evaluation file (same layout as training data).
    @return : (query, pos, neg) sparse feeds covering the full cartesian
              product of queries x positive titles x negative titles, each a
              tuple of (indices, values, dense_shape=[batch_size, seq_len]).
    """
    flds = inst.split('\t')
    pos_num = int(flds[1])
    # Materialize word-id lists eagerly: map() objects have no len() on
    # Python 3, which broke the max-length computation below.
    qrys = [[int(w) for w in s.split(' ')] for s in flds[0:1]]
    poss = [[int(w) for w in s.split(' ')] for s in flds[2:2 + pos_num]]
    # flds[2 + pos_num] is the negative count; the titles follow it.
    negs = [[int(w) for w in s.split(' ')] for s in flds[2 + pos_num + 1:]]
    seq_len = max(max(len(x) for x in grp) for grp in (qrys, poss, negs))
    batch_size = len(qrys) * len(poss) * len(negs)
    sp_feed = defaultdict(list)
    for i, (qry, pos, neg) in enumerate(itertools.product(qrys, poss, negs)):
        for j, word_id in enumerate(qry):
            sp_feed['qry_idx'].append([i, j])
            sp_feed['qry_val'].append(word_id)
        for j, word_id in enumerate(pos):
            sp_feed['pos_idx'].append([i, j])
            sp_feed['pos_val'].append(word_id)
        for j, word_id in enumerate(neg):
            sp_feed['neg_idx'].append([i, j])
            sp_feed['neg_val'].append(word_id)
    return (sp_feed['qry_idx'], sp_feed['qry_val'], [batch_size, seq_len]), \
        (sp_feed['pos_idx'], sp_feed['pos_val'], [batch_size, seq_len]), \
        (sp_feed['neg_idx'], sp_feed['neg_val'], [batch_size, seq_len])
# ---- data: batched reader for training, whole validation file in memory ----
train_freader = dataproc.BatchReader(FLAGS.train_file, FLAGS.max_epoch)
with open(FLAGS.valid_file) as f:
    valid_data = [x.rstrip('\n') for x in f.readlines()]
valid_q, valid_pt, valid_nt = inp_fn(valid_data)
# ---- model + session ----
mdl = LTRDNN(
    vocab_size=FLAGS.vocab_size,
    emb_dim=FLAGS.emb_dim,
    repr_dim=FLAGS.repr_dim,
    combiner=FLAGS.combiner,
    eps=FLAGS.eps)
sess = tf.Session()
file_writer = tf.summary.FileWriter(FLAGS.log_path, sess.graph)
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
print 'loading pretrained embedding from file'
pretrained_emb = load_embedding(
    FLAGS.embedding_file, FLAGS.vocab_size, FLAGS.emb_dim)
mdl.assign_embedding(sess, pretrained_emb)
metrics = ['loss']
# ---- training loop: evaluate every FLAGS.eval_steps iterations ----
print 'train begin...'
for niter in xrange(FLAGS.max_iter):
    batch_data = train_freader.get_batch(FLAGS.train_bs)
    if not batch_data:
        break  # reader exhausted (max_epoch reached)
    train_q, train_pt, train_nt = inp_fn(batch_data)
    mdl.train_step(sess, train_q, train_pt, train_nt)
    if niter % FLAGS.eval_steps != 0:
        continue
    train_eval = mdl.eval_step(sess, train_q, train_pt, train_nt, metrics)
    valid_eval = mdl.eval_step(sess, valid_q, valid_pt, valid_nt, metrics)
    ntime = time.strftime('%Y%m%d_%H:%M:%S', time.localtime(time.time()))
    print ntime, niter, \
        'train_loss:', train_eval, 'valid_loss:', valid_eval
# ---- checkpoint and final test-set evaluation ----
save_path = mdl.saver.save(
    sess, FLAGS.model_ckpt_file, global_step=mdl.global_step,
    write_meta_graph=False)
print 'model saved:', save_path
with open(FLAGS.test_file) as feval:
    acc = mdl.pairwise_accuracy(sess, feval, eval_fn)
print 'pairwise accuracy:', acc
sess.close()
| kn45/LTR-DNN | train.py | Python | mit | 6,330 |
def transform(old):
    """Invert {score: [letters]} into {lowercase_letter: score}."""
    new = {}
    for points, letters in old.items():
        for letter in letters:
            new[letter.lower()] = points
    return new
| rootulp/exercism | python/etl/etl.py | Python | mit | 119 |
from flask import Flask, request, redirect, session
import twilio.twiml
import navigation
# NOTE(review): hard-coded session signing key -- move to environment/config
# before production use.
SECRET_KEY = 'donuts'
logging = True  # module-level switch consumed by log()
app = Flask(__name__)
app.config.from_object(__name__)  # picks up SECRET_KEY above
def log(mesagge=""):
if logging:
print mesagge
@app.route("/", methods=['GET', 'POST'])
def main_reply():
    """Twilio SMS webhook: choose a scripted reply for the incoming message
    and remember the user's queries in the session cookie."""
    # Log values from request
    from_number = request.values.get('From', None)
    log(from_number)
    recieved_message = request.values.get('Body')
    log(recieved_message)
    # pick reply to message
    reply = navigation.choose_script(bodyText=recieved_message)
    # trim the length of the reply to one text (160-char SMS limit)
    if len(reply) > 160:
        reply = reply[0:159]
    if reply == "":
        reply = "Error."
    # get the response scheme from twilio and add reply as message body
    resp = twilio.twiml.Response()
    resp.message(reply.encode("utf-8"))
    # log server reply
    log(reply)
    # store previous queries of the user in a cookie
    searchs = session.get('searchs', [])
    searchs.append(recieved_message)
    # NOTE(review): 'replies' reads the 'searchs' key and is never written
    # back to the session -- looks like dead code, confirm intent.
    replies = session.get('searchs', [])
    replies.append(reply)
    # Save the new cmds/searchs list in the session
    session['searchs'] = searchs
    return str(resp)
if __name__ == "__main__":
    # Development server only; debug=True must not be used in production.
    app.run(debug=True)
| stefanp312/chat-bot | run.py | Python | mit | 1,264 |
import pytest
# One testinfra case per baseline package. NOTE: the parenthesized entries
# are plain strings (not tuples); the parens are redundant but harmless.
@pytest.mark.parametrize("name", [
    ("apt-file"),
    ("apt-transport-https"),
    ("arandr"),
    ("atom"),
    ("blktrace"),
    ("ca-certificates"),
    ("chromium-browser"),
    ("cowsay"),
    ("cron"),
    ("curl"),
    ("deluge"),
    ("diod"),
    ("docker-ce"),
    ("dropbox"),
    ("fonts-font-awesome"),
    ("git"),
    ("gnupg"),
    ("gnupg2"),
    ("gnupg-agent"),
    ("hardinfo"),
    ("handbrake"),
    ("handbrake-cli"),
    ("haveged"),
    ("htop"),
    ("i3"),
    ("iotop"),
    ("ipython"),
    ("jq"),
    ("language-pack-en-base"),
    ("laptop-mode-tools"),
    ("meld"),
    ("nfs-common"),
    ("ntop"),
    ("ntp"),
    ("openssh-client"),
    ("openssh-server"),
    ("openssh-sftp-server"),
    ("openssl"),
    ("pavucontrol"),
    ("pinta"),
    ("pulseaudio"),
    ("pulseaudio-module-x11"),
    ("pulseaudio-utils"),
    ("python"),
    ("python-pip"),
    ("scrot"),
    ("sl"),
    ("slack-desktop"),
    ("software-properties-common"),
    ("suckless-tools"),
    ("sysdig"),
    ("sysstat"),
    ("tree"),
    ("vagrant"),
    ("vim"),
    ("virtualbox"),
    ("vlc"),
    ("wget"),
    ("wireshark"),
    ("whois"),
    ("x264"),
    ("xfce4-terminal"),
    ("xfonts-terminus"),
    ("xinit"),
])
def test_packages(host, name):
    """Assert that every baseline package is installed on the target host."""
    pkg = host.package(name)
    assert pkg.is_installed
| wicksy/laptop-build | test/test_packages.py | Python | mit | 1,214 |
#coding=utf8
import thread, time, sys, os, platform
# Pick a platform getch() implementation at import time: termios/tty on
# POSIX, msvcrt on Windows; anything else is rejected with an exception.
try:
    import termios, tty
    termios.tcgetattr, termios.tcsetattr  # attribute probe: fails on stubs
    import threading
    OS = 'Linux'
except (ImportError, AttributeError):
    try:
        import msvcrt
        OS = 'Windows'
    except ImportError:
        raise Exception('Mac is currently not supported')
        OS = 'Mac'  # NOTE(review): unreachable after the raise above
    else:
        getch = msvcrt.getwch
else:
    def fn():
        """Read a single raw character, always restoring terminal settings."""
        try:
            fd = sys.stdin.fileno()
            old_settings = termios.tcgetattr(fd)
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        except:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
            raise Exception
        termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
        return ch
    getch = fn
# Maximum number of commands kept in the input history.
CMD_HISTORY = 30
class ChatLikeCMD():
    """Chat-like terminal command line (Python 2 only).

    A background printer thread keeps the prompt pinned at the bottom while
    output lines scroll above it; a reader thread consumes raw keystrokes
    and assembles them into commands, which are delivered via outPip.
    """
    def __init__(self, header = 'LittleCoder', symbol = '>', inPip = None, inputMaintain = False):
        self.strBuff = []       # characters typed but not yet submitted
        self.cmdBuff = []       # command history
        self.historyCmd = -1    # index into cmdBuff while browsing with UP/DOWN
        self.cursor = 0
        self.inPip = [] if inPip == None else inPip  # lines waiting to be printed
        self.outPip = []        # submitted commands for the host application
        self.isLaunch = False
        self.isPause = False
        self.header = header
        self.symbol = symbol
        self.inputMaintain = inputMaintain
    def reprint_input(self):
        """Redraw the prompt plus the current, unsubmitted input."""
        sys.stdout.write(self.header + self.symbol)
        if self.strBuff:
            for i in self.strBuff: sys.stdout.write(i)
        sys.stdout.flush()
    def getch(self):
        """Read one raw char via the module-level getch; normalize CR to LF."""
        c = getch()
        return c if c != '\r' else '\n'
    def get_history_command(self, direction):
        """Step through the command history; returns the command or '' / None."""
        if direction == 'UP':
            if self.historyCmd < CMD_HISTORY - 1 and self.historyCmd < len(self.cmdBuff) - 1: self.historyCmd += 1
        else:
            if self.historyCmd == 0: return ''
            if self.historyCmd > 0: self.historyCmd -= 1
        if -1 < self.historyCmd < len(self.cmdBuff): return self.cmdBuff[self.historyCmd]
    def output_command(self, s):
        """Deliver a finished command to outPip and append it to the history."""
        self.outPip.append(s if isinstance(s, unicode) else s.decode(sys.stdin.encoding))
        # NOTE(review): when the history is full this replaces cmdBuff with a
        # reversed copy of its first element (a string), instead of trimming
        # the list -- looks like a bug, verify before relying on history.
        if len(self.cmdBuff) >= CMD_HISTORY: self.cmdBuff = self.cmdBuff[::-1].pop()[::-1]
        self.cmdBuff.append(s)
    def print_thread(self):
        """Background loop: flush queued output lines above the prompt."""
        while self.isLaunch:
            if self.inPip:
                sys.stdout.write('\r' + ' ' * 50 + '\r')
                sys.stdout.flush()
                print self.inPip.pop()
                # linux special
                sys.stdout.write('\r')
                sys.stdout.flush()
                self.reprint_input()
            time.sleep(0.01)
    def fast_input_test(self):
        """Try to read a char within ~1ms; returns None if nothing arrived."""
        timer = threading.Timer(0.001, thread.interrupt_main)
        c = None
        try:
            timer.start()
            c = getch()
        except:
            pass
        timer.cancel()
        return c
    def process_direction_char(self, c):
        """Handle an arrow key (already decoded to A/B/C/D on Linux)."""
        if OS == 'Windows':
            # Map Windows scan codes onto the ANSI letter codes used below.
            if ord(c) == 72:
                c = 'A'
            elif ord(c) == 80:
                c = 'B'
            elif ord(c) == 77:
                c = 'C'
            elif ord(c) == 75:
                c = 'D'
        if ord(c) == 68: # LEFT
            self.process_char('\b')
            return
            # cursor bugs  (code below intentionally disabled)
            if self.cursor > 0:
                if OS == 'Windows':
                    sys.stdout.write(chr(224) + chr(75))
                else:
                    sys.stdout.write(chr(27) + '[C')
                self.cursor -= 1
        elif ord(c) == 67: # RIGHT
            return
            # cursor bugs  (code below intentionally disabled)
            if self.cursor < len(self.strBuff):
                if OS == 'Windows':
                    sys.stdout.write(chr(224) + chr(77))
                else:
                    sys.stdout.write(chr(27) + '[D')
                self.cursor += 1
        elif ord(c) == 65: # UP
            hc = self.get_history_command('UP')
            if not hc is None:
                self.strBuff = [i for i in hc]
                self.cursor = len(hc)
                sys.stdout.write('\r' + ' ' * 50 + '\r')
                self.reprint_input()
        elif ord(c) == 66: # DOWN
            hc = self.get_history_command('DOWN')
            if not hc is None:
                self.strBuff = [i for i in hc]
                self.cursor = len(hc)
                sys.stdout.write('\r' + ' ' * 50 + '\r')
                self.reprint_input()
        else:
            raise Exception(c)
    def process_char(self, c):
        """Dispatch one raw keystroke: escape sequences, Ctrl-C, backspace,
        Enter, Windows arrow prefix (224), or a plain printable char."""
        if ord(c) == 27: # Esc
            if OS == 'Linux':
                # Probe for a full ANSI arrow sequence (ESC [ A..D).
                fitc1 = self.fast_input_test()
                if ord(fitc1) == 91:
                    fitc2 = self.fast_input_test()
                    if 65 <= ord(fitc2) <= 68:
                        self.process_direction_char(fitc2)
                        return
            sys.stdout.write('\r' + ' ' * 50 + '\r')
            sys.stdout.flush()
            self.reprint_input()
            self.outPip.append(c)
            time.sleep(0.02)
            # Replay any chars swallowed by the arrow-sequence probe.
            if 'fitc1' in dir():
                self.process_char(fitc1)
                self.cursor += 1
            if 'fitc2' in dir():
                self.process_char(fitc2)
                self.cursor += 1
        elif ord(c) == 3: # Ctrl+C
            self.stop()
            self.isPause = True
            if raw_input('Exit?(y) ') == 'y':
                sys.stdout.write('Command Line Exit')
            else:
                self.start()
                self.isPause = False
        elif ord(c) in (8, 127): # Backspace
            if self.strBuff:
                if ord(self.strBuff[-1]) < 128:
                    sys.stdout.write('\b \b')
                else:
                    # Multi-byte char: erase a double-width cell.
                    sys.stdout.write('\b\b  \b')
                    if OS == 'Linux':
                        # UTF-8 continuation bytes -- drop them too.
                        self.strBuff.pop()
                        self.strBuff.pop()
                self.strBuff.pop()
                self.cursor -= 1
        elif c == '\n':
            if self.strBuff:
                if self.inputMaintain:
                    sys.stdout.write(c)
                else:
                    sys.stdout.write('\r' + ' ' * 50 + '\r')
                sys.stdout.flush()
                self.reprint_input()
                self.output_command(''.join(self.strBuff))
                self.strBuff = []
                self.historyCmd = -1
        elif ord(c) == 224: # Windows direction
            if OS == 'Windows':
                direction = self.getch()
                self.process_direction_char(direction)
        else:
            sys.stdout.write(c)
            sys.stdout.flush()
            self.strBuff.append(c)
            self.cursor += 1
    def command_thread(self):
        """Background loop: read keystrokes and feed them to process_char."""
        c = None
        while self.isLaunch:
            c = self.getch()
            self.process_char(c)
            time.sleep(0.01)
    def start(self):
        """Launch the printer and reader threads and show the prompt."""
        self.isLaunch = True
        thread.start_new_thread(self.print_thread, ())
        self.reprint_input()
        thread.start_new_thread(self.command_thread, ())
    def stop(self):
        """Clear the prompt line and signal both threads to exit."""
        sys.stdout.write('\r' + ' ' * 50 + '\r')
        sys.stdout.flush()
        self.isLaunch = False
    def print_line(self, msg = None):
        """Queue one line for the printer thread."""
        self.inPip.append(msg)
    def clear(self):
        """Clear the terminal and redraw the prompt."""
        os.system('cls' if platform.system() == 'Windows' else 'clear')
        self.reprint_input()
    def get_command_pip(self):
        """Return the list that receives finished commands."""
        return self.outPip
    def set_header(self, header):
        self.header = header
if __name__ == '__main__':
    # Demo: echo back whatever is typed while a second thread keeps
    # injecting output lines above the prompt every 3 seconds.
    c = ChatLikeCMD()
    s = c.get_command_pip()
    c.start()
    def loopinput(c):
        while True:
            c.print_line('LOOP INPUT......')
            time.sleep(3)
    thread.start_new_thread(loopinput, (c,))
    while c.isLaunch or c.isPause:
        if s:
            c.print_line(s.pop())
        time.sleep(0.01)
| littlecodersh/EasierLife | Plugins/ChatLikeCMD/ChatLikeCMD.py | Python | mit | 7,974 |
class SquareFreeString:
    """Decide whether a string contains a 'square': two identical adjacent
    substrings (e.g. 'abab' contains the square 'ab'+'ab')."""

    def isSquareFree(self, s):
        """Return 'square-free' or 'not square-free' for string *s*."""
        n = len(s)
        for start in range(n):
            for size in range(1, n):
                left = s[start:start + size]
                right = s[start + size:start + 2 * size]
                # Truncated slices near the end differ in length, so a
                # spurious match is impossible.
                if left == right:
                    return "not square-free"
        return "square-free"
| mikefeneley/topcoder | src/SRM-701/square_free_string.py | Python | mit | 340 |
import vtk
import math
def get_screenshot(renWin, filename):
    """Render *renWin* and save its current contents to *filename* as PNG."""
    renWin.Render()
    frame_grabber = vtk.vtkWindowToImageFilter()
    frame_grabber.SetInput(renWin)
    frame_grabber.Update()
    png_writer = vtk.vtkPNGWriter()
    png_writer.SetFileName(filename)
    png_writer.SetInput(frame_grabber.GetOutput())
    png_writer.Write()
    renWin.Render()
###############################################################################
# setup transform
#
transform = vtk.vtkTransform()
transform.RotateWXYZ(180, 1, 0, 0)
move = [0, 0, 0]
transform.Translate(move)
#transformFilter = vtk.vtkTransformPolyDataFilter()
#transformFilter.SetTransform(transform)
transforms = []
transforms_filter = []
###############################################################################
# read obj file
#
obj_filename = '/mnt/data1/StandardBrain/SB/SB256.obj'
object = vtk.vtkOBJReader()
object.SetFileName(obj_filename)
objectSmoother = vtk.vtkSmoothPolyDataFilter()
objectSmoother.SetInputConnection(object.GetOutputPort())
objectSmoother.SetNumberOfIterations(100)
transforms_filter.append(vtk.vtkTransformPolyDataFilter())
transforms_filter[-1].SetTransform(transform)
transforms_filter[-1].SetInputConnection(objectSmoother.GetOutputPort())
transforms_filter[-1].Update()
objectMapper = vtk.vtkPolyDataMapper()
objectMapper.SetInputConnection(transforms_filter[-1].GetOutputPort())
objectActor = vtk.vtkActor()
objectActor.SetMapper(objectMapper)
#objectActor.GetProperty().SetRepresentationToWireframe();
objectActor.GetProperty().SetColor(0.5, 0.5, 0.5)
objectActor.GetProperty().SetOpacity(0.4)
#objectActor.GetProperty().SetOpacity(1.0)
outline = vtk.vtkOutlineFilter()
outline.SetInputConnection(transforms_filter[-1].GetOutputPort())
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(outlineMapper)
outlineActor.GetProperty().SetColor(1.0, 0.0, 0.0)
outlineActor.GetProperty().SetOpacity(0.2)
outlineActor.GetProperty().SetLineWidth(5)
line = vtk.vtkLineSource()
line.SetPoint1(0, -50, 0)
line.SetPoint2(100, -50, 0)
line.SetResolution(100)
line_mapper = vtk.vtkPolyDataMapper()
line_mapper.SetInputConnection(line.GetOutputPort())
line_actor = vtk.vtkActor()
line_actor.SetMapper(line_mapper)
###############################################################################
# read second obj file
#
filepos = '/mnt/data1/StandardBrain/SB/LALobj/'
obj_list = ['LAL1.obj','LAL2.obj','LAL3.obj','LAL4.obj','LAL5.obj', 'LAL1_flip.obj', 'LAL2_flip.obj', 'LAL3_flip.obj', 'LAL4_flip.obj', 'LAL5_flip.obj']
lut = vtk.vtkLookupTable()
lut.Build()
scalar_bar = vtk.vtkScalarBarActor()
scalar_bar.SetLookupTable(lut)
objs = []
objs_mapper = []
objs_actor = []
objs_smoother = []
for i, obj_name in enumerate(obj_list):
objs.append(vtk.vtkOBJReader())
objs[-1].SetFileName(filepos+obj_name)
objs_smoother.append(vtk.vtkSmoothPolyDataFilter())
objs_smoother[-1].SetInputConnection(objs[-1].GetOutputPort())
objs_smoother[-1].SetNumberOfIterations(50)
transforms_filter.append(vtk.vtkTransformPolyDataFilter())
transforms_filter[-1].SetTransform(transform)
transforms_filter[-1].SetInputConnection(objs_smoother[-1].GetOutputPort())
transforms_filter[-1].Update()
objs_mapper.append(vtk.vtkPolyDataMapper())
objs_mapper[-1].SetInputConnection(transforms_filter[-1].GetOutputPort())
objs_mapper[-1].SetLookupTable(lut)
objs_actor.append(vtk.vtkActor())
objs_actor[-1].SetMapper(objs_mapper[-1])
rgb = [0.8, 0.8, 0.8]
#lut.GetColor((i / float(len(obj_list))), rgb)
objs_actor[-1].GetProperty().SetColor(rgb)
objs_actor[-1].GetProperty().SetOpacity(0.3)
# Directory holding the standard-brain neuron meshes (.obj files).
neuronpos = '/mnt/data1/StandardBrain/highres/'
# Neuron meshes to render: each id comes as an original and a mirrored
# ('flip') mesh, so consecutive pairs belong to the same neuron.
neuron_list = ['0004.obj', '0004flip.obj',
               '0005.obj', '0005flip.obj',
               '0008.obj', '0008flip.obj',
               '0009.obj', '0009flip.obj',
               '0012.obj', '0012flip.obj',
               '0017.obj', '0017flip.obj',
               '0019.obj', '0019flip.obj',
               '0021.obj', '0021flip.obj',
               '0655.obj', '0655flip.obj',
               '0661.obj', '0661flip.obj',
               '0663.obj', '0663flip.obj',
               '0664.obj', '0664flip.obj',
               '0965.obj', '0965flip.obj',
               '0969.obj', '0969flip.obj',
               '0970.obj', '0970flip.obj',
               '0973.obj', '0973flip.obj',
               '0984.obj', '0984flip.obj',
               '0986.obj', '0986flip.obj',
               '9999.obj', '9999flip.obj',
               ]
#neuron_list = []
#neuron_list = ['0970.obj']
neurons = []
neurons_mapper = []
neurons_actor = []
neurons_smoother = []
# Build a reader -> smoother -> transform -> mapper -> actor pipeline per
# mesh.  NOTE(review): `transforms_filter`, `transform` and `lut` are
# defined earlier in this script (outside this chunk).
for i, neuron_name in enumerate(neuron_list):
    neurons.append(vtk.vtkOBJReader())
    neurons[-1].SetFileName(neuronpos+neuron_name)
    # Laplacian smoothing softens the raw mesh surface.
    neurons_smoother.append(vtk.vtkSmoothPolyDataFilter())
    neurons_smoother[-1].SetInputConnection(neurons[-1].GetOutputPort())
    neurons_smoother[-1].SetNumberOfIterations(50)
    transforms_filter.append(vtk.vtkTransformPolyDataFilter())
    transforms_filter[-1].SetTransform(transform)
    transforms_filter[-1].SetInputConnection(neurons_smoother[-1].GetOutputPort())
    transforms_filter[-1].Update()
    neurons_mapper.append(vtk.vtkPolyDataMapper())
    neurons_mapper[-1].SetInputConnection(transforms_filter[-1].GetOutputPort())
    neurons_mapper[-1].SetLookupTable(lut)
    neurons_actor.append(vtk.vtkActor())
    neurons_actor[-1].SetMapper(neurons_mapper[-1])
    # Color each neuron from the lookup table; later entries map toward
    # the low end of the table.
    rgb = [0.0, 0.0, 0.0]
    lut.GetColor( ((len(neuron_list) - i) / float(len(neuron_list))), rgb)
    neurons_actor[-1].GetProperty().SetColor(rgb)
    #neurons_actor[-1].GetProperty().SetColor(0.6, 0.2, 0.4)
    # Even entries (originals) opaque, odd entries (flips) translucent...
    if i%2 == 0:
        neurons_actor[-1].GetProperty().SetOpacity(1)
    else:
        neurons_actor[-1].GetProperty().SetOpacity(0.2)
    # ...but this unconditional assignment overrides the branch above and
    # makes every actor fully opaque.  NOTE(review): looks like a debug
    # override — confirm intended.
    neurons_actor[-1].GetProperty().SetOpacity(1.0)
###############################################################################
# draw axis
#
axesActor = vtk.vtkAxesActor()
###############################################################################
# prepare rendering
#
# Disabled: a fixed camera looking down the z axis with parallel projection.
'''
dist = 3000
camera = vtk.vtkCamera()
camera.SetPosition(512, -500, dist)
camera.SetFocalPoint(512, -500, 0)
camera.ComputeViewPlaneNormal()
camera.SetParallelProjection(1)
'''
# Renderer: brain surface + per-object actors + per-neuron actors.
ren = vtk.vtkRenderer()
ren.AddActor(objectActor)
#ren.AddActor(outlineActor)
#ren.AddActor(line_actor)
#ren.AddActor(scalar_bar)
for actor in objs_actor:
    ren.AddActor(actor)
for actor in neurons_actor:
    ren.AddActor(actor)
#ren.AddActor(axesActor)
ren.SetBackground(.0, .0, .0)
# Render window and interactor.
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
renWin.SetWindowName('Silkmoth Brain Viewer')
renWin.SetSize(2000, 1200)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
iren.Initialize()
#ren.SetActiveCamera(camera)
#ren.ResetCamera()
# Disabled: turntable screenshot export (120 frames over 360 degrees).
'''
num_images = 120
camera = ren.GetActiveCamera()
ren.ResetCamera()
#camera.ParallelProjectionOn()
camera.SetClippingRange(1.0, 10000)
camera.Zoom(1)
for i in range(num_images):
    get_screenshot(renWin, 'screenshot'+str(i)+'.png')
    camera.Azimuth(360./num_images)
    #ren.ResetCamera()
'''
# Hand control to the interactive window (blocks until it is closed).
iren.Start()
| DaisukeMiyamoto/visualize_silkmothbrain | draw_mothbrain.py | Python | mit | 7,186 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SecurityGroupViewResult(Model):
    """Security rules in effect on a VM, grouped per network interface.

    :param network_interfaces: List of network interfaces on the specified VM.
    :type network_interfaces:
     list[~azure.mgmt.network.v2017_09_01.models.SecurityGroupNetworkInterface]
    """

    # Maps each Python attribute onto its wire name and serialized type.
    _attribute_map = {
        'network_interfaces': {'key': 'networkInterfaces', 'type': '[SecurityGroupNetworkInterface]'},
    }

    def __init__(self, network_interfaces=None):
        super(SecurityGroupViewResult, self).__init__()
        self.network_interfaces = network_interfaces
| AutorestCI/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_09_01/models/security_group_view_result.py | Python | mit | 1,116 |
import networkx as nx
from networkx.algorithms import bipartite
class User():
    """A user node in the user-restaurant bipartite graph.

    Instances compare equal and hash identically when their ``userId``
    matches, so they can be used as graph nodes and set/dict keys.
    """

    def __init__(self, userId):
        self.userId = userId

    def __key(self):
        # A user's identity is solely its id.
        return (self.userId)

    def __eq__(self, other):
        # BUG FIX: __eq__ must take exactly (self, other).  The previous
        # (self, x, y) signature made every `u1 == u2` comparison raise
        # TypeError (missing argument), which also broke set/dict lookups.
        if not isinstance(other, User):
            return NotImplemented
        return self.__key() == other.__key()

    def __hash__(self):
        return hash(self.__key())
class Restaurant():
    """A restaurant node in the user-restaurant bipartite graph.

    Instances compare equal and hash identically when their
    ``restaurantId`` matches.
    """

    def __init__(self, restaurantId):
        self.restaurantId = restaurantId

    def __key(self):
        # A restaurant's identity is solely its id.
        return (self.restaurantId)

    def __eq__(self, other):
        # BUG FIX: __eq__ must take exactly (self, other).  The previous
        # (self, x, y) signature made every `==` comparison raise
        # TypeError (missing argument).
        if not isinstance(other, Restaurant):
            return NotImplemented
        return self.__key() == other.__key()

    def __hash__(self):
        return hash(self.__key())
return hash(self.__key()) | sheetal158/Opinion_Spam_Detection | bipartiteGraph.py | Python | mit | 632 |
import _plotly_utils.basevalidators
class ArrayminussrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``bar.error_x.arrayminussrc`` source property."""

    def __init__(
        self, plotly_name="arrayminussrc", parent_name="bar.error_x", **kwargs
    ):
        # Pull the edit type out of kwargs first so the remaining kwargs
        # pass through to the base validator untouched.
        edit_type = kwargs.pop("edit_type", "none")
        super(ArrayminussrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs
        )
| plotly/plotly.py | packages/python/plotly/plotly/validators/bar/error_x/_arrayminussrc.py | Python | mit | 429 |
from django import template
from .. import forms
register = template.Library()


@register.filter
def dyn_form(forms, pk):
    """Template filter returning the form stored under key ``pk``."""
    selected = forms[pk]
    return selected
| ainterr/scoring_engine | engine/templatetags/dyn_form.py | Python | mit | 144 |
from django.contrib import admin
from django.contrib.admin.filters import RelatedFieldListFilter
from .models import ClientLog, Client, Feedback
def client_id(obj):
    """Admin list column: the external id of the row's related client."""
    client = obj.client
    return client.externid
class AliveClientsRelatedFieldListFilter(RelatedFieldListFilter):
    """Related-field list filter that only offers clients marked alive."""

    def __init__(self, field, request, *args, **kwargs):
        # Restrict the sidebar choices before the parent builds them
        # from the related field.
        alive_only = {'status': Client.STATUS_ALIVE}
        field.rel.limit_choices_to = alive_only
        super(AliveClientsRelatedFieldListFilter, self).__init__(
            field, request, *args, **kwargs)
class ClientLogAdmin(admin.ModelAdmin):
    """Admin for client log entries; FK choices restricted to alive clients."""

    list_display = ('client', 'tag', 'log', 'updated')
    list_filter = ('client', )
    ordering = ('-updated',)
    search_fields = ("client__ip", "client__externid", "log", "tag",)

    def formfield_for_foreignkey(self, db_field, request, **kwargs):
        # Offer only alive clients when editing the "client" foreign key.
        if db_field.name == "client":
            alive = Client.objects.filter(status=Client.STATUS_ALIVE)
            kwargs["queryset"] = alive
        parent = super(ClientLogAdmin, self).formfield_for_foreignkey
        return parent(db_field, request, **kwargs)

admin.site.register(ClientLog, ClientLogAdmin)
class ClientAdmin(admin.ModelAdmin):
    """Admin configuration for Client records: status/recency ordering,
    filter sidebar and free-text search over ip / user agent / id."""
    list_display = ("status", "externid", "ip", "updated", "created", "useragent")
    list_filter = ("status", "useragent", "failures", "complets")
    ordering = ("status", "-updated", "-created", )
    search_fields = ("ip", "useragent", "externid", )

admin.site.register(Client, ClientAdmin)
class FeedbackAdmin(admin.ModelAdmin):
    """Admin configuration for user feedback entries, newest first."""
    list_display = ("id", "useremail", "ip", "created")
    ordering = ("-id",)

admin.site.register(Feedback, FeedbackAdmin)
| ddalex/p9 | sign/admin.py | Python | mit | 1,555 |
# coding=utf-8
""" Module holding tools for ee.ImageCollections """
import ee
import ee.data
import pandas as pd
import math
from . import date, ee_list
from . import image as image_module
from . import collection as eecollection
from ..utils import castImage
from .. import composite
def add(collection, image):
    """ Add an Image to the Collection

    **SERVER SIDE**

    """
    # TODO: handle a list of images
    aslist = collection.toList(collection.size())
    extended = aslist.add(image)
    return ee.ImageCollection.fromImages(extended)
def allMasked(collection):
    """ Get a mask which indicates pixels that are masked in all images (0) and
    pixels that have a valid pixel in at least one image (1) """
    # Sum the per-image masks: a pixel valid anywhere gets a sum > 0.
    summed = ee.Image(collection.map(lambda img: img.mask()).sum())
    return ee.Image(summed.gt(0))
def containsAllBands(collection, bands):
    """ Filter a collection with images containing all bands specified in
    parameter `bands` """
    bands = ee.List(bands)
    # Store each image's band names as a property so filters can inspect it.
    collection = collection.map(
        lambda i: ee.Image(i).set('_BANDS_', ee.Image(i).bandNames()))

    def accumulate(band, prev):
        # AND a listContains test for this band onto the running filter.
        contains = ee.Filter.listContains(
            leftField='_BANDS_', rightValue=ee.String(band))
        return ee.Filter.And(ee.Filter(prev), contains)

    first = ee.Filter.listContains(leftField='_BANDS_',
                                   rightValue=ee.String(bands.get(0)))
    combined = ee.Filter(bands.slice(1).iterate(accumulate, first))
    return collection.filter(combined)
def containsAnyBand(collection, bands):
    """ Filter a collection with images cotaining any of the bands specified in
    parameter `bands` """
    bands = ee.List(bands)
    # Store each image's band names as a property so filters can inspect it.
    collection = collection.map(
        lambda i: ee.Image(i).set('_BANDS_', ee.Image(i).bandNames()))

    def accumulate(band, prev):
        # OR a listContains test for this band onto the running filter.
        contains = ee.Filter.listContains(
            leftField='_BANDS_', rightValue=ee.String(band))
        return ee.Filter.Or(ee.Filter(prev), contains)

    first = ee.Filter.listContains(leftField='_BANDS_',
                                   rightValue=ee.String(bands.get(0)))
    combined = ee.Filter(bands.slice(1).iterate(accumulate, first))
    return collection.filter(combined)
def getId(collection):
    """ Get the ImageCollection id.

    **CLIENT SIDE**

    :type collection: ee.ImageCollection
    :return: the collection's id
    :rtype: str
    """
    # limit(0) keeps the request small: only metadata travels to the client.
    metadata = collection.limit(0).getInfo()
    return metadata['id']
def getImage(collection, index):
    """ Get an Image using its collection index """
    aslist = collection.toList(collection.size())
    return ee.Image(aslist.get(index))
def wrapper(f, *arg, **kwargs):
    """ Wrap a function and its arguments into a mapping function for
    ImageCollections. The first parameter of the functions must be an Image,
    and it must return an Image.

    :param f: the function to be wrapped
    :type f: function
    :return: a function to use in ee.ImageCollection.map
    :rtype: function
    """
    def _mapper(img):
        # Forward the mapped image followed by the captured extra arguments.
        return f(img, *arg, **kwargs)

    return _mapper
def enumerateProperty(collection, name='enumeration'):
    """ Store each image's position in the collection as property `name`.

    :param collection: the collection to enumerate
    :param name: the property that will hold the index
    :return: the enumerated collection
    """
    pairs = eecollection.enumerate(collection)

    def unpack(pair):
        # Each element is a [index, image] pair.
        pair = ee.List(pair)
        position = ee.Number(pair.get(0))
        img = pair.get(1)
        return ee.Image(img).set(name, position)

    return ee.ImageCollection(pairs.map(unpack))
def enumerateSimple(collection, name='ENUM'):
    """ Simple enumeration of features inside a collection. Each feature stores
    its enumeration, so if the order of features changes over time, the numbers
    will not be in order """
    size = collection.size()
    aslist = collection.toList(size)

    def tag(position):
        position = ee.Number(position).toInt()
        return ee.Image(aslist.get(position)).set(name, position)

    tagged = ee.ImageCollection.fromImages(
        ee.List.sequence(0, size.subtract(1)).map(tag))
    # Keep the original collection-level properties.
    return ee.ImageCollection(tagged.copyProperties(source=collection))
def fillWithLast(collection, reverse=False, proxy=-999):
    """ Fill each masked pixels with the last available not masked pixel. If reverse, it goes backwards.
    Images must contain a valid date (system:time_start property by default)

    :param collection: the ImageCollection to fill
    :param reverse: if True, fill from later images backwards in time
    :param proxy: sentinel value used internally to mark masked pixels; it
        must not collide with real pixel values
    """
    # The collection becomes a per-pixel array; axis 0 is the time axis.
    axis = 0

    def shift(array):
        # Shift the array one step along time, duplicating the edge slice
        # so the array keeps its length.
        if reverse:
            rigth = array.arraySlice(axis, 1)
            last = array.arraySlice(axis, -1)
            return rigth.arrayCat(last, axis)
        else:
            left = array.arraySlice(axis, 0, -1)
            first = array.arraySlice(axis, 0, 1)
            return first.arrayCat(left, axis)

    def move(array):
        # Keep valid (non-proxy) values; take the shifted neighbor where
        # the value equals the proxy.
        shifted = shift(array)
        masked = array.neq(proxy)
        maskednot = array.eq(proxy)
        t1 = array.multiply(masked)
        t2 = shifted.multiply(maskednot)
        final = t1.add(t2)
        return final

    def fill(array, size):
        # Apply `move` size times so values propagate across runs of
        # several consecutive masked slots.
        size = ee.Number(size)
        indices = ee.List.sequence(0, size.subtract(1))
        def wrap(i, a):
            a = ee.Image(a)
            return move(a)
        return ee.Image(indices.iterate(wrap, array))

    # Unmask every image onto the proxy value so masked pixels become
    # detectable inside the array.
    collection = collection.map(
        lambda i: image_module.emptyBackground(i, proxy).copyProperties(
            source=i, properties=i.propertyNames()))
    bands = ee.Image(collection.first()).bandNames()
    size = collection.size()
    array = collection.toArray()
    fill_array = fill(array, size)

    # NOTE(review): `aggregate_array_all` is not defined in this chunk —
    # presumably a sibling helper that collects each image's properties;
    # confirm.
    props = aggregate_array_all(collection)
    indices = ee.List.sequence(0, size.subtract(1))

    def wrap(index):
        # Slice image `index` back out of the filled array, restore its
        # properties and re-mask pixels still holding the proxy value.
        index = ee.Number(index).toInt()
        sliced = fill_array.arraySlice(axis, index, index.add(1))
        im = sliced.arrayProject([1]).arrayFlatten([bands])
        prop = ee.Dictionary(props.get(index))
        im = ee.Image(im.setMulti(prop))
        return im.updateMask(im.neq(proxy))

    return ee.ImageCollection.fromImages(indices.map(wrap))
def mergeGeometries(collection):
    """ Merge the geometries of many images. Return ee.Geometry """
    images = collection.toList(collection.size())
    head = ee.Image(images.get(0))
    tail = images.slice(1)

    def union(img, acc):
        # Union the accumulated geometry with this image's footprint.
        merged = ee.Image(img).geometry().union(ee.Geometry(acc))
        return merged.dissolve()

    return ee.Geometry(tail.iterate(union, head.geometry()))
def mosaicSameDay(collection, qualityBand=None):
    """ Return a collection where images from the same day are mosaicked

    :param qualityBand: the band that holds the quality score for mosaiking.
        If None it will use the simplier mosaic() function
    :type qualityBand: str
    :return: a new image collection with 1 image per day. The only property
        kept is `system:time_start`
    :rtype: ee.ImageCollection
    """
    all_dates = collection.aggregate_array('system:time_start')

    def overdates(d, l):
        # Accumulate the distinct calendar days (time of day truncated)
        # present in the collection.
        l = ee.List(l)
        date = ee.Date(d)
        day = date.get('day')
        month = date.get('month')
        year = date.get('year')
        clean_date = ee.Date.fromYMD(year, month, day)
        condition = l.contains(clean_date)
        return ee.Algorithms.If(condition, l, l.add(clean_date))

    date_list = ee.List(all_dates.iterate(overdates, ee.List([])))
    first_img = ee.Image(collection.first())
    bands = first_img.bandNames()

    def make_col(date):
        # Mosaic every image acquired on calendar day `date`.
        date = ee.Date(date)
        filtered = collection.filterDate(date, date.advance(1, 'day'))

        if qualityBand:
            mosaic = filtered.qualityMosaic(qualityBand)
        else:
            mosaic = filtered.mosaic()

        mosaic = mosaic.set('system:time_start', date.millis(),
                            'system:footprint', mergeGeometries(filtered))

        # mosaic = mosaic.rename(bands)
        mosaic = mosaic.select(bands)

        def reproject(bname, mos):
            # Restore each band's projection from the first image of the
            # collection (skipping bands absent from the mosaic).
            mos = ee.Image(mos)
            mos_bnames = mos.bandNames()
            bname = ee.String(bname)
            proj = first_img.select(bname).projection()

            newmos = ee.Image(ee.Algorithms.If(
                mos_bnames.contains(bname),
                image_module.replace(mos, bname, mos.select(bname).setDefaultProjection(proj)),
                mos))

            return newmos

        mosaic = ee.Image(bands.iterate(reproject, mosaic))
        return mosaic

    new_col = ee.ImageCollection.fromImages(date_list.map(make_col))
    return new_col
def reduceEqualInterval(collection, interval=30, unit='day', reducer=None,
                        start_date=None, end_date=None):
    """ Reduce an ImageCollection into a new one that has one image per
        reduced interval, for example, one image per month.

    :param collection: the collection
    :type collection: ee.ImageCollection
    :param interval: the interval to reduce
    :type interval: int
    :param unit: unit of the interval. Can be 'day', 'month', 'year'
    :param reducer: the reducer to apply where images overlap. If None, uses
        a median reducer
    :type reducer: ee.Reducer
    :param start_date: fix the start date. If None, uses the date of the first
        image in the collection
    :type start_date: ee.Date
    :param end_date: fix the end date. If None, uses the date of the last image
        in the collection
    :type end_date: ee.Date
    :return: a collection with one reduced image per interval; intervals
        containing no images are skipped
    """
    interval = int(interval)  # force to int
    first = ee.Image(collection.sort('system:time_start').first())
    bands = first.bandNames()

    if not start_date:
        start_date = first.date()
    if not end_date:
        last = ee.Image(collection.sort('system:time_start', False).first())
        end_date = last.date()
    if not reducer:
        reducer = ee.Reducer.median()

    def apply_reducer(red, col):
        return ee.Image(col.reduce(red))

    ranges = date.daterangeList(start_date, end_date, interval, unit)

    def over_ranges(drange, ini):
        ini = ee.List(ini)
        drange = ee.DateRange(drange)
        start = drange.start()
        end = drange.end()
        filtered = collection.filterDate(start, end)
        condition = ee.Number(filtered.size()).gt(0)
        # NOTE: true() is invoked while building the request; the
        # ee.Algorithms.If only selects which result is kept server side.
        def true():
            image = apply_reducer(reducer, filtered)\
                .set('system:time_start', end.millis())\
                .set('reduced_from', start.format())\
                .set('reduced_to', end.format())
            # rename to original names
            image = image.select(image.bandNames(), bands)
            result = ini.add(image)
            return result
        return ee.List(ee.Algorithms.If(condition, true(), ini))

    imlist = ee.List(ranges.iterate(over_ranges, ee.List([])))

    return ee.ImageCollection.fromImages(imlist)
def makeEqualInterval(collection, interval=1, unit='month'):
    """ Make a list of image collections filtered by the given interval,
    for example, one month. Starts from the end of the parsed collection

    :param collection: the collection
    :type collection: ee.ImageCollection
    :param interval: the interval
    :type interval: int
    :param unit: unit of the interval. Can be 'day', 'month', 'year'
    :rtype: ee.List
    """
    interval = int(interval)  # force to int
    ordered = collection.sort('system:time_start').toList(collection.size())
    start_date = ee.Image(ordered.get(0)).date()
    end_date = ee.Image(ordered.get(-1)).date()
    ranges = date.daterangeList(start_date, end_date, interval, unit)

    def collect(drange, acc):
        # Append the sub-collection for this range, skipping empty ones.
        acc = ee.List(acc)
        drange = ee.DateRange(drange)
        chunk = collection.filterDate(drange.start(), drange.end())
        not_empty = ee.Number(chunk.size()).gt(0)
        return ee.List(ee.Algorithms.If(not_empty, acc.add(chunk), acc))

    return ee.List(ranges.iterate(collect, ee.List([])))
def makeDayIntervals(collection, interval=30, reverse=False, buffer='second'):
    """ Make day intervals """
    interval = int(interval)
    collection = collection.sort('system:time_start', True)
    start = collection.first().date()
    end = collection.sort('system:time_start', False).first().date()
    ranges = date.dayRangeIntervals(start, end, interval, reverse, buffer)

    def collect(drange, acc):
        # Append the sub-collection for this range, skipping empty ones.
        acc = ee.List(acc)
        drange = ee.DateRange(drange)
        chunk = collection.filterDate(drange.start(), drange.end())
        not_empty = ee.Number(chunk.size()).gt(0)
        return ee.List(ee.Algorithms.If(not_empty, acc.add(chunk), acc))

    return ee.List(ranges.iterate(collect, ee.List([])))
def reduceDayIntervals(collection, reducer, interval=30, reverse=False,
                       buffer='second'):
    """ Reduce Day Intervals

    :param reducer: a function that takes as only argument a collection
        and returns an image
    :type reducer: function
    :return: an image collection
    :rtype: ee.ImageCollection
    """
    parts = makeDayIntervals(collection, interval, reverse, buffer)
    return ee.ImageCollection.fromImages(parts.map(reducer))
def getValues(collection, geometry, scale=None, reducer=None,
              id='system:index', properties=None, side='server',
              maxPixels=1e7, bestEffort=False, tileScale=1):
    """ Return all values of all bands of an image collection in the
        specified geometry

    :param geometry: Point from where to get the info
    :type geometry: ee.Geometry
    :param scale: The scale to use in the reducer. Defaults to 1 when not
        given
    :type scale: int
    :param reducer: the reducer applied over the geometry; defaults to
        ee.Reducer.mean()
    :param id: image property that will be the key in the result dict
    :type id: str
    :param properties: image properties that will be added to the resulting
        dict
    :type properties: list
    :param side: 'server' or 'client' side
    :type side: str
    :param maxPixels: forwarded to reduceRegion
    :param bestEffort: forwarded to reduceRegion
    :param tileScale: forwarded to reduceRegion
    :return: Values of all bands in the point
    :rtype: dict
    :raises ValueError: if `side` is neither 'server' nor 'client'
    """
    if reducer is None:
        reducer = ee.Reducer.mean()

    if not scale:
        scale = 1
    else:
        scale = int(scale)

    if not properties:
        properties = []
    properties = ee.List(properties)

    def listval(img, it):
        # Reduce the image over the geometry and key the result by `id`.
        theid = ee.Algorithms.String(img.get(id))
        values = img.reduceRegion(
            reducer, geometry, scale, maxPixels=maxPixels,
            bestEffort=bestEffort, tileScale=tileScale
        )
        values = ee.Dictionary(values)
        img_props = img.propertyNames()

        def add_properties(prop, ini):
            # Copy the requested property onto the result when present.
            ini = ee.Dictionary(ini)
            condition = img_props.contains(prop)

            def true():
                value = img.get(prop)
                return ini.set(prop, value)

            return ee.Algorithms.If(condition, true(), ini)

        with_prop = ee.Dictionary(properties.iterate(add_properties, values))
        return ee.Dictionary(it).set(theid, with_prop)

    result = collection.iterate(listval, ee.Dictionary({}))
    # An empty collection yields an empty dict instead of failing.
    result = ee.Dictionary(ee.Algorithms.If(collection.size().neq(0),
                                            result, {}))

    if side == 'server':
        return result
    elif side == 'client':
        return result.getInfo()
    else:
        raise ValueError("side parameter must be 'server' or 'client'")
def outliers(collection, bands, sigma=2, updateMask=False):
    """ Compute outliers by:

    outlier = value > mean+(sigma*stddev)
    outlier = value < mean-(sigma*stddev)

    Example (sigma = 1):
        - values = [1, 5, 6, 4, 7, 10]
        - mean = 5.5
        - std dev = 3
        - mean + (sigma*stddev) = 8.5
        - mean - (sigma*stddev) = 2.5
        - outliers = values between 2.5 and 8.5 = [1, 10]

    if `updateMask` is False return the passed collection in which each image
    have new bands (a mask) corresponding to the passed dict and a suffix '_outlier'
    else return the passed collection with the passed bands masked if are
    outliers (the outlier band is not returned).

    idea from: https://www.kdnuggets.com/2017/02/removing-outliers-standard-deviation-python.html

    :param bands: bands to check; if falsy (None / empty) every band of the
        first image is used
    :param sigma: number of standard deviations that defines an outlier
    :param updateMask: mask outlier pixels instead of adding '_outlier' bands
    """
    # Fall back to every band of the first image when `bands` is falsy.
    bands = bands or ee.Image(collection.first()).bandNames()
    bands = ee.List(bands)
    forstats = collection.select(bands)
    # Per-pixel statistics across the whole collection.
    mean = forstats.mean()
    stddev = forstats.reduce(ee.Reducer.stdDev())
    imin = mean.subtract(stddev.multiply(sigma))
    imax = mean.add(stddev.multiply(sigma))

    def getOutlier(im, imin, imax):
        # 1 where the pixel falls outside [imin, imax].
        ismin = im.lt(imin)
        ismax = im.gt(imax)
        outlier = ismin.Or(ismax)
        return outlier

    def overcol(im):
        outs = getOutlier(im.select(bands), imin, imax)
        if updateMask:
            ibands = im.select(bands)
            ibands = ibands.updateMask(outs.Not())
        else:
            ibands = image_module.addSuffix(outs, '_outlier')

        return im.addBands(ibands, overwrite=True)

    return collection.map(overcol)
def data2pandas(data):
    """
    Convert data coming from tools.imagecollection.get_values to a
    pandas DataFrame

    :param data: mapping of image id -> {band/property name: value}
    :type data: dict
    :return: a DataFrame indexed by image id with one column per
        band/property. Entries missing a band get None (NaN in pandas)
        instead of raising KeyError as the previous implementation did.
    :rtype: pandas.DataFrame
    """
    # Union of band/property names across all entries, preserving the
    # order in which they are first seen.
    header = []
    for values in data.values():
        for band in values:
            if band not in header:
                header.append(band)

    indices = list(data.keys())
    # .get tolerates entries that lack a band present in other entries.
    data_dict = {band: [data[i].get(band) for i in indices]
                 for band in header}

    return pd.DataFrame(data=data_dict, index=indices)
def parametrizeProperty(collection, property, range_from, range_to,
                        pattern='{property}_PARAMETRIZED'):
    """ Parametrize a property: linearly map its value from one range onto
    another, storing the result in a new property.

    :param collection: the ImageCollection
    :param range_from: the original property range
    :param range_to: the desired property range
    :param property: the name of the property
    :param pattern: the name of the resulting property. Wherever it says
        'property' will be replaced with the passed property.
    :return: the parsed collection in which every image has a new
        parametrized property
    """
    new_name = pattern.replace('{property}', property)

    src = range_from if isinstance(range_from, ee.List) else ee.List(range_from)
    dst = range_to if isinstance(range_to, ee.List) else ee.List(range_to)

    src_min = ee.Number(src.get(0))
    src_span = ee.Number(src.get(1)).subtract(src_min)
    dst_min = ee.Number(dst.get(0))
    dst_span = ee.Number(dst.get(1)).subtract(dst_min)

    def scale(img):
        # Fraction of the way through the source range, mapped onto dst.
        value = ee.Number(img.get(property))
        fraction = value.subtract(src_min).divide(src_span)
        return img.set(new_name, fraction.multiply(dst_span).add(dst_min))

    return collection.map(scale)
def linearFunctionBand(collection, band, range_min=None, range_max=None,
                       mean=None, output_min=None, output_max=None,
                       name='linear_function'):
    """ Apply a linear function over the bands across every image of the
    ImageCollection using the following formula:

    - a = abs(val-mean)
    - b = output_max-output_min
    - c = abs(range_max-mean)
    - d = abs(range_min-mean)
    - e = max(c, d)

    f(x) = a*(-1)*(b/e)+output_max

    :param band: the band to process
    :param range_min: the minimum pixel value in the parsed band. If None, it
        will be computed reducing the collection
    :param range_max: the maximum pixel value in the parsed band. If None, it
        will be computed reducing the collection
    :param output_min: the minimum value that will take the resulting band.
    :param output_max: the maximum value that will take the resulting band.
    :param mean: the value on the given range that will take the `output_max`
        value
    :param name: the name of the resulting band
    :return: the parsed collection with an extra band per image
    :rtype: ee.ImageCollection
    """
    range_min = (ee.Image(collection.select(band).min()).rename('imin')
                 if range_min is None else castImage(range_min))
    range_max = (ee.Image(collection.select(band).max()).rename('imax')
                 if range_max is None else castImage(range_max))

    def apply_fn(img):
        computed = image_module.linearFunction(img, band, range_min,
                                               range_max, mean, output_min,
                                               output_max, name)
        return img.addBands(computed.rename(name))

    return collection.map(apply_fn)
def linearFunctionProperty(collection, property, range_min=None,
                           range_max=None, mean=None, output_min=None,
                           output_max=None, name='LINEAR_FUNCTION'):
    """ Apply a linear function over the properties across every image of the
    ImageCollection using the following formula:

    - a = abs(val-mean)
    - b = output_max-output_min
    - c = abs(range_max-mean)
    - d = abs(range_min-mean)
    - e = max(c, d)

    f(x) = a*(-1)*(b/e)+output_max

    :param property: the property to process
    :param range_min: the minimum pixel value in the parsed band. If None, it
        will be computed reducing the collection
    :param range_max: the maximum pixel value in the parsed band. If None, it
        will be computed reducing the collection
    :param output_min: the minimum value that will take the resulting band.
    :param output_max: the minimum value that will take the resulting band.
    :param mean: the value on the given range that will take the `output_max`
        value
    :param name: the name of the resulting band
    :return: the parsed collection in which every image will have an extra
        property that results of applying the linear function over every pixel
        in the image
    :rtype: ee.ImageCollection
    """
    if range_min is None:
        imin = ee.Number(collection.aggregate_min(property))
    else:
        imin = ee.Number(range_min)

    if range_max is None:
        imax = ee.Number(collection.aggregate_max(property))
    else:
        imax = ee.Number(range_max)

    # NOTE(review): when `mean` is None it defaults to the range maximum —
    # confirm this is intended rather than the range midpoint.
    if mean is None:
        imean = imax
    else:
        imean = ee.Number(mean)

    if output_max is None:
        output_max = imax
    else:
        output_max = ee.Number(output_max)

    if output_min is None:
        output_min = imin
    else:
        output_min = ee.Number(output_min)

    # t = larger distance from the mean to either end of the input range
    # (the `e` term of the formula above).
    a = imax.subtract(imean).abs()
    b = imin.subtract(imean).abs()
    t = a.max(b)

    def to_map(img):
        val = ee.Number(img.get(property))

        a = val.subtract(imean).abs().multiply(-1)
        b = output_max.subtract(output_min)
        c = b.divide(t)
        d = a.multiply(c)
        result = d.add(output_max)

        return img.set(name, result)

    collection = collection.map(to_map)
    return collection
def linearInterpolation(collection, date_property='system:time_start'):
    """ Fill masked pixels by linear interpolation in time between the
    previous and next valid pixel of each band.

    :param collection: the ImageCollection to interpolate
    :param date_property: image property holding the acquisition time,
        used as the x axis of the interpolation
    """
    def _addTime(collection):
        def wrap(i):
            # Store the image time as constant bands: *_tmpTime everywhere
            # and *_maskedTime only where the original band is valid.
            sec = ee.Number(i.get(date_property))
            isec = image_module.empty(sec, i.bandNames())
            isec_suffix = image_module.addSuffix(isec, '_tmpTime')
            m = i.mask()
            isec_masked = isec.updateMask(m)
            isec_masked_suffix = image_module.addSuffix(isec_masked,
                                                       '_maskedTime')
            return i.addBands(isec_suffix).addBands(isec_masked_suffix)

        return collection.map(wrap)

    # get the mask for the final result
    finalmask = allMasked(collection)

    if date_property != 'system:time_start':
        collection = collection.sort(date_property)

    # add time bands
    collection = _addTime(collection)

    # Forward- and backward-filled versions give, for every masked pixel,
    # the previous and next valid value (and their times).
    filled = fillWithLast(collection, False)
    filled_back = fillWithLast(collection, True)

    # Attach each image's filled counterparts via self-joins on the index.
    condition = ee.Filter.equals(leftField='system:index',
                                 rightField='system:index')

    match1 = ee.Join.saveFirst('filled').apply(
        primary=collection,
        secondary=filled,
        condition=condition
    )

    match2 = ee.Join.saveFirst('filled_back').apply(
        primary=match1,
        secondary=filled_back,
        condition=condition
    )

    def wrap(image):
        o = ee.Image(image)
        bands = o.bandNames()
        masked = o.mask().Not()
        f = ee.Image(image.get('filled')).unmask()
        fb = ee.Image(image.get('filled_back')).unmask()

        # filters
        filter0 = ee.Filter.stringContains('item', 'maskedTime')
        filter1 = ee.Filter.stringContains('item', 'maskedTime').Not()
        filter2 = ee.Filter.stringContains('item', 'tmpTime').Not()

        # get all deltas (including delta x)
        dy = ee.Image(fb.subtract(f)).unmask()
        dx_bands = bands.filter(filter0)
        # select only delta x for each band
        dx = dy.select(dx_bands)
        # get original bands
        original_bands = bands.filter(filter1).filter(filter2)
        # get delta for original bands
        delta = dy.select(original_bands)
        # now that we have delta x and delta for the original bands
        # get the slope
        slope = delta.divide(dx).unmask()
        # filled original bands
        fo = f.select(original_bands)
        # filled back original bands
        fob = fb.select(original_bands)
        # original bands
        oo = o.select(original_bands)
        # masked original bands
        mo = masked.select(original_bands)

        # elapsed time since the previous valid observation
        t = o.select('.+_tmpTime').subtract(f.select('.+_maskedTime'))
        # linear interpolation: previous value + slope * elapsed time,
        # falling back to the backward fill and then the forward fill.
        fill = fo.add(slope.multiply(t)).unmask()
        fill2 = fob.where(fill, fill)
        fill3 = fo.where(fill2, fill2)
        final = oo.unmask().where(mo, fill3)
        final = image_module.deleteProperties(final)
        final = final.select(original_bands) \
            .copyProperties(o, exclude=['filled', 'filled_back']) \
            .set(date_property, o.get(date_property)) \
            .set('system:index', o.get('system:index'))
        return ee.Image(final).updateMask(finalmask)

    return ee.ImageCollection(match2.map(wrap))
def gaussFunctionBand(collection, band, range_min=None, range_max=None,
                      mean=0, output_min=None, output_max=1, std=None,
                      stretch=1, name='gauss'):
    """ Compute a Guass function using a specified band over an
    ImageCollection. See: https://en.wikipedia.org/wiki/Gaussian_function

    :param band: the name of the band to use
    :type band: str
    :param range_min: the minimum pixel value in the parsed band. If None, it
        will be computed
    :param range_max: the maximum pixel value in the parsed band. If None, it
        will be computed
    :param mean: the position of the center of the peak. Defaults to 0
    :type mean: int or float
    :param std: the standard deviation value. Defaults to range/4
    :type std: int or float
    :param output_max: height of the curve's peak
    :type output_max: int or float
    :param output_min: the desired minimum of the curve
    :type output_min: int or float
    :param stretch: a stretching value. As bigger as stretch
    :type stretch: int or float
    :param name: the name of the resulting band
    :return: the parsed collection with an extra band per image
    :rtype: ee.ImageCollection
    """
    range_min = (ee.Image(collection.min()) if range_min is None
                 else castImage(range_min))
    range_max = (ee.Image(collection.max()) if range_max is None
                 else castImage(range_max))

    def apply_gauss(img):
        gauss = image_module.gaussFunction(img, band,
                                           range_min=range_min,
                                           range_max=range_max,
                                           mean=mean, std=std,
                                           output_min=output_min,
                                           output_max=output_max,
                                           stretch=stretch,
                                           name=name)
        return img.addBands(gauss)

    return collection.map(apply_gauss)
def gaussFunctionProperty(collection, property, range_min=None,
                          range_max=None, mean=0, output_min=None,
                          output_max=1, std=None, stretch=1,
                          name='GAUSS'):
    """ Compute a Guass function using a specified property over an
    ImageCollection. See: https://en.wikipedia.org/wiki/Gaussian_function

    :param collection:
    :type collection: ee.ImageCollection
    :param property: the name of the property to use
    :type property: str
    :param range_min: the minimum pixel value in the parsed band. If None, it
        will be computed
    :param range_max: the maximum pixel value in the parsed band. If None, it
        will be computed
    :param mean: the position of the center of the peak. Defaults to 0
    :type mean: int or float
    :param std: the standard deviation value. Defaults to range/4
    :type std: int or float
    :param output_max: height of the curve's peak
    :type output_max: int or float
    :param output_min: the desired minimum of the curve
    :type output_min: int or float
    :param stretch: a stretching value. As bigger as stretch
    :type stretch: int or float
    :param name: the name of the resulting property
    :return: the parsed collection in which every image will have an extra
        property that results of applying the linear function over every pixel
        in the image
    :rtype: ee.ImageCollection
    """
    if range_min is None:
        range_min = ee.Number(collection.aggregate_min(property))
    else:
        range_min = ee.Number(range_min)

    if range_max is None:
        range_max = ee.Number(collection.aggregate_max(property))
    else:
        range_max = ee.Number(range_max)

    mean = ee.Number(mean)
    output_max = ee.Number(output_max)

    if std is None:
        std = range_max.subtract(range_min).divide(4)
    else:
        std = ee.Number(std)

    stretch = ee.Number(stretch)

    def to_map(img):
        def compute_gauss(value):
            # Gaussian: output_max * exp(stretch * -(value-mean)^2 / (2*std^2))
            a = value.subtract(mean).pow(2)
            b = std.pow(2).multiply(-2)
            c = a.divide(b).multiply(stretch)
            d = c.exp()
            return d.multiply(output_max)

        no_parametrized = compute_gauss(ee.Number(img.get(property)))

        if output_min is None:
            return img.set(name, no_parametrized)
        else:
            # Rescale so the curve's minimum over the input range lands
            # on output_min while the peak stays at output_max.
            min_result = compute_gauss(range_min)
            max_result = compute_gauss(range_max)
            min_result_final = min_result.min(max_result)
            e = no_parametrized.subtract(min_result_final)
            f = output_max.subtract(min_result_final)
            g = output_max.subtract(output_min)
            parametrized = e.divide(f).multiply(g).add(output_min)
            return img.set(name, parametrized)

    collection = collection.map(to_map)
    return collection
def normalDistributionProperty(collection, property, mean=None, std=None,
                               name='NORMAL_DISTRIBUTION'):
    """ Fit a normal (Gaussian) distribution over the values of one image
    property across an ImageCollection. For more see:
    https://en.wikipedia.org/wiki/Normal_distribution

    :param property: the name of the property to use
    :type property: str
    :param mean: the mean value. If None it will be computed from the source.
        defaults to None.
    :type mean: float
    :param std: the standard deviation value. If None it will be computed from
        the source. Defaults to None.
    :type std: float
    """
    the_mean = ee.Number(
        collection.aggregate_mean(property) if mean is None else mean)
    the_std = ee.Number(
        collection.aggregate_total_sd(property) if std is None else std)
    # Peak height of the normal PDF: 1 / (std * sqrt(2 * pi))
    sqrt_two_pi = ee.Number(2).multiply(math.pi).sqrt()
    peak = ee.Number(1).divide(the_std.multiply(sqrt_two_pi))
    return gaussFunctionProperty(collection, property, mean=the_mean,
                                 output_max=peak, std=the_std, name=name)
def normalDistributionBand(collection, band, mean=None, std=None,
                           name='normal_distribution'):
    """ Fit a normal (Gaussian) distribution over the values of one band
    across an ImageCollection. For more see:
    https://en.wikipedia.org/wiki/Normal_distribution

    :param band: the name of the band to use
    :type band: str
    :param mean: the mean value. If None it will be computed from the source.
        defaults to None.
    :type mean: float
    :param std: the standard deviation value. If None it will be computed from
        the source. Defaults to None.
    :type std: float
    """
    if mean is None:
        the_mean = ee.Image(collection.mean())
    else:
        the_mean = ee.Image.constant(mean)
    if std is None:
        the_std = ee.Image(collection.reduce(ee.Reducer.stdDev()))
    else:
        the_std = ee.Image.constant(std)
    # Peak height of the normal PDF: 1 / (std * sqrt(2 * pi))
    pi_img = ee.Image.constant(math.pi)
    sqrt_two_pi = ee.Image.constant(2).multiply(pi_img).sqrt()
    peak = ee.Image(1).divide(the_std.multiply(sqrt_two_pi))
    return gaussFunctionBand(collection, band, mean=the_mean,
                             output_max=peak, std=the_std, name=name)
def maskedSize(collection):
    """ Return an image holding, per pixel, the percentage of images in the
    collection that are masked there. 100% means the pixel is masked in
    every image. """
    # 1 where a pixel is masked, 0 where it carries data
    inverted = collection.map(lambda img: img.mask().Not())
    # Constant 1 everywhere a value exists, used to count images per pixel
    ones = inverted.map(lambda img: img.add(1).divide(img.add(1)))
    masked_count = inverted.sum()
    total_count = ones.sum()
    return masked_count.divide(total_count).multiply(100).toInt()
def area_under_curve(collection, band, x_property=None, name='area_under'):
    """ Compute the area under the curve taking the x axis from an image
    property. If not specified, it'll use `system:time_start`

    :param band: the band whose values make up the y axis
    :type band: str
    :param x_property: the image property used as the x axis. Defaults to
        'system:time_start'
    :type x_property: str
    :param name: name of the band that holds each segment's partial area
    :type name: str
    :return: a single image with the sum of all partial areas
    :rtype: ee.Image
    """
    x_property = x_property or "system:time_start"
    # Total x-axis span, used to normalize each segment's width
    max_x = collection.aggregate_max(x_property)
    min_x = collection.aggregate_min(x_property)
    total_lapsed = ee.Number(max_x).subtract(ee.Number(min_x))
    def cummulative(image, cumm):
        # `cumm` is the list of images processed so far; each one carries an
        # extra float band (called `name`) with its segment's partial area.
        cumm = ee.List(cumm)
        def true(i, c):
            # There is a previous image: compute the area of the segment
            # between it and the current one. `i` and `image` are the same
            # object here (see the call below).
            c = ee.List(c)
            last = ee.Image(c.get(-1))
            lapsed = ee.Number(image.get(x_property)).subtract(
                ee.Number(last.get(x_property)))
            lapsed_percent = lapsed.divide(total_lapsed)
            rise = i.select(band).subtract(last.select(band)).divide(2)
            # NOTE(review): this evaluates to y_i + (y_i - y_last)/2; a
            # trapezoid would use the midpoint (y_i + y_last)/2, i.e.
            # `subtract(rise)`. Confirm whether `add` is intentional.
            toadd = i.select(band).add(rise).multiply(lapsed_percent).rename(
                name).toFloat()
            return c.add(i.addBands(toadd))
        def false(i, c):
            # First image: no previous point, so its partial area is 0
            toadd = i.addBands(ee.Image(0).rename(name).toFloat())
            return c.add(toadd)
        # cumm.size() is falsy (0) only for the very first image
        return ee.List(ee.Algorithms.If(cumm.size(), true(image, cumm),
                                        false(image, cumm)))
    final = ee.List(collection.iterate(cummulative, ee.List([])))
    final_ic = ee.ImageCollection.fromImages(final).select(name)
    # Sum every partial area into one image
    return ee.Image(final_ic.reduce(ee.Reducer.sum()))
def moving_average(collection, back=5, reducer=None,
                   use_original=True):
    """ Compute the moving average over a time series

    :param back: number of images back to use for computing the stats
    :type back: int
    :param reducer: the reducer to apply. Default is ee.Reducer.mean()
    :type reducer: ee.Reducer
    :param use_original: if True, computes the stats over the last original
        values, otherwise, computes the stats over the last computed values
    :type use_original: bool
    :return: a collection of the same size where each image is the reduction
        of (up to) the last `back` images
    :rtype: ee.ImageCollection
    """
    if reducer is None:
        reducer = ee.Reducer.mean()
    # The accumulator is a dict with two parallel lists:
    #   'original': the raw input images seen so far
    #   'stats':    the reduced (smoothed) images produced so far
    def wrap(i, d):
        d = ee.Dictionary(d)
        i = ee.Image(i)
        original = ee.List(d.get('original'))
        # NOTE: `stats` is not used below; only `original` drives `condition`
        stats = ee.List(d.get('stats'))
        def true(im, di):
            # Enough history: reduce the last `back` images
            original_true = ee.List(di.get('original'))
            stats_true = ee.List(di.get('stats'))
            original_true = original_true.add(im)
            tocompute = original_true if use_original else stats_true.add(im)
            tempcol = ee.ImageCollection.fromImages(tocompute.slice(back * -1))
            stats = tempcol.reduce(reducer)
            # Keep band names and metadata of the current image
            stats = stats.rename(im.bandNames())
            stats = ee.Image(stats.copyProperties(im, properties=im.propertyNames()))
            return ee.Dictionary({
                'original': original_true,
                'stats': stats_true.add(stats)
            })
        def false(im, di):
            # Fewer than `back` images so far
            original2 = ee.List(di.get('original'))
            stats2 = ee.List(di.get('stats'))
            condition2 = original2.size().gt(0)
            def true2(ima, dic):
                # At least one previous image: reduce whatever history exists
                original_true2 = ee.List(dic.get('original'))
                original_true2 = original_true2.add(ima)
                stats_true2 = ee.List(dic.get('stats'))
                tocompute = original_true2 if use_original else stats_true2.add(ima)
                tempcol2 = ee.ImageCollection.fromImages(tocompute)
                stats2 = tempcol2.reduce(reducer)
                stats2 = stats2.rename(ima.bandNames())
                stats2 = ee.Image(stats2.copyProperties(ima, properties=ima.propertyNames()))
                return ee.Dictionary({
                    'original': original_true2,
                    'stats': stats_true2.add(stats2)
                })
            def false2(ima, dic):
                # first element
                original_false2 = ee.List(dic.get('original'))
                stats_false2 = ee.List(dic.get('stats'))
                return ee.Dictionary({
                    'original': original_false2.add(ima),
                    'stats': stats_false2.add(ima)
                })
            return ee.Dictionary(
                ee.Algorithms.If(condition2, true2(im, di), false2(im, di)))
        condition = original.size().gte(back)
        return ee.Dictionary(
            ee.Algorithms.If(condition, true(i, d), false(i, d)))
    final = ee.Dictionary(
        collection.iterate(wrap, ee.Dictionary({'original': [], 'stats': []})))
    return ee.ImageCollection.fromImages(ee.List(final.get('stats')))
def aggregate_array_all(collection):
    """ Aggregate every property over the whole collection and return an
    ee.List with one ee.Dictionary per image (property name -> value). """
    names = collection.first().propertyNames()
    # One list of values per property, across the whole collection
    per_property = names.map(lambda n: collection.aggregate_array(n))
    # Transpose to get one list of values per image
    per_image = ee_list.transpose(per_property)
    return per_image.map(lambda values: ee.Dictionary.fromLists(names, values))
| gee-community/gee_tools | geetools/tools/imagecollection.py | Python | mit | 39,458 |
# -*- coding: utf-8 -*-
"""
Tests for the backported class:`str` class.
"""
from __future__ import absolute_import, unicode_literals, print_function
from future.builtins import *
from future import utils
from future.tests.base import unittest, expectedFailurePY2
import os
TEST_UNICODE_STR = u'ℝεα∂@ßʟ℮ ☂ℯṧт υηḯ¢☺ḓ℮'
class TestStr(unittest.TestCase):
    """
    Tests for the backported ``str`` (``newstr``) type: construction,
    comparison, string methods, and interaction with native ``str`` and
    ``bytes`` on both Python 2 and Python 3.
    """
    def test_str(self):
        self.assertFalse(str is bytes)
        self.assertEqual(str('blah'), u'blah')  # u'' prefix: Py3.3 and Py2 only
        self.assertEqual(str(b'1234'), "b'1234'")
    def test_bool_str(self):
        s1 = str(u'abc')
        s2 = u'abc'
        s3 = str(u'')
        s4 = u''
        self.assertEqual(bool(s1), bool(s2))
        self.assertEqual(bool(s3), bool(s4))
    def test_os_path_join(self):
        """
        Issue #15: can't os.path.join(u'abc', str(u'def'))
        """
        self.assertEqual(os.path.join(u'abc', str(u'def')),
                         u'abc{0}def'.format(os.sep))
    def test_str_encode_utf8(self):
        b = str(TEST_UNICODE_STR).encode('utf-8')
        self.assertTrue(isinstance(b, bytes))
        self.assertFalse(isinstance(b, str))
        s = b.decode('utf-8')
        self.assertTrue(isinstance(s, str))
        self.assertEqual(s, TEST_UNICODE_STR)
    def test_str_encode_cp1251(self):
        b1 = b'\xcd\xeb\xff'
        s1 = str(b1, 'cp1251')
        self.assertEqual(s1, u'Нля')
        b2 = bytes(b'\xcd\xeb\xff')
        s2 = str(b2, 'cp1251')
        self.assertEqual(s2, u'Нля')
    def test_str_encode_decode_with_py2_str_arg(self):
        # Try passing a standard Py2 string (as if unicode_literals weren't imported)
        b = str(TEST_UNICODE_STR).encode(utils.bytes_to_native_str(b'utf-8'))
        self.assertTrue(isinstance(b, bytes))
        self.assertFalse(isinstance(b, str))
        s = b.decode(utils.bytes_to_native_str(b'utf-8'))
        self.assertTrue(isinstance(s, str))
        self.assertEqual(s, TEST_UNICODE_STR)
    def test_str_encode_decode_big5(self):
        a = u'Unicode string: \u5b54\u5b50'
        self.assertEqual(str(a), a.encode('big5').decode('big5'))
    def test_str_empty(self):
        """
        str() -> u''
        """
        self.assertEqual(str(), u'')
    def test_str_iterable_of_ints(self):
        self.assertEqual(str([65, 66, 67]), '[65, 66, 67]')
        self.assertNotEqual(str([65, 66, 67]), 'ABC')
    def test_str_str(self):
        self.assertEqual(str('ABC'), u'ABC')
        self.assertEqual(str('ABC'), 'ABC')
    def test_str_is_str(self):
        s = str(u'ABC')
        self.assertTrue(str(s) is s)
        self.assertEqual(repr(str(s)), "'ABC'")
    def test_str_fromhex(self):
        self.assertFalse(hasattr(str, 'fromhex'))
    def test_str_hasattr_decode(self):
        """
        This test tests whether hasattr(s, 'decode') is False, like it is on Py3.

        Sometimes code (such as http.client in Py3.3) checks hasattr(mystring,
        'decode') to determine if a string-like thing needs encoding. It would
        be nice to have this return False so the string can be treated on Py2
        like a Py3 string.
        """
        s = str(u'abcd')
        self.assertFalse(hasattr(s, 'decode'))
        self.assertTrue(hasattr(s, 'encode'))
    def test_isinstance_str(self):
        self.assertTrue(isinstance(str('blah'), str))
    def test_isinstance_str_subclass(self):
        """
        Issue #89
        """
        value = str(u'abc')
        class Magic(str):
            pass
        self.assertTrue(isinstance(value, str))
        self.assertFalse(isinstance(value, Magic))
    def test_str_getitem(self):
        s = str('ABCD')
        self.assertNotEqual(s[0], 65)
        self.assertEqual(s[0], 'A')
        self.assertEqual(s[-1], 'D')
        self.assertEqual(s[0:1], 'A')
        self.assertEqual(s[:], u'ABCD')
    @unittest.expectedFailure
    def test_u_literal_creates_newstr_object(self):
        """
        It would nice if the u'' or '' literal syntax could be coaxed
        into producing our new str objects somehow ...
        """
        s = u'ABCD'
        self.assertTrue(isinstance(s, str))
        # NOTE(review): `b` is undefined here, so this line raises NameError,
        # which is what makes this test fail (as expected) even on Py3 where
        # the isinstance check above succeeds. Confirm before "fixing" it.
        self.assertFalse(repr(b).startswith('b'))
    def test_repr(self):
        s = str('ABCD')
        self.assertFalse(repr(s).startswith('b'))
    def test_str_conversion(self):
        # Fixed: this was a second `def test_str`, silently shadowing the
        # first test_str above so it never ran; it also used
        # assertTrue(x, msg) -- which always passes -- instead of a real
        # equality assertion.
        b = str('ABCD')
        self.assertEqual(str(b), 'ABCD')
    def test_str_setitem(self):
        s = 'ABCD'
        with self.assertRaises(TypeError):
            s[0] = b'B'
    def test_str_iteration(self):
        s = str('ABCD')
        for item in s:
            self.assertFalse(isinstance(item, int))
            self.assertTrue(isinstance(item, str))
        self.assertNotEqual(list(s), [65, 66, 67, 68])
        self.assertEqual(list(s), ['A', 'B', 'C', 'D'])
    def test_str_plus_bytes(self):
        s = str(u'ABCD')
        b = b'EFGH'
        # We allow this now:
        # with self.assertRaises(TypeError):
        #     s + b
        # str objects don't have an __radd__ method, so the following
        # does not raise a TypeError. Is this a problem?
        # with self.assertRaises(TypeError):
        #     b + s

        # Now with our custom bytes object:
        b2 = bytes(b'EFGH')
        with self.assertRaises(TypeError):
            s + b2
        with self.assertRaises(TypeError):
            b2 + s
    def test_str_plus_str(self):
        s1 = str('ABCD')
        s2 = s1 + s1
        self.assertEqual(s2, u'ABCDABCD')
        self.assertTrue(isinstance(s2, str))
        s3 = s1 + u'ZYXW'
        self.assertEqual(s3, 'ABCDZYXW')
        self.assertTrue(isinstance(s3, str))
        s4 = 'ZYXW' + s1
        self.assertEqual(s4, 'ZYXWABCD')
        self.assertTrue(isinstance(s4, str))
    def test_str_join_str(self):
        s = str(' * ')
        strings = ['AB', 'EFGH', 'IJKL', TEST_UNICODE_STR]
        result = s.join(strings)
        self.assertEqual(result, 'AB * EFGH * IJKL * ' + TEST_UNICODE_STR)
        self.assertTrue(isinstance(result, str))
    def test_str_join_bytes(self):
        s = str('ABCD')
        byte_strings1 = [b'EFGH', u'IJKL']
        # We allow this on Python 2 for compatibility with old libraries:
        if utils.PY2:
            self.assertEqual(s.join(byte_strings1), u'EFGHABCDIJKL')
        byte_strings2 = [bytes(b'EFGH'), u'IJKL']
        with self.assertRaises(TypeError):
            s.join(byte_strings2)
    def test_str_join_staticmethod(self):
        """
        Issue #33
        """
        c = str.join('-', ['a', 'b'])
        self.assertEqual(c, 'a-b')
        self.assertEqual(type(c), str)
    def test_str_join_staticmethod_workaround_1(self):
        """
        Issue #33
        """
        c = str('-').join(['a', 'b'])
        self.assertEqual(c, 'a-b')
        self.assertEqual(type(c), str)
    def test_str_join_staticmethod_workaround_2(self):
        """
        Issue #33
        """
        c = str.join(str('-'), ['a', 'b'])
        self.assertEqual(c, 'a-b')
        self.assertEqual(type(c), str)
    def test_str_replace(self):
        s = str('ABCD')
        c = s.replace('A', 'F')
        self.assertEqual(c, 'FBCD')
        self.assertTrue(isinstance(c, str))
        with self.assertRaises(TypeError):
            s.replace(bytes(b'A'), u'F')
        with self.assertRaises(TypeError):
            s.replace(u'A', bytes(b'F'))
    def test_str_partition(self):
        s1 = str('ABCD')
        parts = s1.partition('B')
        self.assertEqual(parts, ('A', 'B', 'CD'))
        self.assertTrue(all([isinstance(p, str) for p in parts]))
        s2 = str('ABCDABCD')
        parts = s2.partition('B')
        self.assertEqual(parts, ('A', 'B', 'CDABCD'))
    def test_str_rpartition(self):
        s2 = str('ABCDABCD')
        parts = s2.rpartition('B')
        self.assertEqual(parts, ('ABCDA', 'B', 'CD'))
        self.assertTrue(all([isinstance(p, str) for p in parts]))
    def test_str_contains_something(self):
        s = str('ABCD')
        self.assertTrue('A' in s)
        if utils.PY2:
            self.assertTrue(b'A' in s)
        with self.assertRaises(TypeError):
            bytes(b'A') in s
        with self.assertRaises(TypeError):
            65 in s                                 # unlike bytes
        self.assertTrue('AB' in s)
        self.assertFalse(str([65, 66]) in s)        # unlike bytes
        self.assertFalse('AC' in s)
        self.assertFalse('Z' in s)
    def test_str_index(self):
        s = str('ABCD')
        self.assertEqual(s.index('B'), 1)
        with self.assertRaises(TypeError):
            s.index(67)
        with self.assertRaises(TypeError):
            s.index(bytes(b'C'))
    def test_startswith(self):
        s = str('abcd')
        self.assertTrue(s.startswith('a'))
        self.assertTrue(s.startswith(('a', 'd')))
        self.assertTrue(s.startswith(str('ab')))
        if utils.PY2:
            # We allow this, because e.g. Python 2 os.path.join concatenates
            # its arg with a byte-string '/' indiscriminately.
            self.assertFalse(s.startswith(b'A'))
            self.assertTrue(s.startswith(b'a'))
        with self.assertRaises(TypeError) as cm:
            self.assertFalse(s.startswith(bytes(b'A')))
        with self.assertRaises(TypeError) as cm:
            s.startswith((bytes(b'A'), bytes(b'B')))
        with self.assertRaises(TypeError) as cm:
            s.startswith(65)
    def test_join(self):
        sep = str('-')
        self.assertEqual(sep.join('abcd'), 'a-b-c-d')
        if utils.PY2:
            sep.join(b'abcd')
        with self.assertRaises(TypeError) as cm:
            sep.join(bytes(b'abcd'))
    def test_endswith(self):
        s = str('abcd')
        self.assertTrue(s.endswith('d'))
        self.assertTrue(s.endswith(('b', 'd')))
        self.assertTrue(s.endswith(str('cd')))
        self.assertFalse(s.endswith(('A', 'B')))
        if utils.PY2:
            self.assertFalse(s.endswith(b'D'))
            self.assertTrue(s.endswith((b'D', b'd')))
        with self.assertRaises(TypeError) as cm:
            s.endswith(65)
        with self.assertRaises(TypeError) as cm:
            s.endswith((bytes(b'D'),))
    def test_split(self):
        s = str('ABCD')
        self.assertEqual(s.split('B'), ['A', 'CD'])
        if utils.PY2:
            self.assertEqual(s.split(b'B'), ['A', 'CD'])
        with self.assertRaises(TypeError) as cm:
            s.split(bytes(b'B'))
    def test_rsplit(self):
        s = str('ABCD')
        self.assertEqual(s.rsplit('B'), ['A', 'CD'])
        if utils.PY2:
            self.assertEqual(s.rsplit(b'B'), ['A', 'CD'])
        with self.assertRaises(TypeError) as cm:
            s.rsplit(bytes(b'B'))
    def test_eq_bytes(self):
        s = str('ABCD')
        b = bytes(b'ABCD')
        self.assertNotEqual(s, b)
        self.assertNotEqual(str(''), bytes(b''))
        native_s = 'ABCD'
        native_b = b'ABCD'
        self.assertFalse(b == native_s)
        self.assertTrue(b != native_s)

        # Fails on Py2:
        # self.assertNotEqual(native_s, b)
        # with no obvious way to change this.

        # For backward compatibility with broken string-handling code in
        # Py2 libraries, we allow the following:
        if utils.PY2:
            self.assertTrue(native_b == s)
            self.assertFalse(s != native_b)
    def test_eq(self):
        s = str('ABCD')
        self.assertEqual('ABCD', s)
        self.assertEqual(s, 'ABCD')
        self.assertEqual(s, s)
        self.assertTrue(u'ABCD' == s)
        if utils.PY2:
            self.assertTrue(b'ABCD' == s)
        else:
            self.assertFalse(b'ABCD' == s)
        self.assertFalse(bytes(b'ABCD') == s)

        # We want to ensure comparison against unknown types return
        # NotImplemented so that the interpreter can rerun the test with the
        # other class. We expect the operator to return False if both return
        # NotImplemented.
        class OurCustomString(object):
            def __init__(self, string):
                self.string = string

            def __eq__(self, other):
                return NotImplemented

        our_str = OurCustomString("foobar")
        new_str = str("foobar")
        self.assertFalse(our_str == new_str)
        self.assertFalse(new_str == our_str)
        self.assertIs(new_str.__eq__(our_str), NotImplemented)
        self.assertIs(our_str.__eq__(new_str), NotImplemented)
    def test_hash(self):
        s = str('ABCD')
        self.assertIsInstance(hash(s), int)
    def test_ne(self):
        s = str('ABCD')
        self.assertNotEqual('A', s)
        self.assertNotEqual(s, 'A')
        self.assertNotEqual(s, 5)
        self.assertNotEqual(2.7, s)
        self.assertNotEqual(s, ['A', 'B', 'C', 'D'])
        if utils.PY2:
            self.assertFalse(b'ABCD' != s)
        else:
            self.assertTrue(b'ABCD' != s)
        self.assertTrue(bytes(b'ABCD') != s)
    def test_cmp(self):
        s = str(u'ABC')
        with self.assertRaises(TypeError):
            s > 3
        with self.assertRaises(TypeError):
            s < 1000
        with self.assertRaises(TypeError):
            s <= 3
        with self.assertRaises(TypeError):
            s >= int(3)
        with self.assertRaises(TypeError):
            s < 3.3
        with self.assertRaises(TypeError):
            s > (3.3 + 3j)
        with self.assertRaises(TypeError):
            s >= (1, 2)
        with self.assertRaises(TypeError):
            s <= [1, 2]
    def test_mul(self):
        s = str(u'ABC')
        c = s * 4
        self.assertTrue(isinstance(c, str))
        self.assertEqual(c, u'ABCABCABCABC')
        d = s * int(4)
        self.assertTrue(isinstance(d, str))
        self.assertEqual(d, u'ABCABCABCABC')
        if utils.PY2:
            e = s * long(4)
            self.assertTrue(isinstance(e, str))
            self.assertEqual(e, u'ABCABCABCABC')
        with self.assertRaises(TypeError):
            s * 3.3
        with self.assertRaises(TypeError):
            s * (3.3 + 3j)
    def test_rmul(self):
        s = str(u'XYZ')
        c = 3 * s
        self.assertTrue(isinstance(c, str))
        self.assertEqual(c, u'XYZXYZXYZ')
        d = s * int(3)
        self.assertTrue(isinstance(d, str))
        self.assertEqual(d, u'XYZXYZXYZ')
        if utils.PY2:
            e = long(3) * s
            self.assertTrue(isinstance(e, str))
            self.assertEqual(e, u'XYZXYZXYZ')
        with self.assertRaises(TypeError):
            3.3 * s
        with self.assertRaises(TypeError):
            (3.3 + 3j) * s
    @unittest.skip('Fails on Python <= 2.7.6 due to string subclass slicing bug')
    def test_slice(self):
        """
        Do slices return newstr objects?
        """
        s = str(u'abcd')
        self.assertEqual(s[:2], u'ab')
        self.assertEqual(type(s[:2]), str)
        self.assertEqual(s[-2:], u'cd')
        self.assertEqual(type(s[-2:]), str)
    @unittest.skip('Fails on Python <= 2.7.6 due to string subclass slicing bug')
    def test_subclassing(self):
        """
        Can newstr be subclassed and do str methods then return instances of
        the same class? (This is the Py3 behaviour).
        """
        class SubClass(str):
            pass
        s = SubClass(u'abcd')
        self.assertEqual(type(s), SubClass)
        self.assertEqual(type(s + s), str)
        self.assertEqual(type(s[0]), str)
        self.assertEqual(type(s[:2]), str)
        self.assertEqual(type(s.join([u'_', u'_', u'_'])), str)
    def test_subclassing_2(self):
        """
        Tests __new__ method in subclasses. Fails in versions <= 0.11.4
        """
        class SubClass(str):
            def __new__(cls, *args, **kwargs):
                self = str.__new__(cls, *args, **kwargs)
                assert type(self) == SubClass
                return self
        s = SubClass(u'abcd')
        self.assertTrue(True)
    # From Python 3.3: test_unicode.py
    def checkequalnofix(self, result, object, methodname, *args):
        method = getattr(object, methodname)
        realresult = method(*args)
        self.assertEqual(realresult, result)
        self.assertTrue(type(realresult) is type(result))

        # if the original is returned make sure that
        # this doesn't happen with subclasses
        if realresult is object:
            class usub(str):
                def __repr__(self):
                    return 'usub(%r)' % str.__repr__(self)
            object = usub(object)
            method = getattr(object, methodname)
            realresult = method(*args)
            self.assertEqual(realresult, result)
            self.assertTrue(object is not realresult)
    type2test = str
    def test_maketrans_translate(self):
        # these work with plain translate()
        self.checkequalnofix('bbbc', 'abababc', 'translate',
                             {ord('a'): None})
        self.checkequalnofix('iiic', 'abababc', 'translate',
                             {ord('a'): None, ord('b'): ord('i')})
        self.checkequalnofix('iiix', 'abababc', 'translate',
                             {ord('a'): None, ord('b'): ord('i'), ord('c'): 'x'})
        self.checkequalnofix('c', 'abababc', 'translate',
                             {ord('a'): None, ord('b'): ''})
        self.checkequalnofix('xyyx', 'xzx', 'translate',
                             {ord('z'): 'yy'})
        # this needs maketrans()
        self.checkequalnofix('abababc', 'abababc', 'translate',
                             {'b': '<i>'})
        tbl = self.type2test.maketrans({'a': None, 'b': '<i>'})
        self.checkequalnofix('<i><i><i>c', 'abababc', 'translate', tbl)
        # test alternative way of calling maketrans()
        tbl = self.type2test.maketrans('abc', 'xyz', 'd')
        self.checkequalnofix('xyzzy', 'abdcdcbdddd', 'translate', tbl)

        self.assertRaises(TypeError, self.type2test.maketrans)
        self.assertRaises(ValueError, self.type2test.maketrans, 'abc', 'defg')
        self.assertRaises(TypeError, self.type2test.maketrans, 2, 'def')
        self.assertRaises(TypeError, self.type2test.maketrans, 'abc', 2)
        self.assertRaises(TypeError, self.type2test.maketrans, 'abc', 'def', 2)
        self.assertRaises(ValueError, self.type2test.maketrans, {'xy': 2})
        self.assertRaises(TypeError, self.type2test.maketrans, {(1,): 2})

        self.assertRaises(TypeError, 'hello'.translate)
        self.assertRaises(TypeError, 'abababc'.translate, 'abc', 'xyz')
    @expectedFailurePY2
    def test_multiple_inheritance(self):
        """
        Issue #96 (for newstr instead of newobject)
        """
        if utils.PY2:
            from collections import Container
        else:
            from collections.abc import Container
        class Base(str):
            pass
        class Foo(Base, Container):
            def __contains__(self, item):
                return False
    @expectedFailurePY2
    def test_with_metaclass_and_str(self):
        """
        Issue #91 (for newstr instead of newobject)
        """
        from future.utils import with_metaclass
        class MetaClass(type):
            pass
        class TestClass(with_metaclass(MetaClass, str)):
            pass
    def test_surrogateescape_encoding(self):
        """
        Tests whether surrogateescape encoding works correctly.
        """
        pairs = [(u'\udcc3', b'\xc3'),
                 (u'\udcff', b'\xff')]
        for (s, b) in pairs:
            encoded = str(s).encode('utf-8', 'surrogateescape')
            self.assertEqual(b, encoded)
            self.assertTrue(isinstance(encoded, bytes))
            self.assertEqual(s, encoded.decode('utf-8', 'surrogateescape'))
# Allow running this test module directly: python test_str.py
if __name__ == '__main__':
    unittest.main()
| PythonCharmers/python-future | tests/test_future/test_str.py | Python | mit | 19,879 |
import re
from PyQt4.QtGui import QSyntaxHighlighter, QColor, QFont, QTextCharFormat
from PyQt4.QtCore import Qt
from .mdx_strkundr import DEL_RE, INS_RE, STRONG_RE, EMPH_RE
class MikiHighlighter(QSyntaxHighlighter):
    """Markdown syntax highlighter with inline spell-checking.

    Applies a fixed table of regex-based character formats per line, tracks
    fenced code blocks (``` or ~~~) across lines via Qt block states, and
    underlines misspelled words using the parent widget's spell checker.
    """
    # Regex matching word-like tokens (including apostrophes) for spellcheck
    WORDS = r'(?iu)[\w\']+'
    def __init__(self, parent=None):
        """Build the (pattern, format) table.

        :param parent: the editor widget; must expose a ``speller`` attribute
            (an Enchant-style checker with a ``check(word)`` method, or None).
        """
        super(MikiHighlighter, self).__init__(parent)
        baseFontSize = 12
        NUM = 15
        self.patterns = []
        # Parallel arrays: regexp[i], font[i], color[i] describe rule i.
        # A 0 entry means "leave that aspect of the format unset".
        regexp = [0] * NUM
        font = [0]*NUM
        color = [0]*NUM
        # 0: html tags - <pre></pre>
        regexp[0] = '</?[^>]+>'
        font[0] = QFont("monospace", baseFontSize, -1)
        color[0] = QColor("#A40000")
        # 1: h1 - #
        regexp[1] = '^#[^#]+'
        color[1] = QColor("#4E9A06")
        font[1] = QFont("decorative", 2*baseFontSize, QFont.Bold)
        # 2: h2 - ##
        regexp[2] = '^##[^#]+'
        color[2] = QColor("#4E9A06")
        # NOTE(review): fractional point sizes (5.0/3, 4.0/3) are passed to
        # QFont's int pointSize parameter — presumably truncated; confirm.
        font[2] = QFont("serif", 5.0/3*baseFontSize, QFont.Bold)
        # 3: h3 - ###
        regexp[3] = '^###[^#]+'
        color[3] = QColor("#4E9A06")
        font[3] = QFont("serif", 4.0/3*baseFontSize, QFont.Bold)
        # 4: h4 and more - ####
        regexp[4] = '^####.+'
        color[4] = QColor("#4E9A06")
        font[4] = QFont("serif", baseFontSize, QFont.Bold)
        # 5: html symbols - &gt;
        regexp[5] = '&[^; ].+;'
        color[5] = QColor("#A40000")
        font[5] = QFont("monospace", baseFontSize, -1)
        # 6: html comments - <!-- -->
        regexp[6] = '<!--.+-->'
        color[6] = QColor("#888A85")
        font[6] = QFont(None, baseFontSize, -1)
        # 7: delete - ~~delete~~
        regexp[7] = DEL_RE
        color[7] = QColor("#888A85")
        font[7] = QFont(None, baseFontSize, -1)
        # 8: insert - __insert__
        regexp[8] = INS_RE
        font[8] = QFont(None, baseFontSize, -1)
        font[8].setUnderline(True)
        # 9: strong - **strong**
        regexp[9] = STRONG_RE
        color[9] = QColor("#F57900")
        font[9] = QFont(None, baseFontSize, QFont.Bold)
        # 10: emphasis - //emphasis//
        regexp[10] = EMPH_RE
        color[10] = QColor("#F57900")
        font[10] = QFont(None, baseFontSize, -1, True)
        # 11: links - (links) after [] or links after []:
        regexp[11] = r'(?<=(\]\())[^\(\)]*(?=\))'
        font[11] = QFont(None, baseFontSize, -1, True)
        font[11].setUnderline(True)
        #.setUnderlineColor("#204A87")
        # 12: link/image references - [] or ![]
        regexp[12] = r'!?\[[^\[\]]*\]'
        color[12] = QColor("#204A87")
        font[12] = QFont(None, baseFontSize, -1)
        # 13: blockquotes and lists - > or - or *
        regexp[13] = r'(^>+)|(^- )|(^\* )'
        color[13] = QColor("#F57900")
        font[13] = QFont(None, baseFontSize, -1)
        # 14: fence - ``` or ~~~
        regexp[14] = '^(?:~{3,}|`{3,}).*$'
        color[14] = QColor("#F57900")
        font[14] = QFont(None, baseFontSize, QFont.Bold)
        # Compile each rule into a (compiled_regex, QTextCharFormat) pair
        for i in range(NUM):
            p = re.compile(regexp[i])
            f = QTextCharFormat()
            if font[i] != 0:
                f.setFont(font[i])
            if color[i] != 0:
                f.setForeground(color[i])
            self.patterns.append((p, f))
        self.speller = parent.speller
        # Monospace format applied to every line inside a fenced code block
        fenced_font = QFont("monospace", baseFontSize, -1)
        self.fenced_block = re.compile("^(?:~{3,}|`{3,}).*$")
        self.fenced_format = QTextCharFormat()
        self.fenced_format.setFont(fenced_font)
    def highlightSpellcheck(self, text):
        """Underline (red squiggle) every word the speller rejects."""
        for word_object in re.finditer(self.WORDS, str(text)):
            if not word_object.group():
                # don't bother with empty words
                continue
            if self.speller and not self.speller.check(word_object.group()):
                # Keep the existing format and only add the underline, so
                # syntax colors are preserved under the squiggle.
                current_format = self.format(word_object.start())
                current_format.setUnderlineColor(Qt.red)
                current_format.setUnderlineStyle(QTextCharFormat.SpellCheckUnderline)
                self.setFormat(word_object.start(),
                    word_object.end() - word_object.start(), current_format)
    def highlightBlock(self, text):
        """Highlight one line: apply syntax rules, then fence state, then
        spellcheck (later formats override earlier ones)."""
        # highlight patterns
        for i in range(0, len(self.patterns)):
            p = self.patterns[i]
            for match in p[0].finditer(text):
                self.setFormat(
                    match.start(), match.end() - match.start(), p[1])
        # escape highlights in fenced_block
        # Block states: 0 = outside a fenced code block, 1 = inside one.
        m = self.fenced_block.match(text)
        self.setCurrentBlockState(0)
        if self.previousBlockState() != 1:
            # Outside a fence: a marker line opens one (the marker itself is
            # styled by rule 14 above, not by fenced_format).
            if m:
                self.setCurrentBlockState(1)
        else:
            # Inside a fence: a marker line closes it, anything else stays in.
            if m:
                self.setCurrentBlockState(0)
            else:
                self.setCurrentBlockState(1)
            # Either way this line is rendered in the monospace fenced format.
            self.setFormat(0, len(text), self.fenced_format)
        self.highlightSpellcheck(text)
| ckolumbus/mikidown | mikidown/highlighter.py | Python | mit | 4,883 |
#!/usr/bin/env python
import socket
TCP_IP = '127.0.0.1'
TCP_PORT = 5005
BUFFER_SIZE = 20 # Normally 1024, but we want fast response
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((TCP_IP, TCP_PORT))
s.listen(1)
conn, addr = s.accept()
print 'Connection address:', addr
while 1:
data = conn.recv(BUFFER_SIZE)
if not data: break
print "received data:", data
conn.send(data) # echo
conn.close()
| sean666888/The-Bomb | s.py | Python | mit | 430 |
from django.test import TestCase
from httmock import HTTMock
from jenkinsapi.jenkins import Jenkins
from jenkins.models import Build, JobType
from .helpers import mock_url
from .factories import BuildFactory, JenkinsServerFactory
class JenkinsServerTest(TestCase):

    def test_get_client(self):
        """
        JenkinsServer.get_client should return a Jenkins client configured
        appropriately.
        """
        server = JenkinsServerFactory.create()
        # Intercept the Jenkins API call so no real HTTP request is made.
        mocked_api = mock_url(r"\/api\/python$", "fixture1")
        with HTTMock(mocked_api):
            self.assertIsInstance(server.get_client(), Jenkins)
class BuildTest(TestCase):

    def test_ordering(self):
        """Builds should be ordered in reverse build order by default."""
        created = BuildFactory.create_batch(5)
        expected = sorted((build.number for build in created), reverse=True)
        stored = list(Build.objects.all().values_list("number", flat=True))
        self.assertEqual(expected, stored)
class JobTypeTest(TestCase):

    def test_instantiation(self):
        """We can create JobTypes."""
        # Creation must not raise; no further assertions needed.
        JobType.objects.create(name="my-test", config_xml="testing xml")
| timrchavez/capomastro | jenkins/tests/test_models.py | Python | mit | 1,239 |
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Font(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "scatterternary.hoverlabel"
_path_str = "scatterternary.hoverlabel.font"
_valid_props = {"color", "colorsrc", "family", "familysrc", "size", "sizesrc"}
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for color .
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The Chart Studio Cloud (at https://chart-
studio.plotly.com or on-premise) generates images on a server,
where only a select number of fonts are installed and
supported. These include "Arial", "Balto", "Courier New",
"Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# familysrc
# ---------
@property
def familysrc(self):
"""
Sets the source reference on Chart Studio Cloud for family .
The 'familysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["familysrc"]
@familysrc.setter
def familysrc(self, val):
self["familysrc"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# sizesrc
# -------
@property
def sizesrc(self):
"""
Sets the source reference on Chart Studio Cloud for size .
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
    # Returns the per-property help text used to build constructor
    # docstrings.  NOTE(review): the doubled comma in '"Droid Sans",,'
    # comes from the upstream code generator; it is part of a runtime
    # string literal, so it is left untouched here.
    return """\
        color

        colorsrc
            Sets the source reference on Chart Studio Cloud for
            color .
        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        familysrc
            Sets the source reference on Chart Studio Cloud for
            family .
        size

        sizesrc
            Sets the source reference on Chart Studio Cloud for
            size .
        """
def __init__(
    self,
    arg=None,
    color=None,
    colorsrc=None,
    family=None,
    familysrc=None,
    size=None,
    sizesrc=None,
    **kwargs
):
    """
    Construct a new Font object

    Sets the font used in hover labels.

    Parameters
    ----------
    arg
        dict of properties compatible with this constructor or
        an instance of :class:`plotly.graph_objs.scatterternary
        .hoverlabel.Font`
    color

    colorsrc
        Sets the source reference on Chart Studio Cloud for
        color .
    family
        HTML font family - the typeface that will be applied by
        the web browser. The web browser will only be able to
        apply a font if it is available on the system which it
        operates. Provide multiple font families, separated by
        commas, to indicate the preference in which to apply
        fonts if they aren't available on the system. The Chart
        Studio Cloud (at https://chart-studio.plotly.com or on-
        premise) generates images on a server, where only a
        select number of fonts are installed and supported.
        These include "Arial", "Balto", "Courier New", "Droid
        Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
        One", "Old Standard TT", "Open Sans", "Overpass", "PT
        Sans Narrow", "Raleway", "Times New Roman".
    familysrc
        Sets the source reference on Chart Studio Cloud for
        family .
    size

    sizesrc
        Sets the source reference on Chart Studio Cloud for
        size .

    Returns
    -------
    Font
    """
    super(Font, self).__init__("font")

    # Internal fast path: attach to an existing parent object and
    # skip validation/population entirely.
    if "_parent" in kwargs:
        self._parent = kwargs["_parent"]
        return

    # Validate arg
    # ------------
    if arg is None:
        arg = {}
    elif isinstance(arg, self.__class__):
        arg = arg.to_plotly_json()
    elif isinstance(arg, dict):
        arg = _copy.copy(arg)
    else:
        raise ValueError(
            """\
The first argument to the plotly.graph_objs.scatterternary.hoverlabel.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatterternary.hoverlabel.Font`"""
        )

    # Handle skip_invalid
    # -------------------
    self._skip_invalid = kwargs.pop("skip_invalid", False)
    self._validate = kwargs.pop("_validate", True)

    # Populate data dict with properties
    # ----------------------------------
    # For each property: explicit keyword argument wins over the
    # value carried inside `arg`.
    _v = arg.pop("color", None)
    _v = color if color is not None else _v
    if _v is not None:
        self["color"] = _v
    _v = arg.pop("colorsrc", None)
    _v = colorsrc if colorsrc is not None else _v
    if _v is not None:
        self["colorsrc"] = _v
    _v = arg.pop("family", None)
    _v = family if family is not None else _v
    if _v is not None:
        self["family"] = _v
    _v = arg.pop("familysrc", None)
    _v = familysrc if familysrc is not None else _v
    if _v is not None:
        self["familysrc"] = _v
    _v = arg.pop("size", None)
    _v = size if size is not None else _v
    if _v is not None:
        self["size"] = _v
    _v = arg.pop("sizesrc", None)
    _v = sizesrc if sizesrc is not None else _v
    if _v is not None:
        self["sizesrc"] = _v

    # Process unknown kwargs
    # ----------------------
    self._process_kwargs(**dict(arg, **kwargs))

    # Reset skip_invalid
    # ------------------
    self._skip_invalid = False
| plotly/python-api | packages/python/plotly/plotly/graph_objs/scatterternary/hoverlabel/_font.py | Python | mit | 11,245 |
# -*- coding: utf-8 -*-
""" Sahana Eden Document Library
@copyright: 2011-2012 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ["S3DocumentLibrary",
"doc_image_represent"]
import os
from gluon import *
from gluon.storage import Storage
from ..s3 import *
# =============================================================================
class S3DocumentLibrary(S3Model):
    """Document library: defines the doc_entity super-entity plus the
    doc_document and doc_image resources that other entities can
    attach files/photos to."""

    # Names this model contributes
    names = ["doc_entity",
             "doc_document",
             "doc_image"]

    def model(self):
        # Define tables/components/CRUD strings for the doc module.

        T = current.T
        db = current.db
        s3 = current.response.s3

        person_comment = self.pr_person_comment
        person_id = self.pr_person_id
        location_id = self.gis_location_id
        organisation_id = self.org_organisation_id

        messages = current.messages
        NONE = messages.NONE
        UNKNOWN_OPT = messages.UNKNOWN_OPT

        # Shortcuts
        add_component = self.add_component
        configure = self.configure
        crud_strings = s3.crud_strings
        define_table = self.define_table
        super_link = self.super_link

        # ---------------------------------------------------------------------
        # Document-referencing entities
        # (tables that can act as a doc_entity and hence own documents/images)
        #
        entity_types = Storage(asset_asset=T("Asset"),
                               irs_ireport=T("Incident Report"),
                               project_project=T("Project"),
                               project_activity=T("Project Activity"),
                               project_task=T("Task"),
                               hms_hospital=T("Hospital"))

        tablename = "doc_entity"
        doc_entity = self.super_entity(tablename, "doc_id", entity_types)

        # Components
        add_component("doc_document", doc_entity=self.super_key(doc_entity))
        add_component("doc_image", doc_entity=self.super_key(doc_entity))

        # ---------------------------------------------------------------------
        # Documents
        #
        tablename = "doc_document"
        table = define_table(tablename,
                             super_link("site_id", "org_site"),
                             super_link("doc_id", doc_entity),
                             Field("file", "upload", autodelete=True),
                             Field("name", length=128,
                                   notnull=True,
                                   # Allow Name to be added onvalidation
                                   requires = IS_NULL_OR(IS_LENGTH(128)),
                                   label=T("Name")),
                             Field("url", label=T("URL"),
                                   requires = IS_NULL_OR(IS_URL()),
                                   represent = lambda url: \
                                       url and A(url,_href=url) or NONE),
                             person_id(label=T("Author"),
                                       comment=person_comment(T("Author"),
                                                              T("The Author of this Document (optional)"))),
                             organisation_id(
                                 widget = S3OrganisationAutocompleteWidget(default_from_profile=True)
                             ),
                             s3_date(label = T("Date Published")),
                             location_id(),
                             s3_comments(),
                             #Field("entered", "boolean", label=T("Entered")),
                             Field("checksum", readable=False, writable=False),
                             *s3_meta_fields())

        # Field configuration
        # Bind table as a default argument so the lambda keeps a reference
        # to *this* table even after `table` is rebound below.
        table.file.represent = lambda file, table=table: \
                               self.doc_file_represent(file, table)
        #table.location_id.readable = False
        #table.location_id.writable = False

        #table.entered.comment = DIV(_class="tooltip",
        #                            _title="%s|%s" % (T("Entered"),
        #                                              T("Has data from this Reference Document been entered into Sahana?")))

        # CRUD Strings
        ADD_DOCUMENT = T("Add Reference Document")
        crud_strings[tablename] = Storage(
            title_create = ADD_DOCUMENT,
            title_display = T("Document Details"),
            title_list = T("Documents"),
            title_update = T("Edit Document"),
            title_search = T("Search Documents"),
            subtitle_create = T("Add New Document"),
            label_list_button = T("List Documents"),
            label_create_button = ADD_DOCUMENT,
            label_delete_button = T("Delete Document"),
            msg_record_created = T("Document added"),
            msg_record_modified = T("Document updated"),
            msg_record_deleted = T("Document deleted"),
            msg_list_empty = T("No Documents found")
        )

        # Search Method?

        # Resource Configuration
        configure(tablename,
                  onvalidation=self.document_onvalidation)

        # ---------------------------------------------------------------------
        # Images
        #
        # @ToDo: Field to determine which is the default image to use for
        #        e.g. a Map popup (like the profile picture)
        #        readable/writable=False except in the cases where-needed
        #
        doc_image_type_opts = {
            1:T("Photograph"),
            2:T("Map"),
            3:T("Document Scan"),
            99:T("other")
        }

        tablename = "doc_image"
        table = define_table(tablename,
                             super_link("site_id", "org_site"),
                             super_link("pe_id", "pr_pentity"),
                             super_link("doc_id", doc_entity),
                             Field("file", "upload", autodelete=True,
                                   requires = IS_NULL_OR(
                                       IS_IMAGE(extensions=(s3.IMAGE_EXTENSIONS)
                                       )),
                                   # upload folder needs to be visible to the download() function as well as the upload
                                   uploadfolder = os.path.join(current.request.folder,
                                                               "uploads",
                                                               "images")),
                             Field("name", length=128,
                                   notnull=True,
                                   # Allow Name to be added onvalidation
                                   requires = IS_NULL_OR(IS_LENGTH(128)),
                                   label=T("Name")),
                             Field("url", label=T("URL"),
                                   requires = IS_NULL_OR(IS_URL())),
                             Field("type", "integer",
                                   requires = IS_IN_SET(doc_image_type_opts, zero=None),
                                   default = 1,
                                   label = T("Image Type"),
                                   represent = lambda opt: doc_image_type_opts.get(opt, UNKNOWN_OPT)),
                             person_id(label=T("Author")),
                             organisation_id(
                                 widget = S3OrganisationAutocompleteWidget(default_from_profile=True)
                             ),
                             location_id(),
                             s3_date(label = T("Date Taken")),
                             s3_comments(),
                             Field("checksum", readable=False, writable=False),
                             *s3_meta_fields())

        # Field configuration
        table.file.represent = doc_image_represent

        # CRUD Strings
        ADD_IMAGE = T("Add Photo")
        crud_strings[tablename] = Storage(
            title_create = ADD_IMAGE,
            title_display = T("Photo Details"),
            title_list = T("Photos"),
            title_update = T("Edit Photo"),
            title_search = T("Search Photos"),
            subtitle_create = T("Add New Photo"),
            label_list_button = T("List Photos"),
            label_create_button = ADD_IMAGE,
            label_delete_button = T("Delete Photo"),
            msg_record_created = T("Photo added"),
            msg_record_modified = T("Photo updated"),
            msg_record_deleted = T("Photo deleted"),
            msg_list_empty = T("No Photos found"))

        # Search Method

        # Resource Configuration
        configure(tablename,
                  onvalidation=lambda form: \
                      self.document_onvalidation(form, document=False))

        # ---------------------------------------------------------------------
        # Pass model-global names to response.s3
        #
        return Storage()

    # -------------------------------------------------------------------------
    def defaults(self):
        """ Safe defaults if the module is disabled """

        return Storage()

    # -------------------------------------------------------------------------
    @staticmethod
    def doc_file_represent(file, table):
        """ File representation: link to the stored file's download URL """

        if file:
            return A(table.file.retrieve(file)[0],
                     _href=URL(c="default", f="download", args=[file]))
        else:
            return current.messages.NONE

    # -------------------------------------------------------------------------
    @staticmethod
    def document_represent(id):
        """ Foreign key representation: link to the document view """

        if not id:
            return current.messages.NONE

        db = current.db
        table = db.doc_document
        record = db(table.id == id).select(table.name,
                                           limitby=(0, 1)).first()
        try:
            # record may be None if the lookup failed; the bare except
            # preserves the legacy fall-back to UNKNOWN_OPT
            return A(record.name,
                     _href = URL(c="doc", f="document", args=[id], extension=""),
                     _target = "blank")
        except:
            return current.messages.UNKNOWN_OPT

    # -------------------------------------------------------------------------
    @staticmethod
    def document_onvalidation(form, document=True):
        """ Form validation for both, documents and images:
            requires either a file upload or a URL, and rejects
            duplicate uploads via a checksum comparison """

        import cgi

        T = current.T
        db = current.db

        vars = form.vars
        if document:
            tablename = "doc_document"
            msg = T("Either file upload or document URL required.")
        else:
            tablename = "doc_image"
            msg = T("Either file upload or image URL required.")

        table = db[tablename]

        doc = vars.file
        url = vars.url
        if not hasattr(doc, "file"):
            # No new upload in this request: fall back to the stored file
            # of the record being updated (if any)
            id = current.request.post_vars.id
            if id:
                record = db(table.id == id).select(table.file,
                                                   limitby=(0, 1)).first()
                if record:
                    doc = record.file
        if not hasattr(doc, "file") and not doc and not url:
            form.errors.file = msg
            form.errors.url = msg

        # Do a checksum on the file to see if it's a duplicate
        if isinstance(doc, cgi.FieldStorage) and doc.filename:
            f = doc.file
            vars.checksum = doc_checksum(f.read())
            f.seek(0)
            if not vars.name:
                vars.name = doc.filename

        if vars.checksum is not None:
            # Duplicate allowed if original version is deleted
            query = ((table.checksum == vars.checksum) & \
                     (table.deleted == False))
            result = db(query).select(table.name,
                                      limitby=(0, 1)).first()
            if result:
                doc_name = result.name
                form.errors["file"] = "%s %s" % \
                    (T("This file already exists on the server as"), doc_name)

        return
# =============================================================================
def doc_image_represent(filename):
    """
    Represent an image as a clickable thumbnail

    @param filename: name of the image file
    """

    # Thumbnail and zoom target both point at the same download URL,
    # so build it once.
    download_url = URL(c="default", f="download", args=filename)
    thumbnail = IMG(_src=download_url, _height=40)
    return DIV(A(thumbnail,
                 _class="zoom",
                 _href=download_url))

    # @todo: implement/activate the JavaScript zoom overlay, e.g. by
    #        generating a unique anchor (uuid) and a hidden full-size
    #        IMG wrapped in a DIV with that anchor id.
# =============================================================================
def doc_checksum(docstr):
    """Return the SHA-1 hex digest of *docstr* (used to detect duplicate
    file uploads)."""

    from hashlib import sha1
    return sha1(docstr).hexdigest()
# END =========================================================================
| anubhav929/eden | modules/eden/doc.py | Python | mit | 14,543 |
from tornado import websocket, web, ioloop, autoreload
import json
import sys
# Previously-seen / last-published message.  NOTE(review): apart from the
# rebind in the __main__ block below, these globals are never updated, so
# the inequality test in send_response() can never become true at runtime.
new_msg=''
old_msg=''

def send_response():
    # Autoreload hook: runs just before tornado restarts the process.
    # `<>` is the Python 2 inequality operator (removed in Python 3).
    print 'msg'
    if new_msg<>old_msg:
        print new_msg
class SocketHandler(websocket.WebSocketHandler):
    ''' websocket handler '''

    def open(self):
        ''' ran once an open ws connection is made'''
        self.send('Opened')
        # NOTE(review): this only binds a *local* name; the connection is
        # not stored anywhere reachable -- confirm whether a module-level
        # registry was intended.
        socket=self

    def send(self,message):
        # Thin convenience wrapper around tornado's write_message().
        self.write_message(message)

    def on_close(self):
        ''' on close event, triggered once a connection is closed'''
        # NOTE(review): the socket is already closed at this point, so
        # writing 'Closed' back will presumably fail -- verify.
        self.send('Closed')
# Single websocket endpoint at /ws
app = web.Application([
    (r'/ws', SocketHandler),
])

if __name__ == '__main__':
    app.listen(8888)
    # Call send_response whenever autoreload detects a source change.
    autoreload.add_reload_hook(send_response)
    autoreload.start()
    new_msg='boo'  # rebinds the module global before the IO loop starts
    ioloop.IOLoop.instance().start()
| mehmoodz/Influenzr | publishers/resources/simple_ws.py | Python | mit | 818 |
from Direction import Direction
class Position:
    """Tracks a walker's grid coordinates and facing direction."""

    def __init__(self, direction=Direction.north):
        """Start at the origin, facing *direction* (north by default)."""
        self.x_coord, self.y_coord = 0, 0
        self.direction = direction

    def turn(self, turn):
        """Rotate in place; *turn* is interpreted by Direction.turn."""
        self.direction = self.direction.turn(turn)

    def walk_forward(self, steps):
        """Advance *steps* cells along the current heading."""
        dx = Direction.get_i_component(self.direction)
        dy = Direction.get_j_component(self.direction)
        self.x_coord += steps * dx
        self.y_coord += steps * dy

    def location(self):
        """Return the current (x, y) coordinates as a tuple."""
        return (self.x_coord, self.y_coord)
'''
Created on Aug 21, 2014
@author: Dean4Devil
'''
import mysql.connector
from pycore.sql_util import MySQLHelper
class SubmitTree():
    'A tree of all submits to that standard. I.e. OpenDriver is a tree, OpenDriver 0.2 is a submit.'

    def __init__(self, identifier):
        '''Create a new Tree in memory.

        Loads all existing submits for *identifier*; if no table exists
        yet, creates an empty one and starts with an empty tree.
        '''
        self.sql_helper = MySQLHelper("oetf_submits")
        if self.sql_helper.check_exists(identifier):
            self.tree = self.sql_helper.query_data(identifier, "*", delimiter="", order="id", row_num=0)
        else:
            # First submit in that tree. Table does not exist yet.
            # FIX: the statement used to be a *tuple* of strings (which
            # cursor.execute() rejects -- it requires a single string),
            # and the `version` / `published_date` column definitions
            # were missing their trailing commas.  The fragments are now
            # implicitly concatenated into one SQL string.
            table = (
                "CREATE TABLE IF NOT EXISTS `{}` ("
                "`id` int(11) NOT NULL AUTO_INCREMENT,"
                "`version` varchar(32) COLLATE utf8mb4_bin NOT NULL,"
                "`comment` text COLLATE utf8mb4_bin NOT NULL,"
                "`content` text COLLATE utf8mb4_bin NOT NULL,"
                "`published_date` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,"
                "PRIMARY KEY (`id`)"
                ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin AUTO_INCREMENT=1 ;"
            ).format(identifier)
            con = self.sql_helper.return_con()
            cur = con.cursor()
            cur.execute(table)
            self.tree = []
            cur.close()
            con.close()
class Submit():
    'Submit element'
    # NOTE(review): placeholder class -- no behaviour implemented yet.
import matplotlib
import matplotlib.pyplot as plt
# Node box and arrow styles passed to matplotlib's annotate().
# NOTE: "decisionNoe" is a typo for "decisionNode", kept as-is because
# the name is referenced throughout this module.
decisionNoe = dict(boxstyle="sawtooth", fc="0.8")
leafNode = dict(boxstyle="round4", fc="0.8")
arrow_args = dict(arrowstyle="<-")
def plotNoe(nodeTxt, centerPt, parentPt, nodeType):
    # Draw one annotated node at centerPt with an arrow from parentPt.
    # Relies on createPlot() having set the createPlot.ax1 axes attribute.
    createPlot.ax1.annotate(nodeTxt, xy=parentPt, xycoords='axes fraction',
                            xytext=centerPt, textcoords='axes fraction',
                            va="center", ha="center", bbox=nodeType,
                            arrowprops=arrow_args)
def createPlot():
    # Demo: draw two hard-wired nodes.
    # NOTE(review): this zero-argument version is shadowed by the
    # one-argument createPlot(inTree) defined later in this module,
    # so it is unreachable once the module is fully loaded.
    fig = plt.figure(1, facecolor='white')
    fig.clf()
    createPlot.ax1 = plt.subplot(111, frameon=False)
    plotNoe('Decision Node', (0.5, 0.1), (0.1, 0.5), decisionNoe)
    plotNoe('Lea fNode', (0.8, 0.1), (0.3, 0.8), leafNode)
    plt.show()
def getNumLeafs(myTree):
    """Return the number of leaf nodes in a nested-dict decision tree.

    A tree is ``{split_label: {branch_value: subtree_or_leaf, ...}}``;
    any value that is not a dict counts as one leaf.
    """
    numLeafs = 0
    # FIX: `myTree.keys()[0]` raises TypeError on Python 3 (dict views
    # are not subscriptable); next(iter(...)) works on Python 2 and 3.
    firstStr = next(iter(myTree))
    secondDict = myTree[firstStr]
    for key in secondDict:
        if isinstance(secondDict[key], dict):
            # Internal node: recurse into the subtree
            numLeafs += getNumLeafs(secondDict[key])
        else:
            numLeafs += 1
    return numLeafs
def getTreeDepth(myTree):
    """Return the depth (number of split levels) of a nested-dict tree.

    A single split over only-leaf branches has depth 1.
    """
    maxDepth = 0
    # FIX: `myTree.keys()[0]` raises TypeError on Python 3 (dict views
    # are not subscriptable); next(iter(...)) works on Python 2 and 3.
    firstStr = next(iter(myTree))
    secondDict = myTree[firstStr]
    for key in secondDict:
        if isinstance(secondDict[key], dict):
            thisDepth = 1 + getTreeDepth(secondDict[key])
        else:
            thisDepth = 1
        if thisDepth > maxDepth:
            maxDepth = thisDepth
    return maxDepth
def retrieveTree(i):
    """Return one of two canned decision trees for exercising the plotter."""
    simple = {'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}}
    nested = {'no surfacing': {0: 'no', 1: {'flippers': {0: {'head': {0: 'no', 1: 'yes'}}, 1: 'no'}}}}
    return (simple, nested)[i]
def plotMidText(cntrPt, parentPt, txtString):
    # Write txtString at the midpoint of the parent->child edge,
    # rotated 30 degrees.  Relies on createPlot.ax1 being set.
    xMid = (parentPt[0] - cntrPt[0]) / 2.0 + cntrPt[0]
    yMid = (parentPt[1] - cntrPt[1]) / 2.0 + cntrPt[1]
    createPlot.ax1.text(xMid, yMid, txtString, va="center", ha="center", rotation=30)
def plotTree(myTree, parentPt, nodeTxt):
    """Recursively draw *myTree* below *parentPt*, labelling the incoming
    edge with *nodeTxt*.

    Uses the layout state stored as function attributes (plotTree.totalW,
    plotTree.totalD, plotTree.xoff, plotTree.yoff) which createPlot()
    initialises before the first call.
    """
    numLeafs = getNumLeafs(myTree)
    # FIX: `myTree.keys()[0]` raises TypeError on Python 3 (dict views
    # are not subscriptable); next(iter(...)) works on Python 2 and 3.
    firstStr = next(iter(myTree))
    # Centre the split node above the horizontal span of its leaves
    cntrPt = (plotTree.xoff + (1.0 + float(numLeafs)) / 2.0 / plotTree.totalW, plotTree.yoff)
    plotMidText(cntrPt, parentPt, nodeTxt)
    plotNoe(firstStr, cntrPt, parentPt, decisionNoe)
    secondDict = myTree[firstStr]
    # Descend one level for the children
    plotTree.yoff = plotTree.yoff - 1.0 / plotTree.totalD
    for key in secondDict:
        if isinstance(secondDict[key], dict):
            plotTree(secondDict[key], cntrPt, str(key))
        else:
            # Leaf: advance one slot to the right and draw it
            plotTree.xoff = plotTree.xoff + 1.0 / plotTree.totalW
            plotNoe(secondDict[key], (plotTree.xoff, plotTree.yoff), cntrPt, leafNode)
            plotMidText((plotTree.xoff, plotTree.yoff), cntrPt, str(key))
    # Restore the vertical offset for the caller's level
    plotTree.yoff = plotTree.yoff + 1.0 / plotTree.totalD
def createPlot(inTree):
    # Render a complete decision tree.  This one-argument version
    # overrides the zero-argument demo createPlot() defined above.
    fig = plt.figure(1, facecolor='white')
    fig.clf()
    axprops = dict(xticks=[], yticks=[])
    createPlot.ax1 = plt.subplot(111, frameon=False, **axprops)
    # Layout bookkeeping shared with plotTree() via function attributes
    plotTree.totalW = float(getNumLeafs(inTree))
    plotTree.totalD = float(getTreeDepth(inTree))
    plotTree.xoff = - 0.5 / plotTree.totalW
    plotTree.yoff = 1.0
    plotTree(inTree, (0.5, 1.0), '')
    plt.show()
| qiyuangong/Machine_Learning_in_Action_QYG | 03_ID3/treePlotter.py | Python | mit | 3,229 |
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======

# Build the JSON-RPC proxy, embedding user:pass in the URL when configured.
if rpcpass == "":
    access = ServiceProxy("http://127.0.0.1:9332")
else:
    access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:9332")
# Dispatch on the first CLI argument; each branch prompts interactively
# for the remaining parameters and forwards them to the JSON-RPC proxy.
# Optional parameters use nested try blocks: the call with the optional
# arguments is attempted first, then the bare call as a fallback.
cmd = sys.argv[1].lower()

if cmd == "backupwallet":
    try:
        path = raw_input("Enter destination path/filename: ")
        print access.backupwallet(path)
    except:
        print "\n---An error occurred---\n"

elif cmd == "getaccount":
    try:
        addr = raw_input("Enter a Unitecoin address: ")
        print access.getaccount(addr)
    except:
        print "\n---An error occurred---\n"

elif cmd == "getaccountaddress":
    try:
        acct = raw_input("Enter an account name: ")
        print access.getaccountaddress(acct)
    except:
        print "\n---An error occurred---\n"

elif cmd == "getaddressesbyaccount":
    try:
        acct = raw_input("Enter an account name: ")
        print access.getaddressesbyaccount(acct)
    except:
        print "\n---An error occurred---\n"

elif cmd == "getbalance":
    try:
        acct = raw_input("Enter an account (optional): ")
        mc = raw_input("Minimum confirmations (optional): ")
        try:
            print access.getbalance(acct, mc)
        except:
            print access.getbalance()
    except:
        print "\n---An error occurred---\n"

elif cmd == "getblockbycount":
    try:
        height = raw_input("Height: ")
        print access.getblockbycount(height)
    except:
        print "\n---An error occurred---\n"

elif cmd == "getblockcount":
    try:
        print access.getblockcount()
    except:
        print "\n---An error occurred---\n"

elif cmd == "getblocknumber":
    try:
        print access.getblocknumber()
    except:
        print "\n---An error occurred---\n"

elif cmd == "getconnectioncount":
    try:
        print access.getconnectioncount()
    except:
        print "\n---An error occurred---\n"

elif cmd == "getdifficulty":
    try:
        print access.getdifficulty()
    except:
        print "\n---An error occurred---\n"

elif cmd == "getgenerate":
    try:
        print access.getgenerate()
    except:
        print "\n---An error occurred---\n"

elif cmd == "gethashespersec":
    try:
        print access.gethashespersec()
    except:
        print "\n---An error occurred---\n"

elif cmd == "getinfo":
    try:
        print access.getinfo()
    except:
        print "\n---An error occurred---\n"

elif cmd == "getnewaddress":
    try:
        acct = raw_input("Enter an account name: ")
        try:
            print access.getnewaddress(acct)
        except:
            print access.getnewaddress()
    except:
        print "\n---An error occurred---\n"

elif cmd == "getreceivedbyaccount":
    try:
        acct = raw_input("Enter an account (optional): ")
        mc = raw_input("Minimum confirmations (optional): ")
        try:
            print access.getreceivedbyaccount(acct, mc)
        except:
            print access.getreceivedbyaccount()
    except:
        print "\n---An error occurred---\n"

elif cmd == "getreceivedbyaddress":
    try:
        addr = raw_input("Enter a Unitecoin address (optional): ")
        mc = raw_input("Minimum confirmations (optional): ")
        try:
            print access.getreceivedbyaddress(addr, mc)
        except:
            print access.getreceivedbyaddress()
    except:
        print "\n---An error occurred---\n"

elif cmd == "gettransaction":
    try:
        txid = raw_input("Enter a transaction ID: ")
        print access.gettransaction(txid)
    except:
        print "\n---An error occurred---\n"

elif cmd == "getwork":
    try:
        data = raw_input("Data (optional): ")
        # NOTE(review): these calls invoke gettransaction, not getwork --
        # presumably a copy/paste slip from the branch above; confirm and
        # change to access.getwork(...) if so.
        try:
            print access.gettransaction(data)
        except:
            print access.gettransaction()
    except:
        print "\n---An error occurred---\n"

elif cmd == "help":
    try:
        cmd = raw_input("Command (optional): ")
        try:
            print access.help(cmd)
        except:
            print access.help()
    except:
        print "\n---An error occurred---\n"

elif cmd == "listaccounts":
    try:
        mc = raw_input("Minimum confirmations (optional): ")
        try:
            print access.listaccounts(mc)
        except:
            print access.listaccounts()
    except:
        print "\n---An error occurred---\n"

elif cmd == "listreceivedbyaccount":
    try:
        mc = raw_input("Minimum confirmations (optional): ")
        incemp = raw_input("Include empty? (true/false, optional): ")
        try:
            print access.listreceivedbyaccount(mc, incemp)
        except:
            print access.listreceivedbyaccount()
    except:
        print "\n---An error occurred---\n"

elif cmd == "listreceivedbyaddress":
    try:
        mc = raw_input("Minimum confirmations (optional): ")
        incemp = raw_input("Include empty? (true/false, optional): ")
        try:
            print access.listreceivedbyaddress(mc, incemp)
        except:
            print access.listreceivedbyaddress()
    except:
        print "\n---An error occurred---\n"

elif cmd == "listtransactions":
    try:
        acct = raw_input("Account (optional): ")
        count = raw_input("Number of transactions (optional): ")
        frm = raw_input("Skip (optional):")
        try:
            print access.listtransactions(acct, count, frm)
        except:
            print access.listtransactions()
    except:
        print "\n---An error occurred---\n"

elif cmd == "move":
    try:
        frm = raw_input("From: ")
        to = raw_input("To: ")
        amt = raw_input("Amount:")
        mc = raw_input("Minimum confirmations (optional): ")
        comment = raw_input("Comment (optional): ")
        try:
            print access.move(frm, to, amt, mc, comment)
        except:
            print access.move(frm, to, amt)
    except:
        print "\n---An error occurred---\n"

elif cmd == "sendfrom":
    try:
        frm = raw_input("From: ")
        to = raw_input("To: ")
        amt = raw_input("Amount:")
        mc = raw_input("Minimum confirmations (optional): ")
        comment = raw_input("Comment (optional): ")
        commentto = raw_input("Comment-to (optional): ")
        try:
            print access.sendfrom(frm, to, amt, mc, comment, commentto)
        except:
            print access.sendfrom(frm, to, amt)
    except:
        print "\n---An error occurred---\n"

elif cmd == "sendmany":
    try:
        frm = raw_input("From: ")
        to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
        mc = raw_input("Minimum confirmations (optional): ")
        comment = raw_input("Comment (optional): ")
        try:
            print access.sendmany(frm,to,mc,comment)
        except:
            print access.sendmany(frm,to)
    except:
        print "\n---An error occurred---\n"

elif cmd == "sendtoaddress":
    try:
        to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
        amt = raw_input("Amount:")
        comment = raw_input("Comment (optional): ")
        commentto = raw_input("Comment-to (optional): ")
        try:
            print access.sendtoaddress(to,amt,comment,commentto)
        except:
            print access.sendtoaddress(to,amt)
    except:
        print "\n---An error occurred---\n"

elif cmd == "setaccount":
    try:
        addr = raw_input("Address: ")
        acct = raw_input("Account:")
        print access.setaccount(addr,acct)
    except:
        print "\n---An error occurred---\n"

elif cmd == "setgenerate":
    try:
        gen= raw_input("Generate? (true/false): ")
        cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
        try:
            print access.setgenerate(gen, cpus)
        except:
            print access.setgenerate(gen)
    except:
        print "\n---An error occurred---\n"

elif cmd == "settxfee":
    try:
        amt = raw_input("Amount:")
        print access.settxfee(amt)
    except:
        print "\n---An error occurred---\n"

elif cmd == "stop":
    try:
        print access.stop()
    except:
        print "\n---An error occurred---\n"

elif cmd == "validateaddress":
    try:
        addr = raw_input("Address: ")
        print access.validateaddress(addr)
    except:
        print "\n---An error occurred---\n"

elif cmd == "walletpassphrase":
    try:
        pwd = raw_input("Enter wallet passphrase: ")
        # Unlock the wallet for 60 seconds
        access.walletpassphrase(pwd, 60)
        print "\n---Wallet unlocked---\n"
    except:
        print "\n---An error occurred---\n"

elif cmd == "walletpassphrasechange":
    try:
        pwd = raw_input("Enter old wallet passphrase: ")
        pwd2 = raw_input("Enter new wallet passphrase: ")
        access.walletpassphrasechange(pwd, pwd2)
        print
        print "\n---Passphrase changed---\n"
    except:
        print
        print "\n---An error occurred---\n"
        print

else:
    print "Command not found or not supported"
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
This module will define useful objects for conditional analysis
"""
import collections
import numpy as np
import pandas as pd
from tunacell.base.datatools import Coordinates
# define an object to handle heterogeneous types of time series
class TimeSeries(object):
"""Object that decorates the data with other useful attributes.
Parameters
----------
ts : :class:`Coordinates` instance, or 2d structured ndarray
better to use Coordinates, so that names can be carried along
ids : sequence of cell identifiers from which data was collected
index_cycles : sequence of couples (index_first, index_last)
that delimit data corresponding to cell id, must be same length as ids
slices : sequence of slice objects
each item can be used to slice the entire table
time_bounds : sequence of couples of floats
for each cell, first element is the lower bound of cell cycle, the
second element is the upper bound of cell cycle, must be same length
as ids
select_ids : sequences of True/False values corresponding whether or
not to include data from cell id in timeseries, must be same length as
ids
"""
def __init__(self, ts=[], ids=[], index_cycles=[], slices=None,
time_bounds=[], select_ids={}, container_label=None,
experiment_label=None):
# ts is a Coordinates instance
self.container_label = container_label
self.experiment_label = experiment_label
if isinstance(ts, Coordinates):
self._timeseries = ts
# ts is a numpy array (structured if possible)
elif isinstance(ts, np.ndarray):
# convert structured arrays to 2d ndarrays
if ts.dtype.names is not None:
_arr = ts.view((float, len(ts.dtype.names)))
_x_name, _y_name = ts.dtype.names[:2] # take only first 2 cols
else:
_arr = ts
_x_name, _y_name = 'x', 'y'
_x = _arr[:, 0]
_y = _arr[:, 1]
self._timeseries = Coordinates(_x, _y,
x_name=_x_name, y_name=_y_name)
# ... list of couples
elif isinstance(ts, collections.Iterable):
_ts = list(ts)
_x, _y = map(np.array, zip(*_ts))
self._timeseries = Coordinates(_x, _y)
self.time_bounds = time_bounds
self.slices = []
if index_cycles: # array indices corresponding to (first, last) frame for each cell
self.index_cycles = index_cycles
slices = []
for item in index_cycles:
if item is None:
slices.append(None)
# indices are reported as a single None
# when no data is reported for a given cell
else:
i, j = item
if j is not None:
slices.append(slice(i, j+1))
else:
slices.append(slice(i, None))
self.slices = slices
elif slices is not None:
self.slices = slices
index_cycles = []
for item in slices:
if item is None:
index_cycles.append(None)
else:
if item.stop is not None:
index_cycles.append((item.start, item.stop - 1))
else:
index_cycles.append((item.start, None))
self.index_cycles = index_cycles
self.ids = ids
if len(select_ids.keys()) > 0: # master is already defined
self.selections = select_ids
else: # nothing is defined, we define master here
self.selections = {'master': [True for _ in self.ids]}
return
def use_condition(self, condition_label='master',
sharp_tleft=None, sharp_tright=None):
"""Get conditioned timeseries.
Parameter
---------
condition_label : str (default 'master')
must be a key of dictionary self.selections, and corresponds to
the repr of a given :class:`FilterSet` instance.
sharp_left : float (default None)
sharp lower bound for cell cycle timing. USE ONLY FOR CELL CYCLE
OBSERVABLES
sharp_right : float (default None)
sharp upper bound for cell cycle timing. USE ONLY FOR CELL CYCLE
OBSERVABLES
Returns
-------
Coordinates instance made of valid (x, y) points
"""
selection = self.selections[condition_label]
xs, ys = [], []
for index, cid in enumerate(self.ids):
if selection[index] and self.slices[index] is not None:
if sharp_tleft is not None:
if self.time_bounds[index][0] < sharp_tleft:
continue
if sharp_tright is not None:
if self.time_bounds[index][1] > sharp_tright:
continue
xs.append(self.timeseries.x[self.slices[index]])
ys.append(self.timeseries.y[self.slices[index]])
if len(xs) > 0:
_x = np.concatenate(xs)
_y = np.concatenate(ys)
else:
_x = []
_y = []
out = Coordinates(_x, _y, x_name=self.timeseries.x_name,
y_name=self.timeseries.y_name)
return out
@property
def timeseries(self):
return self._timeseries
#
# @timeseries.setter
# def timeseries(self, ts):
# self._timeseries = ts
# def __getitem__(self, key):
# return self.timeseries[key]
def __repr__(self):
return repr(self.timeseries)
def as_text(self, sep='\t', cell_sep='\n', print_labels=False):
"""Export TimeSeries as text arrays
Parameters
----------
sep : str (default '\t')
how to separate columns
cell_sep : str (default '\n')
how to separate cells (default: one blank line)
print_labels : bool {False, True}
first line is labels, followed by empty line
"""
printout = ''
labels = [self.timeseries.x_name,
self.timeseries.y_name,
'cellID',
'containerID',
'experimentID']
if print_labels and labels is not None:
printout += '\t'.join(labels) + '\n'
printout += '\n'
for index, sl in enumerate(self.slices):
chunk = ''
x = self.timeseries.x[sl]
y = self.timeseries.y[sl]
ids = len(x) * [self.ids[index]]
container_id = len(x) * [self.container_label, ]
exp_id = len(x) * [self.experiment_label, ]
for line in zip(x, y, ids, container_id, exp_id):
chunk += '{}'.format(sep).join(['{}'.format(item) for item in line]) + '\n'
printout += chunk
printout += cell_sep
return printout.lstrip().rstrip() # remove empty lines at beginning/end
    def to_dataframe(self, start_index=0, sharp_tleft=None, sharp_tright=None):
        """Export the sample as a pandas DataFrame.

        Parameters
        ----------
        start_index : int (default 0)
            first row label of the resulting frame
        sharp_tleft : float (default None)
            cells whose lower time bound is earlier than this are excluded
        sharp_tright : float (default None)
            cells whose upper time bound is later than this are excluded

        Returns
        -------
        pandas.DataFrame
            one row per sample with x/y values, cell/container/experiment
            identifiers, and one boolean column per non-'master' selection
        """
        dic = {}
        dic[self.timeseries.x_name] = []  # will collect self.timeseries.x
        dic[self.timeseries.y_name] = []  # will collect self.timeseries.y
        dic['cellID'] = []
        dic['containerID'] = []
        dic['experimentID'] = []
        # one boolean column per condition; 'master' is all True so skipped
        for key in self.selections.keys():
            if key == 'master':
                continue
            dic[key] = []
        size = 0
        # add cell ID, container ID, experiment ID, and TRUE/FALSE for each cdt
        for index, sl in enumerate(self.slices):
            # collect only if within bounds
            if sharp_tleft is not None:
                if self.time_bounds[index][0] < sharp_tleft:
                    continue
            if sharp_tright is not None:
                if self.time_bounds[index][1] > sharp_tright:
                    continue
            _x = self.timeseries.x[sl]
            _y = self.timeseries.y[sl]
            dic[self.timeseries.x_name].extend(_x)
            dic[self.timeseries.y_name].extend(_y)
            # repeat identifiers once per sample so rows stay aligned
            dic['cellID'].extend(len(_x) * [self.ids[index], ])
            dic['containerID'].extend(len(_x) * [self.container_label, ])
            dic['experimentID'].extend(len(_x) * [self.experiment_label, ])
            # True/False for each
            for key, values in self.selections.items():
                # master: all True, useless to printout
                if key == 'master':
                    continue
                val = values[index]
                dic[key].extend(len(_x) * [val, ])
            size += len(_x)
        df = pd.DataFrame(dic, index=range(start_index, start_index + size))
        return df
| LeBarbouze/tunacell | tunacell/base/timeseries.py | Python | mit | 8,945 |
import _plotly_utils.basevalidators
class TemplateitemnameValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the ``templateitemname`` property of
    ``histogram2d.colorbar.tickformatstop``.
    """

    def __init__(
        self,
        plotly_name="templateitemname",
        parent_name="histogram2d.colorbar.tickformatstop",
        **kwargs
    ):
        # Colorbar properties re-render with the 'colorbars' edit type.
        edit_type = kwargs.pop("edit_type", "colorbars")
        super(TemplateitemnameValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs
        )
| plotly/plotly.py | packages/python/plotly/plotly/validators/histogram2d/colorbar/tickformatstop/_templateitemname.py | Python | mit | 494 |
# -*- coding: utf-8 -*-
import typing as t
from PyQt5.QtCore import (
Qt,
QObject,
QSortFilterProxyModel,
QAbstractTableModel,
QModelIndex,
QVariant,
)
from . import utils
from . import settings
COLUMN_DATE = 0
COLUMN_DURATION = 1
class RecordsSortProxyModel(QSortFilterProxyModel):
    """Proxy that sorts record rows by their raw values.

    Comparison uses the record dicts exposed through ``Qt.UserRole``
    rather than the formatted display strings.
    """

    def lessThan(self, left: QModelIndex, right: QModelIndex) -> bool:
        source = self.sourceModel()
        lhs = source.data(left, Qt.UserRole)
        rhs = source.data(right, Qt.UserRole)
        column = left.column()
        if column == COLUMN_DATE:
            key = 'timestamp'
        elif column == COLUMN_DURATION:
            key = 'duration'
        else:
            # Unknown column: keep the existing order.
            return False
        return lhs[key] < rhs[key]
class RecordsTableModel(QAbstractTableModel):
    """Records table model

    Two-column (date, duration) table model over a list of record dicts.
    The model using TinyDB for storing records.
    """
    def __init__(self, parent: t.Optional[QObject] = None):
        super().__init__(parent)
        self._records = []       # list of record dicts currently displayed
        self._record_count = 0   # cached len(self._records)
        self._settings = settings.Settings(self)
        # Date format used when rendering the timestamp column.
        self._table_dt_format = self._settings.get_record_table_datetime_format()
    def set_records(self, records: list):
        """Replace the displayed records, resetting any attached views."""
        self.beginResetModel()
        self._records = records
        self._record_count = len(records)
        self.endResetModel()
    def rowCount(self, parent_index: QModelIndex = QModelIndex()) -> int:
        return self._record_count
    def columnCount(self, parent_index: QModelIndex = QModelIndex()) -> int:
        # Fixed layout: date and duration.
        return 2
    def headerData(self, section: int, orientation: Qt.Orientation, role: int = Qt.DisplayRole) -> t.Any:
        if role != Qt.DisplayRole:
            return QVariant()
        if orientation == Qt.Horizontal:
            return {
                COLUMN_DATE: self.tr('Date'),
                COLUMN_DURATION: self.tr('Duration'),
            }.get(section, QVariant())
        if orientation == Qt.Vertical:
            # Row headers are 1-based row numbers.
            return str(section + 1)
        return QVariant()
    def data(self, index: QModelIndex, role: int = Qt.DisplayRole) -> t.Any:
        if not index.isValid():
            return QVariant()
        row = index.row()
        column = index.column()
        if row < 0 or row >= self._record_count:
            return QVariant()
        record = self._records[row]
        if role == Qt.DisplayRole:
            # Formatted strings for display.
            record_date = utils.format_timestamp(
                record['timestamp'], self._table_dt_format)
            record_duration = utils.format_duration(record['duration'])
            return {
                COLUMN_DATE: record_date,
                COLUMN_DURATION: record_duration,
            }.get(column, QVariant())
        if role == Qt.UserRole:
            # Raw record dict (used e.g. by the sort proxy).
            return record
        return QVariant()
| espdev/VoiceRecorder | voicerecorder/recordsmodel.py | Python | mit | 2,817 |
class CSharpReference():
    """Reference to a C# symbol: the resolved object plus where it occurs."""

    def __init__(self):
        self.reference_object = None  # resolved target, if any
        self.line_in_file = -1        # line within file_name; -1 while unset
        self.file_name = ''           # path of the file holding the reference
| adrianogil/SublimeUnityIntel | unityparser/csharp/csharp_reference.py | Python | mit | 147 |
# Sort intervals by start time; Python compares lists lexicographically,
# so ties on the start fall back to the end time.
intervals = [[10, 20], [6, 15], [0, 22]]
print(sorted(intervals))
from conans import ConanFile, AutoToolsBuildEnvironment, tools
import os
class LibHaruConn(ConanFile):
    """Conan recipe building libharu 2.3.0 from the GitHub release tarball."""
    name = "libharu"
    version = "2.3.0"
    license = "ZLIB https://github.com/libharu/libharu/blob/master/LICENCE"
    url = "https://github.com/trigger-happy/conan-packages"
    description = "C library for generating PDF documents"
    settings = "os", "compiler", "build_type", "arch"
    options = {"shared": [True, False]}
    default_options = "shared=False"
    generators = "cmake"
    def source(self):
        # -J honours the server's filename, -O writes to disk, -L follows
        # redirects; the tarball unpacks into libharu-RELEASE_2_3_0/.
        pkgLink = 'https://github.com/libharu/libharu/archive/RELEASE_2_3_0.tar.gz'
        self.run("curl -JOL " + pkgLink)
        self.run("tar xf libharu-RELEASE_2_3_0.tar.gz")
    def build(self):
        env_build = AutoToolsBuildEnvironment(self)
        # Install into the build folder so package() can pick files up.
        install_prefix=os.getcwd()
        with tools.chdir("libharu-RELEASE_2_3_0"):
            with tools.environment_append(env_build.vars):
                # Regenerate the autotools files shipped incomplete in the
                # release tarball before configuring.
                self.run("touch include/config.h.in")
                self.run("aclocal")
                self.run("libtoolize")
                self.run("automake --add-missing")
                self.run("autoconf")
                self.run("./configure --prefix={0}".format(install_prefix))
                self.run("make install")
    def package(self):
        self.copy("lib/*", dst="lib", keep_path=False)
        self.copy("include/*", dst=".", keep_path=True)
    def package_info(self):
        # Consumers link against libhpdf.
        self.cpp_info.libs = ["hpdf"]
| trigger-happy/conan-packages | libharu/conanfile.py | Python | mit | 1,457 |
import sublime
import sublime_plugin
# Settings key under which the last-used jump pattern is remembered.
OPTIONS_LAST_REGEX = "jump_caret_last_regex"
class CaretJumpCommand(sublime_plugin.TextCommand):
    """Move every caret to the next occurrence of a regex.

    Without arguments, prompts for a pattern (previewing matches while
    typing). With ``jump_to`` jumps immediately; with
    ``repeat_previous_jump`` reuses the last stored pattern.
    """
    def run(self, edit, jump=True, jump_to=None, repeat_previous_jump=False):
        view = self.view
        def get_next_sels(user_input):
            # Next case-insensitive match after each current selection.
            new_sels = []
            for sel in view.sel():
                next_sel = view.find(user_input, sel.end(), sublime.IGNORECASE)
                if next_sel.begin() != -1:
                    new_sels.append(next_sel)
            return new_sels
        def jump_last_regex():
            last_reg = self.view.settings().get(OPTIONS_LAST_REGEX)
            if last_reg:
                select_next_regex(last_reg)
        def select_next_regex(user_input):
            view.erase_regions("caret_jump_preview")
            if not user_input:
                # jump_last_regex()
                return
            # Remember the pattern for repeat_previous_jump.
            self.view.settings().set(OPTIONS_LAST_REGEX, user_input)
            new_sels = get_next_sels(user_input)
            if jump and new_sels:
                view.sel().clear()
                view.sel().add_all(new_sels)
        def input_changed(user_input):
            # Live preview of where the carets would land.
            new_sels = get_next_sels(user_input)
            view.add_regions("caret_jump_preview",
                             new_sels,
                             "source, text",
                             "dot",
                             sublime.DRAW_OUTLINED)
        def input_canceled():
            view.erase_regions("caret_jump_preview")
        selection = view.substr(view.sel()[0]) if view.sel() else ""
        if jump_to:
            select_next_regex(jump_to)
        elif repeat_previous_jump:
            jump_last_regex()
        else:
            # Pre-fill the prompt with the current selection, falling back
            # to the previously used pattern.
            default = selection if selection \
                else self.view.settings().get(OPTIONS_LAST_REGEX, "")
            # Fixed typo in the prompt caption ("Seach for").
            view.window().show_input_panel("Search for",
                                           default,
                                           select_next_regex,
                                           input_changed,
                                           input_canceled)
| r-stein/sublime-text-caret-jump | caret_jump.py | Python | mit | 2,134 |
import argparse
import glob
import re
import cPickle as pickle
from dicom.sequence import Sequence
from log import print_to_file
from paths import LOGS_PATH, TRAIN_DATA_PATH, TEST_DATA_PATH
def read_slice(path):
    """Load the image data stored under 'data' in a pickled slice file."""
    # Use a context manager and binary mode: the original left the file
    # handle open and relied on Python 2 text-mode pickling.
    with open(path, 'rb') as f:
        return pickle.load(f)['data']
def convert_to_number(value):
    """Coerce *value* to int or float when possible, else return it as str.

    Values containing a '.' are parsed as floats, everything else as ints;
    non-numeric input is returned unchanged (as its string form).
    """
    value = str(value)
    try:
        # Narrowed from a bare except: only the failed numeric parse is
        # expected here, anything else should propagate.
        return float(value) if "." in value else int(value)
    except ValueError:
        return value
def clean_metadata(metadatadict):
    """Normalise raw DICOM metadata values in place and return the dict."""
    # Do cleaning
    keys = sorted(list(metadatadict.keys()))
    for key in keys:
        value = metadatadict[key]
        if key == 'PatientAge':
            # e.g. '056Y' -> 56: strip the trailing unit letter.
            metadatadict[key] = int(value[:-1])
        elif key == 'PatientSex':
            # Bug fix: this was a separate `if`, so its `else` branch ran for
            # PatientAge too and clobbered the converted age with the raw
            # string again.
            metadatadict[key] = 1 if value == 'F' else -1
        else:
            if isinstance(value, Sequence):
                # convert pydicom Sequence to a plain list
                value = [i for i in value]
            if isinstance(value, (list,)):
                metadatadict[key] = [convert_to_number(i) for i in value]
            else:
                metadatadict[key] = convert_to_number(value)
    return metadatadict
def read_metadata(path):
    """Read and clean the first metadata record from a pickled slice file."""
    # Context manager + binary mode (the original leaked the file handle).
    with open(path, 'rb') as f:
        d = pickle.load(f)['metadata'][0]
    metadata = clean_metadata(d)
    return metadata
def get_patient_data(patient_data_path):
    """Collect (metadata, slice_id) entries for one patient's study folder.

    Returns the list of per-slice dicts and the patient id parsed from the
    '<pid>/study' path. Note: this module is Python 2 (print statements).
    """
    patient_data = []
    # Sort slice files numerically by the index embedded in the filename.
    spaths = sorted(glob.glob(patient_data_path + r'/*.pkl'),
                    key=lambda x: int(re.search(r'/*_(\d+)\.pkl$', x).group(1)))
    pid = re.search(r'/(\d+)/study$', patient_data_path).group(1)
    for s in spaths:
        # slice id, e.g. 'sax_12.pkl', '2ch_3.pkl', '4ch_1.pkl'
        slice_id = re.search(r'/(((4ch)|(2ch)|(sax))_\d+\.pkl)$', s).group(1)
        metadata = read_metadata(s)
        patient_data.append({'metadata': metadata,
                             'slice_id': slice_id})
        print slice_id
    return patient_data, pid
def get_metadata(data_path):
    """Build {pid: {slice_id: metadata}} for every patient under *data_path*
    and pickle it to '<dataset>_metadata.pkl' in the working directory."""
    patient_paths = sorted(glob.glob(data_path + '*/study'))
    metadata_dict = {}
    for p in patient_paths:
        patient_data, pid = get_patient_data(p)
        print "patient", pid
        metadata_dict[pid] = dict()
        for pd in patient_data:
            metadata_dict[pid][pd['slice_id']] = pd['metadata']
    # Name the output after the dataset folder, e.g. 'train_metadata.pkl'.
    filename = data_path.split('/')[-2] + '_metadata.pkl'
    with open(filename, 'w') as f:
        pickle.dump(metadata_dict, f)
    print 'saved to ', filename
    return metadata_dict
if __name__ == '__main__':
    # Argument parsing is currently a stub; no options are defined.
    parser = argparse.ArgumentParser(description=__doc__)
    required = parser.add_argument_group('required arguments')
    #required.add_argument('-c', '--config',
    #                      help='configuration to run',
    #                      required=True)
    args = parser.parse_args()
    # Generate metadata pickles for both the train and test sets, capturing
    # stdout into the log file.
    data_paths = [TRAIN_DATA_PATH, TEST_DATA_PATH]
    log_path = LOGS_PATH + "generate_metadata.log"
    with print_to_file(log_path):
        for d in data_paths:
            get_metadata(d)
        print "log saved to '%s'" % log_path
| 317070/kaggle-heart | generate_metadata_pkl.py | Python | mit | 2,940 |
#!/usr/bin/env python
'''
Based on:
Gilson, Matthieu, Tomoki Fukai, and Anthony N. Burkitt.
Spectral Analysis of Input Spike Trains by Spike-Timing-Dependent Plasticity.
PLoS Comput Biol 8, no. 7 (July 5, 2012): e1002584. doi:10.1371/journal.pcbi.1002584.
Author: Aditya Gilra, Jun 2016. (with inputs from Matthieu Gilson)
in Brian2rc3 for CAMP 2016.
'''
#import modules and functions to be used
from brian2 import * # importing brian also does:
# 'from pylab import *' which imports:
# matplot like commands into the namespace, further
# also can use np. for numpy and mpl. for matplotlib
# Standalone C++ code generation is fastest; the alternative targets below
# run the simulation directly from Python.
stand_alone = True
if stand_alone: set_device('cpp_standalone', build_on_run=False)
else:
    #prefs.codegen.target = 'numpy'
    prefs.codegen.target = 'weave'
    #prefs.codegen.target = 'cython'
import random
import time
# http://stackoverflow.com/questions/31057197/should-i-use-random-seed-or-numpy-random-seed-to-control-random-number-gener
np.random.seed(0) # set seed for reproducibility of simulations
random.seed(0) # set seed for reproducibility of simulations
# ###########################################
# Simulation parameters
# ###########################################
simdt = 0.1*ms
simtime = 100.0*second
defaultclock.dt = simdt # set Brian's sim time step
simdtraw = simdt/second # convert to value in seconds
# ###########################################
# Neuron model
# ###########################################
taudelay = 0.75*ms # synaptic delay
tauA = 1*ms # synaptic epsp tauA
tauB = 5*ms # synaptic epsp tauB
# Difference of exponentials gives the output firing rate rho_out.
eqs_neurons='''
dA/dt=-A/tauA : 1
dB/dt=-B/tauB : 1
rho_out = (A-B)/(tauA-tauB) : Hz
'''
# ###########################################
# Network parameters: numbers
# ###########################################
Npools = 4 # Number of correlated pools
Ninp = 500 # Number of neurons per pool
nu0 = 10*Hz # spiking rate of inputs
# ###########################################
# Network parameters: synapses
# ###########################################
# Mixing matrix: rows are the 3 driving trains, columns the 4 input pools.
Q = array([[sqrt(0.4),sqrt(0.1),0.,0.],\
           [0.,sqrt(0.2),sqrt(0.2),0.],\
           [0.,0.,sqrt(0.1),sqrt(0.1)]])
corr = dot(transpose(Q),Q)
print "Correlation matrix between pools is\n",corr
# ###########################################
# Network parameters: synaptic plasticity
# ###########################################
eta = 2e-4 # learning rate (as in paper)
Apre_tau = 17*ms # STDP Apre (LTP) time constant
Apost_tau = 34*ms # STDP Apost (LTD) time constant
stdp_eqns = ''' w : 1
dApre/dt=-Apre/Apre_tau : 1 (event-driven)
dApost/dt=-Apost/Apost_tau : 1 (event-driven)
'''
Apre0 = 1.0 # incr in Apre (LTP), on pre-spikes;
            # at spike coincidence, delta w = -Apre0*eta
Apost0 = 0.55 # incr in Apost (LTD) on post spike
wmax = 0.04 # max weight (hard bound)
winit = wmax/2. # initial weights are from 0 to winit
w0 = wmax/2.
pre_eqns = 'Apre+=Apre0; w+=-eta*Apost;'\
           ' w=clip(w,0,wmax)'
post_eqns = 'Apost+=Apost0; w += eta*Apre;'\
           ' w=clip(w,0,wmax)'
# ###########################################
# Initialize neuron (sub)groups
# ###########################################
# post-synaptic neuron
P=NeuronGroup(1,model=eqs_neurons,\
        threshold='rand()<rho_out*dt',method='euler')
# ###########################################
# Stimuli
# ###########################################
#inputs rates for absence of correlated events such that all neurons have same firing rate F
baseline_input_rates = np.zeros(Npools*Ninp)*Hz
for i_gp in range(Npools):
    baseline_input_rates[i_gp*Ninp:(i_gp+1)*Ninp] = nu0*(1.-Q[:,i_gp].sum())
    print baseline_input_rates[i_gp*Ninp]
# 3 driving spike trains
Pinp0=NeuronGroup(3,model='spikerate : Hz',\
        threshold='rand()<spikerate*dt')
Pinp0.spikerate = nu0
Pinps = [Pinp0[:1],Pinp0[1:2],Pinp0[2:]]
# Npools number of Ninp spike trains
Pinp1=NeuronGroup(Npools*Ninp,model='spikerate : Hz',\
        threshold='rand()<spikerate*dt')
Pinp1.spikerate = baseline_input_rates
Ppools = []
for k in range(Npools):
    Ppools.append(Pinp1[k*Ninp:(k+1)*Ninp])
inpconns = []
def correlate_spike_trains(PR,P,l,csqrt):
    # A driver spike transiently boosts the pool's rate by csqrt/dt for one
    # time step; a second, one-step-delayed synapse resets it to baseline.
    con = Synapses(PR,P,'',on_pre='spikerate+='+str(csqrt)+'/dt')
    con.connect(True)
    con.delay = 0.*ms
    con1 = Synapses(PR,P,'',on_pre='spikerate='+str(baseline_input_rates[l*Ninp]/Hz)+'*Hz')
    con1.connect(True)
    con1.delay = simdt
    inpconns.append((con,con1))   # keep references so net.add() can see them
# Wire each driver train to the pools it correlates, per the Q matrix.
for k in range(3):
    for l in range(Npools):
        if Q[k,l]!=0.:
            correlate_spike_trains(Pinps[k],Ppools[l],l,Q[k,l])
# ###########################################
# Connecting the network
# ###########################################
# All-to-one plastic synapses from the input pools onto the output neuron.
con = Synapses(Pinp1,P,stdp_eqns,\
        on_pre='A+=w*0.05;B+=w*0.05;'+pre_eqns,on_post=post_eqns,
        method='euler')
con.connect(True)
con.delay = uniform(size=(Npools*Ninp,))*1.*ms + 4.*ms
con.w = uniform(size=(Npools*Ninp,))*2*winit
# ###########################################
# Setting up monitors
# ###########################################
sm = SpikeMonitor(P)
sminp1 = SpikeMonitor(Pinp1)
# Population monitor
popm = PopulationRateMonitor(P)
popminp1 = PopulationRateMonitor(Pinp1)
# voltage monitor
sm_rho = StateMonitor(P,'rho_out',record=[0])
# weights monitor
wm = StateMonitor(con,'w',record=range(Npools*Ninp), dt=1*second)
# ###########################################
# Simulate
# ###########################################
# a simple run would not include the monitors
net = Network(collect()) # collects Brian2 objects in current context
net.add(inpconns)
print "Setup complete, running for",simtime,"at dt =",simdtraw,"s."
t1 = time.time()
net.run(simtime,report='text')
# In standalone mode the actual simulation happens here, at build time.
device.build(directory='output', compile=True, run=True, debug=False)
# ###########################################
# Make plots
# ###########################################
# always convert spikemon.t and spikemon.i to array-s before indexing
# spikemon.i[] indexing is extremely slow!
spiket = array(sm.t/second) # take spiketimes of all neurons
spikei = array(sm.i)
fig = figure()
cols = ['r','b','g','c']    # one colour per input pool
# raster plot
subplot(231)
plot(sminp1.t/second,sminp1.i,',')
xlim([0,1])
xlabel("time (s)")
# weight evolution
subplot(232)
meanpoolw = []
for k in range(Npools):
    # pool-averaged weight trace, normalised by w0
    meanpoolw.append(mean(wm.w[k*Ninp:(k+1)*Ninp,:],axis=0)/w0)
    plot(wm.t/second,meanpoolw[-1],'-'+cols[k],lw=(4-k))
xlabel("time (s)")
ylabel("PCA-weight")
title('weight evolution')
# plot output firing rate sm_rho.rho_out[nrn_idx,time_idx]
subplot(234)
plot(sm_rho.t/second,sm_rho.rho_out[0]/Hz,'-')
xlim([0,simtime/second])
xlabel("")
# plot final weights wm.w[syn_idx,time_idx]
subplot(233)
plot(range(Npools*Ninp),wm.w[:,-1],'.')
for k in range(Npools):
    meanpoolw_end = mean(wm.w[k*Ninp:(k+1)*Ninp,-1])
    plot([k*Ninp,(k+1)*Ninp],[meanpoolw_end,meanpoolw_end],'-'+cols[k],lw=3)
xlabel("pre-neuron #")
ylabel("weight (/w0)")
title("end wts")
# plot averaged weights over last 50s (weights are sampled per second)
subplot(236)
plot(range(Npools*Ninp),mean(wm.w[:,-50:],axis=1),'.')
for k in range(Npools):
    meanpoolw_end = mean(wm.w[k*Ninp:(k+1)*Ninp,-50:])
    plot([k*Ninp,(k+1)*Ninp],[meanpoolw_end,meanpoolw_end],'-'+cols[k],lw=3)
xlabel("pre-neuron #")
ylabel("weight (/w0)")
title("mean (50s) end wts")
fig = figure()
# plot eigenvectors of corr = Q^T Q matrix
w,v = np.linalg.eig(corr)
subplot(131)
#plot(v)
for k in range(Npools):
    plot(v[:,k],'.-'+cols[k],lw=4-k)
xlabel("pre-neuron #")
ylabel("weight (/w0)")
title('eigenvectors of corr matrix')
# weight evolution along eigenvectors of corr matrix
subplot(132)
for k in range(Npools):
    # project the pool-mean weight trajectories onto eigenvector k
    plot(wm.t/second,dot(v[:,k],meanpoolw),'-'+cols[k],lw=(4-k))
xlabel('time (s)')
ylabel("weight (/w0)")
title('weights along PCs')
subplot(133)
hist(wm.w[:,-1],bins=200)
#fig.tight_layout()
fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.1, hspace=0.1)
show()
| h-mayorquin/camp_india_2016 | tutorials/LTPinnetworks2/Step2aCorr_Gilson_etal_2012.py | Python | mit | 8,226 |
#!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2017
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
import json
import pytest
from telegram import (InlineKeyboardButton, InlineQueryResultGame,
InlineQueryResultVoice, InlineKeyboardMarkup)
@pytest.fixture(scope='class')
def inline_query_result_game():
    """Class-scoped fixture building the InlineQueryResultGame under test."""
    return InlineQueryResultGame(TestInlineQueryResultGame.id,
                                 TestInlineQueryResultGame.game_short_name,
                                 reply_markup=TestInlineQueryResultGame.reply_markup)
class TestInlineQueryResultGame:
    """Unit tests for telegram.InlineQueryResultGame."""
    # Expected attribute values shared by the fixture and the tests.
    id = 'id'
    type = 'game'
    game_short_name = 'game short name'
    reply_markup = InlineKeyboardMarkup([[InlineKeyboardButton('reply_markup')]])
    def test_expected_values(self, inline_query_result_game):
        # Constructor must store all attributes unchanged.
        assert inline_query_result_game.type == self.type
        assert inline_query_result_game.id == self.id
        assert inline_query_result_game.game_short_name == self.game_short_name
        assert inline_query_result_game.reply_markup.to_dict() == \
            self.reply_markup.to_dict()
    def test_to_json(self, inline_query_result_game):
        # Serialisation must at least produce valid JSON.
        json.loads(inline_query_result_game.to_json())
    def test_to_dict(self, inline_query_result_game):
        inline_query_result_game_dict = inline_query_result_game.to_dict()
        assert isinstance(inline_query_result_game_dict, dict)
        assert inline_query_result_game_dict['type'] == inline_query_result_game.type
        assert inline_query_result_game_dict['id'] == inline_query_result_game.id
        assert inline_query_result_game_dict['game_short_name'] == \
            inline_query_result_game.game_short_name
        assert inline_query_result_game_dict['reply_markup'] == \
            inline_query_result_game.reply_markup.to_dict()
    def test_equality(self):
        a = InlineQueryResultGame(self.id, self.game_short_name)
        b = InlineQueryResultGame(self.id, self.game_short_name)
        c = InlineQueryResultGame(self.id, "")
        d = InlineQueryResultGame("", self.game_short_name)
        e = InlineQueryResultVoice(self.id, "", "")
        assert a == b
        assert hash(a) == hash(b)
        assert a is not b
        # Same id compares equal even with a different game_short_name.
        assert a == c
        assert hash(a) == hash(c)
        # Different id: not equal.
        assert a != d
        assert hash(a) != hash(d)
        # Different result type with the same id: not equal.
        assert a != e
        assert hash(a) != hash(e)
| rogerscristo/BotFWD | env/lib/python3.6/site-packages/pytests/test_inlinequeryresultgame.py | Python | mit | 3,198 |
#!/usr/bin/env python
"""
This module has a function that matches django fields to the corresponding
random value generator.
"""
from django.db.models.fields import BigIntegerField
from django.db.models.fields import BinaryField
from django.db.models.fields import BooleanField
from django.db.models.fields import CharField
from django.db.models.fields import CommaSeparatedIntegerField
from django.db.models.fields import DateField
from django.db.models.fields import DateTimeField
from django.db.models.fields import DecimalField
from django.db.models.fields import DurationField
from django.db.models.fields import EmailField
from django.db.models.fields import FilePathField
from django.db.models.fields import FloatField
from django.db.models.fields import GenericIPAddressField
from django.db.models.fields import IntegerField
from django.db.models.fields import NullBooleanField
from django.db.models.fields import PositiveIntegerField
from django.db.models.fields import PositiveSmallIntegerField
from django.db.models.fields import SlugField
from django.db.models.fields import SmallIntegerField
from django.db.models.fields import TextField
from django.db.models.fields import TimeField
from django.db.models.fields import URLField
from django.db.models.fields import UUIDField
from values_generator import generate_big_integer
from values_generator import generate_boolean
from values_generator import generate_comma_separated_int
from values_generator import generate_date_time
from values_generator import generate_decimal
from values_generator import generate_email
from values_generator import generate_file_path
from values_generator import generate_float
from values_generator import generate_int
from values_generator import generate_ip
from values_generator import generate_positive_integer
from values_generator import generate_small_integer
from values_generator import generate_string
from values_generator import generate_text
from values_generator import generate_url
from values_generator import generate_uuid
def generate_random_values(field, size=100):
    """ Generate random values
    Generate a list of random values for a given field.
    Args:
        field : The field to get values for.
        size : The size of the output list.
    Note:
        The size of the output list might be less than 'size', if the total
        number of the possible values are less than 'size', like in Booleans.
    Returns:
        A list of random values generated for the given field.
    """
    # Deduplicate via set(), which is why fewer than `size` values may come
    # back. NOTE: xrange makes this module Python 2 only.
    return list(set([generate_random_value(field) for _ in xrange(size)]))
def generate_random_value(field):
    """ Generate random value
    Generate a random value for a given field, by matching to the corresponding
    random generator in values_generator.
    Args:
        field : The field to get values for.
    Returns:
        A random value generated for the given field.
    Note:
        The fields ImageField, FileField, BinaryField are not handled yet.
    """
    # TODO(mostafa-mahmoud): ImageField, FileField, BinaryField
    # NOTE(review): the branch order appears deliberate -- Django field
    # subclasses (e.g. EmailField/SlugField vs CharField, BigIntegerField vs
    # IntegerField) must be tested before their base classes; confirm against
    # the installed Django's field hierarchy before reordering.
    if isinstance(field, BigIntegerField):
        return generate_big_integer()
    elif isinstance(field, EmailField):
        return generate_email(field.max_length)
    elif isinstance(field, BooleanField):
        return generate_boolean(field.null)
    elif isinstance(field, CommaSeparatedIntegerField):
        return generate_comma_separated_int(field.max_length)
    elif isinstance(field, DecimalField):
        return generate_decimal(field.max_digits, field.decimal_places)
    elif isinstance(field, DateTimeField):
        return generate_date_time()
    elif isinstance(field, DateField):
        return generate_date_time().date()
    elif isinstance(field, FloatField):
        return generate_float()
    elif isinstance(field, NullBooleanField):
        return generate_boolean(null_allowed=True)
    elif isinstance(field, PositiveSmallIntegerField):
        return abs(generate_small_integer())
    elif isinstance(field, PositiveIntegerField):
        return generate_positive_integer()
    elif isinstance(field, URLField):
        return generate_url(field.max_length)
    elif isinstance(field, SlugField):
        return generate_string(field.max_length, special=['_', '-'])
    elif isinstance(field, TextField):
        return generate_text(field.max_length)
    elif isinstance(field, SmallIntegerField):
        return generate_small_integer()
    elif isinstance(field, TimeField):
        return generate_date_time().time()
    elif isinstance(field, IntegerField):
        return generate_int()
    elif isinstance(field, GenericIPAddressField):
        return generate_ip()
    elif isinstance(field, DurationField):
        # Always return a non-negative duration.
        t1 = generate_date_time()
        t2 = generate_date_time()
        if t1 < t2:
            return t2 - t1
        else:
            return t1 - t2
    elif isinstance(field, CharField):
        return generate_string(field.max_length)
    elif isinstance(field, UUIDField):
        return generate_uuid()
    elif isinstance(field, FilePathField):
        return generate_file_path()
| aelguindy/djenerator | djenerator/fields_generator.py | Python | mit | 5,111 |
"""This module provides an interface to the program Reduce.
Requires the reduce executable and reduce_wwPDB_het_dict.txt located
in a directory specified in global_settings. These can be downloaded
from: http://kinemage.biochem.duke.edu/software/reduce.php
For more information on Reduce, see [1].
References
----------
.. [1] Word, et al.(1999) Asparagine and glutamine: using hydrogen atom
contacts in the choice of sidechain amide orientation" J. Mol. Biol.
285, 1735-1747.
"""
import subprocess
import tempfile
from pathlib import Path
from settings import global_settings
def run_reduce(input_file, path=True):
    """ Runs reduce on a pdb or mmol file at the specified path.

    Notes
    -----
    Runs Reduce programme to add missing protons to a PDB file.

    Parameters
    ----------
    input_file : str
        Path to file to add protons to or structure in mmol/pdb format.
    path : bool, optional
        True if input_file is a path.

    Returns
    -------
    reduce_mmol : str
        Structure file with protons added.
    reduce_message : str
        Messages generated while running Reduce.

    Raises
    ------
    FileNotFoundError
        Raised if the executable cannot be found.
    """
    if path:
        input_path = Path(input_file)
        if not input_path.exists():
            # Bug fix: previously printed the boolean `path` flag instead of
            # the missing file's location.
            print('No file found at', input_path)
            return None, None
    else:
        # Reduce wants a file on disk, so spool the structure string to a
        # temporary file (kept alive for the duration of this call).
        pathf = tempfile.NamedTemporaryFile()
        encoded_input = input_file.encode()
        pathf.write(encoded_input)
        pathf.seek(0)
        file_path = pathf.name
        input_path = Path(file_path)
    reduce_folder = Path(global_settings['reduce']['folder'])
    reduce_exe = reduce_folder / global_settings['reduce']['path']
    reduce_dict = reduce_folder / 'reduce_wwPDB_het_dict.txt'
    try:
        reduce_output = subprocess.run(
            [str(reduce_exe), '-build', '-DB',
             str(reduce_dict), str(input_path)],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except FileNotFoundError as e:
        # Chain the original error so the failing path is not lost.
        raise FileNotFoundError(
            'The Reduce executable cannot be found. Ensure the '
            'location and filename are specified in settings.') from e
    try:
        reduced_mmol = reduce_output.stdout.decode()
    except UnicodeDecodeError:
        print("Reduce could not detect any missing protons in the protein. "
              "Using the original structure.")
        if path:
            reduced_mmol = input_path.read_text()
        else:
            reduced_mmol = input_file
    reduce_message = reduce_output.stderr.decode()
    if 'could not open' in reduce_message:
        print('Caution: the Reduce connectivity dictionary could not be '
              'found. Some protons may be missing. See notes.')
    return reduced_mmol, reduce_message
def reduce_output_path(path=None, pdb_name=None):
    """Defines location of Reduce output files relative to input files.

    Parameters
    ----------
    path : str or pathlib.Path, optional
        Input structure file; output goes into a sibling 'reduce' folder.
    pdb_name : str, optional
        PDB accession code, used with the structural database root when no
        path is given.

    Raises
    ------
    NameError
        If neither `path` nor `pdb_name` is supplied.
    """
    if not path:
        if not pdb_name:
            # Bug fix: the adjacent string literals lacked a separating
            # space, producing "...a PDBcode specified".
            raise NameError(
                "Cannot save an output for a temporary file without a PDB "
                "code specified")
        pdb_name = pdb_name.lower()
        output_path = Path(global_settings['structural_database']['path'],
                           pdb_name[1:3].lower(), pdb_name[:4].lower(),
                           'reduce', pdb_name + '_reduced.mmol')
    else:
        input_path = Path(path)
        if len(input_path.parents) > 1:
            # e.g. a/b/c.pdb -> a/reduce/c_reduced.pdb
            output_path = input_path.parents[1] / 'reduce' / \
                (input_path.stem + '_reduced' + input_path.suffix)
        else:
            # bare filename: keep it next to the input
            output_path = input_path.parent / \
                (input_path.stem + '_reduced' + input_path.suffix)
    return output_path
def output_reduce(input_file, path=True, pdb_name=None, force=False):
    """Runs Reduce on a pdb or mmol file and creates a new file with the output.

    Parameters
    ----------
    input_file : str or pathlib.Path
        Path to file to run Reduce on.
    path : bool
        True if input_file is a path.
    pdb_name : str
        PDB ID of protein. Required if providing string not path.
    force : bool
        True if existing reduce outputs should be overwritten.

    Returns
    -------
    output_path : pathlib.Path
        Location of output file, or None if Reduce produced nothing.
    """
    output_path = (reduce_output_path(path=input_file) if path
                   else reduce_output_path(pdb_name=pdb_name))
    # Reuse a previously generated file unless the caller forces a re-run.
    if output_path.exists() and not force:
        return output_path
    reduced, _message = run_reduce(input_file, path=path)
    if not reduced:
        return None
    output_path.parent.mkdir(exist_ok=True)
    output_path.write_text(reduced)
    return output_path
def output_reduce_list(path_list, force=False):
    """Generates structure file with protons from a list of structure files."""
    results = (output_reduce(p, force=force) for p in path_list)
    # Drop entries where Reduce produced no output.
    return [out for out in results if out]
def assembly_plus_protons(input_file, path=True, pdb_name=None,
                          save_output=False, force_save=False):
    """Returns an Assembly with protons added by Reduce.

    Notes
    -----
    Looks for a pre-existing Reduce output in the standard location before
    running Reduce. If the protein contains oligosaccharides or glycans,
    use reduce_correct_carbohydrates.

    Parameters
    ----------
    input_file : str or pathlib.Path
        Location of file to be converted to Assembly or PDB file as string.
    path : bool
        Whether we are looking at a file or a pdb string. Defaults to file.
    pdb_name : str
        PDB ID of protein. Required if providing string not path.
    save_output : bool
        If True will save the generated assembly.
    force_save : bool
        If True will overwrite existing reduced assembly.

    Returns
    -------
    reduced_assembly : AMPAL Assembly
        Assembly of protein with protons added by Reduce.
    """
    # Imported lazily to avoid a circular import at module load time.
    from ampal.pdb_parser import convert_pdb_to_ampal
    if path:
        input_path = Path(input_file)
        if not pdb_name:
            # Derive the PDB ID from the file name, e.g. '1abc.pdb' -> '1abc'.
            pdb_name = input_path.stem[:4]
        reduced_path = reduce_output_path(path=input_path)
        # Cache hit: reuse the existing Reduce output unless saving anew.
        if reduced_path.exists() and not save_output and not force_save:
            reduced_assembly = convert_pdb_to_ampal(
                str(reduced_path), pdb_id=pdb_name)
            return reduced_assembly
    if save_output:
        # Write (or reuse) the reduced file on disk, then parse it.
        reduced_path = output_reduce(
            input_file, path=path, pdb_name=pdb_name, force=force_save)
        reduced_assembly = convert_pdb_to_ampal(str(reduced_path), path=True)
    else:
        # Run Reduce in memory without persisting the output.
        reduce_mmol, reduce_message = run_reduce(input_file, path=path)
        if not reduce_mmol:
            return None
        reduced_assembly = convert_pdb_to_ampal(
            reduce_mmol, path=False, pdb_id=pdb_name)
    return reduced_assembly
__author__ = 'Kieran L. Hudson, Gail J. Bartlett'
| woolfson-group/isambard | isambard/external_programs/reduce.py | Python | mit | 7,041 |
from zope import component
from zope import interface
from zope.interface.interfaces import IObjectEvent
from zope import location
from sparc.configuration.container import ISparcPyContainerConfiguredApplication
#EVENTS
class ISnippetAvailableForSecretsSniffEvent(IObjectEvent):
    """An object providing ISnippet is ready to be sniffed for secrets"""
#APPLICATION & FACTORIES
class IMellonApplication(ISparcPyContainerConfiguredApplication):
    """The configured Mellon application container"""
class IMellonFileProvider(interface.Interface):
    """Provides IFile objects that should be processed by the application"""
    def __iter__():
        """Iterator of IFile objects"""
class IMellonFileProviderFactory(component.IFactory):
    """A factory producing a IMellonFileProvider"""
    def __call__(config):
        """Create a IMellonFileProvider.

        Args:
            config: factory specific data structure holding required object
                initialization information needed by factory
        """
#SNIPPETS
class ISnippet(location.ILocation):
    """A snippet of data to be sniffed for secrets

    This also implements ILocation, where __parent__ is a IMellonFile and
    __name__ indicates where in the file the snippet can be located at.
    """
    data = interface.Attribute("A Python data sequence")
class IBytesSnippet(ISnippet):
    """A snippet of bytes data to be sniffed for secrets"""
    data = interface.Attribute("A Python bytes sequence")
class IUnicodeSnippet(ISnippet):
    """A snippet of unicode data to be sniffed for secrets"""
    data = interface.Attribute("A Python unicode sequence")
class ISnippetIterator(interface.Interface):
    """Iterates data snippets"""
    def __iter__():
        """Iterator of ISnippet objects"""
#FILES
class IPath(interface.Interface):
    """Marker for text that is a formatted file system path"""
class IFile(interface.Interface):
    """Marker for file-like object providing Python's file object interface"""
class IMellonFile(ISnippetIterator):
    """A file to be processed by the application"""
    def __str__():
        """String locatable identity of file"""
class IUnicodeMellonFile(IMellonFile):
    """A Unicode (text) file to be processed by the application"""
    # Together these two attributes define a sliding window of text lines
    # that is turned into successive ISnippet objects.
    snippet_lines_increment = \
        interface.Attribute("Number of lines to jump after each snippet, 0 "+
                            "indicates entire data.")
    snippet_lines_coverage = \
        interface.Attribute("Number of lines to include in each snippet "+
                            "if available, 0 indicates all remaining lines.")
class IByteMellonFile(IMellonFile):
    """A byte (binary) file to be processed by the application"""
    # Analogous sliding window over fixed-size binary reads.
    read_size = interface.Attribute(\
        "Max number of bytes to include in each file read operation."+
        "Number of bytes to jump after each snippet, 0 indicates entire data.")
    snippet_bytes_increment = \
        interface.Attribute("Number of read_size data packets to jump after "+
                            "snippet return.")
    snippet_bytes_coverage = \
        interface.Attribute("Number of read_size data packets to include in "+
                            "each snippet. 0 indicates all data packets.")
class IBinaryChecker(interface.Interface):
    """Binary file checker"""
    def check():
        """True indicates the data was found to be binary"""
# SNIFFERS, SECRETS, WHITELISTS
class ISecretSniffer(interface.Interface):
    """Looks for a secret"""
    def __iter__():
        """Iterator of found ISecret providers"""
class ISecret(location.ILocation):
    """A secret found within a ISnippet

    This also implements ILocation, where __parent__ is a ISnippet and
    __name__ is alias for __str__.
    """
    def __str__():
        """String details of the secret and/or how it was found"""
    def __hash__():
        """Uniquely identifies the locatable secret among other secrets"""
class IWhitelistInfo(interface.Interface):
    """Object whitelist information"""
    def __str__():
        """Detailed information on how object was whitelisted"""
class IWhitelist(interface.Interface):
    """Identifies if object is whitelisted"""
    def __iter__():
        """Iterator of found IWhitelistInfo providers"""
| CrowdStrike/mellon | mellon/interfaces.py | Python | mit | 4,243 |
#!/usr/bin/env python
"""PyQt4 port of the layouts/flowlayout example from Qt v4.x"""
from PyQt4 import QtCore, QtGui
# ------------------------------------------------------------------------
class FlowLayout(QtGui.QLayout):
    """
    Standard PyQt examples FlowLayout modified to work with a scrollable parent.

    Items are laid out left-to-right, wrapping to a new row when the next
    item would overflow the available width.
    """

    def __init__(self, parent=None, margin=0, spacing=-1):
        super(FlowLayout, self).__init__(parent)
        if parent is not None:
            self.setMargin(margin)
        self.setSpacing(spacing)
        self.itemList = []

    def __del__(self):
        # Drain any remaining items so wrapped widgets are released.
        item = self.takeAt(0)
        while item:
            item = self.takeAt(0)

    def addItem(self, item):
        self.itemList.append(item)

    def count(self):
        return len(self.itemList)

    def itemAt(self, index):
        # Return None (not raise) for out-of-range indices, per QLayout API.
        if 0 <= index < len(self.itemList):
            return self.itemList[index]
        return None

    def takeAt(self, index):
        if 0 <= index < len(self.itemList):
            return self.itemList.pop(index)
        return None

    def expandingDirections(self):
        # The layout does not want to grow beyond its sizeHint in either axis.
        return QtCore.Qt.Orientations(QtCore.Qt.Orientation(0))

    def hasHeightForWidth(self):
        return True

    def heightForWidth(self, width):
        # Dry-run the layout (testOnly=True) to compute the required height.
        return self.doLayout(QtCore.QRect(0, 0, width, 0), True)

    def setGeometry(self, rect):
        super(FlowLayout, self).setGeometry(rect)
        self.doLayout(rect, False)

    def sizeHint(self):
        return self.minimumSize()

    def minimumSize(self):
        # BUG FIX: the original defined minimumSize() twice; the first,
        # expandedTo-based definition was dead code silently shadowed by
        # this geometry-based one.  The dead definition has been removed;
        # runtime behaviour is unchanged.
        w = self.geometry().width()
        h = self.doLayout(QtCore.QRect(0, 0, w, 0), True)
        return QtCore.QSize(w + 2 * self.margin(), h + 2 * self.margin())

    def doLayout(self, rect, testOnly=False):
        """Place (or, if testOnly, just measure) all items inside rect.

        Returns the total height consumed by the flowed rows.
        """
        x = rect.x()
        y = rect.y()
        lineHeight = 0
        for item in self.itemList:
            wid = item.widget()
            spaceX = self.spacing()# + wid.style().layoutSpacing(QtGui.QSizePolicy.PushButton, QtGui.QSizePolicy.PushButton, QtCore.Qt.Horizontal)
            spaceY = self.spacing()# + wid.style().layoutSpacing(QtGui.QSizePolicy.PushButton, QtGui.QSizePolicy.PushButton, QtCore.Qt.Vertical)
            nextX = x + item.sizeHint().width() + spaceX
            # Wrap to the next row when the item would overflow the right edge
            # (but never wrap the very first item of a row: lineHeight > 0).
            if nextX - spaceX > rect.right() and lineHeight > 0:
                x = rect.x()
                y = y + lineHeight + spaceY
                nextX = x + item.sizeHint().width() + spaceX
                lineHeight = 0
            if not testOnly:
                item.setGeometry(QtCore.QRect(QtCore.QPoint(x, y), item.sizeHint()))
            x = nextX
            lineHeight = max(lineHeight, item.sizeHint().height())
        return y + lineHeight - rect.y()
# ------------------------------------------------------------------------
class ResizeScrollArea(QtGui.QScrollArea):
    """
    A QScrollArea that propagates the resizing to any FlowLayout children.
    """

    def __init__(self, parent=None):
        # BUG FIX: the original spelled this ``__init`` (missing trailing
        # underscores), so it was never called as the constructor and
        # QScrollArea's default __init__ ran instead.  Naming it correctly
        # preserves that behaviour while making the intent explicit.
        QtGui.QScrollArea.__init__(self, parent)

    def resizeEvent(self, event):
        wrapper = self.findChild(QtGui.QWidget)
        # BUG FIX: the original called wrapper.findChild() before checking
        # wrapper for None, which would raise AttributeError if no child
        # widget had been installed yet.
        flow = wrapper.findChild(FlowLayout) if wrapper is not None else None
        if wrapper and flow:
            width = self.viewport().width()
            height = flow.heightForWidth(width)
            size = QtCore.QSize(width, height)
            point = self.viewport().rect().topLeft()
            flow.setGeometry(QtCore.QRect(point, size))
            self.viewport().update()
        super(ResizeScrollArea, self).resizeEvent(event)
# ------------------------------------------------------------------------
class ScrollingFlowWidget(QtGui.QWidget):
    """
    A resizable and scrollable widget that uses a flow layout.
    Use its addWidget() method to flow children into it.
    """
    def __init__(self,parent=None):
        super(ScrollingFlowWidget,self).__init__(parent)
        grid = QtGui.QGridLayout(self)
        scroll = ResizeScrollArea()
        # Wrapper widget owns the flow layout; the scroll area scrolls it.
        self._wrapper = QtGui.QWidget(scroll)
        self.flowLayout = FlowLayout(self._wrapper)
        self._wrapper.setLayout(self.flowLayout)
        # NOTE: setWidget must precede setWidgetResizable for correct sizing.
        scroll.setWidget(self._wrapper)
        scroll.setWidgetResizable(True)
        grid.addWidget(scroll)
    def addWidget(self, widget):
        """Add *widget* to the flow; it is reparented into the wrapper."""
        self.flowLayout.addWidget(widget)
        widget.setParent(self._wrapper)
# ------------------------------------------------------------------------
if __name__ == '__main__':
    # Demo: a window full of randomly-labelled toggle buttons that reflow
    # as the window is resized.
    import sys
    import random
    class ExampleScroller(ScrollingFlowWidget):
        def sizeHint(self):
            return QtCore.QSize(500,300)
    class ExampleWindow(QtGui.QWidget):
        def __init__(self):
            super(ExampleWindow, self).__init__()
            self.scroller = ExampleScroller(self)
            self.setLayout( QtGui.QVBoxLayout(self) )
            self.layout().addWidget(self.scroller)
            # Populate with 25-50 buttons labelled with 1-5 random "words".
            for w in range( random.randint(25,50)):
                words = " ".join([ "".join([ chr(random.choice(range(ord('a'),ord('z'))))
                                   for x in range( random.randint(2,9) ) ])
                                   for n in range(random.randint(1,5)) ]).title()
                widget = QtGui.QPushButton(words)
                widget.setFixedHeight( 20 )
                widget.setCursor( QtCore.Qt.PointingHandCursor )
                widget.setCheckable( True )
                self.scroller.addWidget(widget)
            self.setWindowTitle("Scrolling Flow Layout")
    app = QtGui.QApplication(sys.argv)
    mainWin = ExampleWindow()
    mainWin.show()
sys.exit(app.exec_()) | tommo/gii | lib/gii/qt/controls/FlowLayout.py | Python | mit | 5,065 |
##########################################################################
#
# Copyright 2011 Jose Fonseca
# Copyright 2008-2009 VMware, Inc.
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
##########################################################################
"""d3d9types.h"""
from winapi import *
D3DCOLOR = Alias("D3DCOLOR", DWORD)
D3DVECTOR = Struct("D3DVECTOR", [
(Float, "x"),
(Float, "y"),
(Float, "z"),
])
D3DCOLORVALUE = Struct("D3DCOLORVALUE", [
(Float, "r"),
(Float, "g"),
(Float, "b"),
(Float, "a"),
])
D3DRECT = Struct("D3DRECT", [
(LONG, "x1"),
(LONG, "y1"),
(LONG, "x2"),
(LONG, "y2"),
])
D3DMATRIX = Struct("D3DMATRIX", [
(Array(Array(Float, 4), "4"), "m"),
])
D3DVIEWPORT9 = Struct("D3DVIEWPORT9", [
(DWORD, "X"),
(DWORD, "Y"),
(DWORD, "Width"),
(DWORD, "Height"),
(Float, "MinZ"),
(Float, "MaxZ"),
])
D3DCLIPPLANE = Flags(DWORD, [
"D3DCLIPPLANE0",
"D3DCLIPPLANE1",
"D3DCLIPPLANE2",
"D3DCLIPPLANE3",
"D3DCLIPPLANE4",
"D3DCLIPPLANE5",
])
D3DCS = Flags(DWORD, [
"D3DCS_ALL",
"D3DCS_LEFT",
"D3DCS_RIGHT",
"D3DCS_TOP",
"D3DCS_BOTTOM",
"D3DCS_FRONT",
"D3DCS_BACK",
"D3DCS_PLANE0",
"D3DCS_PLANE1",
"D3DCS_PLANE2",
"D3DCS_PLANE3",
"D3DCS_PLANE4",
"D3DCS_PLANE5",
])
D3DCLIPSTATUS9 = Struct("D3DCLIPSTATUS9", [
(DWORD, "ClipUnion"),
(DWORD, "ClipIntersection"),
])
D3DMATERIAL9 = Struct("D3DMATERIAL9", [
(D3DCOLORVALUE, "Diffuse"),
(D3DCOLORVALUE, "Ambient"),
(D3DCOLORVALUE, "Specular"),
(D3DCOLORVALUE, "Emissive"),
(Float, "Power"),
])
D3DLIGHTTYPE = Enum("D3DLIGHTTYPE", [
"D3DLIGHT_POINT",
"D3DLIGHT_SPOT",
"D3DLIGHT_DIRECTIONAL",
])
D3DLIGHT9 = Struct("D3DLIGHT9", [
(D3DLIGHTTYPE, "Type"),
(D3DCOLORVALUE, "Diffuse"),
(D3DCOLORVALUE, "Specular"),
(D3DCOLORVALUE, "Ambient"),
(D3DVECTOR, "Position"),
(D3DVECTOR, "Direction"),
(Float, "Range"),
(Float, "Falloff"),
(Float, "Attenuation0"),
(Float, "Attenuation1"),
(Float, "Attenuation2"),
(Float, "Theta"),
(Float, "Phi"),
])
D3DCLEAR = Flags(DWORD, [
"D3DCLEAR_TARGET",
"D3DCLEAR_ZBUFFER",
"D3DCLEAR_STENCIL",
])
D3DSHADEMODE = Enum("D3DSHADEMODE", [
"D3DSHADE_FLAT",
"D3DSHADE_GOURAUD",
"D3DSHADE_PHONG",
])
D3DFILLMODE = Enum("D3DFILLMODE", [
"D3DFILL_POINT",
"D3DFILL_WIREFRAME",
"D3DFILL_SOLID",
])
D3DBLEND = Enum("D3DBLEND", [
"D3DBLEND_ZERO",
"D3DBLEND_ONE",
"D3DBLEND_SRCCOLOR",
"D3DBLEND_INVSRCCOLOR",
"D3DBLEND_SRCALPHA",
"D3DBLEND_INVSRCALPHA",
"D3DBLEND_DESTALPHA",
"D3DBLEND_INVDESTALPHA",
"D3DBLEND_DESTCOLOR",
"D3DBLEND_INVDESTCOLOR",
"D3DBLEND_SRCALPHASAT",
"D3DBLEND_BOTHSRCALPHA",
"D3DBLEND_BOTHINVSRCALPHA",
"D3DBLEND_BLENDFACTOR",
"D3DBLEND_INVBLENDFACTOR",
"D3DBLEND_SRCCOLOR2",
"D3DBLEND_INVSRCCOLOR2",
])
D3DBLENDOP = Enum("D3DBLENDOP", [
"D3DBLENDOP_ADD",
"D3DBLENDOP_SUBTRACT",
"D3DBLENDOP_REVSUBTRACT",
"D3DBLENDOP_MIN",
"D3DBLENDOP_MAX",
])
D3DTEXTUREADDRESS = Enum("D3DTEXTUREADDRESS", [
"D3DTADDRESS_WRAP",
"D3DTADDRESS_MIRROR",
"D3DTADDRESS_CLAMP",
"D3DTADDRESS_BORDER",
"D3DTADDRESS_MIRRORONCE",
])
D3DCULL = Enum("D3DCULL", [
"D3DCULL_NONE",
"D3DCULL_CW",
"D3DCULL_CCW",
])
D3DCMPFUNC = Enum("D3DCMPFUNC", [
"D3DCMP_NEVER",
"D3DCMP_LESS",
"D3DCMP_EQUAL",
"D3DCMP_LESSEQUAL",
"D3DCMP_GREATER",
"D3DCMP_NOTEQUAL",
"D3DCMP_GREATEREQUAL",
"D3DCMP_ALWAYS",
])
D3DSTENCILOP = Enum("D3DSTENCILOP", [
"D3DSTENCILOP_KEEP",
"D3DSTENCILOP_ZERO",
"D3DSTENCILOP_REPLACE",
"D3DSTENCILOP_INCRSAT",
"D3DSTENCILOP_DECRSAT",
"D3DSTENCILOP_INVERT",
"D3DSTENCILOP_INCR",
"D3DSTENCILOP_DECR",
])
D3DFOGMODE = Enum("D3DFOGMODE", [
"D3DFOG_NONE",
"D3DFOG_EXP",
"D3DFOG_EXP2",
"D3DFOG_LINEAR",
])
D3DZBUFFERTYPE = Enum("D3DZBUFFERTYPE", [
"D3DZB_FALSE",
"D3DZB_TRUE",
"D3DZB_USEW",
])
D3DPRIMITIVETYPE = Enum("D3DPRIMITIVETYPE", [
"D3DPT_POINTLIST",
"D3DPT_LINELIST",
"D3DPT_LINESTRIP",
"D3DPT_TRIANGLELIST",
"D3DPT_TRIANGLESTRIP",
"D3DPT_TRIANGLEFAN",
])
D3DTRANSFORMSTATETYPE = Enum("D3DTRANSFORMSTATETYPE", [
"D3DTS_VIEW",
"D3DTS_PROJECTION",
"D3DTS_TEXTURE0",
"D3DTS_TEXTURE1",
"D3DTS_TEXTURE2",
"D3DTS_TEXTURE3",
"D3DTS_TEXTURE4",
"D3DTS_TEXTURE5",
"D3DTS_TEXTURE6",
"D3DTS_TEXTURE7",
"D3DTS_WORLD",
"D3DTS_WORLD1",
"D3DTS_WORLD2",
"D3DTS_WORLD3",
])
D3DMATERIALCOLORSOURCE = Enum("D3DMATERIALCOLORSOURCE", [
"D3DMCS_MATERIAL",
"D3DMCS_COLOR1",
"D3DMCS_COLOR2",
])
D3DWRAPCOORD = Flags(DWORD, [
"D3DWRAPCOORD_0",
"D3DWRAPCOORD_1",
"D3DWRAPCOORD_2",
"D3DWRAPCOORD_3",
])
D3DCOLORWRITEENABLE = Flags(DWORD, [
"D3DCOLORWRITEENABLE_RED",
"D3DCOLORWRITEENABLE_GREEN",
"D3DCOLORWRITEENABLE_BLUE",
"D3DCOLORWRITEENABLE_ALPHA",
])
D3DDEGREETYPE = Enum("D3DDEGREETYPE", [
"D3DDEGREE_LINEAR",
"D3DDEGREE_QUADRATIC",
"D3DDEGREE_CUBIC",
"D3DDEGREE_QUINTIC",
])
D3DPATCHEDGESTYLE = Enum("D3DPATCHEDGESTYLE", [
"D3DPATCHEDGE_DISCRETE",
"D3DPATCHEDGE_CONTINUOUS",
])
D3DVERTEXBLENDFLAGS = Enum("D3DVERTEXBLENDFLAGS", [
"D3DVBF_DISABLE",
"D3DVBF_1WEIGHTS",
"D3DVBF_2WEIGHTS",
"D3DVBF_3WEIGHTS",
"D3DVBF_TWEENING",
"D3DVBF_0WEIGHTS",
])
D3DDEBUGMONITORTOKENS = Enum("D3DDEBUGMONITORTOKENS", [
"D3DDMT_ENABLE",
"D3DDMT_DISABLE",
])
# TODO: Convert these to/from actual floats
FLOAT_AS_DWORD = DWORD
D3DRENDERSTATETYPE, D3DRENDERSTATEVALUE = EnumPolymorphic("D3DRENDERSTATETYPE", "State", [
("D3DRS_ZENABLE", D3DZBUFFERTYPE),
("D3DRS_FILLMODE", D3DFILLMODE),
("D3DRS_SHADEMODE", D3DSHADEMODE),
("D3DRS_ZWRITEENABLE", BOOL),
("D3DRS_ALPHATESTENABLE", BOOL),
("D3DRS_LASTPIXEL", BOOL),
("D3DRS_SRCBLEND", D3DBLEND),
("D3DRS_DESTBLEND", D3DBLEND),
("D3DRS_CULLMODE", D3DCULL),
("D3DRS_ZFUNC", D3DCMPFUNC),
("D3DRS_ALPHAREF", DWORD),
("D3DRS_ALPHAFUNC", D3DCMPFUNC),
("D3DRS_DITHERENABLE", BOOL),
("D3DRS_ALPHABLENDENABLE", BOOL),
("D3DRS_FOGENABLE", BOOL),
("D3DRS_SPECULARENABLE", BOOL),
("D3DRS_FOGCOLOR", D3DCOLOR),
("D3DRS_FOGTABLEMODE", D3DFOGMODE),
("D3DRS_FOGSTART", FLOAT_AS_DWORD),
("D3DRS_FOGEND", FLOAT_AS_DWORD),
("D3DRS_FOGDENSITY", FLOAT_AS_DWORD),
("D3DRS_RANGEFOGENABLE", BOOL),
("D3DRS_STENCILENABLE", BOOL),
("D3DRS_STENCILFAIL", D3DSTENCILOP),
("D3DRS_STENCILZFAIL", D3DSTENCILOP),
("D3DRS_STENCILPASS", D3DSTENCILOP),
("D3DRS_STENCILFUNC", D3DCMPFUNC),
("D3DRS_STENCILREF", DWORD),
("D3DRS_STENCILMASK", DWORD),
("D3DRS_STENCILWRITEMASK", DWORD),
("D3DRS_TEXTUREFACTOR", D3DCOLOR),
("D3DRS_WRAP0", D3DWRAPCOORD),
("D3DRS_WRAP1", D3DWRAPCOORD),
("D3DRS_WRAP2", D3DWRAPCOORD),
("D3DRS_WRAP3", D3DWRAPCOORD),
("D3DRS_WRAP4", D3DWRAPCOORD),
("D3DRS_WRAP5", D3DWRAPCOORD),
("D3DRS_WRAP6", D3DWRAPCOORD),
("D3DRS_WRAP7", D3DWRAPCOORD),
("D3DRS_CLIPPING", BOOL),
("D3DRS_LIGHTING", BOOL),
("D3DRS_AMBIENT", D3DCOLOR),
("D3DRS_FOGVERTEXMODE", D3DFOGMODE),
("D3DRS_COLORVERTEX", BOOL),
("D3DRS_LOCALVIEWER", BOOL),
("D3DRS_NORMALIZENORMALS", BOOL),
("D3DRS_DIFFUSEMATERIALSOURCE", D3DMATERIALCOLORSOURCE),
("D3DRS_SPECULARMATERIALSOURCE", D3DMATERIALCOLORSOURCE),
("D3DRS_AMBIENTMATERIALSOURCE", D3DMATERIALCOLORSOURCE),
("D3DRS_EMISSIVEMATERIALSOURCE", D3DMATERIALCOLORSOURCE),
("D3DRS_VERTEXBLEND", D3DVERTEXBLENDFLAGS),
("D3DRS_CLIPPLANEENABLE", D3DCLIPPLANE),
("D3DRS_POINTSIZE", FLOAT_AS_DWORD),
("D3DRS_POINTSIZE_MIN", FLOAT_AS_DWORD),
("D3DRS_POINTSPRITEENABLE", BOOL),
("D3DRS_POINTSCALEENABLE", BOOL),
("D3DRS_POINTSCALE_A", FLOAT_AS_DWORD),
("D3DRS_POINTSCALE_B", FLOAT_AS_DWORD),
("D3DRS_POINTSCALE_C", FLOAT_AS_DWORD),
("D3DRS_MULTISAMPLEANTIALIAS", BOOL),
("D3DRS_MULTISAMPLEMASK", DWORD),
("D3DRS_PATCHEDGESTYLE", D3DPATCHEDGESTYLE),
("D3DRS_DEBUGMONITORTOKEN", D3DDEBUGMONITORTOKENS),
("D3DRS_POINTSIZE_MAX", FLOAT_AS_DWORD),
("D3DRS_INDEXEDVERTEXBLENDENABLE", BOOL),
("D3DRS_COLORWRITEENABLE", DWORD),
("D3DRS_TWEENFACTOR", FLOAT_AS_DWORD),
("D3DRS_BLENDOP", D3DBLENDOP),
("D3DRS_POSITIONDEGREE", D3DDEGREETYPE),
("D3DRS_NORMALDEGREE", D3DDEGREETYPE),
("D3DRS_SCISSORTESTENABLE", BOOL),
("D3DRS_SLOPESCALEDEPTHBIAS", FLOAT_AS_DWORD),
("D3DRS_ANTIALIASEDLINEENABLE", BOOL),
("D3DRS_MINTESSELLATIONLEVEL", FLOAT_AS_DWORD),
("D3DRS_MAXTESSELLATIONLEVEL", FLOAT_AS_DWORD),
("D3DRS_ADAPTIVETESS_X", FLOAT_AS_DWORD),
("D3DRS_ADAPTIVETESS_Y", FLOAT_AS_DWORD),
("D3DRS_ADAPTIVETESS_Z", FLOAT_AS_DWORD),
("D3DRS_ADAPTIVETESS_W", FLOAT_AS_DWORD),
("D3DRS_ENABLEADAPTIVETESSELLATION", BOOL),
("D3DRS_TWOSIDEDSTENCILMODE", BOOL),
("D3DRS_CCW_STENCILFAIL", D3DSTENCILOP),
("D3DRS_CCW_STENCILZFAIL", D3DSTENCILOP),
("D3DRS_CCW_STENCILPASS", D3DSTENCILOP),
("D3DRS_CCW_STENCILFUNC", D3DCMPFUNC),
("D3DRS_COLORWRITEENABLE1", D3DCOLORWRITEENABLE),
("D3DRS_COLORWRITEENABLE2", D3DCOLORWRITEENABLE),
("D3DRS_COLORWRITEENABLE3", D3DCOLORWRITEENABLE),
("D3DRS_BLENDFACTOR", D3DCOLOR),
("D3DRS_SRGBWRITEENABLE", BOOL),
("D3DRS_DEPTHBIAS", FLOAT_AS_DWORD),
("D3DRS_WRAP8", D3DWRAPCOORD),
("D3DRS_WRAP9", D3DWRAPCOORD),
("D3DRS_WRAP10", D3DWRAPCOORD),
("D3DRS_WRAP11", D3DWRAPCOORD),
("D3DRS_WRAP12", D3DWRAPCOORD),
("D3DRS_WRAP13", D3DWRAPCOORD),
("D3DRS_WRAP14", D3DWRAPCOORD),
("D3DRS_WRAP15", D3DWRAPCOORD),
("D3DRS_SEPARATEALPHABLENDENABLE", BOOL),
("D3DRS_SRCBLENDALPHA", D3DBLEND),
("D3DRS_DESTBLENDALPHA", D3DBLEND),
("D3DRS_BLENDOPALPHA", D3DBLENDOP),
# XXX: D3DRENDERSTATE_WRAPBIAS + n
], DWORD)
D3DTSS_TCI = Flags(DWORD, [
#"D3DTSS_TCI_PASSTHRU", # 0
"D3DTSS_TCI_CAMERASPACENORMAL",
"D3DTSS_TCI_CAMERASPACEPOSITION",
"D3DTSS_TCI_CAMERASPACEREFLECTIONVECTOR",
"D3DTSS_TCI_SPHEREMAP",
])
D3DTEXTUREOP = Enum("D3DTEXTUREOP", [
"D3DTOP_DISABLE",
"D3DTOP_SELECTARG1",
"D3DTOP_SELECTARG2",
"D3DTOP_MODULATE",
"D3DTOP_MODULATE2X",
"D3DTOP_MODULATE4X",
"D3DTOP_ADD",
"D3DTOP_ADDSIGNED",
"D3DTOP_ADDSIGNED2X",
"D3DTOP_SUBTRACT",
"D3DTOP_ADDSMOOTH",
"D3DTOP_BLENDDIFFUSEALPHA",
"D3DTOP_BLENDTEXTUREALPHA",
"D3DTOP_BLENDFACTORALPHA",
"D3DTOP_BLENDTEXTUREALPHAPM",
"D3DTOP_BLENDCURRENTALPHA",
"D3DTOP_PREMODULATE",
"D3DTOP_MODULATEALPHA_ADDCOLOR",
"D3DTOP_MODULATECOLOR_ADDALPHA",
"D3DTOP_MODULATEINVALPHA_ADDCOLOR",
"D3DTOP_MODULATEINVCOLOR_ADDALPHA",
"D3DTOP_BUMPENVMAP",
"D3DTOP_BUMPENVMAPLUMINANCE",
"D3DTOP_DOTPRODUCT3",
"D3DTOP_MULTIPLYADD",
"D3DTOP_LERP",
])
# XXX: Actually a mixture of enums and flags
D3DTA = FakeEnum(DWORD, [
"D3DTA_DIFFUSE",
"D3DTA_CURRENT",
"D3DTA_TEXTURE",
"D3DTA_TFACTOR",
"D3DTA_SPECULAR",
"D3DTA_TEMP",
"D3DTA_CONSTANT",
#"D3DTA_COMPLEMENT",
#"D3DTA_ALPHAREPLICATE",
])
D3DTEXTURETRANSFORMFLAGS = Enum("D3DTEXTURETRANSFORMFLAGS", [
"D3DTTFF_DISABLE",
"D3DTTFF_COUNT1",
"D3DTTFF_COUNT2",
"D3DTTFF_COUNT3",
"D3DTTFF_COUNT4",
"D3DTTFF_PROJECTED",
])
D3DTEXTUREFILTERTYPE = Enum("D3DTEXTUREFILTERTYPE", [
"D3DTEXF_NONE",
"D3DTEXF_POINT",
"D3DTEXF_LINEAR",
"D3DTEXF_ANISOTROPIC",
"D3DTEXF_PYRAMIDALQUAD",
"D3DTEXF_GAUSSIANQUAD",
"D3DTEXF_CONVOLUTIONMONO",
])
D3DTEXTURESTAGESTATETYPE, D3DTEXTURESTAGESTATEVALUE = EnumPolymorphic("D3DTEXTURESTAGESTATETYPE", "Type", [
("D3DTSS_COLOROP", D3DTEXTUREOP),
("D3DTSS_COLORARG1", D3DTA),
("D3DTSS_COLORARG2", D3DTA),
("D3DTSS_ALPHAOP", D3DTEXTUREOP),
("D3DTSS_ALPHAARG1", D3DTA),
("D3DTSS_ALPHAARG2", D3DTA),
("D3DTSS_BUMPENVMAT00", FLOAT_AS_DWORD),
("D3DTSS_BUMPENVMAT01", FLOAT_AS_DWORD),
("D3DTSS_BUMPENVMAT10", FLOAT_AS_DWORD),
("D3DTSS_BUMPENVMAT11", FLOAT_AS_DWORD),
("D3DTSS_TEXCOORDINDEX", D3DTSS_TCI),
("D3DTSS_BUMPENVLSCALE", FLOAT_AS_DWORD),
("D3DTSS_BUMPENVLOFFSET", FLOAT_AS_DWORD),
("D3DTSS_TEXTURETRANSFORMFLAGS", D3DTEXTURETRANSFORMFLAGS),
("D3DTSS_COLORARG0", D3DTA,),
("D3DTSS_ALPHAARG0", D3DTA,),
("D3DTSS_RESULTARG", D3DTA,),
("D3DTSS_CONSTANT", D3DCOLOR),
], DWORD)
D3DSAMPLERSTATETYPE, D3DSAMPLERSTATEVALUE = EnumPolymorphic("D3DSAMPLERSTATETYPE", "Type", [
("D3DSAMP_ADDRESSU", D3DTEXTUREADDRESS),
("D3DSAMP_ADDRESSV", D3DTEXTUREADDRESS),
("D3DSAMP_ADDRESSW", D3DTEXTUREADDRESS),
("D3DSAMP_BORDERCOLOR", D3DCOLOR),
("D3DSAMP_MAGFILTER", D3DTEXTUREFILTERTYPE),
("D3DSAMP_MINFILTER", D3DTEXTUREFILTERTYPE),
("D3DSAMP_MIPFILTER", D3DTEXTUREFILTERTYPE),
("D3DSAMP_MIPMAPLODBIAS", FLOAT_AS_DWORD),
("D3DSAMP_MAXMIPLEVEL", DWORD),
("D3DSAMP_MAXANISOTROPY", DWORD),
("D3DSAMP_SRGBTEXTURE", BOOL),
("D3DSAMP_ELEMENTINDEX", DWORD),
("D3DSAMP_DMAPOFFSET", DWORD),
], DWORD)
D3DPV = Flags(DWORD, [
"D3DPV_DONOTCOPYDATA",
])
# XXX: Actually a mixture of enums and flags
D3DFVF = Flags(DWORD, [
"D3DFVF_RESERVED0",
"D3DFVF_XYZ",
"D3DFVF_XYZRHW",
"D3DFVF_XYZB1",
"D3DFVF_XYZB2",
"D3DFVF_XYZB3",
"D3DFVF_XYZB4",
"D3DFVF_XYZB5",
"D3DFVF_XYZW",
"D3DFVF_NORMAL",
"D3DFVF_PSIZE",
"D3DFVF_DIFFUSE",
"D3DFVF_SPECULAR",
#"D3DFVF_TEX0",
#"D3DFVF_TEX1",
#"D3DFVF_TEX2",
#"D3DFVF_TEX3",
#"D3DFVF_TEX4",
#"D3DFVF_TEX5",
#"D3DFVF_TEX6",
#"D3DFVF_TEX7",
#"D3DFVF_TEX8",
"D3DFVF_LASTBETA_UBYTE4",
"D3DFVF_LASTBETA_D3DCOLOR",
"D3DFVF_RESERVED2",
#"D3DFVF_TEXCOORDSIZE1(0)",
#"D3DFVF_TEXCOORDSIZE2(0)",
#"D3DFVF_TEXCOORDSIZE3(0)",
#"D3DFVF_TEXCOORDSIZE4(0)",
#"D3DFVF_TEXCOORDSIZE1(1)",
#"D3DFVF_TEXCOORDSIZE2(1)",
#"D3DFVF_TEXCOORDSIZE3(1)",
#"D3DFVF_TEXCOORDSIZE4(1)",
#"D3DFVF_TEXCOORDSIZE1(2)",
#"D3DFVF_TEXCOORDSIZE2(2)",
#"D3DFVF_TEXCOORDSIZE3(2)",
#"D3DFVF_TEXCOORDSIZE4(2)",
#"D3DFVF_TEXCOORDSIZE1(3)",
#"D3DFVF_TEXCOORDSIZE2(3)",
#"D3DFVF_TEXCOORDSIZE3(3)",
#"D3DFVF_TEXCOORDSIZE4(3)",
])
D3DDECLUSAGE = FakeEnum(BYTE, [
"D3DDECLUSAGE_POSITION",
"D3DDECLUSAGE_BLENDWEIGHT",
"D3DDECLUSAGE_BLENDINDICES",
"D3DDECLUSAGE_NORMAL",
"D3DDECLUSAGE_PSIZE",
"D3DDECLUSAGE_TEXCOORD",
"D3DDECLUSAGE_TANGENT",
"D3DDECLUSAGE_BINORMAL",
"D3DDECLUSAGE_TESSFACTOR",
"D3DDECLUSAGE_POSITIONT",
"D3DDECLUSAGE_COLOR",
"D3DDECLUSAGE_FOG",
"D3DDECLUSAGE_DEPTH",
"D3DDECLUSAGE_SAMPLE",
])
D3DDECLMETHOD = FakeEnum(BYTE, [
"D3DDECLMETHOD_DEFAULT",
"D3DDECLMETHOD_PARTIALU",
"D3DDECLMETHOD_PARTIALV",
"D3DDECLMETHOD_CROSSUV",
"D3DDECLMETHOD_UV",
"D3DDECLMETHOD_LOOKUP",
"D3DDECLMETHOD_LOOKUPPRESAMPLED",
])
D3DDECLTYPE = FakeEnum(BYTE, [
"D3DDECLTYPE_FLOAT1",
"D3DDECLTYPE_FLOAT2",
"D3DDECLTYPE_FLOAT3",
"D3DDECLTYPE_FLOAT4",
"D3DDECLTYPE_D3DCOLOR",
"D3DDECLTYPE_UBYTE4",
"D3DDECLTYPE_SHORT2",
"D3DDECLTYPE_SHORT4",
"D3DDECLTYPE_UBYTE4N",
"D3DDECLTYPE_SHORT2N",
"D3DDECLTYPE_SHORT4N",
"D3DDECLTYPE_USHORT2N",
"D3DDECLTYPE_USHORT4N",
"D3DDECLTYPE_UDEC3",
"D3DDECLTYPE_DEC3N",
"D3DDECLTYPE_FLOAT16_2",
"D3DDECLTYPE_FLOAT16_4",
"D3DDECLTYPE_UNUSED",
])
D3DVERTEXELEMENT9 = Struct("D3DVERTEXELEMENT9", [
(WORD, "Stream"),
(WORD, "Offset"),
(D3DDECLTYPE, "Type"), # BYTE
(D3DDECLMETHOD, "Method"), # BYTE
(D3DDECLUSAGE, "Usage"), # BYTE
(BYTE, "UsageIndex"),
])
D3DBASISTYPE = Enum("D3DBASISTYPE", [
"D3DBASIS_BEZIER",
"D3DBASIS_BSPLINE",
"D3DBASIS_CATMULL_ROM",
])
D3DSTATEBLOCKTYPE = Enum("D3DSTATEBLOCKTYPE", [
"D3DSBT_ALL",
"D3DSBT_PIXELSTATE",
"D3DSBT_VERTEXSTATE",
])
D3DDEVTYPE = Enum("D3DDEVTYPE", [
"D3DDEVTYPE_HAL",
"D3DDEVTYPE_REF",
"D3DDEVTYPE_SW",
"D3DDEVTYPE_NULLREF",
])
D3DMULTISAMPLE_TYPE = Enum("D3DMULTISAMPLE_TYPE", [
"D3DMULTISAMPLE_NONE",
"D3DMULTISAMPLE_NONMASKABLE",
"D3DMULTISAMPLE_2_SAMPLES",
"D3DMULTISAMPLE_3_SAMPLES",
"D3DMULTISAMPLE_4_SAMPLES",
"D3DMULTISAMPLE_5_SAMPLES",
"D3DMULTISAMPLE_6_SAMPLES",
"D3DMULTISAMPLE_7_SAMPLES",
"D3DMULTISAMPLE_8_SAMPLES",
"D3DMULTISAMPLE_9_SAMPLES",
"D3DMULTISAMPLE_10_SAMPLES",
"D3DMULTISAMPLE_11_SAMPLES",
"D3DMULTISAMPLE_12_SAMPLES",
"D3DMULTISAMPLE_13_SAMPLES",
"D3DMULTISAMPLE_14_SAMPLES",
"D3DMULTISAMPLE_15_SAMPLES",
"D3DMULTISAMPLE_16_SAMPLES",
])
D3DFORMAT = Enum("D3DFORMAT", [
"D3DFMT_UNKNOWN",
"D3DFMT_R8G8B8",
"D3DFMT_A8R8G8B8",
"D3DFMT_X8R8G8B8",
"D3DFMT_R5G6B5",
"D3DFMT_X1R5G5B5",
"D3DFMT_A1R5G5B5",
"D3DFMT_A4R4G4B4",
"D3DFMT_R3G3B2",
"D3DFMT_A8",
"D3DFMT_A8R3G3B2",
"D3DFMT_X4R4G4B4",
"D3DFMT_A2B10G10R10",
"D3DFMT_A8B8G8R8",
"D3DFMT_X8B8G8R8",
"D3DFMT_G16R16",
"D3DFMT_A2R10G10B10",
"D3DFMT_A16B16G16R16",
"D3DFMT_A8P8",
"D3DFMT_P8",
"D3DFMT_L8",
"D3DFMT_A8L8",
"D3DFMT_A4L4",
"D3DFMT_V8U8",
"D3DFMT_L6V5U5",
"D3DFMT_X8L8V8U8",
"D3DFMT_Q8W8V8U8",
"D3DFMT_V16U16",
"D3DFMT_A2W10V10U10",
"D3DFMT_UYVY",
"D3DFMT_R8G8_B8G8",
"D3DFMT_YUY2",
"D3DFMT_G8R8_G8B8",
"D3DFMT_DXT1",
"D3DFMT_DXT2",
"D3DFMT_DXT3",
"D3DFMT_DXT4",
"D3DFMT_DXT5",
"D3DFMT_D16_LOCKABLE",
"D3DFMT_D32",
"D3DFMT_D15S1",
"D3DFMT_D24S8",
"D3DFMT_D24X8",
"D3DFMT_D24X4S4",
"D3DFMT_D16",
"D3DFMT_D32F_LOCKABLE",
"D3DFMT_D24FS8",
"D3DFMT_D32_LOCKABLE",
"D3DFMT_S8_LOCKABLE",
"D3DFMT_L16",
"D3DFMT_VERTEXDATA",
"D3DFMT_INDEX16",
"D3DFMT_INDEX32",
"D3DFMT_Q16W16V16U16",
"D3DFMT_MULTI2_ARGB8",
"D3DFMT_R16F",
"D3DFMT_G16R16F",
"D3DFMT_A16B16G16R16F",
"D3DFMT_R32F",
"D3DFMT_G32R32F",
"D3DFMT_A32B32G32R32F",
"D3DFMT_CxV8U8",
"D3DFMT_A1",
"D3DFMT_A2B10G10R10_XR_BIAS",
"D3DFMT_BINARYBUFFER",
# Unofficial formats
"D3DFMT_ATI1N",
"D3DFMT_ATI2N",
"D3DFMT_AYUV",
"D3DFMT_DF16",
"D3DFMT_DF24",
"D3DFMT_INTZ",
"D3DFMT_NULL",
"D3DFMT_NV12",
"D3DFMT_YV12",
"D3DFMT_RAWZ",
])
D3DDISPLAYMODE = Struct("D3DDISPLAYMODE", [
(UINT, "Width"),
(UINT, "Height"),
(UINT, "RefreshRate"),
(D3DFORMAT, "Format"),
])
D3DCREATE = Flags(DWORD, [
"D3DCREATE_FPU_PRESERVE",
"D3DCREATE_MULTITHREADED",
"D3DCREATE_PUREDEVICE",
"D3DCREATE_SOFTWARE_VERTEXPROCESSING",
"D3DCREATE_HARDWARE_VERTEXPROCESSING",
"D3DCREATE_MIXED_VERTEXPROCESSING",
"D3DCREATE_DISABLE_DRIVER_MANAGEMENT",
"D3DCREATE_ADAPTERGROUP_DEVICE",
"D3DCREATE_DISABLE_DRIVER_MANAGEMENT_EX",
"D3DCREATE_NOWINDOWCHANGES",
"D3DCREATE_DISABLE_PSGP_THREADING",
"D3DCREATE_ENABLE_PRESENTSTATS",
"D3DCREATE_DISABLE_PRINTSCREEN",
"D3DCREATE_SCREENSAVER",
])
D3DDEVICE_CREATION_PARAMETERS = Struct("D3DDEVICE_CREATION_PARAMETERS", [
(UINT, "AdapterOrdinal"),
(D3DDEVTYPE, "DeviceType"),
(HWND, "hFocusWindow"),
(D3DCREATE, "BehaviorFlags"),
])
D3DSWAPEFFECT = Enum("D3DSWAPEFFECT", [
"D3DSWAPEFFECT_DISCARD",
"D3DSWAPEFFECT_FLIP",
"D3DSWAPEFFECT_COPY",
])
D3DPOOL = Enum("D3DPOOL", [
"D3DPOOL_DEFAULT",
"D3DPOOL_MANAGED",
"D3DPOOL_SYSTEMMEM",
"D3DPOOL_SCRATCH",
])
D3DPRESENT = FakeEnum(DWORD, [
"D3DPRESENT_RATE_DEFAULT",
])
D3DPRESENTFLAG = Flags(DWORD, [
"D3DPRESENTFLAG_LOCKABLE_BACKBUFFER",
"D3DPRESENTFLAG_DISCARD_DEPTHSTENCIL",
"D3DPRESENTFLAG_DEVICECLIP",
"D3DPRESENTFLAG_VIDEO",
"D3DPRESENTFLAG_NOAUTOROTATE",
"D3DPRESENTFLAG_UNPRUNEDMODE",
])
D3DPRESENT_INTERVAL = Flags(DWORD, [
"D3DPRESENT_INTERVAL_DEFAULT", # 0
"D3DPRESENT_INTERVAL_ONE",
"D3DPRESENT_INTERVAL_TWO",
"D3DPRESENT_INTERVAL_THREE",
"D3DPRESENT_INTERVAL_FOUR",
"D3DPRESENT_INTERVAL_IMMEDIATE",
])
D3DPRESENT_PARAMETERS = Struct("D3DPRESENT_PARAMETERS", [
(UINT, "BackBufferWidth"),
(UINT, "BackBufferHeight"),
(D3DFORMAT, "BackBufferFormat"),
(UINT, "BackBufferCount"),
(D3DMULTISAMPLE_TYPE, "MultiSampleType"),
(DWORD, "MultiSampleQuality"),
(D3DSWAPEFFECT, "SwapEffect"),
(HWND, "hDeviceWindow"),
(BOOL, "Windowed"),
(BOOL, "EnableAutoDepthStencil"),
(D3DFORMAT, "AutoDepthStencilFormat"),
(D3DPRESENTFLAG, "Flags"),
(UINT, "FullScreen_RefreshRateInHz"),
(D3DPRESENT_INTERVAL, "PresentationInterval"),
])
D3DGAMMARAMP = Struct("D3DGAMMARAMP", [
(Array(WORD, 256), "red"),
(Array(WORD, 256), "green"),
(Array(WORD, 256), "blue"),
])
D3DBACKBUFFER_TYPE = Enum("D3DBACKBUFFER_TYPE", [
"D3DBACKBUFFER_TYPE_MONO",
"D3DBACKBUFFER_TYPE_LEFT",
"D3DBACKBUFFER_TYPE_RIGHT",
])
D3DRESOURCETYPE = Enum("D3DRESOURCETYPE", [
"D3DRTYPE_SURFACE",
"D3DRTYPE_VOLUME",
"D3DRTYPE_TEXTURE",
"D3DRTYPE_VOLUMETEXTURE",
"D3DRTYPE_CUBETEXTURE",
"D3DRTYPE_VERTEXBUFFER",
"D3DRTYPE_INDEXBUFFER",
])
D3DUSAGE = Flags(DWORD, [
"D3DUSAGE_RENDERTARGET",
"D3DUSAGE_DEPTHSTENCIL",
"D3DUSAGE_WRITEONLY",
"D3DUSAGE_SOFTWAREPROCESSING",
"D3DUSAGE_DONOTCLIP",
"D3DUSAGE_POINTS",
"D3DUSAGE_RTPATCHES",
"D3DUSAGE_NPATCHES",
"D3DUSAGE_DYNAMIC",
"D3DUSAGE_AUTOGENMIPMAP",
"D3DUSAGE_RESTRICTED_CONTENT",
"D3DUSAGE_RESTRICT_SHARED_RESOURCE",
"D3DUSAGE_RESTRICT_SHARED_RESOURCE_DRIVER",
"D3DUSAGE_DMAP",
"D3DUSAGE_QUERY_LEGACYBUMPMAP",
"D3DUSAGE_QUERY_SRGBREAD",
"D3DUSAGE_QUERY_FILTER",
"D3DUSAGE_QUERY_SRGBWRITE",
"D3DUSAGE_QUERY_POSTPIXELSHADER_BLENDING",
"D3DUSAGE_QUERY_VERTEXTEXTURE",
"D3DUSAGE_QUERY_WRAPANDMIP",
"D3DUSAGE_NONSECURE",
"D3DUSAGE_TEXTAPI",
])
D3DCUBEMAP_FACES = Enum("D3DCUBEMAP_FACES", [
"D3DCUBEMAP_FACE_POSITIVE_X",
"D3DCUBEMAP_FACE_NEGATIVE_X",
"D3DCUBEMAP_FACE_POSITIVE_Y",
"D3DCUBEMAP_FACE_NEGATIVE_Y",
"D3DCUBEMAP_FACE_POSITIVE_Z",
"D3DCUBEMAP_FACE_NEGATIVE_Z",
])
D3DLOCK = Flags(DWORD, [
"D3DLOCK_READONLY",
"D3DLOCK_DISCARD",
"D3DLOCK_NOOVERWRITE",
"D3DLOCK_NOSYSLOCK",
"D3DLOCK_DONOTWAIT",
"D3DLOCK_NO_DIRTY_UPDATE",
])
D3DVERTEXBUFFER_DESC = Struct("D3DVERTEXBUFFER_DESC", [
(D3DFORMAT, "Format"),
(D3DRESOURCETYPE, "Type"),
(D3DUSAGE, "Usage"),
(D3DPOOL, "Pool"),
(UINT, "Size"),
(DWORD, "FVF"),
])
D3DINDEXBUFFER_DESC = Struct("D3DINDEXBUFFER_DESC", [
(D3DFORMAT, "Format"),
(D3DRESOURCETYPE, "Type"),
(D3DUSAGE, "Usage"),
(D3DPOOL, "Pool"),
(UINT, "Size"),
])
D3DSURFACE_DESC = Struct("D3DSURFACE_DESC", [
(D3DFORMAT, "Format"),
(D3DRESOURCETYPE, "Type"),
(D3DUSAGE, "Usage"),
(D3DPOOL, "Pool"),
(D3DMULTISAMPLE_TYPE, "MultiSampleType"),
(DWORD, "MultiSampleQuality"),
(UINT, "Width"),
(UINT, "Height"),
])
D3DVOLUME_DESC = Struct("D3DVOLUME_DESC", [
(D3DFORMAT, "Format"),
(D3DRESOURCETYPE, "Type"),
(D3DUSAGE, "Usage"),
(D3DPOOL, "Pool"),
(UINT, "Width"),
(UINT, "Height"),
(UINT, "Depth"),
])
D3DLOCKED_RECT = Struct("D3DLOCKED_RECT", [
(INT, "Pitch"),
(LinearPointer(Void, "_MappedSize"), "pBits"),
])
D3DBOX = Struct("D3DBOX", [
(UINT, "Left"),
(UINT, "Top"),
(UINT, "Right"),
(UINT, "Bottom"),
(UINT, "Front"),
(UINT, "Back"),
])
D3DLOCKED_BOX = Struct("D3DLOCKED_BOX", [
(INT, "RowPitch"),
(INT, "SlicePitch"),
(LinearPointer(Void, "_MappedSize"), "pBits"),
])
D3DRANGE = Struct("D3DRANGE", [
(UINT, "Offset"),
(UINT, "Size"),
])
D3DRECTPATCH_INFO = Struct("D3DRECTPATCH_INFO", [
(UINT, "StartVertexOffsetWidth"),
(UINT, "StartVertexOffsetHeight"),
(UINT, "Width"),
(UINT, "Height"),
(UINT, "Stride"),
(D3DBASISTYPE, "Basis"),
(D3DDEGREETYPE, "Degree"),
])
D3DTRIPATCH_INFO = Struct("D3DTRIPATCH_INFO", [
(UINT, "StartVertexOffset"),
(UINT, "NumVertices"),
(D3DBASISTYPE, "Basis"),
(D3DDEGREETYPE, "Degree"),
])
D3DADAPTER_IDENTIFIER9 = Struct("D3DADAPTER_IDENTIFIER9", [
(CString, "Driver"),
(CString, "Description"),
(CString, "DeviceName"),
(LARGE_INTEGER, "DriverVersion"),
(DWORD, "VendorId"),
(DWORD, "DeviceId"),
(DWORD, "SubSysId"),
(DWORD, "Revision"),
(GUID, "DeviceIdentifier"),
(DWORD, "WHQLLevel"),
])
D3DRASTER_STATUS = Struct("D3DRASTER_STATUS", [
(BOOL, "InVBlank"),
(UINT, "ScanLine"),
])
D3DQUERYTYPE = Enum("D3DQUERYTYPE", [
"D3DQUERYTYPE_VCACHE",
"D3DQUERYTYPE_RESOURCEMANAGER",
"D3DQUERYTYPE_VERTEXSTATS",
"D3DQUERYTYPE_EVENT",
"D3DQUERYTYPE_OCCLUSION",
"D3DQUERYTYPE_TIMESTAMP",
"D3DQUERYTYPE_TIMESTAMPDISJOINT",
"D3DQUERYTYPE_TIMESTAMPFREQ",
"D3DQUERYTYPE_PIPELINETIMINGS",
"D3DQUERYTYPE_INTERFACETIMINGS",
"D3DQUERYTYPE_VERTEXTIMINGS",
"D3DQUERYTYPE_PIXELTIMINGS",
"D3DQUERYTYPE_BANDWIDTHTIMINGS",
"D3DQUERYTYPE_CACHEUTILIZATION",
])
D3DISSUE = Flags(DWORD, [
"D3DISSUE_END",
"D3DISSUE_BEGIN",
])
D3DGETDATA = Flags(DWORD, [
"D3DGETDATA_FLUSH",
])
D3DRESOURCESTATS = Struct("D3DRESOURCESTATS", [
(BOOL, "bThrashing"),
(DWORD, "ApproxBytesDownloaded"),
(DWORD, "NumEvicts"),
(DWORD, "NumVidCreates"),
(DWORD, "LastPri"),
(DWORD, "NumUsed"),
(DWORD, "NumUsedInVidMem"),
(DWORD, "WorkingSet"),
(DWORD, "WorkingSetBytes"),
(DWORD, "TotalManaged"),
(DWORD, "TotalBytes"),
])
D3DDEVINFO_RESOURCEMANAGER = Struct("D3DDEVINFO_RESOURCEMANAGER", [
(Array(D3DRESOURCESTATS, "D3DRTYPECOUNT"), "stats"),
])
D3DDEVINFO_D3DVERTEXSTATS = Struct("D3DDEVINFO_D3DVERTEXSTATS", [
(DWORD, "NumRenderedTriangles"),
(DWORD, "NumExtraClippingTriangles"),
])
D3DDEVINFO_VCACHE = Struct("D3DDEVINFO_VCACHE", [
(DWORD, "Pattern"),
(DWORD, "OptMethod"),
(DWORD, "CacheSize"),
(DWORD, "MagicNumber"),
])
D3DDEVINFO_D3D9PIPELINETIMINGS = Struct("D3DDEVINFO_D3D9PIPELINETIMINGS", [
(FLOAT, "VertexProcessingTimePercent"),
(FLOAT, "PixelProcessingTimePercent"),
(FLOAT, "OtherGPUProcessingTimePercent"),
(FLOAT, "GPUIdleTimePercent"),
])
D3DDEVINFO_D3D9INTERFACETIMINGS = Struct("D3DDEVINFO_D3D9INTERFACETIMINGS", [
(FLOAT, "WaitingForGPUToUseApplicationResourceTimePercent"),
(FLOAT, "WaitingForGPUToAcceptMoreCommandsTimePercent"),
(FLOAT, "WaitingForGPUToStayWithinLatencyTimePercent"),
(FLOAT, "WaitingForGPUExclusiveResourceTimePercent"),
(FLOAT, "WaitingForGPUOtherTimePercent"),
])
D3DDEVINFO_D3D9STAGETIMINGS = Struct("D3DDEVINFO_D3D9STAGETIMINGS", [
(FLOAT, "MemoryProcessingPercent"),
(FLOAT, "ComputationProcessingPercent"),
])
D3DDEVINFO_D3D9BANDWIDTHTIMINGS = Struct("D3DDEVINFO_D3D9BANDWIDTHTIMINGS", [
(FLOAT, "MaxBandwidthUtilized"),
(FLOAT, "FrontEndUploadMemoryUtilizedPercent"),
(FLOAT, "VertexRateUtilizedPercent"),
(FLOAT, "TriangleSetupRateUtilizedPercent"),
(FLOAT, "FillRateUtilizedPercent"),
])
D3DDEVINFO_D3D9CACHEUTILIZATION = Struct("D3DDEVINFO_D3D9CACHEUTILIZATION", [
(FLOAT, "TextureCacheHitRate"),
(FLOAT, "PostTransformVertexCacheHitRate"),
])
D3DCOMPOSERECTSOP = Enum("D3DCOMPOSERECTSOP", [
"D3DCOMPOSERECTS_COPY",
"D3DCOMPOSERECTS_OR",
"D3DCOMPOSERECTS_AND",
"D3DCOMPOSERECTS_NEG",
])
D3DCOMPOSERECTDESC = Struct("D3DCOMPOSERECTDESC", [
(USHORT, "X"),
(USHORT, "Y"),
(USHORT, "Width"),
(USHORT, "Height"),
])
D3DCOMPOSERECTDESTINATION = Struct("D3DCOMPOSERECTDESTINATION", [
(USHORT, "SrcRectIndex"),
(USHORT, "Reserved"),
(Short, "X"),
(Short, "Y"),
])
D3DPRESENTSTATS = Struct("D3DPRESENTSTATS", [
(UINT, "PresentCount"),
(UINT, "PresentRefreshCount"),
(UINT, "SyncRefreshCount"),
(LARGE_INTEGER, "SyncQPCTime"),
(LARGE_INTEGER, "SyncGPUTime"),
])
D3DSCANLINEORDERING = Enum("D3DSCANLINEORDERING", [
"D3DSCANLINEORDERING_UNKNOWN",
"D3DSCANLINEORDERING_PROGRESSIVE",
"D3DSCANLINEORDERING_INTERLACED",
])
D3DDISPLAYMODEEX = Struct("D3DDISPLAYMODEEX", [
(UINT, "Size"),
(UINT, "Width"),
(UINT, "Height"),
(UINT, "RefreshRate"),
(D3DFORMAT, "Format"),
(D3DSCANLINEORDERING, "ScanLineOrdering"),
])
D3DDISPLAYMODEFILTER = Struct("D3DDISPLAYMODEFILTER", [
(UINT, "Size"),
(D3DFORMAT, "Format"),
(D3DSCANLINEORDERING, "ScanLineOrdering"),
])
D3DDISPLAYROTATION = Enum("D3DDISPLAYROTATION", [
"D3DDISPLAYROTATION_IDENTITY",
"D3DDISPLAYROTATION_90",
"D3DDISPLAYROTATION_180",
"D3DDISPLAYROTATION_270",
])
D3D9_RESOURCE_PRIORITY = FakeEnum(DWORD, [
"D3D9_RESOURCE_PRIORITY_MINIMUM",
"D3D9_RESOURCE_PRIORITY_LOW",
"D3D9_RESOURCE_PRIORITY_NORMAL",
"D3D9_RESOURCE_PRIORITY_HIGH",
"D3D9_RESOURCE_PRIORITY_MAXIMUM",
])
| PeterLValve/apitrace | specs/d3d9types.py | Python | mit | 30,033 |
"""
Implementation of model
"""
import numpy as np
import numpy.random as npr
from scipy import ndimage

from configuration import get_config

# Simulation parameters (dt, t_arp, t_rrp, c_min/c_max, gamma, r, D, eta,
# beta, e_max, p, t_f) are loaded once at import time and shared module-wide.
config = get_config()
class LatticeState(object):
    """ Treat 1D list as 2D lattice and handle coupled system

        This helps with simply passing this object to scipy's odeint

        Each lattice cell is an excitable element with three phases:
        quiescent/refractory (state 0) and firing (state 1).  `tau_matrix`
        holds the per-cell phase clock: negative values count through the
        absolute refractory period (ARP) or the firing time, non-negative
        values count through the relative refractory period (RRP).
    """
    def __init__(self, width, height, pacemakers=[]):
        """ Initialize lattice

            :param width: number of columns of the lattice
            :param height: number of rows of the lattice
            :param pacemakers: list of (i, j) cells that may fire
                spontaneously with probability ``config.p`` per step
        """
        # NOTE(review): mutable default argument; safe only as long as no
        # caller mutates the shared default list.
        self.width = width
        self.height = height
        self.pacemakers = pacemakers

        # 3x3 convolution kernel approximating the Laplacian: neighbours
        # weighted 1/2, centre -4 (true division; Python 3 semantics assumed).
        self.discrete_laplacian = np.ones((3, 3)) * 1/2
        self.discrete_laplacian[1, 1] = -4

        # All cells start quiescent, at the beginning of their ARP.
        self.state_matrix = np.zeros((width, height))
        self.tau_matrix = np.ones((width, height)) * (-config.t_arp) # in ARP

    def _update_state_matrix(self, camp, exci):
        """ Compute state matrix value, with
                quiescent/refractory cell -> 0
                firing cell -> 1

            :param camp: 2D array of cAMP concentrations
            :param exci: 2D array of cell excitabilities
        """
        # this function gets executed once per timestep
        for j in range(self.width):
            for i in range(self.height):
                if self.state_matrix[i, j] == 0: # not firing
                    self.handle_off_cell(i, j, camp, exci)
                else: # firing
                    self.handle_on_cell(i, j)

    def handle_on_cell(self, i, j):
        """ Handle cell where state_matrix == 1

            Advances the firing clock; once it reaches zero the cell stops
            firing and re-enters its absolute refractory period.
        """
        self.tau_matrix[i, j] += config.dt

        if self.tau_matrix[i, j] >= 0: # end of firing reached
            self.state_matrix[i, j] = 0
            self.tau_matrix[i, j] = -config.t_arp

    def handle_off_cell(self, i, j, camp, exci):
        """ Handle cell where state_matrix == 0

            In the RRP the firing threshold decays from c_max towards c_min
            (scaled by the cell's excitability); crossing it — or a random
            pacemaker event — makes the cell fire.  In the ARP the cell only
            advances its clock.
        """
        tau = self.tau_matrix[i, j]
        if tau >= 0: # in RRP
            # Threshold interpolation between c_max (start of RRP) and c_min.
            A = ((config.t_rrp + config.t_arp) \
                * (config.c_max - config.c_min)) / config.t_rrp
            t = (config.c_max - A * (tau / (tau + config.t_arp))) \
                * (1 - exci[i, j])

            # increase time up to t_rrp
            if tau < config.t_rrp:
                self.tau_matrix[i, j] += config.dt

            # check threshold
            if camp[i, j] > t:
                self.fire_cell(i, j)

            # handle pacemaker
            if (i, j) in self.pacemakers and npr.random() < config.p:
                self.fire_cell(i, j)
        else: # in ARP
            self.tau_matrix[i, j] += config.dt

    def fire_cell(self, i, j):
        """ Fire cell `i`x`j`

            Sets the cell firing and starts the firing countdown (-t_f).
        """
        self.state_matrix[i, j] = 1
        self.tau_matrix[i, j] = -config.t_f

    def get_size(self):
        """ Return number of cells in underlying system
        """
        return self.width * self.height

    def _state_vec2camp_exci(self, state):
        """ Convert ODE state vector to cAMP and excitability matrices

            The flat state holds all cAMP values first, then all
            excitability values (see `get_ode` docstring).
        """
        flat_camp = state[:self.get_size()]
        flat_exci = state[self.get_size():]

        camp = np.reshape(flat_camp, (self.width, self.height))
        exci = np.reshape(flat_exci, (self.width, self.height))

        return camp, exci

    def _camp_exci2state_vec(self, camp, exci):
        """ Reverse of `_state_vec2camp_exci`
        """
        flat_camp = np.reshape(camp, self.get_size())
        flat_exci = np.reshape(exci, self.get_size())

        return np.append(flat_camp, flat_exci)

    def get_ode(self, state, t):
        """ Return corresponding ODE
            Structure:
                [
                    camp00, camp01, .. ,camp0m, camp10, .., campnm
                    ...
                    exci00, exci01, .. ,exci0m, exci10, .., excinm
                ]

            Note: this also advances the discrete cell-state machine as a
            side effect, so the right-hand side is not a pure function of
            (state, t) — intended for fixed-step integration with step
            ``config.dt``.
        """
        # parse ODE state
        camp, exci = self._state_vec2camp_exci(state)

        # compute next iteration
        self._update_state_matrix(camp, exci)

        next_camp = np.zeros((self.width, self.height))
        next_exci = np.zeros((self.width, self.height))

        # Diffusion term: Laplacian of the cAMP field (zero outside lattice).
        laplacian_conv = ndimage.convolve(
            camp, self.discrete_laplacian,
            mode='constant', cval=0.0
        )

        for j in range(self.width):
            for i in range(self.height):
                # decay (-gamma), production while firing (+r), diffusion (+D).
                next_camp[i, j] = -config.gamma * camp[i, j] \
                    + config.r * self.state_matrix[i, j] \
                    + config.D * laplacian_conv[i, j]

                # Excitability grows (saturating at e_max) driven by cAMP.
                if exci[i, j] < config.e_max:
                    next_exci[i, j] = config.eta + config.beta * camp[i, j]

        return self._camp_exci2state_vec(next_camp, next_exci)

    def parse_result(self, orig_res):
        """ Parse integration result

            Reshapes odeint output into (width, height, timesteps) arrays
            for cAMP and excitability respectively.
        """
        t_range = len(orig_res)
        res = orig_res.T

        flat_camp = res[:self.get_size()].reshape(self.get_size() * t_range)
        flat_exci = res[self.get_size():].reshape(self.get_size() * t_range)

        camp = np.reshape(flat_camp, (self.width, self.height, t_range))
        exci = np.reshape(flat_exci, (self.width, self.height, t_range))

        return camp, exci

    def __repr__(self):
        """ Nice visual representation of lattice
        """
        return '%dx%d' % (self.width, self.height)
| kpj/PyWave | model.py | Python | mit | 5,052 |
from flask import render_template, flash, redirect, url_for, request
from flask_login import login_required, current_user
from ..models import User, Sensors, Sensor_data
from . import chart
from pytz import timezone
# import pygal
from pyecharts import Line
import os
from ..main.forms import SelectMultipleSensorForm
# Timestamps are stored in UTC; they are converted to China Standard Time
# (Asia/Shanghai) for display in the charts.
tzchina = timezone('Asia/Shanghai')
utc = timezone('UTC')
@chart.route('/v1.0')
@login_required
def chart1():
    """Legacy v1.0 chart endpoint: forward to the user's sensors overview."""
    return redirect(url_for('main.sensors', username=current_user.username))
@chart.route('/v2.0', methods=['GET', 'POST'])
@login_required
def chart3():
    """Render an interactive pyecharts line chart of the sensors the current
    user selects in the form.

    Returns the chart page when the form is submitted with data, the bare
    form otherwise, and the "no data" page when the user has no sensors or a
    selected sensor has no readings.
    """
    if Sensors.query.filter_by(author_id=current_user.id).first():
        sensors = Sensors.query.filter_by(author_id=current_user.id).order_by(Sensors.id.desc()).all()
        form = SelectMultipleSensorForm(sensors, prefix="sensorform")
        valid = 0
        if form.validate_on_submit():
            options = form.sensor.data
            line = Line(width=800, height=400)
            for sensor in options:
                # NOTE(review): ``-Sensor_data.id.desc()`` negates a clause
                # element; verify the intended sort direction.
                sensor_data = Sensor_data.query.filter_by(sensor_id=sensor).order_by(-Sensor_data.id.desc()).all()
                timestamp = []
                data = []
                for i in sensor_data:
                    # Stored timestamps are UTC; render in local (China) time.
                    timestamp.append(i.timestamp.replace(tzinfo=utc).astimezone(tzchina).strftime('%Y/%m/%d-%H:%M:%S'))
                    data.append(i.value)
                # Was ``len(data) is 0``: identity comparison on an int is
                # implementation-dependent — use an explicit truth test.
                if not data:
                    no_sensor = 0
                    return render_template('no_sensor_dat.html', no_sensor=no_sensor)
                else:
                    s = Sensors.query.filter_by(id=sensor).first()
                    title = s.name
                    attr = timestamp
                    d = data
                    line.add(title, attr, d, is_smooth=False, is_datazoom_show=True, mark_line=["average"],
                             mark_point=["min", "max"])
            valid = 1
            return render_template('sensor_chart.html', form=form, chart=line.render_embed(), valid=valid)
        else:
            valid = 0
            return render_template('sensor_chart.html', form=form, valid=valid)
    else:
        no_sensor = 1
        return render_template('no_sensor_dat.html', no_sensor=no_sensor)
#if request.method == 'POST':
#options = request.form.getlist('myform')
#as_dict = request.form.to_dict()
#print(request)
#print(options)
| Harold2017/myfirstflasky | app/chart/views.py | Python | mit | 2,433 |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# main.py file is part of spman
#
# spman - Slackware package manager
# Home page: https://github.com/MyRequiem/spman
#
# Copyright (c) 2018 Vladimir MyRequiem Astrakhan, Russia
# <mrvladislavovich@gmail.com>
# All rights reserved
# See LICENSE for details.
"""
main.py
"""
import sys
from .helpmess import show_help_mess
from .maindata import MainData
class Main:
    """
    class Main

    Parses command-line arguments (``sys.argv[1:]``) and dispatches to the
    handler for the requested spman sub-command.
    """
    def __init__(self):
        # list of arguments
        # delete argument[0] (path and name of the script)
        self.args = sys.argv[1:]
        self.meta = MainData()
        self.repos = self.meta.get_repo_dict()
        # Maps every short/long option to its handler method.
        self.commands = {
            '-h': self.show_help,
            '--help': self.show_help,
            '-v': self.check_version,
            '--check-version': self.check_version,
            '-l': self.show_repo_list,
            '--repolist': self.show_repo_list,
            '-r': self.show_info_repos,
            '--repoinfo': self.show_info_repos,
            '-b': self.show_blacklist,
            '--blacklist': self.show_blacklist,
            '-u': self.update,
            '--update': self.update,
            '-t': self.check_health,
            '--health': self.check_health,
            '-w': self.find_new_configs,
            '--new-config': self.find_new_configs,
            '-g': self.check_upgrade,
            '--check-upgrade': self.check_upgrade,
            '-d': self.download_pkg,
            '--download': self.download_pkg,
            '-m': self.upgrade_pkgs,
            '--upgrade-pkgs': self.upgrade_pkgs,
            '-e': self.remove_pkgs,
            '--remove-pkgs': self.remove_pkgs,
            '-q': self.processing_queue,
            '--queue': self.processing_queue,
            '-y': self.history,
            '--history': self.history,
            '-p': self.find_deps,
            '--find-deps': self.find_deps,
            '-s': self.view_slackbuild,
            '--view-slackbuild': self.view_slackbuild,
            '-f': self.find_pkg,
            '--find-pkg': self.find_pkg,
            '-k': self.checkdeps,
            '--check-deps': self.checkdeps,
            '-a': self.bad_links,
            '--bad-links': self.bad_links,
            '-i': self.pkglist,
            '--pkglist': self.pkglist
        }

    def start(self) -> None:
        """
        parse arguments and launch of the relevant options
        """
        # program is run without arguments
        if not self.args:
            show_help_mess('error')

        if self.args[0] in self.commands:
            # check exists dirs and files from /etc/spman/spman.conf
            # Options listed here do not touch repository data, so they skip
            # the MajorTests environment checks.
            args = ['-h',
                    '--help',
                    '-v',
                    '--check-version',
                    '-l',
                    '--repolist',
                    '-b',
                    '--blacklist',
                    '-u',
                    '--update',
                    '-t',
                    '--health',
                    '-w',
                    '--new-config',
                    '-m',
                    '--upgrade-pkgs',
                    '-e',
                    '--remove-pkgs',
                    '-k',
                    # BUGFIX: was '--checkdeps' — the registered long option
                    # is '--check-deps', so it never matched this skip-list.
                    '--check-deps',
                    '-a',
                    '--bad-links']

            if self.args[0] not in args:
                from .majortests import MajorTests
                MajorTests().start()

            # run command
            self.commands[self.args[0]]()
        else:
            show_help_mess('error')

    def show_help(self) -> None:
        """
        show help message
        """
        if len(self.args) == 1:
            show_help_mess()
        else:
            show_help_mess('error')

    def show_repo_list(self) -> None:
        """
        show repo list from /etc/spman/repo-list
        """
        if len(self.args) > 1:
            show_help_mess('error')

        from .repolist import show_repo_list
        show_repo_list()

    def update(self) -> None:
        """
        Update PACKAGES.TXT, SLACKBUILDS.TXT and
        ChangeLog.txt for each repository
        """
        if len(self.args) > 1:
            show_help_mess('error')

        from .utils import check_internet_connection
        if check_internet_connection():
            from .update import Update
            Update().start()

    def show_info_repos(self) -> None:
        """
        show information about all repositories.
        """
        if len(self.args) > 1:
            show_help_mess('error')

        from .showinforepos import ShowInfoRepos
        ShowInfoRepos().start()

    def check_version(self) -> None:
        """
        check program version
        """
        if len(self.args) > 1:
            show_help_mess('error')

        from .utils import check_internet_connection
        if check_internet_connection():
            from .checkprgver import check_prg_ver
            check_prg_ver()

    def check_health(self) -> None:
        """
        Check health installed packages
        """
        if len(self.args) > 1:
            show_help_mess('error')

        from .checkhealth import CheckHealth
        CheckHealth().start()

    def find_new_configs(self) -> None:
        """
        Find all '*.new' files from /etc/ and /usr/share/ folders and subfolders
        """
        if len(self.args) > 1:
            show_help_mess('error')

        from .findnewconfigs import FindNewConfigs
        FindNewConfigs().start()

    def check_upgrade(self) -> None:
        """
        Check packages for upgrade
        """
        if len(self.args) > 1:
            show_help_mess('error')

        from .checkupgrade import CheckUpgrade
        CheckUpgrade().start()

    def show_blacklist(self) -> None:
        """
        Show blacklist
        """
        if len(self.args) > 1:
            show_help_mess('error')

        for pkg in self.meta.get_blacklist():
            print('{0}{1}{2}'.format(self.meta.clrs['lred'],
                                     pkg,
                                     self.meta.clrs['reset']))

    def download_pkg(self) -> None:
        """
        Download package or source + SlackBuild script
        """
        if len(self.args) < 4:
            show_help_mess('error')

        # Examples:
        # spman --download --src sbo pkg1 pkg2 pkg3
        # spman -d --pkg alienbob pkg1 pkg2 pkg3
        mode = self.args[1]
        repo = self.args[2]
        pkglist = self.args[3:]
        if repo not in self.repos:
            show_help_mess(repo)

        if mode not in ['--src', '--pkg']:
            show_help_mess('error')

        # 'sbo' ships only SlackBuilds+sources; 'multilib' only binaries.
        if mode == '--pkg' and repo == 'sbo':
            print('Only SlackBuild script with source code\n'
                  'can be downloaded from \'sbo\' repository')
            show_help_mess('error')

        if mode == '--src' and repo == 'multilib':
            print('Only binary packages can be\ndownloaded '
                  'from \'multilib\' repository')
            show_help_mess('error')

        from .utils import check_internet_connection
        if check_internet_connection():
            from .downloadpkg import DownloadPkg
            DownloadPkg(mode, repo, pkglist).start()

    def upgrade_pkgs(self) -> None:
        """
        upgrade packages in the current directory
        """
        num_args = len(self.args)
        if num_args > 2:
            show_help_mess('error')

        from .upgradepkgs import Upgradepkgs
        if num_args == 2:
            if self.args[1] != '--only-new':
                show_help_mess('error')
            else:
                Upgradepkgs(True).start()
        else:
            Upgradepkgs(False).start()

    def remove_pkgs(self) -> None:
        """
        remove packages in the current directory
        """
        if len(self.args) > 1:
            show_help_mess('error')

        from .removepkgs import Removepkgs
        Removepkgs().start()

    def processing_queue(self) -> None:
        """
        processing queue for 'sbo' repository
        """
        repo = 'sbo'
        if repo not in self.repos:
            show_help_mess(repo)

        if len(self.args) < 2:
            show_help_mess('error')

        from .queue import Queue
        if len(self.args) == 2:
            if self.args[1] == '--clear':
                Queue().clear()
            elif self.args[1] == '--show':
                Queue().show()
            elif self.args[1] == '--install':
                from .utils import check_internet_connection
                if check_internet_connection():
                    Queue().install()
            else:
                show_help_mess('error')

        if len(self.args) > 2:
            pkgs = self.args[2:]
            if self.args[1] == '--add':
                Queue().add(pkgs)
            elif self.args[1] == '--remove':
                Queue().remove(pkgs)
            else:
                show_help_mess('error')

    def history(self) -> None:
        """
        show/update package history
        """
        num_args = len(self.args)
        if num_args > 2:
            show_help_mess('error')

        from .history import History
        if num_args == 2:
            if self.args[1] != '--update':
                show_help_mess('error')
            else:
                History(True).start()
        else:
            History(False).start()

    def find_deps(self) -> None:
        """
        show list all dependencies for package from 'sbo' repository
        """
        repo = 'sbo'
        if repo not in self.repos:
            show_help_mess(repo)

        if len(self.args) != 2:
            show_help_mess('error')

        from .finddeps import FindDeps
        FindDeps().start(self.args[1])

    def view_slackbuild(self) -> None:
        """
        View README, slack-desc, doinst.sh and .SlackBuild
        files from sbo repository.
        """
        repo = 'sbo'
        if repo not in self.repos:
            show_help_mess(repo)

        if len(self.args) != 2:
            show_help_mess('error')

        from .viewslackbuild import ViewSlackBuild
        ViewSlackBuild(self.args[1]).start()

    def find_pkg(self) -> None:
        """
        Find package from each enabled repository and view info.
        """
        num_args = len(self.args)
        if num_args < 2 or num_args > 3:
            show_help_mess('error')

        strict = False
        pkgname = self.args[1]
        if len(self.args) == 3:
            if self.args[1] != '--strict':
                show_help_mess('error')
            else:
                pkgname = self.args[2]
                strict = True

        from .findpkg import FindPkg
        FindPkg(strict, pkgname).start()

    def checkdeps(self) -> None:
        """
        Search dependency problems in the system packages
        using 'sbbdep' or 'ldd' tool.
        """
        if len(self.args) != 2 or self.args[1] not in ['--sbbdep', '--ldd']:
            show_help_mess('error')

        from .checkdeps import CheckDeps
        CheckDeps(self.args[1]).start()

    def bad_links(self) -> None:
        """
        find links to non-existent files/directories
        """
        if len(self.args) != 2:
            show_help_mess('error')

        from .badlinks import BadLinks
        BadLinks(self.args[1]).start()

    def pkglist(self) -> None:
        """
        Show complete list of the packages in the repository
        """
        arglen = len(self.args)
        if arglen != 2 and arglen != 3:
            show_help_mess('error')

        repo = self.args[1]
        if repo not in self.repos:
            show_help_mess(repo)

        only_installed = False
        if arglen == 3:
            if self.args[2] != '--only-installed':
                show_help_mess('error')
            only_installed = True

        from .pkglist import PkgList
        PkgList(repo, only_installed).start()
| MyRequiem/spman | src/main.py | Python | mit | 12,016 |
from django.conf import settings
def _get_backend():
    """Return the dotted path of the sessionprofile storage backend.

    Falls back to the bundled database backend when the Django setting
    ``SESSIONPROFILE_BACKEND`` is not defined.
    """
    default = 'sessionprofile.backends.db'
    return getattr(settings, 'SESSIONPROFILE_BACKEND', default)
| modelbrouwers/django-sessionprofile | sessionprofile/settings.py | Python | mit | 140 |
#!/usr/bin/env python
import os
from setuptools import find_packages, setup
# Directory containing this setup.py; used to resolve data files.
here = os.path.abspath(os.path.dirname(__file__))

# Long description shown on PyPI.
with open(os.path.join(here, 'README.rst'), encoding='utf-8') as fp:
    README = fp.read()

# Single-source version string kept in the VERSION file.
with open(os.path.join(here, 'VERSION')) as version_file:
    VERSION = version_file.read().strip()

# The sphinx helper package is only needed when building docs on Read the Docs.
excluded_packages = ["docs", "tests", "tests.*"]
if not os.environ.get('READTHEDOCS', False):
    excluded_packages += ["faker.sphinx", "faker.sphinx.*"]


# this module can be zip-safe if the zipimporter implements iter_modules or if
# pkgutil.iter_importer_modules has registered a dispatch for the zipimporter.
try:
    import pkgutil
    import zipimport
    zip_safe = hasattr(zipimport.zipimporter, "iter_modules") or \
        zipimport.zipimporter in pkgutil.iter_importer_modules.registry.keys()
except AttributeError:
    zip_safe = False

setup(
    name='Faker',
    version=VERSION,
    description="Faker is a Python package that generates fake data for you.",
    long_description=README,
    entry_points={
        'console_scripts': ['faker=faker.cli:execute_from_command_line'],
        # Auto-registers the ``faker`` pytest fixture plugin.
        'pytest11': ['faker = faker.contrib.pytest.plugin'],
    },
    classifiers=[
        # See https://pypi.org/pypi?%3Aaction=list_classifiers
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3 :: Only',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Software Development :: Testing',
        'Topic :: Utilities',
        'License :: OSI Approved :: MIT License',
    ],
    keywords='faker fixtures data test mock generator',
    author='joke2k',
    author_email='joke2k@gmail.com',
    url='https://github.com/joke2k/faker',
    license='MIT License',
    packages=find_packages(exclude=excluded_packages),
    platforms=["any"],
    zip_safe=zip_safe,
    python_requires=">=3.4",
    install_requires=[
        "python-dateutil>=2.4",
        "text-unidecode==1.3",
    ],
)
| danhuss/faker | setup.py | Python | mit | 2,500 |
import os
import tarfile
from contextlib import closing
from archie import helpers
def find_backup(cfg):
    """Collect ``(backup_tarball, rcfile)`` pairs for every rcfile configured
    in the ``rcfiles`` section whose gzipped backup archive exists and is a
    valid tar file."""
    candidates = (
        (helpers.get_backupfile(cfg, name), helpers.get_rcfile(cfg, name))
        for name in cfg.options('rcfiles')
    )
    return [(backup, rcfile) for backup, rcfile in candidates
            if os.path.lexists(backup) and tarfile.is_tarfile(backup)]
def gunzip_and_restore(cfg, backupfiles):
    """Extract each backup tarball over the filesystem root, removing any
    rc symlink first so the restored regular file can take its place.

    Returns the list of processed ``(backup, rcfile)`` pairs unchanged.
    """
    for backup, rcfile in backupfiles:
        if os.path.islink(rcfile):
            os.unlink(rcfile)
        # NOTE(review): archives are extracted relative to '/'; this is only
        # safe for trusted, self-created backups.
        with closing(tarfile.open(backup, 'r:gz')) as archive:
            archive.extractall('/')
    return backupfiles
def Restore(cfg):
    """Restore every configured rcfile that has a valid backup archive.

    Returns the ``(backup, rcfile)`` pairs that were restored.
    """
    return gunzip_and_restore(cfg, find_backup(cfg))
| fudanchii/archie | archie/handlers/restore.py | Python | mit | 753 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'slice_visualization.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_slice_visualization_gui(object):
    """Qt Designer-generated UI scaffolding for the main window.

    Per the header warning, regenerate from ``slice_visualization.ui``
    (pyuic5) instead of editing by hand.
    """
    def setupUi(self, slice_visualization_gui):
        # Build the widget hierarchy: central widget, menu bar, status bar.
        slice_visualization_gui.setObjectName("slice_visualization_gui")
        slice_visualization_gui.resize(800, 600)
        self.centralwidget = QtWidgets.QWidget(slice_visualization_gui)
        self.centralwidget.setObjectName("centralwidget")
        slice_visualization_gui.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(slice_visualization_gui)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 21))
        self.menubar.setObjectName("menubar")
        slice_visualization_gui.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(slice_visualization_gui)
        self.statusbar.setObjectName("statusbar")
        slice_visualization_gui.setStatusBar(self.statusbar)

        self.retranslateUi(slice_visualization_gui)
        QtCore.QMetaObject.connectSlotsByName(slice_visualization_gui)

    def retranslateUi(self, slice_visualization_gui):
        # Apply translatable text (window title only in this UI).
        _translate = QtCore.QCoreApplication.translate
        slice_visualization_gui.setWindowTitle(_translate("slice_visualization_gui", "MainWindow"))
| johanesmikhael/ContinuityAnalysis | slice_visualization_ui.py | Python | mit | 1,409 |
from csacompendium.research.models import ExperimentUnit
from csacompendium.utils.pagination import APILimitOffsetPagination
from csacompendium.utils.permissions import IsOwnerOrReadOnly
from csacompendium.utils.viewsutils import DetailViewUpdateDelete, CreateAPIViewHook
from rest_framework.filters import DjangoFilterBackend
from rest_framework.generics import ListAPIView
from rest_framework.permissions import IsAuthenticated, IsAdminUser
from .filters import ExperimentUnitListFilter
from csacompendium.research.api.experimentunit.experimentunitserializers import experiment_unit_serializers
def experiment_unit_views():
    """
    Experiment unit views

    Factory that builds the DRF view classes after the serializers have been
    created, then returns them keyed by class name.

    :return: All experiment unit views
    :rtype: Object
    """
    experiment_unit_serializer = experiment_unit_serializers()

    class ExperimentUnitCreateAPIView(CreateAPIViewHook):
        """
        Creates a single record.
        """
        queryset = ExperimentUnit.objects.all()
        serializer_class = experiment_unit_serializer['ExperimentUnitDetailSerializer']
        # Any authenticated user may create records.
        permission_classes = [IsAuthenticated]

    class ExperimentUnitListAPIView(ListAPIView):
        """
        API list view. Gets all records API.
        """
        queryset = ExperimentUnit.objects.all()
        serializer_class = experiment_unit_serializer['ExperimentUnitListSerializer']
        filter_backends = (DjangoFilterBackend,)
        filter_class = ExperimentUnitListFilter
        pagination_class = APILimitOffsetPagination

    class ExperimentUnitDetailAPIView(DetailViewUpdateDelete):
        """
        Updates a record.
        """
        queryset = ExperimentUnit.objects.all()
        serializer_class = experiment_unit_serializer['ExperimentUnitDetailSerializer']
        # Update/delete restricted to authenticated admin users.
        permission_classes = [IsAuthenticated, IsAdminUser]
        # Records are addressed by slug rather than primary key.
        lookup_field = 'slug'

    return {
        'ExperimentUnitListAPIView': ExperimentUnitListAPIView,
        'ExperimentUnitDetailAPIView': ExperimentUnitDetailAPIView,
        'ExperimentUnitCreateAPIView': ExperimentUnitCreateAPIView
    }
| nkoech/csacompendium | csacompendium/research/api/experimentunit/experimentunitviews.py | Python | mit | 2,054 |
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()

# URL routes for the food-on-campus site (legacy Django ``patterns()`` syntax
# with dotted-path view names, as used by this Django version).
urlpatterns = patterns('',
    url(r'^$', 'menu.views.home', name='home'),
    url(r'^hall/(?P<slug>[-\w]+)/$', 'menu.views.halldetail', name='halldetail'),
    url(r'^admin/', include(admin.site.urls)),
)
| JOUR491-NewsApplications/JOUR491-FoodOnCampus | food/food/urls.py | Python | mit | 314 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
from datetime import timedelta, date
import glob
import os
import sys
import pyinotify
from evernote.edam.error.ttypes import EDAMUserException
from tomboy2evernote.tomboy2evernote import Evernote, convert_tomboy_to_evernote
__author__ = 'Denis Kovalev (aikikode)'
# Where Tomboy stores its *.note XML files.
TOMBOY_DIR = os.path.join(os.environ['HOME'], ".local", "share", "tomboy")

# Per-user config directory holding settings.py with the Evernote dev token;
# created on first run and added to sys.path so it can be imported.
CONFIG_DIR = os.path.join(os.path.expanduser('~'), '.config', 't2ev')
if not os.path.isdir(CONFIG_DIR):
    os.mkdir(CONFIG_DIR)
if CONFIG_DIR not in sys.path:
    sys.path.append(CONFIG_DIR)
CONFIG_FILE = os.path.join(CONFIG_DIR, 'settings.py')

import logging

logger = logging.getLogger(__name__)
def get_token():
    """Return the Evernote developer token from the user config.

    On first run (no config module yet) a stub config file is written and an
    empty token is returned; an empty token is reported via the logger with
    instructions for the user.
    """
    try:
        from settings import DEV_TOKEN
        token = DEV_TOKEN
    except ImportError:
        token = ''
        # First run: create a stub config the user can fill in.
        with open(CONFIG_FILE, 'w') as config_file:
            config_file.write("DEV_TOKEN = ''")
    if not token:
        logger.error(
            'Please, get new Evernote development token from the site and put it into the\n'
            '{} file. E.g.: DEV_TOKEN = "12345"'.format(CONFIG_FILE)
        )
    return token
def main():
    """Command-line entry point: parse arguments, connect to Evernote and run
    either a one-shot conversion or the inotify daemon."""
    parser = argparse.ArgumentParser(
        description='Tomboy2Evernote notes converter. Upload Tomboy notes to your Evernote account')
    parser.add_argument('-t', action='store', choices=['day', 'week', 'month', 'all'], default='day',
                        help='Upload only notes modified during this period. Default: day', required=False)
    parser.add_argument('-d', '--daemon', action='store_true', help='Run as daemon', required=False)
    args = parser.parse_args()

    try:
        client = Evernote(token=get_token())
    except EDAMUserException as err:
        sys.exit(err.errorCode)

    if args.daemon:
        run_as_daemon(client)
        return
    convert_all_tomboy_notes(client, args.t)
def convert_all_tomboy_notes(evernote, modified_time=None):
    """Convert Tomboy notes and upload them to Evernote.

    :param evernote: connected Evernote wrapper with ``create_or_update_note``
    :param modified_time: 'day', 'week' or 'month' to restrict conversion to
        recently modified notes; any other value converts all notes
    :return: dict mapping Tomboy note paths to the uploaded Evernote titles
        (successfully uploaded notes only)
    """
    delta = timedelta.max
    if modified_time == 'day':
        delta = timedelta(days=1)
    elif modified_time == 'week':
        delta = timedelta(weeks=1)
    elif modified_time == 'month':
        delta = timedelta(weeks=4)
    today = date.today()
    notes_files = list(filter(lambda f: delta > today - date.fromtimestamp(os.path.getmtime(f)),
                              glob.glob(os.path.join(TOMBOY_DIR, "*.note"))))
    total_notes = len(notes_files)
    failed_notes = []
    notes_hash = dict()
    for idx, tomboy_note in enumerate(notes_files):
        print('[{}/{}]:'.format(idx + 1, total_notes), end=' ')
        ev_note = convert_tomboy_to_evernote(tomboy_note)
        if ev_note:
            print('Converted \'{}\'. Uploading...'.format(ev_note['title']), end=' ')
            try:
                evernote.create_or_update_note(ev_note)
            # BUGFIX: was a bare ``except:`` which also swallowed
            # SystemExit/KeyboardInterrupt.
            except Exception:
                failed_notes.append(ev_note['title'])
                print('FAILED')
            else:
                print('OK')
                notes_hash[tomboy_note] = ev_note['title']
        else:
            print('Skipped template note')
    if failed_notes:
        print('The following notes failed to upload:')
        for idx, note_title in enumerate(failed_notes):
            print('[{}]: \'{}\''.format(idx + 1, note_title))
    return notes_hash
def run_as_daemon(evernote_client):
    """Watch the Tomboy notes directory with inotify and mirror every note
    change (create/modify/move/delete) to Evernote, running as a daemon.

    :param evernote_client: connected Evernote wrapper
    """
    # First we need to get all current notes and their titles to correctly handle note deletion
    notes = convert_all_tomboy_notes(evernote_client)
    # Configure daemon
    wm = pyinotify.WatchManager()
    mask = pyinotify.IN_DELETE | pyinotify.IN_CREATE | pyinotify.IN_MODIFY | \
        pyinotify.IN_MOVED_TO | pyinotify.IN_MOVED_FROM

    class EventHandler(pyinotify.ProcessEvent):
        # ``my_init`` is pyinotify's hook for keyword args passed to the
        # ProcessEvent constructor.
        def my_init(self, evernote, notes_hash):
            self.evernote = evernote
            self.notes_hash = notes_hash

        def process_IN_CREATE(self, event):
            self.process_IN_MOVED_TO(event)

        def process_IN_DELETE(self, event):
            self.process_IN_MOVED_FROM(event)

        def process_IN_MODIFY(self, event):
            self.process_IN_MOVED_TO(event)

        def process_IN_MOVED_TO(self, event):
            # New note / Modify note
            tomboy_note = event.pathname
            if os.path.isfile(tomboy_note) and os.path.splitext(tomboy_note)[1] == '.note':
                ev_note = convert_tomboy_to_evernote(tomboy_note)
                if ev_note:
                    try:
                        self.evernote.create_or_update_note(ev_note)
                        self.notes_hash[tomboy_note] = ev_note['title']
                        logger.info('Updated \'{}\''.format(ev_note['title']))
                    # BUGFIX: was a bare ``except:`` — don't mask
                    # SystemExit/KeyboardInterrupt in a long-running daemon.
                    except Exception:
                        logger.error('ERROR: Failed to upload \'{}\' note'.format(ev_note['title']))

        def process_IN_MOVED_FROM(self, event):
            # Delete note
            tomboy_note = event.pathname
            note_title = self.notes_hash.get(tomboy_note)
            if note_title:
                try:
                    self.evernote.remove_note(note_title)
                    logger.info('Deleted \'{}\''.format(note_title))
                    self.notes_hash.pop(tomboy_note, None)
                # BUGFIX: was a bare ``except:`` (see above).
                except Exception:
                    logger.error('ERROR: Failed to delete "{}" note'.format(note_title))

    handler = EventHandler(evernote=evernote_client, notes_hash=notes)
    notifier = pyinotify.Notifier(wm, handler)
    wm.add_watch(TOMBOY_DIR, mask, rec=False)
    try:
        notifier.loop(daemonize=True, pid_file='/tmp/t2ev.pid', stdout='/tmp/t2ev.log')
    except pyinotify.NotifierError as ex:
        logger.exception('ERROR: notifier exception: {}'.format(ex))
if __name__ == "__main__":
    # BUGFIX: the body was ``()`` — a bare empty tuple, a no-op — so the
    # program never ran when invoked directly.
    main()
| aikikode/tomboy2evernote | tomboy2evernote/command_line.py | Python | mit | 5,742 |
# -*- coding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright (c) 2014-2018 Shi Chi(Mack Stone)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys

from .func_exponential import *

# Python 3 removed the ``long`` builtin; alias it to ``int`` so the
# isinstance checks below accept plain ints under either major version.
if sys.version_info > (3, 0):
    long = int
def length(x):
    """Returns the length of x, i.e., sqrt(x * x).

    :param x: Floating-point vector types.

    Scalars degenerate to their absolute value.

    .. seealso::

        `GLSL length man page <http://www.opengl.org/sdk/docs/manglsl/xhtml/length.xml>`_
        `GLSL 4.20.8 specification, section 8.5 Geometric Functions <http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf>`_"""
    # TODO: implement vec2 type
    if isinstance(x, Vec3):
        return math.sqrt(x.x * x.x + x.y * x.y + x.z * x.z)
    elif isinstance(x, Vec4):
        return math.sqrt(x.x * x.x + x.y * x.y + x.z * x.z + x.w * x.w)
    elif isinstance(x, (float, int)) or isinstance(x, long):
        return abs(x)
    else:
        raise TypeError('unsupport type %s' % type(x))
def dot(x, y):
    """Returns the dot product of x and y, i.e., result = x * y.

    :param x: Floating-point vector types.

    Scalars degenerate to the plain product.

    .. seealso::

        `GLSL dot man page <http://www.opengl.org/sdk/docs/manglsl/xhtml/dot.xml>`_
        `GLSL 4.20.8 specification, section 8.5 Geometric Functions <http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf>`_"""
    # TODO: implement vec2 type
    if isinstance(x, Vec3) and isinstance(y, Vec3):
        product = Vec3(x * y)
        return product.x + product.y + product.z
    elif isinstance(x, Vec4) and isinstance(y, Vec4):
        product = Vec4(x * y)
        return (product.x + product.y) + (product.z + product.w)
    elif isinstance(x, (float, int)) or isinstance(x, long):
        return x * y
    else:
        raise TypeError('unsupport type %s' % type(x))
def normalize(x):
    """Returns a vector in the same direction as x but with length of 1.

    For scalar input this degenerates to the sign of x, with zero mapped
    to 1.0.  Unsupported types fall through and return None.

    .. seealso::

        `GLSL normalize man page <http://www.opengl.org/sdk/docs/manglsl/xhtml/normalize.xml>`_
        `GLSL 4.20.8 specification, section 8.5 Geometric Functions <http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf>`_"""
    if isinstance(x, (float, int)) or isinstance(x, long):
        return -1.0 if x < 0.0 else 1.0
    # TODO: implement vec2 type
    elif isinstance(x, Vec3):
        return x * inversesqrt(x.x * x.x + x.y * x.y + x.z * x.z)
    elif isinstance(x, Vec4):
        return x * inversesqrt(x.x * x.x + x.y * x.y + x.z * x.z + x.w * x.w)
| mackst/glm | glm/detail/func_geometric.py | Python | mit | 3,822 |
__author__ = 'jmoran'
from Asteroids import Object
class MovingObject(Object):
    """A game object that carries a position and a slope (direction of travel).

    Movement itself is not implemented here; presumably the game loop uses
    ``point``/``slope`` to advance the object each tick — confirm in callers.
    """
    def __init__(self, window, game, init_point, slope):
        # Delegate window/game bookkeeping to the base Object class.
        Object.__init__(self, window, game)
        self.point = init_point  # current position
        self.slope = slope  # direction of travel
# -*- coding: utf-8 -*-
import config
from metodos import *
from mensagens import myid
def my_id(msg):
    """Handle the /id command: reply with the sender's first name, numeric
    Telegram id and @username (blank placeholder when the account has none).

    :param msg: Telegram message dict (must contain 'chat', 'from', 'text')
    """
    chat_id = msg['chat']['id']
    # BUGFIX: was ``try/except:`` around the username lookup — a bare except
    # hides unrelated errors; the username key is simply optional.
    username = msg['from'].get('username')
    user = '@' + username if username else " "
    if msg['text'] == '/id':
        if msg['chat']['type'] == 'private':
            sendMessage(chat_id, myid['private'].decode('utf8').format(msg['from']['first_name'].encode('utf-8'),msg['from']['id'],user))
        if msg['chat']['type'] == 'supergroup' or msg['chat']['type'] == 'group':
            # NOTE(review): groups reuse the 'private' template and skip the
            # first_name encode — confirm both are intentional.
            sendMessage(chat_id, myid['private'].decode('utf8').format(msg['from']['first_name'],msg['from']['id'],user))
| francis-taylor/Timotty-Master | cybot/plug/myid.py | Python | mit | 625 |
import sys
from pymongo import MongoClient
# Connecting to the mongo client
# Connecting to the mongo client
client = MongoClient('localhost', 27017)
# Connecting to the database
db = client['rescueHomeless']
# Connecting to the required collection
collection = db['userDB']

# Email of the user whose latest uploaded person id we want.
userEmail = sys.argv[1]
# BUGFIX: ``collection.find(...)`` returns a Cursor, which cannot be indexed
# by field name; ``find_one`` returns the matching document (or None).
result = collection.find_one({'email': userEmail})
if result is None:
    # No such user.
    exit(1)
pIDs = result['personIDs']
if len(pIDs) == 0:
    exit(1)
# The most recently appended person id.
print(pIDs.pop())
exit(0)
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from os import environ
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
from autobahn.twisted.wamp import ApplicationSession, ApplicationRunner
class Component(ApplicationSession):
    """
    An application component that exercises the different backend procedures.
    """

    @inlineCallbacks
    def onJoin(self, details):
        print("session attached")
        # Each entry: (procedure URI, positional args, keyword args,
        # output template). The ping call ignores its result, so its
        # template contains no placeholder.
        invocations = [
            (u'com.arguments.ping', (), {}, "Pinged!"),
            (u'com.arguments.add2', (2, 3), {}, "Add2: {}"),
            (u'com.arguments.stars', (), {}, "Starred 1: {}"),
            (u'com.arguments.stars', (), {'nick': u'Homer'}, "Starred 2: {}"),
            (u'com.arguments.stars', (), {'stars': 5}, "Starred 3: {}"),
            (u'com.arguments.stars', (), {'nick': u'Homer', 'stars': 5}, "Starred 4: {}"),
            (u'com.arguments.orders', (u'coffee',), {}, "Orders 1: {}"),
            (u'com.arguments.orders', (u'coffee',), {'limit': 10}, "Orders 2: {}"),
            (u'com.arguments.arglen', (), {}, "Arglen 1: {}"),
            (u'com.arguments.arglen', (1, 2, 3), {}, "Arglen 2: {}"),
            (u'com.arguments.arglen', (), {'a': 1, 'b': 2, 'c': 3}, "Arglen 3: {}"),
            (u'com.arguments.arglen', (1, 2, 3), {'a': 1, 'b': 2, 'c': 3}, "Arglen 4: {}"),
        ]
        for uri, args, kwargs, template in invocations:
            outcome = yield self.call(uri, *args, **kwargs)
            print(template.format(outcome))
        self.leave()

    def onDisconnect(self):
        print("disconnected")
        reactor.stop()
if __name__ == '__main__':
    # The router URL may be overridden through the environment.
    router_url = environ.get("AUTOBAHN_DEMO_ROUTER", u"ws://127.0.0.1:8080/ws")
    runner = ApplicationRunner(
        router_url,
        u"crossbardemo",
        debug=False,  # optional; log even more details
    )
    runner.run(Component)
| RyanHope/AutobahnPython | examples/twisted/wamp/rpc/arguments/frontend.py | Python | mit | 3,404 |
import _plotly_utils.basevalidators
class BgcolorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the `bgcolorsrc` property of `histogram.marker.pattern`."""

    def __init__(
        self, plotly_name="bgcolorsrc", parent_name="histogram.marker.pattern", **kwargs
    ):
        # Use the default edit type unless the caller supplied one.
        kwargs.setdefault("edit_type", "none")
        super(BgcolorsrcValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
| plotly/plotly.py | packages/python/plotly/plotly/validators/histogram/marker/pattern/_bgcolorsrc.py | Python | mit | 433 |
from trie import Trie
#constructs a tree of this shape ('*' means root)
#
# *
# /|\
# a i o
# / /|\
# n f n u
# | |\
# e r t
#
#The LOUDS bit-string is then
#[1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0]
words = ['an', 'i', 'of', 'one', 'our', 'out']

# Maps each query word to the index of the tree node holding its last
# letter, or to None when the word is not in the trie at all.
test_set = {
    'out': 11,
    'our': 10,
    'of': 6,
    'i': 3,
    'an': 5,
    'one': 9,
    'ant': None
}

trie = Trie(words)
report = "query: {!s:>5} result: {!s:>5} answer: {!s:>5}"
for term, expected in test_set.items():
    found = trie.search(term)
    print(report.format(term, found, expected))
##parent [0, 0, 1, 1, 1, 1, 2, 2, 3, 4, 4, 4, 4, 5, 6, 7, 7, 8, 8, 8, 9, 0, 1]
##bit_array [1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0]
##child [1, -, 2, 3, 4, -, 5, -, -, 6, 7, 8, -, -, -, 9, -, 0, 1, -, -, -, -]
##index [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2]
#labels ['', '', 'a', 'i', 'o', 'n', 'f', 'n', 'u', 'e', 'r', 't']
##index [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1]
| IshitaTakeshi/Louds-Trie | python/test.py | Python | mit | 1,193 |
from collections import OrderedDict
def main():
    """Read item/price lines from stdin and print each item's net price,
    preserving first-appearance order."""
    totals = OrderedDict()
    for _ in range(int(input())):
        name, cost = input().rsplit(maxsplit=1)
        totals[name] = totals.get(name, 0) + int(cost)
    for name, total in totals.items():
        print(name, total)
if __name__ == '__main__':
    # Entry point when run as a script (HackerRank supplies stdin).
    main()
| FireClaw/HackerRank | Python/py-collections-ordereddict.py | Python | mit | 342 |
import re
from blaze import resource, DataFrame
import pandas as pd
from snakemakelib.odo.pandas import annotate_by_uri
@resource.register('.+fastq.summary')
@annotate_by_uri
def resource_fastqc_summary(uri, **kwargs):
    """Load a fastqc summary file into a blaze DataFrame.

    :param uri: path to a comma-separated summary file indexed by 'fileName'
    :param kwargs: unused here; accepted for odo resource-protocol compatibility
    """
    # Hand the open file object to read_csv. The original opened the file
    # and discarded the handle, then had read_csv re-open the same path.
    with open(uri) as handle:
        data = pd.read_csv(handle, sep=",", index_col=["fileName"])
    return DataFrame(data)
| Oliver-Lab/snakemakelib-oliver | snakemakelib_oliver/odo/geo.py | Python | mit | 333 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
from collections import namedtuple
from botocore.vendored.requests.exceptions import ReadTimeout
from py_swf.errors import NoTaskFound
__all__ = ['DecisionClient', 'DecisionTask']
DecisionTask = namedtuple('DecisionTask', 'events task_token workflow_id workflow_run_id workflow_type')
"""Contains the metadata to execute a decision task.
See the response syntax in :meth:`~SWF.Client.poll_for_decision_task`.
"""
def nametuplefy(thing):
    """Recursively turns a dict into namedtuples."""
    kind = type(thing)
    if kind == dict:
        # Field order follows the dict's key order; only string keys
        # that are valid identifiers are supported.
        Dict = namedtuple('Dict', ' '.join(thing.keys()))
        converted = {key: nametuplefy(value) for key, value in thing.items()}
        return Dict(**converted)
    if kind == list:
        return [nametuplefy(element) for element in thing]
    return thing
class DecisionClient(object):
    """A client that provides a pythonic API for polling and responding to decision tasks through an SWF boto3 client.

    :param decision_config: Contains SWF values commonly used when making SWF api calls.
    :type decision_config: :class:`~py_swf.config_definitions.DecisionConfig`
    :param boto_client: A raw SWF boto3 client.
    :type boto_client: :class:`~SWF.Client`
    """

    def __init__(self, decision_config, boto_client):
        self.decision_config = decision_config
        self.boto_client = boto_client

    def poll(self, identity=None, use_raw_event_history=False):
        """Opens a connection to AWS and long-polls for decision tasks.
        When a decision is available, this function will return with exactly one decision task to execute.
        Only returns a contiguous subset of the most recent events.
        If you want to grab the entire history for a workflow, use :meth:`~py_swf.decision.DecisionClient.walk_execution_history`

        Passthrough to :meth:`~SWF.Client.poll_for_decision_task`.

        :param identity: A freeform text that identifies the client that performed the longpoll. Useful for debugging history.
        :type identity: string
        :param use_raw_event_history: Whether to use the raw dictionary event history returned from AWS.
            Otherwise attempts to turn dictionaries into namedtuples recursively.
        :type use_raw_event_history: bool

        :return: A decision task to execute.
        :rtype: DecisionTask

        :raises py_swf.errors.NoTaskFound: Raised when polling for a decision task times out without receiving any tasks.
        """
        kwargs = dict(
            domain=self.decision_config.domain,
            reverseOrder=True,
            taskList={
                'name': self.decision_config.task_list,
            },
        )
        # boto doesn't like None values for optional kwargs
        if identity is not None:
            kwargs['identity'] = identity
        try:
            results = self.boto_client.poll_for_decision_task(
                **kwargs
            )
        except ReadTimeout as e:
            # The long-poll expired without a task being delivered.
            raise NoTaskFound(e)
        # Sometimes SWF gives us an incomplete response, ignore these.
        if not results.get('taskToken', None):
            raise NoTaskFound('Received results with no taskToken')
        events = results['events']
        if not use_raw_event_history:
            events = nametuplefy(events)
        return DecisionTask(
            events=events,
            task_token=results['taskToken'],
            workflow_id=results['workflowExecution']['workflowId'],
            workflow_run_id=results['workflowExecution']['runId'],
            workflow_type=results['workflowType'],
        )

    def walk_execution_history(
            self,
            workflow_id,
            workflow_run_id,
            reverse_order=True,
            use_raw_event_history=False,
            maximum_page_size=1000,
    ):
        """Lazily walks through the entire workflow history for a given workflow_id. This will make successive calls
        to SWF on demand when pagination is needed.

        See :meth:`~SWF.Client.get_workflow_execution_history` for more information.

        :param workflow_id: The workflow_id returned from :meth:`~py_swf.clients.decision.DecisionClient.poll`.
        :type identity: string
        :param workflow_run_id: The workflow_run_id returned from :meth:`~py_swf.clients.decision.DecisionClient.poll`.
        :type identity: string
        :param reverse_order: Passthru for reverseOrder to :meth:`~SWF.Client.get_workflow_execution_history`
        :type identity: bool
        :param use_raw_event_history: Whether to use the raw dictionary event history returned from AWS.
            Otherwise attempts to turn dictionaries into namedtuples recursively.
        :type use_raw_event_history: bool
        :param maximum_page_size: Passthru for maximumPageSize to :meth:`~SWF.Client.get_workflow_execution_history`
        :type identity: int

        :return: A generator that returns successive elements in the workflow execution history.
        :rtype: collections.Iterable
        """
        kwargs = dict(
            domain=self.decision_config.domain,
            reverseOrder=reverse_order,
            execution=dict(
                workflowId=workflow_id,
                runId=workflow_run_id,
            ),
            maximumPageSize=maximum_page_size,
        )
        while True:
            results = self.boto_client.get_workflow_execution_history(
                **kwargs
            )
            # A nextPageToken in the response means more pages remain.
            next_page_token = results.get('nextPageToken', None)
            events = results['events']
            for event in events:
                if not use_raw_event_history:
                    event = nametuplefy(event)
                yield event
            if next_page_token is None:
                break
            # Feed the token back in to fetch the next page.
            kwargs['nextPageToken'] = next_page_token

    def finish_decision_with_activity(self, task_token, activity_id, activity_name, activity_version, activity_input):
        """Responds to a given decision task's task_token to schedule an activity task to run.

        Passthrough to :meth:`~SWF.Client.respond_decision_task_completed`.

        :param task_token: The task_token returned from :meth:`~py_swf.clients.decision.DecisionClient.poll`.
        :type identity: string
        :param activity_id: A unique identifier for the activity task.
        :type identity: string
        :param activity_name: Which activity name to execute.
        :type identity: string
        :param activity_name: Version of the activity name.
        :type identity: string
        :param activity_input: Freeform text of the input for the activity
        :type identity: string

        :return: None
        :rtype: NoneType
        """
        activity_task = build_activity_task(
            activity_id,
            activity_name,
            activity_version,
            activity_input,
            self.decision_config,
        )
        self.boto_client.respond_decision_task_completed(
            taskToken=task_token,
            decisions=[activity_task],
        )

    def finish_workflow(self, task_token, result):
        """Responds to a given decision task's task_token to finish and terminate the workflow.

        Passthrough to :meth:`~SWF.Client.respond_decision_task_completed`.

        :param task_token: The task_token returned from :meth:`~py_swf.clients.decision.DecisionClient.poll`.
        :type identity: string
        :param result: Freeform text that represents the final result of the workflow.
        :type identity: string

        :return: None
        :rtype: NoneType
        """
        workflow_complete = build_workflow_complete(result)
        self.boto_client.respond_decision_task_completed(
            taskToken=task_token,
            decisions=[workflow_complete],
        )
def build_workflow_complete(result):
    """Build the SWF decision dict that completes a workflow with `result`."""
    attributes = {'result': result}
    return {
        'decisionType': 'CompleteWorkflowExecution',
        'completeWorkflowExecutionDecisionAttributes': attributes,
    }
def build_activity_task(activity_id, activity_name, activity_version, input, decision_config):
    """Build the SWF ScheduleActivityTask decision dict.

    Timeouts come from `decision_config` and are stringified because the
    SWF API expects duration values as strings.
    """
    attributes = {
        'activityType': {
            'name': activity_name,
            'version': activity_version,
        },
        'activityId': activity_id,
        'input': input,
        'taskList': {
            'name': decision_config.task_list,
        },
        'scheduleToCloseTimeout': str(decision_config.schedule_to_close_timeout),
        'scheduleToStartTimeout': str(decision_config.schedule_to_start_timeout),
        'startToCloseTimeout': str(decision_config.start_to_close_timeout),
        'heartbeatTimeout': str(decision_config.heartbeat_timeout),
    }
    return {
        'decisionType': 'ScheduleActivityTask',
        'scheduleActivityTaskDecisionAttributes': attributes,
    }
| quantsini/pyswf | py_swf/clients/decision.py | Python | mit | 9,074 |
from csp import *
from collections import Counter
class WordSquare:
    """
    Find a word square of the given size from the given dictionary.
    """
    alphabet = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ')

    def __init__(self, wordsfile):
        """
        Constructor.

        Arguments:
        wordsfile -- the path to a text file of valid words for this
                     word square, with one word on a line
        """
        # Read the file eagerly inside a context manager: the original
        # kept the handle open for the lifetime of a generator, and that
        # generator could only be consumed once. A list is re-iterable.
        with open(wordsfile) as handle:
            self.words = [line.rstrip().upper()
                          for line in handle
                          if line.rstrip().isalpha()]

    def csp(self, size, diag=False):
        """Return a WordSquareCSP over this word list.

        Arguments:
        size -- the length of the words in the square
        diag -- True to also constrain the main diagonal
        """
        return WordSquareCSP(self, size, diag)
class WordSquareCSP(ConstraintSatisfactionProblem):
    """A CSP whose variables are the letter cells of a size x size word
    square and whose constraints require each row and column (and
    optionally the main diagonal) to spell a dictionary word."""

    def __init__(self, wordsquare, size, diag):
        """
        Constructor.

        Arguments:
        wordsquare -- the word square associated with the CSP
        size -- the length of the words in the square
        diag -- True if the CSP has a diagonal constraint, otherwise
                False
        """
        ConstraintSatisfactionProblem.__init__(self)
        # NOTE(review): flag consumed by the base solver; its exact
        # semantics are not visible in this file — confirm against csp.py.
        self.is_disjoint_constraints = True
        self.size = size
        self.letters_count = Counter()
        # create a map: indexOf -> map(letter -> wordlist)
        lettermap = [{letter: set() for letter in wordsquare.alphabet} for i in range(size)]
        # filter out words whose length != size
        for word in (word for word in wordsquare.words if len(word) == size):
            for index in range(len(word)):
                lettermap[index][word[index]].add(word)
                self.letters_count.update(word[index])
        # create a variable for each (row,col) pair in the word square
        self.variables = {(i, j): WordSquareVariable(self, (i, j)) for i in range(size) for j in range(size)}
        # create a constraint for each row and for each col (and the diagonal if requested)
        self.constraints = set()
        WordSquareConstraint.lettermap = lettermap
        for i in range(size):
            self.constraints.add(WordSquareConstraint({self.variables[(i, col)] for col in range(size)}))
            self.constraints.add(WordSquareConstraint({self.variables[(row, i)] for row in range(size)}))
        if diag:
            self.constraints.add(WordSquareConstraint({self.variables[(i, i)] for i in range(size)}))

    def __str__(self):
        """Render the (possibly partial) square as `size` lines of
        letters; unassigned cells show as spaces."""
        # Flat size*size character buffer, initially all spaces.
        L = list(' ' * (self.size * self.size))
        for var_name in self.variables:
            if self.variables[var_name].value:
                # var_name is the (row, col) tuple of the cell.
                L[var_name[0] * self.size + var_name[1]] = self.variables[var_name].value
        M = list()
        # Slice the buffer into rows, appending a newline after each.
        for i in range(0, len(L), self.size):
            M.extend(L[i:i + self.size] + ["\n"])
        return ''.join(M)
class WordSquareVariable(BaseVariable):
    """
    A single cell of the word square CSP.

    Public instance variables:
    csp         -- the CSP wrapping this variable
    name        -- a (row, column) tuple locating the letter this
                   variable represents within the square
    domain      -- the letters this cell may still legally take
    value       -- the letter assigned to this cell, or None
    constraints -- the set of constraints covering this variable
    """

    def __init__(self, csp, name):
        BaseVariable.__init__(self, csp, name)
        # Every cell starts out able to hold any letter of the alphabet.
        self.domain = list(WordSquare.alphabet)

    def ordered_domain(self):
        """
        Returns:
        This variable's domain as a list, sorted from the most common
        letter to the least common one.
        """
        frequencies = self.csp.letters_count
        return sorted(self.domain,
                      key=lambda letter: frequencies[letter],
                      reverse=True)

    def find_constraint(self, other_var):
        """
        Find a constraint that covers two given variables.

        Arguments:
        other_var -- the variable we're looking for a shared constraint
                     with

        Returns:
        The constraint covering both `self` and `other_var`, or None.
        The geometry of the word square implies at most one such
        constraint exists.
        """
        shared = (candidate for candidate in self.constraints
                  if candidate in other_var.constraints)
        return next(shared, None)
class WordSquareConstraint(BaseConstraint):
    """
    A word constraint over one row, column or diagonal of the square.

    The constraint [V_0 = d_0, V_1 = d_1, ..., V_n = d_n] is satisfied
    when some dictionary word W has W[0] = d_0, ..., W[n] = d_n.

    Public instance variables:
    variables -- the covered variables, in order from top to bottom or
                 left to right

    Public class variables:
    lettermap -- a map: string index i -> map: letter -> words whose
                 i-th character is that letter

    Unpublished instance variables:
    indices -- a map: variable name -> position of that variable within
               `self.variables`
    """

    def __init__(self, variables):
        """
        Constructor.

        Arguments:
        variables -- a set of variables this constraint covers
        """
        BaseConstraint.__init__(
            self, sorted(variables, key=WordSquareVariable.get_name))
        self.indices = {var.name: position
                        for position, var in enumerate(self.variables)}

    def is_satisfiable(self, variable, assignment):
        """
        Is the constraint, including variables already assigned values,
        satisfiable under the assignment `variable.value = assignment`?

        Arguments:
        variable -- the variable we're assigning to
        assignment -- the value we're assigning to the variable

        Returns:
        The words W compatible with every covered variable's domain and
        whose letter at this variable's position equals `assignment`.
        """
        candidates = self.lettermap[self.indices[variable.name]][assignment]
        for neighbor in self.variables:
            if neighbor is variable:
                continue
            position = self.indices[neighbor.name]
            candidates = [word for word in candidates
                          if word[position] in neighbor.domain]
        return candidates

    def __repr__(self):
        return "[Constraint] %s" % [var.name for var in self.variables]
if __name__ == '__main__':
    # Solve a 5x5 word square, constraining the diagonal as well.
    dictionary = WordSquare('resources/words.txt')
    print(dictionary.csp(5, True).solve())
| jharris119/ConstraintSatisfactionProblem | examples/wordsquare.py | Python | mit | 6,259 |
"""simpleplantms URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
    # NOTE(review): routes are given as dotted view strings, a style
    # removed in Django 1.10 — confirm the project's Django version.
    #home page
    url(r'^$', 'home.views.index'),
    #this will send requests to the sites app example /simpleplantms/sites/
    url(r'^sites/', include('sites.urls')),
    #this will send requests to the departments app example /simpleplantms/departments/
    url(r'^departments/', include('departments.urls')),
    #this will send requests to the departments app example /simpleplantms/equipment/
    url(r'^equipment/', include('equipment.urls')),
    #this will send requests to the auth app example /simpleplantms/accounts/
    url(r'^accounts/', include('accounts.urls')),
    #This will send requests for profile info to the profiles app /simpleplantms/profiles/
    url(r'^profiles/', include('profiles.urls')),
    #This will route requests about maintenance tasks to the app eg /simpleplantms/maintjobs/
    url(r'^maintjobs/', include('mainttask.urls')),
    #This will route requests about maintenance tasks to the app eg /simpleplantms/maintenance/
    url(r'^maintenance/', include('maintenance.urls')),
    # django-jet admin skin and its dashboard, then the stock admin.
    url(r'^jet/', include('jet.urls','jet')),
    url(r'^jet/dashboard/', include('jet.dashboard.urls','jet-dashboard')),
    url(r'^admin/', include(admin.site.urls)),
]
| dylan-reeves/Simple-Plant-MS | simpleplantms/urls.py | Python | mit | 1,905 |
#!/usr/bin/env python
import subprocess, random, sys, os
def randomrange():
    """Return a random integer.

    When the first two command-line arguments parse as integers, the
    result is drawn from [argv[1], argv[2]]; otherwise (missing or
    non-numeric arguments) it falls back to the range [0, 10000000].
    """
    try:
        low = int(sys.argv[1])
        high = int(sys.argv[2])
    except (IndexError, ValueError):
        # The original only caught IndexError, so non-numeric arguments
        # crashed with an unhandled ValueError.
        return random.randint(0, 10000000)
    return random.randint(low, high)
def main():
    """Print a single random number produced by randomrange()."""
    print(randomrange())

main()
| defunSM/code | pyth/randomword.py | Python | mit | 348 |
"""
The MIT License (MIT)
Copyright (c) 2014 Chris Wimbrow
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
| cwimbrow/veganeyes-api | app/api_1_0/errors.py | Python | mit | 1,089 |
import json
from mockito import *
import os
import shutil
import tempfile
import unittest
from ice.history import ManagedROMArchive
class ManagedROMArchiveTests(unittest.TestCase):
  """Unit tests for ice.history.ManagedROMArchive persistence.

  Uses the deprecated-alias-free assertEqual throughout; the original
  mixed assertEquals (deprecated) with assertEqual.
  """

  def setUp(self):
    # Fresh temp directory per test, plus a fake user with id 1234.
    self.tempdir = tempfile.mkdtemp()
    self.temppath = os.path.join(self.tempdir, "tempfile")
    self.mock_user = mock()
    self.mock_user.user_id = 1234

  def tearDown(self):
    shutil.rmtree(self.tempdir)

  def test_previous_managed_ids_returns_none_for_missing_file(self):
    missing_path = os.path.join("some", "stupid", "path")
    self.assertFalse(os.path.exists(missing_path))
    archive = ManagedROMArchive(missing_path)
    self.assertIsNone(archive.previous_managed_ids(self.mock_user))

  def test_previous_managed_ids_raises_exception_for_malformed_json(self):
    with open(self.temppath, "w+") as f:
      f.write("notrealjson")
    with self.assertRaises(ValueError):
      archive = ManagedROMArchive(self.temppath)

  def test_previous_managed_ids_returns_empty_list_for_missing_user(self):
    # Archive only knows about user 1337, not our mock user 1234.
    data = {
      "1337": []
    }
    with open(self.temppath, "w+") as f:
      f.write(json.dumps(data))
    archive = ManagedROMArchive(self.temppath)
    self.assertEqual(archive.previous_managed_ids(self.mock_user), [])

  def test_previous_managed_ids_returns_list_from_json(self):
    data = {
      "1234": [
        "1234567890",
        "0987654321",
      ]
    }
    with open(self.temppath, "w+") as f:
      f.write(json.dumps(data))
    archive = ManagedROMArchive(self.temppath)
    self.assertEqual(archive.previous_managed_ids(self.mock_user), ["1234567890","0987654321"])

  def test_set_managed_ids_creates_new_file_if_needed(self):
    self.assertFalse(os.path.exists(self.temppath))
    archive = ManagedROMArchive(self.temppath)
    archive.set_managed_ids(self.mock_user, ["1234567890"])
    self.assertTrue(os.path.exists(self.temppath))

  def test_previous_managed_ids_returns_new_value_after_set_managed_ids(self):
    archive = ManagedROMArchive(self.temppath)
    new_ids = ["1234567890"]
    self.assertNotEqual(archive.previous_managed_ids(self.mock_user), new_ids)
    archive.set_managed_ids(self.mock_user, ["1234567890"])
    self.assertEqual(archive.previous_managed_ids(self.mock_user), new_ids)

  def test_creating_new_archive_after_set_managed_ids_uses_new_ids(self):
    # The new ids must survive a reload from disk.
    archive = ManagedROMArchive(self.temppath)
    new_ids = ["1234567890"]
    self.assertNotEqual(archive.previous_managed_ids(self.mock_user), new_ids)
    archive.set_managed_ids(self.mock_user, ["1234567890"])
    new_archive = ManagedROMArchive(self.temppath)
    self.assertEqual(new_archive.previous_managed_ids(self.mock_user), new_ids)
| scottrice/Ice | tests/managed_rom_archive_tests.py | Python | mit | 2,688 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings module unless the
    # environment already specifies one (allows deployment overrides).
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "sufwebapp1.settings")
    # Imported here (Django convention) so the settings variable above
    # is in place before Django's machinery loads.
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| sufhani/suf-webapp | manage.py | Python | mit | 253 |
# -*- coding: utf-8 -*-
import os
import codecs
import random
# Folder that holds the cleaned tweet text files.
tweets_folder = os.path.join("D:", os.sep, "Documents", "PycharmProjects",
                             "easy_group_classifier", "text_files")
# Create the folder on first run if it does not exist yet.
if not os.path.isdir(tweets_folder):
    os.makedirs(tweets_folder)
# Base name of the tweet file whose lines will be shuffled.
filename = "technology"
tweets_file = os.path.join(tweets_folder, "%s.txt" % filename)
shuffled_file = os.path.join(tweets_folder, "%s_shuffled.txt" % filename)
# Read every tweet, one per line.
with codecs.open(tweets_file, "rb", encoding="utf-8") as source:
    tweet_list = [line.strip() for line in source]
random.shuffle(tweet_list)
# Write the tweets back out in their new, randomized order.
with codecs.open(shuffled_file, "wb", encoding="utf-8") as target:
    for tweet in tweet_list:
        target.write("%s\n" % tweet)
| jluukvg/text-classifier | python/randomize_tweet_order.py | Python | mit | 957 |
from oplogreplayer import OplogReplayer | uberVU/mongo-oplogreplay | oplogreplay/__init__.py | Python | mit | 39 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# PySCUBA/src/PySCUBA/PySCUBA_design.py
# Author: Gregory Giecold for the GC Yuan Lab
# Affiliation: Harvard University
# Contact: g.giecold@gmail.com; ggiecold@jimmy.harvard.edu
from PyQt4 import QtCore, QtGui
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    # PyQt API v2 / Python 3 has no QString; strings pass through as-is.
    def _fromUtf8(s):
        return s

try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text,
            disambig, _encoding)
except AttributeError:
    # Newer Qt dropped the encoding argument to translate().
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
    def setupUi(self, MainWindow):
        """Build the full widget hierarchy of the main window.

        Creates the central widget, menu and status bars, then stacks a
        control grid, a Cancel/OK row and a graphics area — separated by
        raised horizontal rules — inside one vertical layout.
        """
        self.setupMainWindow(MainWindow)
        self.centralwidget = QtGui.QWidget(MainWindow)
        self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
        MainWindow.setCentralWidget(self.centralwidget)
        self.setupMenuBar(MainWindow)
        self.setupStatusBar(MainWindow)
        self.mainVerticalLayout = QtGui.QVBoxLayout(self.centralwidget)
        self.mainVerticalLayout.setObjectName(_fromUtf8("mainVerticalLayout"))
        self.gridLayout = QtGui.QGridLayout()
        self.gridLayout.setHorizontalSpacing(3)
        self.gridLayout.setVerticalSpacing(2)
        self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
        # First raised horizontal rule, between the grid and Cancel/OK row.
        self.hline1 = QtGui.QFrame(self.centralwidget)
        self.hline1.setFrameShadow(QtGui.QFrame.Raised)
        self.hline1.setLineWidth(7)
        self.hline1.setFrameShape(QtGui.QFrame.HLine)
        self.hline1.setObjectName(_fromUtf8("hline1"))
        self.hline1.raise_()
        self.cancelOkLayout = QtGui.QHBoxLayout()
        self.cancelOkLayout.setObjectName(_fromUtf8("cancelOkLayout"))
        # Second raised horizontal rule, between Cancel/OK and graphics.
        self.hline2 = QtGui.QFrame(self.centralwidget)
        self.hline2.setFrameShadow(QtGui.QFrame.Raised)
        self.hline2.setLineWidth(7)
        self.hline2.setFrameShape(QtGui.QFrame.HLine)
        self.hline2.setObjectName(_fromUtf8("hline2"))
        self.hline2.raise_()
        self.graphicsVerticalLayout = QtGui.QVBoxLayout()
        self.graphicsVerticalLayout.setObjectName(
            _fromUtf8("graphicsVerticalLayout"))
        # Stack the sections top-to-bottom; order determines on-screen order.
        self.mainVerticalLayout.addLayout(self.gridLayout)
        self.mainVerticalLayout.addWidget(self.hline1)
        self.mainVerticalLayout.addLayout(self.cancelOkLayout)
        self.mainVerticalLayout.addWidget(self.hline2)
        self.mainVerticalLayout.addLayout(self.graphicsVerticalLayout)
        self.adornGridLayout(MainWindow)
        self.adornCancelOkLayout(MainWindow)
        self.adornGraphicsVerticalLayout(MainWindow)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def setupMainWindow(self, MainWindow):
        """Apply window-level properties: geometry, focus, tab shape."""
        MainWindow.setObjectName(_fromUtf8("MainWindow"))
        MainWindow.setWindowModality(QtCore.Qt.NonModal)
        MainWindow.setGeometry(150, 100, 564, 635)
        MainWindow.setMouseTracking(False)
        MainWindow.setFocusPolicy(QtCore.Qt.ClickFocus)
        MainWindow.setAutoFillBackground(False)
        MainWindow.setTabShape(QtGui.QTabWidget.Rounded)
    def setupMenuBar(self, MainWindow):
        """Create the File/Credits/Help menus and wire their actions."""
        self.actionExit = QtGui.QAction(MainWindow)
        self.actionExit.setObjectName(_fromUtf8("actionExit"))
        self.actionExit.setShortcut('Ctrl+Q')
        self.actionExit.setStatusTip('Exit application')
        self.actionExit.triggered.connect(QtGui.qApp.quit)
        self.actionCredits = QtGui.QAction(MainWindow)
        self.actionCredits.setObjectName(_fromUtf8("actionCredits"))
        self.actionCredits.setShortcut('Ctrl+I')
        self.actionCredits.setStatusTip('Display credits')
        self.actionCredits.triggered.connect(self.showCredits)
        self.actionHelp = QtGui.QAction(MainWindow)
        self.actionHelp.setObjectName(_fromUtf8("actionHelp"))
        self.actionHelp.setShortcut('Ctrl+H')
        self.actionHelp.setStatusTip('Help and documentation')
        self.actionHelp.triggered.connect(self.showDocumentation)
        self.menubar = QtGui.QMenuBar(MainWindow)
        # Each menu holds a single action.
        self.fileMenu = self.menubar.addMenu('&File')
        self.fileMenu.addAction(self.actionExit)
        self.fileMenu = self.menubar.addMenu('&Credits')
        self.fileMenu.addAction(self.actionCredits)
        self.fileMenu = self.menubar.addMenu('&Help')
        self.fileMenu.addAction(self.actionHelp)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 542, 23))
        self.menubar.setDefaultUp(False)
        self.menubar.setNativeMenuBar(False)
        self.menubar.setObjectName(_fromUtf8("menubar"))
        MainWindow.setMenuBar(self.menubar)
    def setupStatusBar(self, MainWindow):
        """Install a status bar showing the initial 'ready' prompt."""
        self.statusbar = QtGui.QStatusBar(MainWindow)
        self.statusbar.showMessage(
            "Ready - Please select a dataset to process")
        self.statusbar.setObjectName(_fromUtf8("statusbar"))
        MainWindow.setStatusBar(self.statusbar)
    def showCredits(self, MainWindow):
        """Pop up an information dialog with author/contact details.

        NOTE(review): this is connected to QAction.triggered, which
        passes a bool "checked" flag into the MainWindow parameter — the
        name is misleading; the argument is unused here.
        NOTE(review): passes self as the dialog parent — assumes this Ui
        class is mixed into a QWidget subclass; confirm at the call site.
        """
        QtGui.QMessageBox.information(self, "Credits",
            "Author: Gregory Giecold\nAffiliation: Harvard "
            "University & DFCI\nContact: ggiecold@jimmy.harvard.edu\n"
            "GitHub: https://github.com/GGiecold\n")
    def showDocumentation(self, MainWindow):
        """Open the online documentation in the default browser, warning
        on failure.

        NOTE(review): connected to QAction.triggered, so MainWindow
        actually receives a bool "checked" flag; it is unused here.
        """
        url = QtCore.QUrl("https://github.com/GGiecold/PySCUBA")
        if not QtGui.QDesktopServices.openUrl(url):
            QtGui.QMessageBox.warning(self, 'Help & Documentation',
                'Could not open url to online documentation!')
    def adornGridLayout(self, MainWindow):
        """Populate the top control grid: dataset chooser, data-type and
        cluster-mode combo boxes, and the log/pseudotime checkboxes.

        Widget creation and addWidget/addLayout order fix the on-screen
        arrangement; do not reorder without checking the layout.
        """
        # Row 0, column 0: centered "select dataset" button.
        self.datasetHorizontalLayout = QtGui.QHBoxLayout()
        self.datasetHorizontalLayout.setObjectName(
            _fromUtf8("datasetHorizontalLayout"))
        self.gridLayout.addLayout(self.datasetHorizontalLayout, 0, 0, 1, 1)
        self.datasetHorizontalLayout.addStretch(1)
        self.selectDatasetButton = QtGui.QPushButton(self.centralwidget)
        self.selectDatasetButton.setToolTip("Click this button to browse "
            "through\nyour computer's directories and select\na dataset "
            "to subject to a clustering\nand bifurcation analysis.")
        self.selectDatasetButton.setObjectName(
            _fromUtf8("selectDatasetButton"))
        self.datasetHorizontalLayout.addWidget(self.selectDatasetButton)
        self.datasetHorizontalLayout.addStretch(1)
        # Row 1, column 0: data-type label + editable combo box.
        self.withinGridVerticalLayout_1 = QtGui.QVBoxLayout()
        self.withinGridVerticalLayout_1.setObjectName(
            _fromUtf8("withinGridVerticalLayout_1"))
        self.gridLayout.addLayout(self.withinGridVerticalLayout_1, 1, 0, 1, 1)
        self.dataTypeLabel = QtGui.QLabel(self.centralwidget)
        self.dataTypeLabel.setFrameShadow(QtGui.QFrame.Raised)
        self.dataTypeLabel.setLineWidth(5)
        self.dataTypeLabel.setAlignment(QtCore.Qt.AlignCenter)
        self.dataTypeLabel.setObjectName(_fromUtf8("dataTypeLabel"))
        self.withinGridVerticalLayout_1.addWidget(self.dataTypeLabel)
        self.dataTypeComboBox = QtGui.QComboBox(self.centralwidget)
        self.dataTypeComboBox.setToolTip("Does the file to process qualify "
            "as qPCR, RNAseq\nor flow or mass cytometry data?\nThe latter "
            "is expected to be in *.fcs format,\nwhereas the first two "
            "types should be delivered\nas rows of tab-separated entries.")
        self.dataTypeComboBox.setEditable(True)
        self.dataTypeComboBox.setObjectName(_fromUtf8("dataTypeComboBox"))
        # Item texts are filled in later (presumably by retranslateUi).
        self.dataTypeComboBox.addItem(_fromUtf8(""))
        self.dataTypeComboBox.addItem(_fromUtf8(""))
        self.dataTypeComboBox.addItem(_fromUtf8(""))
        self.withinGridVerticalLayout_1.addWidget(self.dataTypeComboBox)
        # Row 2, column 0: cluster-mode label + editable combo box.
        self.withinGridVerticalLayout_2 = QtGui.QVBoxLayout()
        self.withinGridVerticalLayout_2.setObjectName(
            _fromUtf8("withinGridVerticalLayout_2"))
        self.gridLayout.addLayout(self.withinGridVerticalLayout_2, 2, 0, 1, 1)
        self.clusterModeLabel = QtGui.QLabel(self.centralwidget)
        self.clusterModeLabel.setFrameShadow(QtGui.QFrame.Raised)
        self.clusterModeLabel.setLineWidth(5)
        self.clusterModeLabel.setAlignment(QtCore.Qt.AlignCenter)
        self.clusterModeLabel.setObjectName(_fromUtf8("clusterModeLabel"))
        self.withinGridVerticalLayout_2.addWidget(self.clusterModeLabel)
        self.clusterModeComboBox = QtGui.QComboBox(self.centralwidget)
        self.clusterModeComboBox.setToolTip("For each timestamp binned to "
            "a particular stage, PySCUBA proceeds\nto several rounds of "
            "aggregating the corresponding samples\ninto an optimal number "
            "of clusters.\nBy selecting 'PCA' from this menu, the clustering "
            "will be based on\na reduction of the original dataset to its "
            "first few principal components.\nA choice of 'PCA2' specifies "
            "that the principal components analysis\nwill be based on "
            "the samples that are part of the final stage\nof the "
            "temporal ordering.")
        self.clusterModeComboBox.setEditable(True)
        self.clusterModeComboBox.setObjectName(
            _fromUtf8("clusterModeComboBox"))
        self.clusterModeComboBox.addItem(_fromUtf8(""))
        self.clusterModeComboBox.addItem(_fromUtf8(""))
        self.clusterModeComboBox.addItem(_fromUtf8(""))
        self.withinGridVerticalLayout_2.addWidget(self.clusterModeComboBox)
        # Column 1: log-transform and pseudotime checkboxes, checked by default.
        self.logCheckBox = QtGui.QCheckBox(self.centralwidget)
        self.logCheckBox.setChecked(True)
        self.logCheckBox.setObjectName(_fromUtf8("logCheckBox"))
        self.gridLayout.addWidget(self.logCheckBox, 0, 1, 1, 1)
        self.pseudotimeCheckBox = QtGui.QCheckBox(self.centralwidget)
        self.pseudotimeCheckBox.setToolTip("If your data is not endowed with "
            "temporal information of any kind, please\ndo check this box. "
            "PySCUBA will thereby run a principal curve analysis\nto infer a "
            "temporal ordering for each sample of your dataset.")
        self.pseudotimeCheckBox.setChecked(True)
        self.pseudotimeCheckBox.setObjectName(_fromUtf8("pseudotimeCheckBox"))
        self.gridLayout.addWidget(self.pseudotimeCheckBox, 1, 1, 1, 1)
def adornCancelOkLayout(self, MainWindow):
    """Populate the Cancel/Ok button row.

    A stretch on each side of the two buttons keeps them horizontally
    centred in the layout.  Button captions are assigned later, in
    retranslateUi().
    """
    self.cancelOkLayout.addStretch(1)
    self.cancelButton = QtGui.QPushButton(self.centralwidget)
    self.cancelButton.setObjectName(_fromUtf8("cancelButton"))
    self.cancelOkLayout.addWidget(self.cancelButton)
    self.okButton = QtGui.QPushButton(self.centralwidget)
    # Multi-line tooltip explaining what pressing Ok will do.
    self.okButton.setToolTip("Click this button to browse "
        "through\nyour computer's directories and select\na dataset "
        "to subject to a clustering\nand bifurcation analysis.")
    self.okButton.setObjectName(_fromUtf8("okButton"))
    self.cancelOkLayout.addWidget(self.okButton)
    self.cancelOkLayout.addStretch(1)
def adornGraphicsVerticalLayout(self, MainWindow):
    """Build the graphics area: a QGraphicsView over a fresh scene,
    followed by a centred 'display file' button underneath it.
    """
    self.scene = QtGui.QGraphicsScene(self.centralwidget)
    self.graphicsView = QtGui.QGraphicsView(self.scene)
    self.graphicsView.setFrameShadow(QtGui.QFrame.Raised)
    self.graphicsView.setLineWidth(3)
    # Always show both scroll bars so large figures can be panned.
    self.graphicsView.setVerticalScrollBarPolicy(
        QtCore.Qt.ScrollBarAlwaysOn)
    self.graphicsView.setHorizontalScrollBarPolicy(
        QtCore.Qt.ScrollBarAlwaysOn)
    # Zoom/resize operations keep the point under the mouse fixed.
    self.graphicsView.setTransformationAnchor(
        QtGui.QGraphicsView.AnchorUnderMouse)
    self.graphicsView.setResizeAnchor(
        QtGui.QGraphicsView.AnchorUnderMouse)
    # Light-grey background behind displayed figures.
    self.graphicsView.setBackgroundBrush(
        QtGui.QBrush(QtGui.QColor(245,245,245)))
    self.graphicsView.setFrameShape(QtGui.QFrame.NoFrame)
    self.graphicsView.setObjectName(_fromUtf8("graphicsView"))
    self.graphicsVerticalLayout.addWidget(self.graphicsView)
    self.displayHorizontalLayout = QtGui.QHBoxLayout()
    self.displayHorizontalLayout.setObjectName(
        _fromUtf8("displayHorizontalLayout"))
    self.graphicsVerticalLayout.addLayout(self.displayHorizontalLayout)
    # Stretches on both sides keep the button centred.
    self.displayHorizontalLayout.addStretch(1)
    self.displayFileButton = QtGui.QPushButton(self.centralwidget)
    self.displayFileButton.setToolTip("Various files and figures will "
        "show up in this box as they are\nbeing produced by the PySCUBA "
        "analysis of your data.\nClick on any of those and it will be "
        "displayed in an adjacent\ngraphics box.")
    self.displayFileButton.setObjectName(_fromUtf8("displayFileButton"))
    self.displayHorizontalLayout.addWidget(self.displayFileButton)
    self.displayHorizontalLayout.addStretch(1)
def retranslateUi(self, MainWindow):
    """Assign all user-visible strings (window title, labels, combo-box
    items and button captions) through Qt's translation machinery, so
    the UI text can be localized without touching widget construction.
    """
    MainWindow.setWindowTitle(_translate("MainWindow",
        "PySCUBA - GC Yuan Lab", None))
    self.selectDatasetButton.setText(_translate("MainWindow",
        "1. Select dataset to analyze", None))
    self.dataTypeLabel.setText(_translate("MainWindow",
        "2. Specify type of data:", None))
    # The three supported input data types.
    self.dataTypeComboBox.setItemText(0, _translate("MainWindow",
        "RNASeq", None))
    self.dataTypeComboBox.setItemText(1, _translate("MainWindow",
        "PCR", None))
    self.dataTypeComboBox.setItemText(2, _translate("MainWindow",
        "cytometry", None))
    self.clusterModeLabel.setText(_translate("MainWindow",
        "3. Choose cluster mode:", None))
    # Cluster modes: no reduction, PCA, or PCA restricted to final stage.
    self.clusterModeComboBox.setItemText(0, _translate("MainWindow",
        "None", None))
    self.clusterModeComboBox.setItemText(1, _translate("MainWindow",
        "PCA", None))
    self.clusterModeComboBox.setItemText(2, _translate("MainWindow",
        "PCA2", None))
    self.logCheckBox.setText(_translate("MainWindow",
        "4. Apply a log-transform?", None))
    self.pseudotimeCheckBox.setText(_translate("MainWindow",
        "5. Infer temporal ordering?", None))
    self.cancelButton.setText(_translate("MainWindow", "Cancel", None))
    self.okButton.setText(_translate("MainWindow", "Ok", None))
    self.displayFileButton.setText(_translate("MainWindow", "Select file to display", None))
    self.actionExit.setText(_translate("MainWindow", "Exit", None))
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
MainWindow = QtGui.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
| GGiecold/PySCUBA | src/PySCUBA/PySCUBA_design.py | Python | mit | 15,073 |
import numpy as np
import pandas as pd
from lazy_property import LazyProperty
from . import _describe_template
from .plot import Plotter
from .. import bin_counts
from .. import numeric_datatypes, _pretty_print
from ..util import seaborn_required
class Column(object):
    """
    In Pandas, a column of a DataFrame is represented as a Series.
    Similarly, a column in a database table is represented by
    an object from this class.

    Note that the Series represented by these columns have the default index
    (ie non-negative, consecutive integers starting at zero). Thus, for the
    portion of the Pandas Series API mocked here, we need not worry about
    multilevel (hierarchical) indices.
    """

    def __init__(self, name, parent_table):
        """
        :param str name: The name of the column. Required.
        :param pg_utils.table.Table parent_table: The table to which this column belongs. Required.
        """
        self.parent_table = parent_table
        self.name = name
        # Numeric columns unlock the statistical helpers (describe, distplot).
        self.is_numeric = parent_table._all_column_data_types[name] in numeric_datatypes
        self.plot = Plotter(self)

    def select_all_query(self):
        """
        Provides the SQL used when selecting everything from this column.

        :return: The SQL statement.
        :rtype: str
        """
        return "select {} from {}".format(self, self.parent_table)

    def sort_values(self, ascending=True, limit=None, **sql_kwargs):
        """
        Mimics the method `pandas.Series.sort_values <http://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.sort_values.html#pandas.Series.sort_values>`_.

        :param int|None limit: Either a positive integer for the number of rows to take or ``None`` to take all.
        :param bool ascending: Sort ascending vs descending.
        :param dict sql_kwargs: A dictionary of keyword arguments passed into `pandas.read_sql <http://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_sql.html>`_.
        :return: The resulting series.
        :rtype: pandas.Series
        :raises ValueError: If ``limit`` is neither ``None`` nor a positive int.
        """
        if limit is not None and (not isinstance(limit, int) or limit <= 0):
            raise ValueError("limit must be a positive integer or None (got {})".format(limit))

        sql = self.select_all_query() + " order by 1"
        if not ascending:
            sql += " desc"
        if limit is not None:
            sql += " limit {}".format(limit)

        return pd.read_sql(sql, self.parent_table.conn, **sql_kwargs)[self.name]

    def unique(self):
        """
        Returns an array of unique values in this column. Includes ``null`` (represented as ``None``).

        :return: The unique values.
        :rtype: np.array
        """
        cur = self.parent_table.conn.cursor()
        cur.execute("select distinct {} from {}".format(self, self.parent_table))
        return np.array([x[0] for x in cur.fetchall()])

    def hist(self, **kwargs):
        """Delegates to the attached Plotter's histogram method."""
        return self.plot.hist(**kwargs)

    def head(self, num_rows=10):
        """
        Fetches some values of this column.

        :param int|str num_rows: Either a positive integer number of values or the string `"all"` to fetch all values
        :return: A NumPy array of the values
        :rtype: np.array
        :raises ValueError: If ``num_rows`` is neither a positive int nor ``"all"``.
        """
        # Bug fix: the original condition
        #   (isinstance(num_rows, int) and num_rows < 0) or num_rows != "all"
        # was True for *every* integer (any int != "all"), so head() always
        # raised. Accept a positive integer or the literal "all" only.
        if num_rows != "all" and not (isinstance(num_rows, int) and num_rows > 0):
            raise ValueError("num_rows must be a positive integer or the string 'all'")

        query = self.select_all_query()
        if num_rows != "all":
            query += " limit {}".format(num_rows)

        cur = self.parent_table.conn.cursor()
        cur.execute(query)
        return np.array([x[0] for x in cur.fetchall()])

    @LazyProperty
    def is_unique(self):
        """
        Determines whether or not the values of this column are all unique (ie whether this column is a unique identifier for the table).

        :return: Whether or not this column contains unique values.
        :rtype: bool
        """
        # If no group of equal values has more than one member, every value
        # is unique and fetchone() comes back empty.
        cur = self.parent_table.conn.cursor()
        cur.execute("""select {}
        from {}
        group by 1 having count(1) > 1""".format(self, self.parent_table))
        return cur.fetchone() is None

    @LazyProperty
    def dtype(self):
        """
        The ``dtype`` of this column (represented as a string).

        :return: The ``dtype``.
        :rtype: str
        """
        return self.parent_table._all_column_data_types[self.name]

    def _get_describe_query(self, percentiles=None, type_="continuous"):
        """Render the SQL used by :meth:`describe`.

        Returns ``None`` for non-numeric columns.
        """
        if type_.lower() not in ["continuous", "discrete"]:
            raise ValueError("The 'type_' parameter must be 'continuous' or 'discrete'")
        if not self.is_numeric:
            return None

        if percentiles is None:
            percentiles = [0.25, 0.5, 0.75]
        elif not bool(percentiles):
            percentiles = []
        if not isinstance(percentiles, (list, tuple)):
            percentiles = [percentiles]
        if any([x < 0 or x > 1 for x in percentiles]):
            raise ValueError(
                "The `percentiles` attribute must be None or consist of numbers between 0 and 1 (got {})".format(
                    percentiles))

        # Round to two decimal places and drop non-positive entries.
        percentiles = sorted([float("{0:.2f}".format(p)) for p in percentiles if p > 0])

        # NOTE(review): PostgreSQL's discrete percentile aggregate is
        # `percentile_disc`, not "desc" -- presumably the template maps this
        # suffix; confirm against _describe_template.
        suffix = "cont" if type_.lower() == "continuous" else "desc"

        query = _describe_template.render(column=self, percentiles=percentiles,
                                          suffix=suffix, table=self.parent_table)

        if self.parent_table.debug:
            _pretty_print(query)

        return query

    def describe(self, percentiles=None, type_="continuous"):
        """
        This mocks the method `pandas.Series.describe`, and provides
        a series with the same data (just calculated by the database).

        :param None|list[float] percentiles: A list of percentiles to evaluate (with numbers between 0 and 1). If not specified, quartiles (0.25, 0.5, 0.75) are used.
        :param str type_: Specifies whether the percentiles are to be taken as discrete or continuous. Must be one of `"discrete"` or `"continuous"`.
        :return: A series returning the description of the column, in the same format as ``pandas.Series.describe``.
        :rtype: pandas.Series
        """
        if percentiles is None:
            percentiles = [0.25, 0.5, 0.75]

        # NOTE(review): for non-numeric columns _get_describe_query returns
        # None and this execute will fail -- confirm intended behavior.
        cur = self.parent_table.conn.cursor()
        cur.execute(self._get_describe_query(percentiles=percentiles, type_=type_))
        index = ["count", "mean", "std_dev", "minimum"] + \
                ["{}%".format(int(100 * p)) for p in percentiles] + \
                ["maximum"]
        return pd.Series(cur.fetchone()[1:], index=index)

    @seaborn_required
    def distplot(self, bins=None, **kwargs):
        """
        Produces a ``distplot``. See `the seaborn docs <http://stanford.edu/~mwaskom/software/seaborn/generated/seaborn.distplot.html>`_ on ``distplot`` for more information.

        Note that this requires Seaborn in order to function.

        :param int|None bins: The number of bins to use. If unspecified, the `Freedman-Diaconis rule <https://en.wikipedia.org/wiki/Freedman%E2%80%93Diaconis_rule>`_ will be used to determine the number of bins.
        :param dict kwargs: A dictionary of options to pass on to `seaborn.distplot <http://stanford.edu/~mwaskom/software/seaborn/generated/seaborn.distplot.html>`_.
        """
        import seaborn

        bc = bin_counts.counts(self, bins=bins)
        n = sum([entry[2] for entry in bc])

        # Expand the (left, right, count) bins back into one row per sample.
        left = np.zeros(n)
        right = np.zeros(n)
        overall_index = 0
        for entry in bc:
            for i in range(entry[2]):
                left[overall_index] = entry[0]
                right[overall_index] = entry[1]
                overall_index += 1

        # We'll take our overall data points to be in the midpoint
        # of each binning interval
        # TODO: make this more configurable (left, right, etc)
        return seaborn.distplot((left + right) / 2.0, **kwargs)

    @LazyProperty
    def values(self):
        """
        Mocks the method `pandas.Series.values`, returning a simple NumPy array
        consisting of the values of this column.

        :return: The NumPy array containing the values.
        :rtype: np.array
        """
        cur = self.parent_table.conn.cursor()
        cur.execute(self.select_all_query())
        return np.array([x[0] for x in cur.fetchall()])

    def _calculate_aggregate(self, aggregate):
        """Run a single SQL aggregate function over this column and
        return the scalar result."""
        query = "select {}({}) from (\n{}\n)a".format(
            aggregate, self, self.select_all_query())
        cur = self.parent_table.conn.cursor()
        cur.execute(query)
        return cur.fetchone()[0]

    @LazyProperty
    def mean(self):
        """
        Mocks the ``pandas.Series.mean`` method to give the mean of the values in this column.

        :return: The mean.
        :rtype: float
        """
        return self._calculate_aggregate("avg")

    @LazyProperty
    def max(self):
        """
        Mocks the ``pandas.Series.max`` method to give the maximum of the values in this column.

        :return: The maximum.
        :rtype: float
        """
        return self._calculate_aggregate("max")

    @LazyProperty
    def min(self):
        """
        Mocks the ``pandas.Series.min`` method to give the maximum of the values in this column.

        :return: The minimum.
        :rtype: float
        """
        return self._calculate_aggregate("min")

    @LazyProperty
    def size(self):
        """
        Mocks the ``pandas.Series.size`` property to give a count of the values in this column.

        :return: The count.
        :rtype: int
        """
        return self.parent_table.count

    def __str__(self):
        return self.name

    def __repr__(self):
        return "<{} '{}'>".format(self.__class__, self.name)

    def __eq__(self, other):
        # Equal iff same column name on the same table.
        if not isinstance(other, Column):
            return False
        return self.name == other.name and self.parent_table == other.parent_table

    def __ne__(self, other):
        return not self.__eq__(other)
| jackmaney/pg-utils | pg_utils/column/base.py | Python | mit | 10,187 |
from inventory import Inventory
import cmd
from room import get_room
from player import Player
import textwrap
import time
import random
class Controls(cmd.Cmd):
    """Interactive command interpreter that drives the player through the
    game world: movement, inventory, and world-event queries.
    """

    prompt = '> '

    def __init__(self):
        # Initialize the Cmd loop, load the starting room, print the
        # opening description, and set up the supporting game objects.
        cmd.Cmd.__init__(self)
        self.loc = get_room('intro')
        self.look()
        self.pos()
        self.event = Events()
        self.inventory = Inventory()
        self.Player = Player()

    def emptyline(self):
        # Override Cmd's default (repeat last command) with a no-op.
        pass

    def objects(self, args):
        """Look up an object kind in the current room; report if absent."""
        found = self.loc._objects(args)
        if found is None:
            print('There are no ' + repr(args) + ' in the area')
            self.look()
        else:
            self.look()

    def move(self, dir):
        """Move to the neighboring room in direction ``dir``, if any."""
        newroom = self.loc._neighbor(dir)
        if newroom is None:
            print('''You cannot go this way''')
            self.look()
        else:
            self.loc = get_room(newroom)
            self.look()
            # event.spawnAtPos()

    def pos(self):
        position = self.loc.name

    def look(self):
        """Print the current room's description, wrapped to 72 columns."""
        for line in textwrap.wrap(self.loc.description, 72):
            print(line)
        print('')

    # ------------------------------------------------------------------
    # movement commands
    def do_n(self, args):
        '''goes north'''
        self.move('n')

    def do_s(self, args):
        '''goes south'''
        self.move('s')

    def do_e(self, args):
        '''goes east'''
        # Bug fix: the original also called move('east') afterwards,
        # which attempted a second move from the room just entered.
        self.move('e')

    def do_w(self, args):
        '''goes west'''
        self.move('w')

    def do_climb(self, args):
        '''Climbs where possible'''
        self.move('climb')

    def do_get(self, args):
        '''Gets items from an area or from your bag'''
        if self.inventory.slots[args] > 0:
            # Bug fix: __init__ stores the player as self.Player;
            # self.player raised AttributeError.
            self.Player.right_hand(args)
        else:
            print('You do not have this item')

    def do_enter(self, args):
        '''Enters rooms, Villages, and caves where possible'''
        self.move('enter')

    def do_leave(self, args):
        '''Exits the current room'''
        self.move('leave')

    def help_get(self):
        for i in (textwrap.wrap(''' If you are trying to grab an item out from
your bag type get followed by the item in your bag, this applys to
items in an area as well''', 72)):
            print(('', i))

    # ------------------------------------------------------------------
    # world / player queries
    def do_sky(self, args):
        self.event.sky()

    def do_time(self, args):
        self.event.timeOfDay()

    def do_chop(self, args):
        self.objects('trees')

    def do_name(self, args):
        '''Prints the users name if there is one'''
        # Bug fix: self.player -> self.Player (see do_get).
        self.Player.player_name()

    def do_hand(self, args):
        '''Prints what is in hand'''
        if self.Player.hand() == ' ':
            print("You are not holding anything")
        else:
            print(self.Player.hand())

    def do_next(self, args):
        '''Gets the next event'''
        self.move('next')

    def do_look(self, args):
        '''Prints the current area you are in'''
        self.look()

    def do_inventory(self, args):
        '''Checks Inventory'''
        self.inventory.bag()
        self.look()

    def do_quit(self, args):
        '''Quits the game'''
        print("thank you for playing")
        return True
class Events(object):
    """Handles game events such as time of day and monster spawning.

    Time is taken from the system clock.
    """

    def __init__(self):
        # NOTE(review): this stores the Controls.pos *method object*, not a
        # position value -- presumably a placeholder; confirm intent.
        self.room = Controls.pos
        self.time = time

    def timeOfDay(self):
        """Print the current wall-clock time, e.g. 'The time is 03:12 PM'."""
        print('The time is ' + time.strftime('%I:%M %p'))

    def sky(self):
        """Print a rough description of the sky from the 12-hour clock."""
        timeInInt = int(time.strftime("%I"))
        timeInAmPm = time.strftime("%p")
        if timeInAmPm == 'AM':
            print("It is morning")
        elif timeInAmPm == 'PM':
            if timeInInt <= 5:
                print("It is afternoon")
            # Bug fix: the original used `timeInInt > 5 & timeInInt <= 11`,
            # which Python parses as a chained comparison against the
            # bitwise value (5 & timeInInt) -- `and` was intended.
            elif timeInInt > 5 and timeInInt <= 11:
                print("It is night")

    # ------------------------------------------------------------------
    # creature spawning
    def spawAtPos(self):
        """Roll a 50/50 chance of a monster appearing in the current area."""
        # Bug fix: random.randrange() returns a single int; the original
        # `for i in chance:` tried to iterate over it (TypeError).
        chance = random.randrange(100)
        if chance <= 49:
            print("There is a monster in the area")
        else:
            print("The area seems safe for now")
if __name__ == '__main__':
    # Entry point: build the interpreter and hand control to its REPL.
    c = Controls()
    c.cmdloop()
| kevin2314/TextGame | game/main.py | Python | mit | 4,805 |
# Copyright (c) 2020 Bartosz Szczesny <bszcz@bszcz.org>
# This program is free software under the MIT license.
print('\n# avoid new line at the beginning')
# The backslash after the opening quotes suppresses the leading newline.
s = """\
test
"""
print(s)

print('\n# string are immutable')
s = 'string'
try:
    s[1] = 'p'
except TypeError as e:
    print(e)

print('\n# enumerate() function')
for n, c in enumerate(['a', 'b', 'c']):
    print(n, c)

print('\n# else clause in loops')
for i in range(10):
    # Bug fix: the original tested `n` (left over from the enumerate demo
    # above, where it finished as 2) instead of the loop variable `i`,
    # so the loop broke on its very first iteration.
    if i == 2:
        break
else:
    print('loop did not break')
print('\n# docstrings')

def documented():
    "This function is documented."
    pass

# now can run: help(documented)
print(documented.__doc__)

print('\n# unpacking arguments')

def unpack(n, c):
    print('unpacked:', n, c)

arg_list = [1, 'a']
arg_dict = {'n': 1, 'c': 'a'}
# * unpacks positionally, ** unpacks by keyword.
unpack(*arg_list)
unpack(**arg_dict)

print('\n# function annotations')

def annotated(i: int, s: str) -> str:
    return 's'

print(annotated.__annotations__)

print('\n# not feeling myself')

# 'self' is only a convention -- any identifier works for the first
# parameter of an instance method.
class NotSelf():
    def __init__(o, n):
        o.n = n

    def get_n(o):
        return o.n

ns = NotSelf(10)
print(ns.get_n())

print('\n# lists operations')
print("""\
a = list()
a.copy() => a[:] # return shallow copy
a.clear() => del a[:]
a.append(item) => a[len(a):] = [item]
a.extend(iterable) => a[len(a):] = iterable
""")

print('\n# set comprehension')
a = 'abracadabra'
s = {x for x in a}
print(a, '->', s)

print('\n# keys can be any immutable type')
d = dict()
d[('a', 1)] = 100
d[('b', 2)] = 200
print(d)

print('\n# dictionary comprehension')
d = {x: 'got ' + str(x) for x in range(3)}
print(d)

print('\n# simple strings as keys')
d = dict(a=1, b=2, c=3)
print(d)

print('\n# reversed() function')
a = reversed(range(10)) # iterator
print(list(a))

print('\n# reload import')
# reload a module without
# restarting the interpreter
# or an already running script
import math
import importlib
importlib.reload(math)

print('\n# dir() function')
import builtins
print(dir()) # currently defined
print()
print(dir(math)) # defined by the module
print()
print(dir(builtins)) # build-in objects

print('\n# string formatting')
# Underscores in numeric literals are allowed since Python 3.6.
c = 299_792_458
print(f'Speed of light is {c} m/s.')
print('Speed of light is {c:.0f} km/s.'.format(c=c/1000))
pi = 3.14159
print(f'Pi is {pi:.2f}.')
d = {'a': 1, 'b': 2}
print('A: {a}, B: {b}.'.format(**d))

print('\n# exceptions')

class E1(Exception):
    pass

class E2(E1):
    pass

for e in [E1, E2, Exception]:
    try:
        raise e # no need for ()
    except E1: # will catch E2 as well
        print('E1.')
    except E2:
        print('E2.')
    except: # will catch anything
        print('Exception.')
    finally:
        print('Finally.')

print()

try:
    pass
except:
    pass
else: # if not exception raised
    print('No exception.')
finally:
    print('Finally.')

print()

try:
    try:
        raise E1
    except E2:
        print('E2.')
    except: # will catch anything
        raise # re-raise
    finally:
        print('Finally (E2).')
except E1:
    print('E1.')
finally:
    print('Finally (E1).')

print('\n# global and nonlocal scope')

def scope_test():
    # Each helper rebinds `s` in a different scope.
    def do_local():
        s = 'local'

    def do_nonlocal():
        nonlocal s
        s = 'nonlocal'

    def do_global():
        global s
        s = 'global'

    s = 's'
    do_local()
    print(s)
    do_nonlocal()
    print(s)
    do_global()
    print(s)

scope_test()
print(s)

print('\n# instance and subclass')
print(isinstance(1, int))
print(isinstance(1.0, int))
print(issubclass(bool, int))

print('\n# struct')

# A bare class can act as a mutable record.
class Struct:
    pass

s = Struct()
s.x = 1
s.y = 2
print(s.x, s.y)

print('\n# generator')

def rev(s):
    for i in range(len(s) - 1, -1, -1):
        yield s[i]

for c in rev('abc'):
    print(c)

print('\n# generator expression')
# like list comprehension
# but with parentheses
s = sum(i * i for i in range(10))
print(s)

print('\n# regex')
import re
# can use \1 in regex string
r = re.sub(r'([0-9]) \1', r'\1', '1 2 2 3 3 3')
print(r)

print('\n# array')
# store numbers of the same type efficiently
import sys
from array import array
l = list([1, 2, 3, 4, 5])
a = array('B', [1, 2, 3, 4, 5]) # B - unsigned byte
print(sys.getsizeof(l))
print(sys.getsizeof(a))

print('\n# float as ratio')
pi = 3.14159
print(pi.as_integer_ratio())

print('\n# float as hex')
pi = 3.14159
print(pi.hex())
print(float.fromhex('0x1.921f9f01b866ep+1'))

print('\n# precise sum')
a = [0.3, 0.3, 0.3, 0.1]
print(sum(a) == 1)
# math.fsum tracks partial sums to avoid float rounding loss.
print(math.fsum(a) == 1)
| bszcz/python | tutorial_notes.py | Python | mit | 4,575 |
#!/usr/bin/env python
from coveragit.application.console import Application
if __name__ == "__main__":
Application().run()
| felixcarmona/coveragit | run.py | Python | mit | 128 |
#from StreamReader import *
import numpy
import cv2
class ObjectDetecor:
    """Detects colored, roughly circular objects in an image frame with
    OpenCV: pixels inside a color range are masked, small specks are
    removed, and the remaining contours are reduced to enclosing circles.
    """

    # Minimum enclosing-circle radius (pixels) for an accepted object.
    minRadius = 15
    # Minimum contour area (pixels^2) for a contour to be considered.
    minContourArea = 500

    def findObjects(self, frame, npLower, npUpper):
        """Find objects whose color lies within [npLower, npUpper].

        Side effect: draws the enclosing circle of every accepted object
        onto ``frame``.

        :param frame: image array; assumed to already be in the color
            space matching the bounds (the HSV conversion was left
            disabled upstream -- confirm with callers).
        :param npLower: inclusive lower color bound (numpy array).
        :param npUpper: inclusive upper color bound (numpy array).
        :return: list of ``[x, y, radius]`` integer triples.
        """
        # Mask all pixels whose color falls between the two bounds.
        mask = cv2.inRange(frame, npLower, npUpper)
        # Erode then dilate (morphological opening) to drop small groups
        # of stray pixels from the mask.
        mask = cv2.erode(mask, None, iterations=2)
        mask = cv2.dilate(mask, None, iterations=2)
        # [-2] keeps compatibility with OpenCV versions whose
        # findContours returns either 2 or 3 values.
        contours = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
        objects = list()  # list of found objects
        # Keep only contours large enough to be real candidates.
        # (The unused image-moments/centroid computation was removed; it
        # was only referenced by commented-out debug drawing.)
        bigContours = [x for x in contours if cv2.contourArea(x) >= self.minContourArea]
        for c in bigContours:
            ((x, y), radius) = cv2.minEnclosingCircle(c)
            # only proceed if the radius meets a minimum size
            if radius > self.minRadius:
                objects.append([int(x), int(y), int(radius)])
                # draw the circle on the frame (blue in BGR, thickness 1)
                cv2.circle(frame, (int(x), int(y)), int(radius), (255, 0, 0), 1)
        return objects
| linduine/RobotVYE | Test/PyQtTest/ObjectsDetector.py | Python | mit | 1,964 |
import unittest, tempfile, os.path
from gitdh.config import Config
from gitdh.git import Git
from subprocess import check_output
class GitdhConfigGitTestCase(unittest.TestCase):
    """Tests that a gitdh Config can be loaded straight out of a git
    repository (both a working-tree clone and a bare clone), where the
    configuration lives on a dedicated 'gitdh' branch.
    """

    def setUp(self):
        # The gitdh.conf content committed onto the 'gitdh' branch by
        # _createGitRepo (runtime data -- must stay byte-identical).
        self.cStr = """
[Git]
RepositoryPath = /var/lib/gitolite/repositories/test.git

[Database]
Engine = sqlite
DatabaseFile = /var/lib/gitolite/data.sqlite
Table = commits

[master]
Path=/home/www/production

[development]
Path=/home/www/development

[crunch-command]
Mode = perfile
RegExp = \.php$
Command = eff_php_crunch ${f}
"""

    def test_gitRepo(self):
        """Config loads from a non-bare repository via both constructors."""
        d = tempfile.TemporaryDirectory()
        self._createGitRepo(d.name)

        c = Config.fromGitRepo(d.name)
        self.assertTrue('Database' in c)
        self.assertTrue('master' in c)
        self.assertEqual(c['Database']['Engine'], 'sqlite')
        self.assertEqual(c.repoPath, d.name)

        # fromPath should auto-detect the git repository as well.
        c = Config.fromPath(d.name)
        self.assertTrue('Database' in c)
        self.assertTrue('master' in c)
        self.assertEqual(c['Database']['Engine'], 'sqlite')
        self.assertEqual(c.repoPath, d.name)

        d.cleanup()

    def test_bareGitRepo(self):
        """Config loads from a bare clone via both constructors."""
        d = tempfile.TemporaryDirectory()
        self._createBareGitRepo(d.name)

        c = Config.fromGitRepo(d.name)
        self.assertTrue('Database' in c)
        self.assertTrue('master' in c)
        self.assertEqual(c['Database']['Engine'], 'sqlite')
        self.assertEqual(c.repoPath, d.name)

        c = Config.fromPath(d.name)
        self.assertTrue('Database' in c)
        self.assertTrue('master' in c)
        self.assertEqual(c['Database']['Engine'], 'sqlite')
        self.assertEqual(c.repoPath, d.name)

        d.cleanup()

    def _createGitRepo(self, path):
        """Build a repo at ``path`` with branches master (README),
        development (modified README) and gitdh (carrying gitdh.conf).
        """
        check_output(('git', 'init'), cwd=path)
        gC = Git(path)
        # Local committer identity so commits work on unconfigured hosts.
        gC._executeGitCommand('config', 'user.email test@localhost')
        gC._executeGitCommand('config', 'user.name Test')
        with open(os.path.join(path, 'README'), 'w') as f:
            f.write('On master')
        gC._executeGitCommand('add', '.')
        gC._executeGitCommand('commit', '-m "Initial Import"')
        gC._executeGitCommand('branch', 'development')
        gC._executeGitCommand('checkout', 'development', suppressStderr=True)
        with open(os.path.join(path, 'README'), 'w') as f:
            f.write('On development')
        gC._executeGitCommand('add', '.')
        gC._executeGitCommand('commit', '-m "Development branch added"')
        gC._executeGitCommand('branch', 'gitdh')
        gC._executeGitCommand('checkout', 'gitdh', suppressStderr=True)
        with open(os.path.join(path, 'gitdh.conf'), 'w') as f:
            f.write(self.cStr)
        gC._executeGitCommand('add', '.')
        gC._executeGitCommand('commit', '-m "Gitdh conf added"')
        return gC

    def _createBareGitRepo(self, path):
        """Create the repo in a scratch directory and bare-clone it
        into ``path``."""
        d = tempfile.TemporaryDirectory()
        self._createGitRepo(d.name)
        check_output(('git', 'clone', '-q', '--bare', d.name, path))
        d.cleanup()
| seoester/Git-Deployment-Handler | gitdh/tests/configgit.py | Python | mit | 2,693 |
# coding=utf-8
"""
https://docs.python.org/3/howto/logging.html
https://fangpenlin.com/posts/2012/08/26/good-logging-practice-in-python/
"""
import logging
import logging.config
from logging import Formatter
import pathlib
import yaml
import time
from common.settings import AlgoSettings
def setup_logging(default_level=logging.INFO):
    """Setup logging configuration from the YAML file named by AlgoSettings.

    On failure, logging falls back to ``basicConfig(default_level)`` and a
    ``SystemError`` is raised, chained to the original exception.

    :param default_level: level used for the basicConfig fallback.
    :raises SystemError: if the configuration could not be read or applied.
    """
    path = pathlib.Path(AlgoSettings().log_configuration())
    try:
        with open(path, 'rt') as my_file:
            # safe_load: the file is plain configuration data.
            config = yaml.safe_load(my_file.read())
        logging.config.dictConfig(config)
    except Exception as exc:
        # Keep logging usable despite the failure, but fail loudly and
        # preserve the original cause (the bare `raise SystemError` of the
        # old code discarded it).
        logging.basicConfig(level=default_level)
        raise SystemError(
            "could not load logging configuration from {}".format(path)) from exc
def update_conf_file():
    """Update the logging configuration file with the paths
    defined in the CONFIG file.

    Rewrites the filenames of the info/error file handlers so they live
    under the configured log-saving directory.
    """
    sett = AlgoSettings()
    saving_path = pathlib.Path(sett.log_saving_path())
    config_file = pathlib.Path(sett.log_configuration())

    with open(config_file) as my_file:
        # safe_load: plain data, and yaml.load without an explicit Loader
        # is deprecated (and a TypeError on PyYAML >= 6).
        doc = yaml.safe_load(my_file)

    doc['handlers']['info_file_handler']['filename'] = \
        str(saving_path / 'bsk_info.log')
    doc['handlers']['error_file_handler']['filename'] = \
        str(saving_path / 'bsk_error.log')

    with open(config_file, 'w') as my_file:
        yaml.dump(doc, my_file)
class UTCFormatter(Formatter):
    """logging.Formatter that renders record timestamps in UTC
    (time.gmtime) instead of local time."""
    converter = time.gmtime
def log_title(msg, total_len=120):
    """Log ``msg`` centred inside a five-line '#' banner via logging.info.

    The banner is: a full rule of '#', an empty framed line, the centred
    message line, another empty framed line, and a closing rule.

    :param msg: text to display in the banner.
    :param total_len: overall banner width in characters (default 120,
        the previously hard-coded value -- now a parameter).
    """
    complete_line = '#' * total_len
    space_line = '#' + ' ' * (total_len - 2) + '#'
    # str.center places the extra space on the right for odd padding,
    # which matches the original manual l_space/r_space computation.
    msg_line = '#' + msg.center(total_len - 2) + '#'

    for line in (complete_line, space_line, msg_line, space_line,
                 complete_line):
        logging.info(line)
def log_test():
    """Tiny smoke test: print a variable, log it, and return it."""
    x = 1
    print(x)
    # Bug fix: the original referenced a module-level `logger` that is
    # never defined anywhere in this file (NameError at call time).
    logging.getLogger(__name__).info('Var: %s', x)
    return x
if __name__ == '__main__':
    # Rewrite the logging YAML so its file handlers point at the
    # configured log directory. (Other entry points left disabled.)
    # setup_logging()
    # logger = logging.getLogger('log_config')
    # log_test()
    update_conf_file()
class Friend:
def walk(self,shravan=""):
'''
>>> Friend().walk()
walking
'''
print "walking",
def talk(self):
print "talking",
def fight(self):
print "fighting",
f1=Friend()
f1.walk()
import doctest
doctest.testmod() | shravanshandilya/catching-up-with-python | Recipes/class_example.py | Python | mit | 245 |
'''
The MIT License (MIT)
Copyright (c) 2016 Vasileios Kagklis
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import glob, nltk, time, os, re, subprocess, math, urllib2, socket, itertools
from nltk.corpus import wordnet
from bs4 import BeautifulSoup,SoupStrainer
from operator import itemgetter
from xml.etree.ElementTree import ElementTree
from itertools import groupby
def most_common_func(L):
return max(groupby(sorted(L)), key=lambda(x, v):(len(list(v)),-L.index(x)))[0]
#folder with txt, pdf & html files - must be located in the same dir with the script
path = os.getcwd()
tkn_path = path + "\\tokenize\\"
tag_path = path + "\\tagged\\"
types = ['*.txt', '*.html', '*.htm']
while(1):
x = raw_input("Press:\n1. Run profile analysis \
\n2. Enter index mode\n3. Exit Program\nYour option: ")
accepted_inputs = ['1','2','3']
while(x not in accepted_inputs):
x = raw_input("Invalid input!!! Press:\n1. Run profile analysis \
\n2. Enter index mode\n3. Exit Program\nYour option: ")
#----------------------------------- READ BOOKMARK FILE -------------------------------------#
if(x is '1'):
if not(os.path.exists(path+"\profile")):
print "\nThere is no profile folder. Create a folder with the name profile and put your files in it. Then re-try...\n"
continue
inverted_index = {}
#noun_dictionary -> {noun1:{doc_id1:weight1, doc_id2:weight2, ...}, noun2:... }
noun_dictionary = {}
t0=time.time()
try:
fh = open('bookmarks.html', 'r')
except:
print "\nThere is no file named bookmarks.html! Export bookmarks in HTML and re-try...\n"
continue
page = fh.read()
fh.close()
#------------------------- EXTRACT LINKS AND PUT THEM ON A TXT/LIST -------------------------#
with open('url_links.txt', 'w') as fh:
for link in BeautifulSoup(page, 'html.parser', parse_only=SoupStrainer('a')):
if link.has_key('href') and re.match('^http',link['href']):
fh.write(link['href']+"\n")
#---------------------------- DOWNLOAD WEB PAGES - .HTML FILES ------------------------------#
i=1
fh = open('url_links.txt', 'r')
for link in fh.readlines():
request = urllib2.Request(link)
opener = urllib2.build_opener()
try:
filehandle = opener.open(request, timeout=5)
myFile = open(path+'\\profile\\'+'bookmark%04d.html'%i,'w')
myFile.write(link)
myFile.write(filehandle.read())
except urllib2.URLError:
filehandle.close()
continue
except socket.timeout:
filehandle.close()
continue
except:
myFile.close()
filehandle.close()
continue
i += 1
myFile.close()
filehandle.close()
print ("\nBookmarked web pages have been succesfuly downloaded!\nProceeding...")
try:
os.mkdir("tokenize")
os.mkdir("tagged")
except WindowsError:
pass
tokens_list=[]
squares = []
open_class_cat =['JJ','JJR','JJS','NN','NNS','NP','NPS','NNP','NNPS','RB','RBR','RBS','VV','VVD','VVG','VVN','VVP','VVZ','FW']
#-------------------------------- START TO LOOP INTO FILES ----------------------------------#
#dictionary with id - path/url correspondence
dic = {}
i = 1
N = 0
for file_type in types:
N += len(glob.glob(path+"\\profile\\"+file_type))
for file_type in types:
for doc in glob.glob(path+"\\profile\\"+file_type):
if not(re.match('.+bookmark\d{4}\.html$', doc)):
dic[i] = os.path.join(path+"\profile\\",doc)
else:
with open(doc, 'r') as fh:
link = fh.readline()
dic[i] = re.compile('\n').sub('',link)
#-------------------------------------- TOKENIZATION ----------------------------------------#
#exclude files that contain no latin characters
try:
fh = open(doc,'r')
s = fh.read()
if not(re.match('((.|\n)*[a-zA-Z]+(.|\n)*)+', s)):
continue
except IOError:
pass
finally:
fh.close()
s = BeautifulSoup(s, 'html.parser')
tokens_list = nltk.word_tokenize(s.get_text())
tkn = "tokenized_text_%04d.txt"%i
with open(tkn_path+tkn,'w') as fh:
for each_token in tokens_list:
if not(re.search('&',each_token)) and not(each_token.isspace()):
fh.write(each_token.encode('utf-8'))
fh.write("\n")
#------------------------------------------ TAGGING -----------------------------------------#
tag = "tagged_output_%04d.txt"%i
subprocess.call('.\\TreeTagger\\bin\\tree-tagger.exe -token -lemma .\\TreeTagger\\lib\\english.par "'+tkn_path+tkn+'">"'+tag_path+tag+'"',shell=True)
#-------------------------------------- REMOVE STOP WORDS -----------------------------------#
with open(tag_path+tag, 'r') as fh:
lemmas = []
for line in fh.readlines():
op = line.split()
if ((op[1] in open_class_cat) and (op[2] != '<unknown>') and (op[2] != '@card@')and (op[2] != '@ord@')and (op[2] != '%')):
p = re.compile('(^[\w]{1}$)|(^[\w]+[\.]$)|(^[\w]-[0-9]+)|(^[\w]-[\w])|(^[\w]-)|(-[\w]-[\w]-[\w])|([0-9]+-[0-9]+)|(^[0-9]+$)|((^[\w])([\d]$))')
op[2] = p.sub('', op[2])
#------------------------------- START CREATING THE INVERTED INDEX --------------------------#
if (op[2] != ''):
if op[2].lower() not in inverted_index:
inverted_index[op[2].lower()] = {}
lemmas.append(op[2].lower())
if(op[2].lower() not in noun_dictionary and (op[1] == 'NN' or op[1] == 'NNS') and op[2] != '<unknown>'):
noun_dictionary[op[2].lower()] = {}
if ((op[1] == 'NN' or op[1] == 'NNS') and op[2] != '<unknown>'):
noun_dictionary[op[2].lower()][i] = 0
u_lemmas = list(set(lemmas))
#--------------------------------- CALCULATING SUM OF (tf*idf)^2 ----------------------------#
squares.append(0)
for lemma in u_lemmas:
inverted_index[lemma][i] = int(lemmas.count(lemma))
tf = float(lemmas.count(lemma))
if lemma in noun_dictionary.keys():
noun_dictionary[lemma][i] = tf
idf = float(math.log10(N/len(inverted_index[lemma])))
squares[i-1] += float(pow(tf*idf,2))
i += 1
#------------------------ CREATING INVERTED INDEX AND SAVING IT IN XML FILE -----------------#
del u_lemmas, lemmas
top20 = []
with open("inverted_index.xml", 'w') as fh_index:
fh_index.write('<?xml version=\"1.0\" ?>\n')
fh_index.write('<inverted_index>\n')
for lemma in inverted_index:
fh_index.write("\t<lemma name=\""+lemma+"\">\n")
for doc_id,term_frequency in inverted_index[lemma].items():
tf = float(term_frequency)
#idf=log10(total documents/number of documents that contain lemma)
idf=float(math.log10(N/ len(inverted_index[lemma])))
weight=float(float(tf*idf)/float(math.sqrt(squares[doc_id-1])+1))
inverted_index[lemma][doc_id] = weight
fh_index.write("\t\t<document id=\"%d\" weight=\"%f\"/>\n"%(doc_id,weight))
fh_index.write('\t</lemma>\n')
fh_index.write('</inverted_index>\n')
fh_index.write('<doc_index>\n')
for i in dic:
fh_index.write('\t<matching id="%d" path="'%i+dic[i]+'"\>\n')
fh_index.write('</doc_index>\n')
#------------------------------- FIND TOP 20 POPULAR NOUNS ----------------------------------#
noun_list = []
noun_freq_list = []
for lemma in noun_dictionary:
sum_w = 0
for freq in noun_dictionary[lemma].values():
sum_w += freq
noun_list.append(lemma)
noun_freq_list.append(float(sum_w/N))
for j in range(len(noun_list)):
top20.append((noun_list[j],noun_freq_list[j]))
top20 = sorted(top20, key=itemgetter(1),reverse=True)
top20 = top20[:20]
#--------------------------------- DESTROY REDUNDANT ITEMS ----------------------------------#
del tokens_list, noun_dictionary, noun_list, noun_freq_list, squares
#---------------------------------- RUN PROFILE ANALYSIS ------------------------------------#
step = 4
const_step = step
top20=top20+top20[:step]
WSD = {}
while(step<=len(top20)):
#print step
syns = []
pointer = []
if step<=20:
pointer=range(step-const_step,step)
else:
pointer=range(step-const_step,20)
pointer +=range(0,step-20)
for j in pointer:
if(wordnet.synsets(top20[j][0], pos=wordnet.NOUN)):
syns.append(wordnet.synsets(top20[j][0], pos=wordnet.NOUN))
else:
syns.append((1,1))
confs = [()]
for x in syns:
confs = [i + (y,) for y in x for i in confs]
max_conf=0
max_sim=0
for conf in confs:
combinations = list(itertools.combinations(conf,2))
sim = 0
for pair in combinations:
if(pair[0] is not 1 and pair[1] is not 1):
sim += wordnet.wup_similarity(pair[0], pair[1])
sim = float(sim)/float(len(combinations))
if(sim >= max_sim):
max_sim = sim
max_conf = confs.index(conf)
j=0
for element in confs[max_conf]:
if pointer[j] not in WSD:
WSD[pointer[j]] = []
WSD[pointer[j]].append(element)
j += 1
step += 1
t1 = time.time()
time = (t1-t0)
minutes = time/60
sec = time%60
print ("Profile Analysis completed in %d minutes and %d seconds"%(minutes, sec))
print ("Your interests are represented from the following nouns and their definitions: \n")
j=0
for element in WSD:
if most_common_func(WSD[j]) is not 1:
print (most_common_func(WSD[j]).name()+": "+most_common_func(WSD[j]).definition())
j+=1
#------------------------- LOADING INVERTED INDEX FROM XML FILE -----------------------------#
elif(x is '2'):
flag = 0
try:
len(dic)
except NameError:
dic = {}
flag = 1
else:
pass
try:
len(inverted_index)
except NameError:
print "No index was created recently! Checking for a saved copy... "
try:
with open('inverted_index.xml') as f: pass
except IOError as e:
print 'There was no saved index found!\n\n'
else:
print "A saved index was found. Loading...!"
inverted_index = {}
fh = open("./inverted_index.xml", 'r')
for line in fh.readlines():
if(re.match('(.|\n)*<lemma', line)):
lemma = re.search('"(.*)"', line).group(1)
inverted_index[lemma] = {}
elif(re.match('(.|\n)*<document', line)):
op = line.split('"')
inverted_index[lemma][int(op[1])] = float(op[3])
elif(re.match('(.|\n)*<matching', line) and flag):
op = line.split('"')
dic[int(op[1])] = op[3]
else:
continue
#------------------------------ SEARCH QUERY IN INVERTED INDEX ------------------------------#
try:
len(inverted_index)
except NameError:
print "\nIndex hasn't been created or loaded!\n"
else:
while(True):
import time
text_ID_list = []
weight_list = []
index_result = []
query = raw_input('Please insert queries or -1 to exit: \n')
if query == '-1':
print "Exiting Index...\n\n"
break
t0 = time.time()
query_list = query.split()
for each_query in query_list:
if each_query in inverted_index.keys():
for text_id, weight in inverted_index[each_query].items():
if text_id not in text_ID_list:
text_ID_list.append(text_id)
weight_list.append(weight)
else:
text_pointer = text_ID_list.index(text_id)
else:
print("\nCouldn't be found in index!!\n")
break
for j in range(len(text_ID_list)):
if weight_list[j] > 0:
index_result.append((text_ID_list[j],weight_list[j]))
query_xml = sorted(index_result, key=itemgetter(1),reverse=True)
t1 = time.time()
time = (t1-t0)
if len(query_xml)>0:
for doc_id,weight in query_xml:
print ("ID: %d Path/URL: "%doc_id+dic[doc_id]+"\nCosine Similarity: %f\n"%weight)
else:
print("Query appears in every file or doesn't appear at all")
print("Respond time = %f\n"%time)
#-------------------------------------- EXIT PROGRAM ----------------------------------------#
elif(x is '3'):
break
| kagklis/profile-analysis | profile_analysis.py | Python | mit | 16,905 |
"""Module for displaying Terrain, both in 2D and 3D.
(Not accessible outside of package; use display methods of Terrain instead.)
"""
from Tkinter import Tk, Canvas, Frame, BOTH
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
class Terrain2D(Frame):
    """Top-down 2D view of a Terrain object.

    Renders the terrain as a grid of greyscale squares, one square per
    height value: white for a height of 1, black for a height of 0.
    """

    SQUARE_SIDE = 3  # side length, in pixels, of each drawn square

    @classmethod
    def display_terrain(cls, terrain):
        """Open a window showing *terrain* from above.

        Args:
            terrain (Terrain): Terrain to display.
        """
        root = Tk()
        width_px = terrain.width * Terrain2D.SQUARE_SIDE
        length_px = terrain.length * Terrain2D.SQUARE_SIDE
        root.geometry("{0}x{1}".format(width_px, length_px))
        Terrain2D(root, terrain)
        root.mainloop()

    def __init__(self, parent, terrain):
        """Attach self to a Tk parent, then build the UI.

        Args:
            parent (Tk): Parent window to attach self to.
            terrain (Terrain): Terrain to display.
        """
        Frame.__init__(self, parent)
        self.terrain = terrain
        self.parent = parent
        self.init_ui()

    def init_ui(self):
        """Set the window title, pack the frame, and draw the height grid."""
        self.parent.title("Terrain (top-down)")
        self.pack(fill=BOTH, expand=1)
        self.draw_heights()

    def draw_heights(self):
        """Paint one greyscale square per height value.

        Brighter squares correspond to greater heights.
        """
        canvas = Canvas(self)
        side = Terrain2D.SQUARE_SIDE
        for col in range(self.terrain.width):
            for row in range(self.terrain.length):
                left = col * side
                top = row * side
                # Map height in [0, 1] to one of 16 grey hex digits.
                shade = "0123456789abcdef"[int(self.terrain[col, row] * 15)]
                fill = "#" + shade + shade + shade
                canvas.create_rectangle(left, top,
                                        left + side,
                                        top + side,
                                        outline=fill, fill=fill)
        canvas.pack(fill=BOTH, expand=1)
class Terrain3D(object):
    """Rudimentary 3D surface view of a Terrain, via matplotlib's mplot3d.

    Shows the terrain as a surface mesh at an adjustable viewing angle.

    Notes:
        Expected to be slow; intended only for visualizing terrain during
        development, not for production use.
    """

    def __init__(self, terrain):
        self.terrain = terrain
        xs = range(self.terrain.width)
        ys = range(self.terrain.length)
        self.x_grid, self.y_grid = np.meshgrid(xs, ys)
        # Sample the terrain height at every (x, y) pair of the mesh.
        heights = [self.terrain[x, y]
                   for x, y in zip(np.ravel(self.x_grid), np.ravel(self.y_grid))]
        self.z_grid = np.array(heights).reshape(self.x_grid.shape)

    def display_terrain(self):
        """Show the terrain as a 3D surface plot."""
        figure = plt.figure()
        axes = figure.add_subplot(111, projection='3d')
        axes.plot_surface(self.x_grid, self.y_grid, self.z_grid)
        axes.set_zlim(0.0, 1.0)
        plt.show()
| jackromo/RandTerrainPy | randterrainpy/terraindisplay.py | Python | mit | 3,444 |
from django.conf.urls import include, url
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
import profiles.urls
import accounts.urls
from . import views
# Root URL configuration: home/about pages plus the profiles, admin,
# accounts, and API URL modules.
urlpatterns = [
    url(r'^$', views.HomePage.as_view(), name='home'),
    url(r'^about/$', views.AboutPage.as_view(), name='about'),
    url(r'^users/', include(profiles.urls, namespace='profiles')),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^', include(accounts.urls, namespace='accounts')),
    url(r'^apis/', include('api.urls', namespace='api')),
]

# Login/logout views for Django REST framework's browsable API.
urlpatterns += [
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
]

# Serve collected static files through Django itself.
# NOTE(review): referencing a view by dotted string
# ('django.views.static.serve') was deprecated in Django 1.8 and removed in
# 1.10 -- confirm the project's Django version, or import and pass the view
# callable instead.
urlpatterns += [
    url(r'^static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_ROOT}),
]
| triump0870/movie_task | src/mdb/urls.py | Python | mit | 812 |
"""Test diff."""
# --- import -------------------------------------------------------------------------------------
import numpy as np
import WrightTools as wt
# --- test ---------------------------------------------------------------------------------------
def test_ascending_1():
    """First derivative of sin on an ascending grid should be cos."""
    x = np.linspace(0, 10, 1000)
    deriv = wt.kit.diff(x, np.sin(x))
    # Exclude the last point, where the finite-difference stencil degrades.
    assert np.all(np.abs(deriv - np.cos(x))[:-1] < 0.0001)
def test_ascending_2():
    """Second derivative of sin on an ascending grid should be -sin."""
    x = np.linspace(0, 10, 1000)
    second = wt.kit.diff(x, np.sin(x), 2)
    # Exclude edge points, where the finite-difference stencil degrades.
    assert np.all(np.abs(second + np.sin(x))[1:-2] < 0.0001)
def test_ascending_3():
    """Third derivative of sin on an ascending grid should be -cos."""
    x = np.linspace(0, 10, 1000)
    third = wt.kit.diff(x, np.sin(x), 3)
    # Exclude edge points, where the finite-difference stencil degrades.
    assert np.all(np.abs(third + np.cos(x))[2:-3] < 0.0001)
def test_ascending_4():
    """Fourth derivative of sin on an ascending grid should be sin."""
    x = np.linspace(0, 10, 1000)
    fourth = wt.kit.diff(x, np.sin(x), 4)
    # Exclude edge points, where the finite-difference stencil degrades.
    assert np.all(np.abs(fourth - np.sin(x))[3:-4] < 0.0001)
def test_descending_1():
    """First derivative should still be cos when the grid is descending."""
    x = np.linspace(10, 0, 1000)
    deriv = wt.kit.diff(x, np.sin(x))
    # Exclude edge points, where the finite-difference stencil degrades.
    assert np.all(np.abs(deriv - np.cos(x))[1:-1] < 0.0001)
def test_descending_3():
    """Third derivative should still be -cos when the grid is descending."""
    x = np.linspace(10, 0, 1000)
    third = wt.kit.diff(x, np.sin(x), 3)
    # Exclude edge points, where the finite-difference stencil degrades.
    assert np.all(np.abs(third + np.cos(x))[3:-3] < 0.0001)
| wright-group/WrightTools | tests/kit/diff.py | Python | mit | 1,245 |
import numpy as np
from apvisitproc import despike
import pytest
import os
DATAPATH = os.path.dirname(__file__)
FILELIST1 = os.path.join(DATAPATH, 'list_of_txt_spectra.txt')
FILELIST2 = os.path.join(DATAPATH, 'list_of_fits_spectra.txt')
@pytest.fixture
def wave_spec_generate():
    '''
    Read three small chunks of spectra for testing purposes.

    Use this fixture as an input argument to any test that needs the data.

    Returns (wave1, spec1, wavelist, speclist): the first chunk's
    wavelength and flux arrays, plus lists holding all three chunks.
    '''
    wavelist = []
    speclist = []
    for fname in ('spec1test.txt', 'spec2test.txt', 'spec3test.txt'):
        wave, spec = np.loadtxt(os.path.join(DATAPATH, fname), unpack=True)
        wavelist.append(wave)
        speclist.append(spec)
    return wavelist[0], speclist[0], wavelist, speclist
@pytest.mark.parametrize('filelist, cond', [
    (FILELIST1, False),
    (FILELIST2, True),
])
def test_read_infiles(filelist, cond):
    '''
    Reading either text or fits spectra should yield parallel lists, with
    every wavelength array nonnegative and sorted in ascending order.
    '''
    infilelist, wavelist, speclist = despike.read_infiles(DATAPATH, filelist, cond)
    assert len(infilelist) > 0
    assert len(infilelist) == len(wavelist) == len(speclist)
    for wave in wavelist:
        assert np.all(wave >= 0)
        assert np.array_equal(np.sort(wave), wave)
def test_simpledespike(wave_spec_generate):
    '''
    The spike condition is met at pixels 15, 16, 17, and 18, so indices 9
    through 24 (inclusive) should be removed from both arrays.
    '''
    wave, spec = wave_spec_generate[:2]
    newwave, newspec = despike.simpledespike(wave, spec, delwindow=6,
                                             stdfactorup=0.7, stdfactordown=3,
                                             plot=False)
    assert len(newwave) == len(newspec)
    assert len(newwave) <= len(wave)
    assert len(newspec) <= len(spec)
    expected_wave = np.concatenate((wave[:9], wave[25:]))
    expected_spec = np.concatenate((spec[:9], spec[25:]))
    assert np.array_equal(expected_wave, newwave)
    assert np.array_equal(expected_spec, newspec)
def test_despike_spectra(wave_spec_generate):
    '''
    The despiked lists keep one entry per input spectrum (each entry may be
    shorter than its original once outliers are gone).
    '''
    wavelist, speclist = wave_spec_generate[2:]
    newwavelist, newspeclist = despike.despike_spectra(wavelist, speclist, type='simple', plot=False)
    assert len(newwavelist) == len(wavelist)
    assert len(newspeclist) == len(speclist)
| mrawls/apVisitproc | apvisitproc/tests/test_despike.py | Python | mit | 2,736 |
# 1395. Count Number of Teams - LeetCode
# https://leetcode.com/problems/count-number-of-teams/
from typing import List
# Even a brute-force search gets accepted (AC) here.
# There is actually a two-pass counting/filtering algorithm as well.
class Solution:
    def numTeams(self, rating: List[int]) -> int:
        """Count index triples (i, j, k), i < j < k, whose ratings are
        strictly increasing or strictly decreasing.

        For each middle index j, count soldiers strictly smaller and
        strictly larger on each side; every (smaller-left, larger-right)
        pair forms one increasing team through j, and symmetrically for
        decreasing teams.  O(n^2) instead of the brute-force O(n^3)
        triple loop, with identical results (including duplicate ratings,
        which form no team because only strict comparisons are counted).

        Args:
            rating: rating of each soldier, in line order.

        Returns:
            The number of valid teams of three.
        """
        n = len(rating)
        if n <= 2:
            return 0
        count = 0
        for j in range(n):
            mid = rating[j]
            less_left = greater_left = 0
            for i in range(j):
                if rating[i] < mid:
                    less_left += 1
                elif rating[i] > mid:
                    greater_left += 1
            less_right = greater_right = 0
            for k in range(j + 1, n):
                if rating[k] < mid:
                    less_right += 1
                elif rating[k] > mid:
                    greater_right += 1
            # increasing teams: smaller on the left, larger on the right;
            # decreasing teams: larger on the left, smaller on the right.
            count += less_left * greater_right + greater_left * less_right
        return count
# Smoke test: [1,2,3,4] has four strictly increasing triples and no
# decreasing ones, so the expected output is 4.
# rating = [2,5,3,4,1]
rating = [1,2,3,4]
s = Solution()
ret = s.numTeams(rating)
print(ret)
| heyf/cloaked-octo-adventure | leetcode/1395_count-number-of-teams.py | Python | mit | 775 |
# -*- coding: utf-8 -*-
import numpy as np
from pandas import read_csv as importDB
import pandas as pd
database = r'\\UBSPROD.MSAD.UBS.NET\UserData\ozsanos\RF\Desktop\Black\stockData.csv'
tickers = ['AAPL','ADBE','ADI','AMD','AXP','BRCM','C','GLD','GOOG','GS','HNZ','HPQ','IBM','MSFT','TXN','XOM']
dateRange = [("2010-01-01","2010-12-31"),("2011-01-01","2011-12-31")]
# dateRange = pd.date_range(startDate, endDate)
'''
Pre-weightings permutations
'''
# Enumerate all four-asset portfolio weightings on a 0.1 grid: every
# (w1, w2, w3, w4) with each weight in {0.0, 0.1, ..., 1.0} whose
# components sum to exactly 1.0.
schemes = []
points = range(0, 11, 1)
for i in points:
    for j in points:
        for k in points:
            z = i + j + k
            if z <= 10:
                # The fourth weight is whatever remains to reach 1.0;
                # round() guards against float representation noise.
                schemes.append((round(i/10.0,1), round(j/10.0,1), round(k/10.0,1), round(1.0 - z/10.0,1)))
schemes = tuple(schemes)
'''
*** Code Body ***
'''
def getData(startDate, endDate, symbolSet):
    """Return closing prices for symbolSet between startDate and endDate.

    Reads the CSV at `database`, keeping the 'Close' column as the index
    plus the requested ticker columns, then slices the date range.
    """
    return importDB(database, usecols = ['Close'] + symbolSet, index_col = 'Close').loc[startDate : endDate]
def simulate(startDate, endDate, symbolSet, weights):
    """Simulate a buy-and-hold portfolio and return its performance metrics.

    Args:
        startDate, endDate: date-range bounds passed through to getData.
        symbolSet: list of ticker column names.
        weights: per-symbol allocation fractions (expected to sum to 1.0).

    Returns:
        [Sharpe ratio, mean daily return, std of daily return,
         cumulative return], each rounded to 6 decimal places.
    """
    marketData = getData(startDate, endDate, symbolSet).values
    days = len(marketData)
    portfolio = np.zeros(days)
    returns = portfolio.copy()
    # Normalize each symbol's prices to its first day and scale by its
    # weight; summing the scaled columns gives the daily portfolio value.
    for e in range(len(marketData[0])):
        marketData[:,e] = weights[e] * marketData[:,e] / marketData[0,e]
        portfolio += marketData[:,e]
    for e in range(days):
        if e > 0: returns[e] = (portfolio[e]/portfolio[e-1]) - 1
    # NOTE(review): returns[0] stays 0.0 and is included in the mean/std --
    # confirm this matches the intended daily-return definition.
    meanDailyReturn = np.average(returns)
    stdDailyReturn = np.std(returns)
    cummDailyReturn = portfolio[-1]
    # NOTE(review): annualization uses sqrt(days in sample) rather than a
    # fixed sqrt(252) -- confirm which convention is intended.
    SharpeRatio = (days**0.5) * (meanDailyReturn / stdDailyReturn)
    return [round(SharpeRatio,6), round(meanDailyReturn,6), round(stdDailyReturn,6), round(cummDailyReturn,6)]
def optimise(symbolSet, dateFlag):
    """Grid-search `schemes` for the weighting with the best Sharpe ratio.

    Args:
        symbolSet: list of ticker column names.
        dateFlag: index into dateRange (0 -> 2010, 1 -> 2011).

    Prints the portfolio, the best weights, and the associated metrics.
    """
    maxSharpe = 0.0
    metrics = []
    for e in schemes:
        #print e,
        s = simulate(dateRange[dateFlag][0], dateRange[dateFlag][1], symbolSet, e)
        #print s
        if s[0] > maxSharpe:
            maxSharpe = s[0]
            metrics = [s, e]
    # NOTE(review): if no scheme achieves a Sharpe ratio above 0.0,
    # `metrics` stays empty and the prints below raise IndexError.
    print('\n+ - + - +')
    print "\nPortfolio:"
    print tuple(symbolSet)
    print "\nOptimal Weights:"
    print metrics[1]
    print "\nPerformance Metrics:"
    print tuple(metrics[0])
    print('\n+ - + - +\n\n\n\n')
'''
Portfolios
'''
'''
# Test 1
optimise(['AAPL', 'GLD', 'GOOG', 'XOM'], True)
# Test 2
optimise(['AXP', 'HPQ', 'IBM', 'HNZ'], False)
'''
# Run the optimiser for each quiz portfolio.
# Second argument is the dateRange index: True -> 2011, False -> 2010.
# Quiz 1
optimise(['AAPL', 'GOOG', 'IBM', 'MSFT'], True)
# Quiz 2
optimise(['BRCM', 'ADBE', 'AMD', 'ADI'], False)
# Quiz 3
optimise(['BRCM', 'TXN', 'AMD', 'ADI'], True)
# Quiz 4
optimise(['BRCM', 'TXN', 'IBM', 'HNZ'], False)
# Quiz 5
optimise(['C', 'GS', 'IBM', 'HNZ'], False)
'''
# Test 1
is2011 = True
symbolSet = ['AAPL', 'GLD', 'GOOG', 'XOM']
weights = [0.4,0.4,0.0,0.2]
print simulate(dateRange[is2011][0], dateRange[is2011][1], symbolSet, weights)
print('\n')
# Test 2
is2011 = False
symbolSet = ['AXP', 'HPQ', 'IBM', 'HNZ']
weights = [0.0,0.0,0.0,1.0]
print simulate(dateRange[is2011][0], dateRange[is2011][1], symbolSet, weights)
print('\n')
'''
# Quiz 1
is2011 = True
symbolSet = ['AAPL', 'GOOG', 'IBM', 'MSFT']
weights = [0.5,0.0,0.5,0.0]
print simulate(dateRange[is2011][0], dateRange[is2011][1], symbolSet, weights)
weights = [0.2,0.0,0.8,0.0]
print simulate(dateRange[is2011][0], dateRange[is2011][1], symbolSet, weights)
weights = [0.2,0.2,0.2,0.4]
print simulate(dateRange[is2011][0], dateRange[is2011][1], symbolSet, weights)
weights = [0.1,0.1,0.8,0.0]
print simulate(dateRange[is2011][0], dateRange[is2011][1], symbolSet, weights)
print('\n')
# Quiz 2
is2011 = False
symbolSet = ['BRCM', 'ADBE', 'AMD', 'ADI']
weights = [0.0,0.2,0.8,0.0]
print simulate(dateRange[is2011][0], dateRange[is2011][1], symbolSet, weights)
weights = [0.0,0.0,0.0,1.0]
print simulate(dateRange[is2011][0], dateRange[is2011][1], symbolSet, weights)
weights = [1.0,0.0,0.0,0.0]
print simulate(dateRange[is2011][0], dateRange[is2011][1], symbolSet, weights)
weights = [0.0,0.0,0.1,0.9]
print simulate(dateRange[is2011][0], dateRange[is2011][1], symbolSet, weights)
print('\n')
# Quiz 3
is2011 = True
symbolSet = ['BRCM', 'TXN', 'AMD', 'ADI']
weights = [0.0,0.0,0.8,0.2]
print simulate(dateRange[is2011][0], dateRange[is2011][1], symbolSet, weights)
weights = [0.0,0.2,0.0,0.8]
print simulate(dateRange[is2011][0], dateRange[is2011][1], symbolSet, weights)
weights = [0.0,0.0,0.1,0.9]
print simulate(dateRange[is2011][0], dateRange[is2011][1], symbolSet, weights)
weights = [0.0,0.0,0.0,1.0]
print simulate(dateRange[is2011][0], dateRange[is2011][1], symbolSet, weights)
print('\n')
# Quiz 4
is2011 = False
symbolSet = ['BRCM', 'TXN', 'IBM', 'HNZ']
weights = [0.1,0.1,0.6,0.2]
print simulate(dateRange[is2011][0], dateRange[is2011][1], symbolSet, weights)
weights = [0.3,0.0,0.7,0.0]
print simulate(dateRange[is2011][0], dateRange[is2011][1], symbolSet, weights)
weights = [0.1,0.1,0.0,0.8]
print simulate(dateRange[is2011][0], dateRange[is2011][1], symbolSet, weights)
weights = [0.4,0.4,0.0,0.2]
print simulate(dateRange[is2011][0], dateRange[is2011][1], symbolSet, weights)
print('\n')
# Quiz 5
is2011 = False
symbolSet = ['C', 'GS', 'IBM', 'HNZ']
weights = [0.0,0.0,1.0,0.0]
print simulate(dateRange[is2011][0], dateRange[is2011][1], symbolSet, weights)
weights = [0.2,0.0,0.0,0.8]
print simulate(dateRange[is2011][0], dateRange[is2011][1], symbolSet, weights)
weights = [0.4,0.6,0.0,0.0]
print simulate(dateRange[is2011][0], dateRange[is2011][1], symbolSet, weights)
weights = [0.2,0.2,0.4,0.2]
print simulate(dateRange[is2011][0], dateRange[is2011][1], symbolSet, weights)
print('\n')
| BoasWhip/Black | Code/marketData.py | Python | mit | 5,694 |
from testkit.context import *
def test_context_user():
    """ContextUser.enter()/exit() should drive the wrapped context manager."""
    from contextlib import contextmanager
    state = dict(value='before')

    @contextmanager
    def tracked(d):
        d['value'] = 'during'
        yield 'test'
        d['value'] = 'after'

    user = ContextUser(tracked(state))
    assert state['value'] == 'before'
    user.enter()
    assert state['value'] == 'during'
    user.exit()
    assert state['value'] == 'after'
class my_context(ContextDecorator):
    """Minimal ContextDecorator that records setup/teardown via flags."""

    def before(self):
        # Runs on entry: expose a value and mark teardown as not yet done.
        self.hello = 'hello'
        self.done = False

    def after(self):
        # Runs on exit (normal or exceptional).
        self.done = True
def test_context_decorator_as_decorator():
    """Used as a decorator, before() runs first and after() runs last."""
    decorator = my_context()

    @decorator
    def check(context):
        assert context.hello == 'hello'

    check()
    assert decorator.done == True
def test_context_decorator_as_decorator_exception():
    # An exception raised inside the decorated function must propagate to
    # the caller, and after() must still run (done becomes True).
    as_decorator = my_context()
    fake_message = 'A fake error!'
    @as_decorator
    def hello(context):
        raise Exception(fake_message)
    try:
        hello()
    except Exception, e:
        assert e.message == fake_message
    assert as_decorator.done == True
def test_context_decorator_as_context():
    """Used as a context manager, after() only runs once the block exits."""
    manager = my_context()
    with manager as context:
        assert context.hello == 'hello'
        assert context.done == False
    assert context.done == True
def test_context_decorator_as_context_exception():
    # An exception raised inside the with-block must propagate to the
    # caller, and after() must still run (done becomes True).
    as_context = my_context()
    fake_message = 'error!'
    try:
        with as_context as context:
            raise Exception(fake_message)
    except Exception, e:
        assert e.message == fake_message
    assert context.done == True
class my_other_context(ContextDecorator):
    """Like my_context, but before() returns the value bound by `with`."""

    def before(self):
        # Runs on entry; the return value becomes the as-target.
        self.hello = 'hello'
        self.done = False
        return self.hello

    def after(self):
        # Runs on exit.
        self.done = True
def test_context_decorator_before_returns_custom_context():
    """A non-None return from before() is what `with ... as` binds."""
    manager = my_other_context()
    with manager as greeting:
        assert greeting == 'hello'
| ravenac95/testkit | tests/test_context.py | Python | mit | 2,056 |
from heapq import *
from typing import List
class Solution:
    def findMaximizedCapital(self, k: int, wealth: int, profits: List[int], capitals: List[int]) -> int:
        """Return the maximum wealth after financing at most k projects.

        Greedy strategy: among all projects whose capital requirement is
        within the current wealth, always take the most profitable one.

        Args:
            k: maximum number of projects to finance.
            wealth: starting capital.
            profits: profit of each project (added to wealth on completion).
            capitals: capital required to start each project.
        """
        # Min-heap of (required capital, profit) pairs; heapify builds it
        # in O(n) instead of O(n log n) for n individual pushes.
        minCapitalHeap = list(zip(capitals, profits))
        heapify(minCapitalHeap)
        maxProfitHeap = []
        for _ in range(k):
            # Move every now-affordable project into the max-profit heap
            # (negated profits make heapq act as a max-heap).
            while minCapitalHeap and minCapitalHeap[0][0] <= wealth:
                _, profit = heappop(minCapitalHeap)
                heappush(maxProfitHeap, -profit)
            if not maxProfitHeap:
                # Nothing affordable now, so nothing ever will be.
                break
            wealth += -heappop(maxProfitHeap)
        return wealth
# Smoke test: k=0 means no projects may be financed, so the answer is the
# starting capital W (0).
k=0
W=0
Profits=[1,2,3,5]
Capital=[0,1,2,3]
ob = Solution()
print(ob.findMaximizedCapital(k, W, Profits, Capital))
| shobhitmishra/CodingProblems | LeetCode/Session3/ipo.py | Python | mit | 880 |
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))

# Read the long description from DESCRIPTION.md next to this file.
# NOTE(review): the value read here is never used -- setup() below passes a
# hard-coded string for long_description. Confirm whether the file read is
# still needed or whether setup() should use this value.
with open(path.join(here, 'DESCRIPTION.md'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='jestocke-mangopaysdk',
    version='3.0.6',
    description='A client library written in python to work with mangopay v2 api',
    long_description='This SDK is a client library for interacting with the Mangopay API.',
    url='https://github.com/Mangopay/mangopay2-python-sdk',
    author='Mangopay (www.mangopay.com)',
    author_email='support@mangopay.com',
    license='MIT',
    classifiers=[
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
    ],
    keywords='mangopay api development emoney sdk',
    packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
    install_requires=['requests', 'simplejson', 'blinker', 'six' ],
    extras_require={
        'dev': ['responses', 'nose', 'coverage', 'httplib2',
                'pyopenssl', 'ndg-httpsclient', 'pyasn1', 'exam'],
        'test': ['responses', 'nose', 'coverage', 'httplib2',
                 'pyopenssl', 'ndg-httpsclient', 'pyasn1', 'exam'],
    },
    entry_points={
        'console_scripts': [
            'sample=sample:main',
        ],
    },
)
| chocopoche/mangopay2-python-sdk | setup.py | Python | mit | 1,575 |
from typing import Optional
from thinc.api import Model
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
from .stop_words import STOP_WORDS
from .lex_attrs import LEX_ATTRS
from .lemmatizer import UkrainianLemmatizer
from ...language import Language
class UkrainianDefaults(Language.Defaults):
    # Language-specific data for Ukrainian: tokenizer exceptions, lexical
    # attribute getters, and stop words.
    tokenizer_exceptions = TOKENIZER_EXCEPTIONS
    lex_attr_getters = LEX_ATTRS
    stop_words = STOP_WORDS
class Ukrainian(Language):
    # Ukrainian language class, wiring the ISO 639-1 code to its defaults.
    lang = "uk"
    Defaults = UkrainianDefaults
# Register "lemmatizer" as a pipeline component factory for Ukrainian.
@Ukrainian.factory(
    "lemmatizer",
    assigns=["token.lemma"],
    default_config={"model": None, "mode": "pymorphy2", "overwrite": False},
    default_score_weights={"lemma_acc": 1.0},
)
def make_lemmatizer(
    nlp: Language, model: Optional[Model], name: str, mode: str, overwrite: bool
):
    """Construct a UkrainianLemmatizer component (default mode: pymorphy2)."""
    return UkrainianLemmatizer(nlp.vocab, model, name, mode=mode, overwrite=overwrite)
__all__ = ["Ukrainian"]
| spacy-io/spaCy | spacy/lang/uk/__init__.py | Python | mit | 903 |
from nose.tools import *
from tictactoe.ai_strategies.easy import Easy
from tictactoe.game_board import GameBoard
def easy_strategy_makes_any_opening_move_test():
    """On an empty board the Easy AI must pick some square 0-8."""
    ai = Easy("X", "O")
    opening = ai.make_move(GameBoard())
    assert_equal(True, opening in range(0, 9))
def easy_strategy_makes_move_in_nearly_full_board_test():
    """With only square 1 free, the Easy AI must play square 1."""
    ai = Easy("X", "O")
    board = GameBoard()
    played = [("X", 0), ("O", 2), ("X", 3), ("O", 4),
              ("X", 5), ("O", 6), ("X", 7), ("O", 8)]
    for player, square in played:
        board.play_move(player, square)
    assert_equal(1, ai.make_move(board))
| rickerbh/tictactoe_py | tests/ai_easy_strategy_tests.py | Python | mit | 670 |
"""Largest product in a grid
In the 20×20 grid below, four numbers along a diagonal line have been marked in red.
08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08
49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00
81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65
52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91
22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80
24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50
32 98 81 28 64 23 67 10 (26) 38 40 67 59 54 70 66 18 38 64 70
67 26 20 68 02 62 12 20 95 (63) 94 39 63 08 40 91 66 49 94 21
24 55 58 05 66 73 99 26 97 17 (78) 78 96 83 14 88 34 89 63 72
21 36 23 09 75 00 76 44 20 45 35 (14) 00 61 33 97 34 31 33 95
78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92
16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57
86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58
19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40
04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66
88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69
04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36
20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16
20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54
01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48
The product of these numbers is 26 × 63 × 78 × 14 = 1788696.
What is the greatest product of four adjacent numbers in the same direction (up, down, left, right, or diagonally) in the 20×20 grid?
"""
from euler import utils
GRID = '''
08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08
49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00
81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65
52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91
22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80
24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50
32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70
67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21
24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72
21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95
78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92
16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57
86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58
19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40
04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66
88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69
04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36
20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16
20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54
01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48
'''
def is_greater(numbers, adjacent, greatest_product):
    """Return the product of *numbers* if it sets a new record.

    Arguments:
        numbers (list of int): Candidate run of adjacent numbers.
        adjacent (int): Required length of the run.
        greatest_product (int): Current record product (or None).

    Returns:
        int: The product, when the run has the required length and its
            product beats the current record.
        None: Otherwise.
    """
    if len(numbers) != adjacent:
        return None
    candidate = utils.product(numbers)
    if greatest_product is None or candidate > greatest_product:
        return candidate
    return None
# TODO: This could be more cleanly implemented as a class.
def search_grid(serialized, adjacent):
    """Search a grid for the greatest product of adjacent numbers.

    Considers runs of `adjacent` numbers to the right, down, and along the
    up-right and down-right diagonals from every cell; visiting each run
    from one of its endpoints covers every direction in the grid.

    Arguments:
        serialized (str): Serialized representation of a grid of integers.
        adjacent (int): How many adjacent numbers to consider.

    Returns:
        int: Greatest product of adjacent numbers (None for an empty grid).
    """
    greatest_product = None
    grid = utils.deserialize_grid(serialized)
    for row_index in range(len(grid)):
        for column_index in range(len(grid[row_index])):
            for run in _runs_from(grid, row_index, column_index, adjacent):
                current_product = is_greater(run, adjacent, greatest_product)
                if current_product is not None:
                    greatest_product = current_product
    return greatest_product


def _runs_from(grid, row_index, column_index, adjacent):
    """Yield runs of up to `adjacent` numbers starting at one cell.

    Directions covered: right, down, up-right diagonal, down-right
    diagonal.  Runs truncated by a grid edge are yielded short and are
    rejected later by is_greater's length check.
    """
    # Right: slicing truncates naturally at the end of the row.
    yield grid[row_index][column_index:column_index + adjacent]
    # Remaining directions step one cell at a time and stop at an edge.
    # (row step, column step) per direction: down, up-right, down-right.
    for step_row, step_column in ((1, 0), (-1, 1), (1, 1)):
        run = []
        for i in range(adjacent):
            r = row_index + step_row * i
            c = column_index + step_column * i
            # Guard against negative indices, which would silently wrap to
            # the far side of the grid.  (The original upper-diagonal code
            # tested `row_index < 0` instead of the stepped row index, so
            # the wrap it warned about could actually happen.)
            if r < 0 or c < 0:
                break
            try:
                run.append(grid[r][c])
            except IndexError:
                # Ran off the bottom or right edge of the grid.
                break
        yield run
| rlucioni/project-euler | euler/solutions/solution_11.py | Python | mit | 6,618 |
# docker-pipeline
#
# The MIT License (MIT)
#
# Copyright (c) 2015 Dan Leehr
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from docker.client import Client
from docker.utils import kwargs_from_env
class Runner():
def __init__(self, pipeline=None):
self.pipeline = pipeline
self.client = Runner.get_client()
self.remove_containers = False
self.result = None
@classmethod
def get_client(cls):
# Using boot2docker instructions for now
# http://docker-py.readthedocs.org/en/latest/boot2docker/
# Workaround for requests.exceptions.SSLError: hostname '192.168.59.103' doesn't match 'boot2docker'
client = Client(version='auto', **kwargs_from_env(assert_hostname=False))
return client
def run(self):
if self.pipeline.debug:
print "Running pipeline: {}".format(self)
for each_step in self.pipeline.steps:
if self.pipeline.pull_images:
self.pull_image(each_step)
container = self.create_container(each_step)
self.start_container(container, each_step)
self.result = self.get_result(container, each_step)
self.finish_container(container, each_step)
if self.result['code'] != 0:
# Container exited with nonzero status code
print "Error: step exited with code {}".format(self.result['code'])
# Pipeline breaks if nonzero result is encountered
break
if self.pipeline.debug:
print 'Result: {}'.format(self.result)
def pull_image(self, step):
if self.pipeline.debug:
print 'Pulling image for step: {}'.format(step)
image_result = self.client.pull(step.image)
if self.pipeline.debug:
print image_result
def create_container(self, step):
if self.pipeline.debug:
print 'Creating container for step: {}'.format(step)
print 'Image: {}'.format(step.image)
print 'Volumes: {}'.format(step.get_volumes())
print 'Environment: {}'.format(step.environment)
container = self.client.create_container(step.image,
command=step.command,
environment=step.environment,
volumes=step.get_volumes())
return container
def start_container(self, container, step):
if self.pipeline.debug:
print 'Running container for step {}'.format(step)
print 'Binds: {}'.format(step.binds)
# client.start does not return anything
self.client.start(container, binds=step.binds)
def get_result(self, container, step):
logs = self.client.attach(container, stream=True, logs=True)
result = {'image': step.image}
print 'step: {}\nimage: {}\n==============='.format(step.name, step.image)
all_logs = str()
for log in logs:
all_logs = all_logs + log
print log,
# Store the return value
code = self.client.wait(container)
result['code'] = code
return result
def finish_container(self, container, step):
if self.pipeline.debug:
print 'Cleaning up container for step {}'.format(step)
if self.remove_containers:
self.client.remove_container(container)
| Duke-GCB/docker-pipeline | docker-pipeline/runner.py | Python | mit | 4,468 |
# Download the Python helper library from twilio.com/docs/python/install
from twilio.rest import Client

# Your Account Sid and Auth Token from twilio.com/user/account
account_sid = "ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
auth_token = "your_auth_token"
client = Client(account_sid, auth_token)

# Retrieve the "Players" Sync Map from the Sync service and report its SID.
sync_service = client.sync.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
map_instance = sync_service.sync_maps("Players").fetch()

print(map_instance.sid)
| teoreteetik/api-snippets | sync/rest/maps/retrieve-map/retrieve-map.6.x.py | Python | mit | 441 |