text stringlengths 8 6.05M |
|---|
from .eLABJournalPager import *


class SampleSeries(eLABJournalPager):
    # Pager over eLABJournal sample series; all behaviour is inherited from
    # eLABJournalPager (imported above via star-import) — TODO confirm against
    # the eLABJournalPager module.
    pass
# -*- coding: utf-8 -*-
"""
Main pyramid_basemodel module.
Provides global scoped ``Session`` and declarative ``Base``, ``BaseMixin``
class and ``bind_engine`` function.
To use, import and, e.g.: inherit from the base classes::
>>> class MyModel(Base, BaseMixin):
... __tablename__ = 'my_model'
...
>>> instance = MyModel()
>>> Session.add(instance)
>>> # etc.
To automatically bind the base metadata and session to your db engine, just
include the package::
config.include('pyramid_basemodel')
"""
__version__ = "0.4.0"
__all__ = [
"Base",
"BaseMixin",
"Session",
"bind_engine",
]
import inflect
from datetime import datetime
from zope.interface import classImplements
from zope.sqlalchemy import register
from sqlalchemy import engine_from_config
from sqlalchemy import Column, DateTime, Integer
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import scoped_session, sessionmaker
from pyramid.path import DottedNameResolver
from pyramid.settings import asbool
from pyramid_basemodel.interfaces import IDeclarativeBase
# Thread-local scoped SQLAlchemy session, registered with zope.sqlalchemy so
# it participates in the transaction manager.
Session = scoped_session(sessionmaker())
register(Session)
# Shared declarative base for all models; marked as providing IDeclarativeBase.
Base = declarative_base()
classImplements(Base, IDeclarativeBase)
class classproperty:
    """Descriptor exposing a method as a read-only property on the class.

    See http://stackoverflow.com/a/3203659 for the original recipe.
    """

    def __init__(self, getter):
        self.getter = getter

    def __get__(self, instance, owner):
        # Always call the wrapped function with the *class*, whether the
        # attribute was accessed on the class or on an instance.
        result = self.getter(owner)
        return result
class BaseMixin:
    """
    Default Base Model Mixin.

    Provides an int ``id`` as primary key, ``version``, ``created`` and
    ``modified`` columns and a scoped ``self.query`` property.
    """

    #: primary key
    id = Column(Integer, primary_key=True)
    #: schema version
    version = Column("v", Integer, default=1)
    #: timestamp of object creation
    created = Column("c", DateTime, default=datetime.utcnow)
    #: timestamp of object's latest update
    modified = Column("m", DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)
    #: query property bound to the scoped ``Session``
    query = Session.query_property()

    @classproperty
    def class_name(cls):
        """
        Determine the class name.

        Defaults to ``cls._class_name`` if provided, otherwise singularises
        ``cls.plural_class_name`` (which is derived from ``cls.__tablename__``).
        If singularising doesn't work, falls back on ``cls.__name__``.
        """
        # Manual override wins.
        if hasattr(cls, "_class_name"):
            return cls._class_name
        # ``inflect`` is imported unconditionally at module level, so no
        # availability guard is needed (the old dead guard in
        # ``singular_class_slug`` has been dropped for consistency).
        name = inflect.engine().singular_noun(cls.plural_class_name)
        if name:
            return name
        # Singularisation failed: fall back on the literal class name.
        return cls.__name__

    @classproperty
    def class_slug(cls):
        """Class slug based on either ``_class_slug`` or ``__tablename__``."""
        return getattr(cls, "_class_slug", cls.__tablename__)

    @classproperty
    def singular_class_slug(cls):
        """Return singular version of ``cls.class_slug``."""
        # Manual override wins.
        if hasattr(cls, "_singular_class_slug"):
            return cls._singular_class_slug
        # Otherwise singularise the class_slug.
        slug = inflect.engine().singular_noun(cls.class_slug)
        if slug:
            return slug
        # Singularisation failed: fall back on the lowercased last word of
        # the class name.
        return cls.class_name.split()[-1].lower()

    @classproperty
    def plural_class_name(cls):
        """Return plural version of the class name."""
        # Manual override wins.
        if hasattr(cls, "_plural_class_name"):
            return cls._plural_class_name
        # Otherwise derive it from the table name, e.g.
        # "user_profiles" -> "User Profiles".
        return cls.__tablename__.replace("_", " ").title()
def save(instance_or_instances, session=Session):
    """
    Save model instance(s) to the db.

    Both a single instance and a list/tuple of instances can be saved.

    :param instance_or_instances: one model instance, or a list/tuple of them.
    :param session: the session to add to; defaults to the scoped ``Session``.
    """
    v = instance_or_instances
    # Idiomatic single isinstance call with a tuple of types.
    if isinstance(v, (list, tuple)):
        session.add_all(v)
    else:
        session.add(v)
def bind_engine(engine, session=Session, base=Base, should_create=False, should_drop=False):
    """
    Bind the ``session`` and ``base`` to the ``engine``.

    :param engine: the SQLAlchemy engine to bind to.
    :param session: the scoped session to configure (defaults to ``Session``).
    :param base: the declarative base whose metadata is bound (defaults to ``Base``).
    :param should_create: Triggers create tables on all models
    :param should_drop: Triggers drop on all tables
    """
    session.configure(bind=engine)
    base.metadata.bind = engine
    # Drop must run before create so both flags together mean "reset schema".
    if should_drop:
        base.metadata.drop_all(engine)
    if should_create:
        base.metadata.create_all(engine)
def includeme(config):
    """Bind to the db engine specified in ``config.registry.settings``.

    Recognised settings: ``sqlalchemy.engine_kwargs_factory``,
    ``sqlalchemy.pool_class``, ``basemodel.should_bind_engine``,
    ``basemodel.should_create_all`` and ``basemodel.should_drop_all``.
    """
    settings = config.get_settings()

    # Optional dotted-name factory producing extra engine kwargs. It is
    # popped so engine_from_config never sees it.
    factory_name = settings.pop("sqlalchemy.engine_kwargs_factory", None)
    engine_kwargs = {}
    if factory_name:
        factory = config.maybe_dotted(factory_name)
        engine_kwargs = factory(config.registry)

    # Optional dotted path to a SQLAlchemy pool class.
    pool_class = settings.pop("sqlalchemy.pool_class", None)
    if pool_class:
        engine_kwargs["poolclass"] = DottedNameResolver().resolve(pool_class)

    should_bind = asbool(settings.get("basemodel.should_bind_engine", True))
    should_create = asbool(settings.get("basemodel.should_create_all", False))
    should_drop = asbool(settings.get("basemodel.should_drop_all", False))

    if not should_bind:
        return

    engine = engine_from_config(settings, "sqlalchemy.", **engine_kwargs)
    config.action(
        None,
        bind_engine,
        (engine,),
        {"should_create": should_create, "should_drop": should_drop},
    )
|
#!/usr/bin/env python
from os import path
from subprocess import call
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
# Defaults for the optional CLI flags.
DEFAULT_LANG = "en"
DEFAULT_OUTPUT = path.normpath(path.join(path.dirname(__file__), "dist"))
# Intermediate JSON produced by rpy2json and consumed by json2html.
JSON = path.join(path.dirname(__file__), "generated-json/game.json")


def get_args():
    """Parse the command line and return the argparse namespace."""
    parser = ArgumentParser(
        description="Takes a Ren'Py game and bundles it to a html file.",
        formatter_class=ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        "renpy",
        metavar="<renpy executable>",
        help="path to renpy.sh executable")
    parser.add_argument(
        "game_base",
        metavar="<renpy game folder>",
        help="path to the renpy game folder")
    # nargs=1 keeps each optional value wrapped in a one-element list,
    # matching how the caller indexes args.game_lang[0] / args.output_dir[0].
    parser.add_argument(
        "--lang",
        dest="game_lang",
        nargs=1,
        default=[DEFAULT_LANG],
        metavar="<game_lang=%s>" % DEFAULT_LANG,
        help="output game language")
    parser.add_argument(
        "-o",
        dest="output_dir",
        nargs=1,
        default=[DEFAULT_OUTPUT],
        metavar="<output_dir=%s>" % DEFAULT_OUTPUT,
        help="path to output folder")
    return parser.parse_args()
if __name__ == "__main__":
    args = get_args()
    here = path.dirname(__file__)
    rpy2json = path.join(here, "rpy2json/bin/rpy2json")
    json2html = path.join(here, "json2html/bin/json2html")
    # Step 1: convert the Ren'Py game into the intermediate JSON file.
    code = call([rpy2json, args.renpy, args.game_base, "--lang", args.game_lang[0], "-o", JSON])
    if code != 0:
        exit(code)
    # Step 2: render the JSON into the final html bundle.
    exit(call([json2html, JSON, "-o", args.output_dir[0]]))
|
# -*- coding: utf-8 -*-
# !/usr/bin/env python
import tensorflow as tf
import ConfigParser
import json
from word import Word
from data_unit import cut2list
def add_gradient_noise(t, stddev=1e-3, name=None):
    """
    Adds gradient noise as described in http://arxiv.org/abs/1511.06807 [2].

    The input Tensor `t` should be a gradient.
    The output will be `t` + gaussian noise.
    0.001 was said to be a good fixed value for memory networks [2].
    """
    # NOTE(review): tf.op_scope is an early-TF1 API (removed in later
    # releases); the successor is tf.name_scope(name, "add_gradient_noise",
    # [t, stddev]) — confirm the pinned TF version before upgrading.
    with tf.op_scope([t, stddev], name, "add_gradient_noise") as name:
        t = tf.convert_to_tensor(t, name="t")
        gn = tf.random_normal(tf.shape(t), stddev=stddev)
        return tf.add(t, gn, name=name)
class TextCNN(object):
    """
    A CNN for text classification.

    Uses an embedding layer, followed by a convolutional, max-pooling and
    softmax layer. Written against TF 1.x / Python 2 (``unicode``,
    ``raw_input`` in the driver script).
    """

    def __init__(self, sentence_size, answer_size, filter_sizes, num_filters,
                 l2_reg_lambda=0.0,
                 embedding_size=128,
                 max_grad_norm=40.0,
                 session=None,
                 name='TextCNN'):
        """Build the graph and initialise all variables.

        :param sentence_size: number of word positions per input sentence.
        :param answer_size: number of output classes.
        :param filter_sizes: iterable of convolution window heights.
        :param num_filters: number of filters per window height.
        :param l2_reg_lambda: weight of the L2 penalty on the output layer.
        :param embedding_size: dimensionality of the input word vectors.
        :param max_grad_norm: stored for API compatibility; not used below.
        :param session: optional tf.Session; a fresh one is created when None.
        :param name: model name (stored, not used in scoping).
        """
        # BUG FIX: the old signature was ``session=tf.Session()`` which, as a
        # default argument, created one shared session at import time.
        # Resolve the default lazily instead (backward compatible: callers
        # that pass a session are unaffected).
        if session is None:
            session = tf.Session()
        self.writer = tf.summary.FileWriter("./tensorboard/logs", session.graph)
        self.answer = []
        self.word = Word()
        self._answer_size = answer_size
        self._sentence_size = sentence_size
        self._embedding_size = embedding_size  # (was assigned twice before)
        self._max_grad_norm = max_grad_norm
        self._name = name
        self._filter_sizes = filter_sizes
        self._num_filters = num_filters
        self._l2_reg_lambda = l2_reg_lambda
        self.loss = None
        self.accuracy = None
        self.predictions = None
        self._build_inputs()
        self._inference()
        self.global_step = tf.Variable(0, name="global_step", trainable=False)
        self._optimizer = tf.train.AdamOptimizer(1e-3)
        # gradient pipeline
        grads_and_vars = self._optimizer.compute_gradients(self.loss)
        train_op = self._optimizer.apply_gradients(grads_and_vars, global_step=self.global_step)
        # Summaries for loss and accuracy
        tf.summary.scalar("loss", self.loss)
        tf.summary.scalar("accuracy", self.accuracy)
        # assign ops
        self.loss_op = self.loss
        self.predict_op = self.predictions
        self.train_op = train_op
        init_op = tf.global_variables_initializer()
        self._sess = session
        self._sess.run(init_op)
        self.saver = tf.train.Saver()
        self.merge = tf.summary.merge_all()

    def _build_inputs(self):
        """Create placeholders: pre-embedded sentences, labels, dropout prob."""
        self.input_x = tf.placeholder(tf.float32, [None, self._sentence_size, self._embedding_size], name="input_x")
        self.input_y = tf.placeholder(tf.float32, [None, self._answer_size], name="input_y")
        self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")

    def _inference(self):
        """Build the conv / max-pool / softmax graph.

        Sets ``self.loss``, ``self.accuracy``, ``self.predictions`` and
        ``self.scores``.
        """
        # Keeping track of l2 regularization loss (optional)
        l2_loss = tf.constant(0.0)

        # "Embedding" layer: inputs arrive pre-embedded (see _build_inputs);
        # just add the channel dimension conv2d expects.
        with tf.device('/cpu:0'), tf.name_scope("embedding"):
            self.embedded_chars = self.input_x
            self.embedded_chars_expanded = tf.expand_dims(self.embedded_chars, -1)

        # Create a convolution + maxpool layer for each filter size
        pooled_outputs = []
        for i, filter_size in enumerate(self._filter_sizes):
            with tf.name_scope("conv-maxpool-%s" % filter_size):
                # Convolution Layer
                filter_shape = [filter_size, self._embedding_size, 1, self._num_filters]
                W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name="W")
                b = tf.Variable(tf.constant(0.1, shape=[self._num_filters]), name="b")
                conv = tf.nn.conv2d(
                    self.embedded_chars_expanded,
                    W,
                    strides=[1, 1, 1, 1],
                    padding="VALID",
                    name="conv")
                # Apply nonlinearity
                h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")
                # Max-pool over the whole (sentence_size - filter_size + 1)
                # valid-conv output, leaving one feature per filter.
                pooled = tf.nn.max_pool(
                    h,
                    ksize=[1, self._sentence_size - filter_size + 1, 1, 1],
                    strides=[1, 1, 1, 1],
                    padding='VALID',
                    name="pool")
                pooled_outputs.append(pooled)

        # Combine all the pooled features
        num_filters_total = self._num_filters * len(self._filter_sizes)
        self.h_pool = tf.concat(pooled_outputs, 3)
        self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])

        # Add dropout
        with tf.name_scope("dropout"):
            self.h_drop = tf.nn.dropout(self.h_pool_flat, self.dropout_keep_prob)

        # Final (unnormalized) scores and predictions
        with tf.name_scope("output"):
            W = tf.get_variable(
                "W",
                shape=[num_filters_total, self._answer_size],
                initializer=tf.contrib.layers.xavier_initializer())
            b = tf.Variable(tf.constant(0.1, shape=[self._answer_size]), name="b")
            l2_loss += tf.nn.l2_loss(W)
            l2_loss += tf.nn.l2_loss(b)
            self.scores = tf.nn.xw_plus_b(self.h_drop, W, b, name="scores")
            self.predictions = tf.argmax(self.scores, 1, name="predictions")

        # Calculate mean cross-entropy loss, plus the weighted L2 penalty.
        with tf.name_scope("loss"):
            losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.scores, labels=self.input_y)
            self.loss = tf.reduce_mean(losses) + self._l2_reg_lambda * l2_loss

        # Accuracy
        with tf.name_scope("accuracy"):
            correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")

    def batch_fit(self, queries, answers, dropout_keep_prob):
        """Runs the training algorithm over the passed batch

        Args:
            queries: Tensor (None, sentence_size, embedding_size)
            answers: Tensor (None, answer_size)
            dropout_keep_prob: keep probability for the dropout layer
        Returns:
            (loss, summary): batch loss and the merged summary proto
        """
        feed_dict = {self.input_x: queries, self.input_y: answers, self.dropout_keep_prob: dropout_keep_prob}
        loss, _, summary = self._sess.run([self.loss_op, self.train_op, self.merge], feed_dict=feed_dict)
        return loss, summary

    def predict(self, queries, dropout_keep_prob):
        """Predicts answer class indices for a batch of queries.

        Args:
            queries: Tensor (None, sentence_size, embedding_size)
        Returns:
            predicted class index per query (argmax over scores)
        """
        feed_dict = {self.input_x: queries, self.dropout_keep_prob: dropout_keep_prob}
        return self._sess.run(self.predict_op, feed_dict=feed_dict)

    def load(self, checkpoint_dir):
        """Load the answer list from disk and restore weights from a checkpoint."""
        tf.logging.warning("model start load")
        with open("./data/ans.json", 'r') as pf:
            self.answer = json.load(pf)
        ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            self.saver.restore(self._sess, ckpt.model_checkpoint_path)
            tf.logging.warning("model restore success")
        else:
            tf.logging.error("model restore wrong!")

    def string_to_vec(self, string):
        """Embed a raw sentence into a (sentence_size, embedding_size) matrix.

        Words beyond ``sentence_size`` would raise IndexError — assumes
        cut2list never returns more than sentence_size tokens (TODO confirm).
        """
        # BUG FIX: was ``[[0] * e] * s`` where every row aliased the *same*
        # list; build independent zero rows instead.
        vector = [[0] * self._embedding_size for _ in range(self._sentence_size)]
        strlist = cut2list(string)
        for index, word in enumerate(strlist):
            vecjs = self.word.word_vec(word)
            vec = json.loads(vecjs)
            # Python 2: word_vec may hand back doubly-encoded JSON; unwrap
            # until we get a real list.
            while isinstance(vec, unicode):
                vec = json.loads(vec)
            vector[index] = vec
        return vector

    def vec_to_answer(self, maxindex):
        """Map a predicted class index back to its answer text."""
        return self.answer[maxindex]

    def respond(self, query, dropout_keep_prob):
        """End to end: embed ``query``, predict a class, return its answer text."""
        qvec = self.string_to_vec(query)
        feed_dict = {self.input_x: [qvec], self.dropout_keep_prob: dropout_keep_prob}
        maxindex = self._sess.run(self.predict_op, feed_dict=feed_dict)
        answer = self.vec_to_answer(maxindex[0])
        return answer
if __name__ == "__main__":
    # Python 2 driver script (ConfigParser / raw_input).
    mdir = "./tensorboard/logs/"  # NOTE(review): unused
    # Read all hyperparameters from the CNN config file.
    conf = ConfigParser.ConfigParser()
    conf.read("./data/CNN.cfg")
    answer_size = int(conf.get("CNN", "answer_size"))
    sentence_size = int(conf.get("CNN", "sentence_size"))
    filter_sizes = conf.get("CNN", "filter_sizes")
    num_filters = int(conf.get("CNN", "num_filters"))
    l2_reg_lambda = float(conf.get("CNN", "l2_reg_lambda"))
    dropout_keep_prob = float(conf.get("CNN", "dropout_keep_prob"))
    with tf.Session() as sess:
        # filter_sizes is stored as a comma separated string, e.g. "3,4,5".
        model = TextCNN(sentence_size, answer_size,
                        filter_sizes=list(map(int, filter_sizes.split(","))),
                        num_filters=num_filters,
                        l2_reg_lambda=l2_reg_lambda,
                        session=sess)
        model.load('./model/cnn/')
        # Minimal interactive REPL: read a query, print the model's answer.
        while(1):
            print(model.respond(raw_input(">"), dropout_keep_prob))
|
# input number and get a perfect number from 1 to number
def perfect(num) :
sum = 0
for i in range(1,num+1):
for j in range(1,i):
if i%j==0 :
sum+=j
if sum == i :
print sum, #print("%d "%sum, end= ' ') python 3.0
sum = 0
# Read the upper bound from the user and report all perfect numbers up to it.
number = int(input("input the number: "))
perfect(number)
|
from rest_framework.serializers import ModelSerializer
from tracks.api.serializers import lesson_serializer
from contacts.api.serializers import user_serializer
from ..models import question,answer
class question_serializer (ModelSerializer):
    """Serialise a question with its (optional) nested lesson and author."""
    lesson = lesson_serializer(required=False)
    user = user_serializer(required=False)

    class Meta:
        model = question
        fields = '__all__'
class answer_serializer (ModelSerializer):
    """Serialise an answer with its nested question, author and likers."""
    question = question_serializer(required=False)
    user = user_serializer(required=False)
    likes = user_serializer(many=True,required=False)

    class Meta :
        model = answer
        fields = '__all__'
|
# Read the rectangle's height and width from the user and print its area.
h = input("Digite o valor da altura: ")  # height
h = int(h)
b = input("Digite o valor da largura: ")  # width
b = int(b)
# NOTE(review): the variable is named "soma" (sum) but actually holds the
# area (b * h); the printed message is correct.
soma = b * h
print("A área do retângulo é: ", soma, "metros quadrados.")
|
#!/usr/bin/python3
def divisible_by_2(my_list=[]):
    """Return a list of booleans, True where the matching element is even.

    :param my_list: list of integers (left untouched; a new list is returned).
    :return: list of the same length with True/False per element.
    """
    # Idiomatic comprehension instead of the original copy-then-overwrite
    # index loop; the input list is never mutated, so the (discouraged)
    # mutable default is harmless and kept for interface compatibility.
    return [element % 2 == 0 for element in my_list]
|
import pickle
from pathlib import Path
from typing import List
import torch
from torch.utils.data import Dataset
import Voxel_MOF
class MOFDataset(Dataset):
    """Dataset of voxelised MOFs loaded from a pickle file.

    Each pickled item is expected to expose ``loc_tensor``, ``grid_tensor``
    and ``data`` attributes; which one __getitem__ serves depends on the
    ``no_grid`` / ``no_loc`` flags.
    """

    def __init__(self, path, no_grid=False, no_loc=False, transform=None):
        """Load the pickled MOF list from ``path``.

        :param path: path to the pickle file.
        :param no_grid: serve only each item's location tensor.
        :param no_loc: serve only each item's grid tensor.
        :param transform: stored for API compatibility; never applied here.
        """
        self.path = path
        self.no_grid = no_grid
        self.no_loc = no_loc
        path = Path(path)
        with path.open("rb") as f:
            self.data: List[Voxel_MOF] = pickle.load(f)
        self.transform = transform

    def __len__(self):
        """Number of MOFs in the dataset."""
        return len(self.data)

    def __getitem__(self, idx):
        """Return the tensor selected by the ``no_grid``/``no_loc`` flags."""
        item = self.data[idx]
        if self.no_grid:
            return item.loc_tensor
        elif self.no_loc:
            return item.grid_tensor
        else:
            return item.data

    @staticmethod
    def get_data_loader(path: str, batch_size: int, no_grid=False, no_loc=False, shuffle=True):
        """Build a DataLoader over a MOFDataset.

        ``shuffle`` is a new keyword; its default (True) preserves the old
        always-shuffle behaviour, so existing callers are unaffected.
        """
        return torch.utils.data.DataLoader(
            MOFDataset(path, no_grid=no_grid, no_loc=no_loc),
            batch_size=batch_size,
            shuffle=shuffle,
        )
def main():
    """Iterate over the test MOF loader once, printing each batch's shape."""
    data_loader = MOFDataset.get_data_loader("../3D_Grid_Data/Test_MOFS.p", 25)
    for batch_index, mof_batch in enumerate(data_loader):
        print(batch_index, mof_batch.shape)


if __name__ == '__main__':
    main()
# NOTE(review): this block runs at import time, *after* the __main__ guard
# above, and references names not defined anywhere in this file (``AE`` and
# ``cuda``). It looks like a leftover paste from a training script and will
# raise NameError whenever "AE_MODEL_FULL.p" is absent — confirm intent and
# remove or guard it.
model = None
saved_model = Path("AE_MODEL_FULL.p")
if (saved_model.is_file()):
    print("Loading Saved Model")
    model = torch.load("AE_MODEL_FULL.p")
else:
    model = AE.ConvolutionalAE(2048, 11)
if cuda:
    model.cuda()
|
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from . import views
# LECTURE_2 URLS -->
# LECTURE_2 URLS -->
router = DefaultRouter()
# Public (read-only) product endpoints.
router.register('products', views.PublicProductViewSet)
router.register('products/category', views.PublicProductCategoryViewSet, 'public_category')
# Company-scoped (management) product endpoints.
router.register('company/products', views.CompanyProductViewSet, 'company_products')
router.register('company/products/category', views.CompanyProductCategoryViewSet, 'company_category')
# LECTURE_2 URLS <--

urlpatterns = [
    path('', views.index, name='index'),
    path('categories/', views.CategoriesListView.as_view()),
    path('categories/<int:pk>/', views.CategoryView.as_view()),
    path('details/<int:pk>/', views.ProductView.as_view()),
    path('api/v1/', include(router.urls)),  # LECTURE_2 URLS
]
|
from django.db import models
from django.core.validators import FileExtensionValidator
from django.utils.html import format_html
from django.utils.functional import cached_property
from django_resized import ResizedImageField
from django.shortcuts import reverse
import uuid
import os
def get_image_path(instance, img_name):
    """Return a unique jpg upload path for a model image field.

    :param instance: the model instance the file is attached to (unused).
    :param img_name: the uploaded file's original name (discarded; the name
        is replaced by a uuid4 and the extension is forced to jpg).
    :return: relative path like ``img_dir/working_app/<uuid>.jpg``.
    """
    # BUG FIX: the original computed the new name but returned nothing, so an
    # upload_to callback using it would receive None.
    img_dir = 'img_dir/working_app/'
    img_extension = 'jpg'
    img_name = '%s.%s' % (uuid.uuid4(), img_extension)
    return os.path.join(img_dir, img_name)
class HomePage(models.Model):
    '''Model for the site's home page: SEO fields, slider texts, product
    heading, map coordinates and the feedback-form title.'''
    title = models.CharField(
        'Заголовок', max_length=255, help_text='Обязательное поле'
    )
    keywords = models.CharField(
        'Ключевые слова', max_length=255, blank=True
    )
    description = models.TextField(
        'Описание', blank=True
    )
    slide_photo_heading_text = models.CharField(
        'Заголовок над слайдером', max_length=255, blank=True
    )
    slide_photo_bottom_text = models.TextField(
        'Текст под слайдером', blank=True
    )
    product_heading_text = models.CharField(
        'Заголовок продуктов', max_length=255, blank=True
    )
    # Map coordinates with 2 decimal places — NOTE(review): this is coarse
    # (~1 km precision); confirm it is enough for the map widget.
    map_latitude = models.DecimalField(
        'Широта', max_digits=5, decimal_places=2
    )
    map_longitude = models.DecimalField(
        'Долгота', max_digits=5, decimal_places=2
    )
    feedback_form_title = models.CharField(
        'Заголовок формы фидбека', max_length=255, blank=True
    )

    class Meta:
        verbose_name = "данные"
        verbose_name_plural = "Главная страница"

    def __str__(self):
        return self.title
class HomePageImage(models.Model):
    '''Slider image attached to the home page (resized to 1280x860 jpg).'''
    homepage = models.ForeignKey(
        HomePage, on_delete=models.CASCADE, blank=True, verbose_name='Картинка для слайдера',
        related_name='homepage_image'
    )
    image = ResizedImageField(
        'Фото для слайдера', size=[1280, 860], crop=['middle', 'center'],
        quality=80,
        upload_to='images/', validators=[FileExtensionValidator(['jpg', 'jpeg'])],
        help_text='Допускаются только изображения в формате .jpg, .jpeg'
    )

    # cached_property: otherwise the admin preview would keep showing the old
    # image after a change (translated from the original Russian comment)
    @cached_property
    def display_image(self):
        return format_html('<img src="{img}" width="300">', img=self.image.url)
    display_image.short_description = 'Предпросмотр изображения'

    class Meta:
        verbose_name = "слайд"
        verbose_name_plural = "слайды"

    def __str__(self):
        return 'элемент'
class Product(models.Model):
    # Product page: SEO fields, name/slug, main image and three optional
    # heading + text content sections.
    title = models.CharField(
        'Заголовок', max_length=255, help_text='Обязательное поле'
    )
    keywords = models.CharField(
        'Ключевые слова', max_length=255, blank=True
    )
    description = models.TextField(
        'Описание', blank=True
    )
    name = models.CharField(
        'Название продукта', max_length=255, help_text='Обязательное поле'
    )
    date_pub = models.DateTimeField(
        auto_now_add=True
    )
    slug = models.SlugField(
        'URL', max_length=255, unique=True, null=True
    )
    image = ResizedImageField(
        'Фото продукта', size=[1280, 860], crop=['middle', 'center'],
        quality=80,
        upload_to='images/', validators=[FileExtensionValidator(['jpg', 'jpeg'])],
        help_text='Допускаются только изображения в формате .jpg, .jpeg',
        blank=True, null=True
    )
    heading1 = models.CharField(
        'Заголовок продукта 1', max_length=255,
        blank=True
    )
    heading2 = models.CharField(
        'Заголовок продукта 2', max_length=255,
        blank=True
    )
    heading3 = models.CharField(
        'Заголовок продукта 3', max_length=255,
        blank=True
    )
    text1 = models.TextField(
        'Содержание пункта 1', blank=True
    )
    text2 = models.TextField(
        'Содержание пункта 2', blank=True
    )
    text3 = models.TextField(
        'Содержание пункта 3', blank=True
    )

    # cached_property: otherwise the admin preview would keep showing the old
    # image after a change (translated from the original Russian comment)
    # NOTE(review): image is nullable, so this raises when no image is set.
    @cached_property
    def display_image(self):
        return format_html('<img src="{img}" width="300">', img=self.image.url)
    display_image.short_description = 'Предпросмотр изображения'

    def get_absolute_url(self):
        '''Return the product's detail-page URL built from its slug.'''
        return reverse('product_detail_url', kwargs={'slug': self.slug})

    class Meta:
        verbose_name = "продукт"
        verbose_name_plural = "продукты"
        ordering = ['-date_pub']

    def __str__(self):
        return self.name
"""
resources.py provides useful tools for resources processing.
There are 2 commands available.
- clean: clean and unify the resources file names with some rules.
- round: generate the rounded images from the original squared images.
"""
import os
import subprocess
import sys
import config as cfg
from . import resource_dir
# Usage string printed when too few CLI arguments are supplied.
_usage = "Usage: resource.py <cmd> <pvd>"
def cleaner_onprem(f):
    """Normalise an on-prem resource file name: dashes and lowercase."""
    return f.replace("_", "-").lower()
def _strip_provider_prefix(f, pvd):
    """Drop the first matching cfg.FILE_PREFIXES[pvd] prefix from f, if any."""
    for prefix in cfg.FILE_PREFIXES[pvd]:
        if f.startswith(prefix):
            return f[len(prefix):]
    return f


def cleaner_aws(f):
    """Normalise an AWS resource file name."""
    f = f.replace("_", "-")
    f = f.replace("@4x", "")
    f = f.replace("@5x", "")
    f = f.replace("2.0", "2-0")
    # Order matters: strip the 4x variant before the plain suffix.
    f = f.replace("-light-bg4x", "")
    f = f.replace("-light-bg", "")
    return _strip_provider_prefix(f, "aws").lower()


def cleaner_azure(f):
    """Normalise an Azure resource file name."""
    f = f.replace("_", "-")
    f = f.replace("(", "").replace(")", "")
    # Collapse whitespace runs into single dashes.
    f = "-".join(f.split())
    return _strip_provider_prefix(f, "azure").lower()


def cleaner_gcp(f):
    """Normalise a GCP resource file name."""
    f = f.replace("_", "-")
    f = "-".join(f.split())
    return _strip_provider_prefix(f, "gcp").lower()


def cleaner_ibm(f):
    """Normalise an IBM resource file name."""
    f = f.replace("_", "-")
    f = "-".join(f.split())
    return _strip_provider_prefix(f, "ibm").lower()


def cleaner_firebase(f):
    """Normalise a Firebase resource file name."""
    f = f.replace("_", "-")
    f = "-".join(f.split())
    return _strip_provider_prefix(f, "firebase").lower()


def cleaner_k8s(f):
    """Normalise a Kubernetes resource file name."""
    f = f.replace("-256", "")
    return _strip_provider_prefix(f, "k8s").lower()


def cleaner_digitalocean(f):
    """Normalise a DigitalOcean resource file name."""
    f = f.replace("-32", "")
    return _strip_provider_prefix(f, "digitalocean").lower()


def cleaner_alibabacloud(f):
    """Normalise an Alibaba Cloud resource file name."""
    return _strip_provider_prefix(f, "alibabacloud").lower()


def cleaner_oci(f):
    """Normalise an OCI resource file name."""
    f = f.replace(" ", "-")
    f = f.replace("_", "-")
    return _strip_provider_prefix(f, "oci").lower()
# The following providers ship file names that are already dash-separated;
# they only need lowercasing.
def cleaner_programming(f):
    return f.lower()


def cleaner_generic(f):
    return f.lower()


def cleaner_saas(f):
    return f.lower()


def cleaner_elastic(f):
    return f.lower()


def cleaner_outscale(f):
    return f.lower()


def cleaner_openstack(f):
    return f.lower()
# Dispatch table: provider key -> file name cleaner used by clean_png().
cleaners = {
    "onprem": cleaner_onprem,
    "aws": cleaner_aws,
    "azure": cleaner_azure,
    "digitalocean": cleaner_digitalocean,
    "gcp": cleaner_gcp,
    "ibm": cleaner_ibm,
    "firebase": cleaner_firebase,
    "k8s": cleaner_k8s,
    "alibabacloud": cleaner_alibabacloud,
    "oci": cleaner_oci,
    "programming": cleaner_programming,
    "saas": cleaner_saas,
    "elastic": cleaner_elastic,
    "outscale": cleaner_outscale,
    "generic": cleaner_generic,
    "openstack": cleaner_openstack,
}
def clean_png(pvd: str) -> None:
    """Refine the resources file names for provider ``pvd``.

    Walks the provider's resource directory and renames every .png using the
    provider's cleaner from the ``cleaners`` table.
    """

    def _rename(base: str, png: str):
        # Map the raw name to the cleaned name and rename in place.
        new = cleaners[pvd](png)
        old_path = os.path.join(base, png)
        new_path = os.path.join(base, new)
        os.rename(old_path, new_path)

    for root, _, files in os.walk(resource_dir(pvd)):
        # Plain loop instead of a side-effect list comprehension.
        for png in files:
            if png.endswith(".png"):
                _rename(root, png)
def round_png(pvd: str) -> None:
    """Round the images for provider ``pvd``.

    Runs the configured rounding command on every .png that is not already a
    "rounded" variant.
    """

    def _round(base: str, path: str):
        path = os.path.join(base, path)
        subprocess.run([cfg.CMD_ROUND, *cfg.CMD_ROUND_OPTS, path])

    for root, _, files in os.walk(resource_dir(pvd)):
        # Plain loop instead of a side-effect list comprehension.
        for png in files:
            if png.endswith(".png") and "rounded" not in png:
                _round(root, png)
def svg2png(pvd: str) -> None:
    """Convert every svg under the provider dir into png, removing the svg."""

    def _convert(base: str, path: str):
        path = os.path.join(base, path)
        subprocess.run([cfg.CMD_SVG2PNG, *cfg.CMD_SVG2PNG_OPTS, path])
        subprocess.run(["rm", path])

    for root, _, files in os.walk(resource_dir(pvd)):
        # Plain loop instead of a side-effect list comprehension.
        for svg in files:
            if svg.endswith(".svg"):
                _convert(root, svg)
def svg2png2(pvd: str) -> None:
    """Convert every svg into png using ImageMagick, removing the svg."""

    def _convert(base: str, path: str):
        path_src = os.path.join(base, path)
        path_dest = path_src.replace(".svg", ".png")
        subprocess.run([cfg.CMD_SVG2PNG_IM, *cfg.CMD_SVG2PNG_IM_OPTS, path_src, path_dest])
        subprocess.run(["rm", path_src])

    for root, _, files in os.walk(resource_dir(pvd)):
        # Plain loop instead of a side-effect list comprehension.
        for svg in files:
            if svg.endswith(".svg"):
                _convert(root, svg)
# fmt: off
# Dispatch table for the CLI: command name -> handler taking a provider key.
commands = {
    "clean": clean_png,
    "round": round_png,
    "svg2png": svg2png,
    "svg2png2": svg2png2,
}
# fmt: on
if __name__ == "__main__":
    # Expect exactly: resource.py <cmd> <pvd>
    if len(sys.argv) < 3:
        print(_usage)
        sys.exit()
    cmd, pvd = sys.argv[1], sys.argv[2]
    # Silently exit on an unknown command or provider, as before.
    if cmd in commands and pvd in cfg.PROVIDERS:
        commands[cmd](pvd)
    else:
        sys.exit()
|
from pippi import dsp
from pippi import tune
# MIDI device map consumed via ctl.get('midi') — TODO confirm the pippi
# convention for this mapping.
midi = {'lpd': 3}


def play(ctl):
    """Generate one voice: a band-limited noise pulse driven by LPD pads.

    NOTE(review): comments below record review observations about apparently
    dead/disabled code; confirm against the pippi API before cleaning up.
    """
    param = ctl.get('param')  # NOTE(review): unused
    lpd = ctl.get('midi').get('lpd')

    # NOTE(review): unused table — superseded by the low/high values below.
    freqs = [
        (10000, 15000),
        (5000, 15000),
        (5000, 10000),
    ]

    # NOTE(review): these two random values are immediately overwritten by
    # the constants that follow — presumably disabled during tweaking.
    low = dsp.rand(50, 100)
    high = dsp.rand(80, 120)
    low = 80
    high = 120

    wform = 'sine2pi'
    # Pad 5 controls the output amplitude.
    amp = lpd.get(5, low=0, high=1, default=0)

    # Jitter the band edges slightly around the base values.
    low = dsp.rand(low * 0.9, low)
    high = dsp.rand(high, high * 1.1)

    # Pad 1 controls the event length (milliseconds -> frames).
    length = dsp.mstf(lpd.get(1, low=10, high=900))

    # NOTE(review): dsp.rand() appears to yield values in [0, 1], so this
    # branch (and the `> 10.1` one below) can never fire — likely disabled
    # on purpose; removing them would change the RNG call sequence.
    if dsp.rand() > 10.5:
        length = length / 2

    # Pad 2 chooses the audible pulse length within the event.
    pulselength = lpd.geti(2, low=dsp.mstf(10), high=length, default=length)

    out = dsp.bln(pulselength, low, high, wform)
    out = dsp.env(out, 'phasor')

    if dsp.rand() > 10.1:
        beep = dsp.tone(dsp.flen(out), dsp.rand(12000, 12000), amp=dsp.rand(0.5, 1))
        out = dsp.mix([out, beep])

    out = dsp.drift(out, dsp.rand(0, 1))
    # Pad the pulse with silence up to the full event length.
    out = dsp.pad(out, 0, length - dsp.flen(out))
    out = dsp.pan(out, dsp.rand())
    out = dsp.amp(out, amp)

    return out
|
"""
"""
import phantom.rules as phantom
import json
from datetime import datetime, timedelta
def on_start(container):
    """Playbook entry point: kick off the first block of the flow."""
    phantom.debug('on_start() called')

    # call 'Transform_Hosts_to_List' block
    Transform_Hosts_to_List(container=container)

    return
def Get_Volatility_Dump_scripts_and_exe(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
    """Custom-code block (still empty); currently only chains to the upload block."""
    phantom.debug('Get_Volatility_Dump_scripts_and_exe() called')
    input_parameter_0 = ""  # placeholder generated by the playbook editor; never populated

    ################################################################################
    ## Custom Code Start
    ################################################################################

    # Write your custom code here...

    ################################################################################
    ## Custom Code End
    ################################################################################

    # NOTE(review): Upload_Files_To_Target is invoked twice in a row — likely
    # a playbook-generator artifact; confirm whether one call is intended.
    Upload_Files_To_Target(container=container)
    Upload_Files_To_Target(container=container)

    return
def Upload_Files_To_Target(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
    """Upload files to the target host via the 'upload file' action.

    NOTE(review): a second function with this exact name is defined later in
    the file and replaces this one at import time — only the later definition
    is ever called. Confirm which version is intended and remove the other.
    """
    phantom.debug('Upload_Files_To_Target() called')

    #phantom.debug('Action: {0} {1}'.format(action['name'], ('SUCCEEDED' if success else 'FAILED')))

    # collect data for 'Upload_Files_To_Target' call
    parameters = []

    # build parameters list for 'Upload_Files_To_Target' call
    # NOTE(review): all parameter values are empty strings — this action will
    # not do anything useful until they are filled in.
    parameters.append({
        'vault_id': "",
        'destination': "",
        'ip_hostname': "",
    })

    phantom.act(action="upload file", parameters=parameters, assets=['cblumer windows 2012'], callback=join_decision_1, name="Upload_Files_To_Target")

    return
def Upload_Files_To_Target(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
    """Upload files to the target host (duplicate definition).

    NOTE(review): this definition shadows the earlier, more complete
    Upload_Files_To_Target above; this one passes an empty parameters list
    and no assets. Confirm which one should survive.
    """
    phantom.debug('Upload_Files_To_Target() called')

    #phantom.debug('Action: {0} {1}'.format(action['name'], ('SUCCEEDED' if success else 'FAILED')))

    parameters = []

    phantom.act(action="upload file", parameters=parameters, callback=join_decision_1, name="Upload_Files_To_Target")

    return
def decision_1(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
    """Decision block: only the 'else' path is wired, so it always reports an upload error."""
    phantom.debug('decision_1() called')

    # call connected blocks for 'else' condition 2
    Error_uploading_files(action=action, success=success, container=container, results=results, handle=handle, custom_function=custom_function)

    return


def join_decision_1(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None):
    """Join: run decision_1 once all Upload_Files_To_Target actions complete."""
    phantom.debug('join_decision_1() called')

    # check if all connected incoming playbooks, actions, or custom functions are done i.e. have succeeded or failed
    if phantom.completed(action_names=['Upload_Files_To_Target']):
        # call connected block "decision_1"
        decision_1(container=container, handle=handle)

    return


def Error_uploading_files(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
    """Terminal no-op block reached when the upload path fails."""
    phantom.debug('Error_uploading_files() called')

    return
def Dump_Target_System_Memory(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
    """Run the memory-dump script on the target (parameters still empty)."""
    phantom.debug('Dump_Target_System_Memory() called')

    #phantom.debug('Action: {0} {1}'.format(action['name'], ('SUCCEEDED' if success else 'FAILED')))

    parameters = []

    phantom.act(action="run script", parameters=parameters, callback=decision_2, name="Dump_Target_System_Memory")

    return


def Download_Memory_Dump(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
    """Fetch the memory dump from the target (parameters still empty)."""
    phantom.debug('Download_Memory_Dump() called')

    #phantom.debug('Action: {0} {1}'.format(action['name'], ('SUCCEEDED' if success else 'FAILED')))

    parameters = []

    phantom.act(action="get file", parameters=parameters, callback=Download_Memory_Dump_callback, name="Download_Memory_Dump")

    return


def Download_Memory_Dump_callback(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None):
    """Fan out after the download: evaluate the result and clean up the target."""
    phantom.debug('Download_Memory_Dump_callback() called')

    decision_3(action=action, success=success, container=container, results=results, handle=handle, custom_function=custom_function)
    Clean_Up_Target_System(action=action, success=success, container=container, results=results, handle=handle, custom_function=custom_function)

    return
def decision_2(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
phantom.debug('decision_2() called')
# call connected blocks for 'else' condition 2
Error_Dumping_Memory(action=action, success=success, container=container, results=results, handle=handle, custom_function=custom_function)
return
def Error_Dumping_Memory(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
phantom.debug('Error_Dumping_Memory() called')
return
def decision_3(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
    """Decision after the dump download.

    NOTE(review): as generated, it unconditionally routes to the error block;
    the success condition appears to have been stripped.
    """
    phantom.debug('decision_3() called')

    # call connected blocks for 'else' condition 2
    Error_Dowloading_Memory_Dump(action=action, success=success, container=container, results=results, handle=handle, custom_function=custom_function)

    return
def Error_Dowloading_Memory_Dump(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
    """Terminal no-op block for a failed dump download.

    NOTE(review): 'Dowloading' is a typo, but the name cannot be changed here
    because decision_3 references it.
    """
    phantom.debug('Error_Dowloading_Memory_Dump() called')

    return
def Run_Volatility_Investigative_Actions(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
    """Run a 'find malware' action (Volatility analysis) against the dump.

    NOTE(review): no callback is attached, so nothing consumes this action's
    results; parameters is empty as well.
    """
    phantom.debug('Run_Volatility_Investigative_Actions() called')

    #phantom.debug('Action: {0} {1}'.format(action['name'], ('SUCCEEDED' if success else 'FAILED')))

    parameters = []

    phantom.act(action="find malware", parameters=parameters, name="Run_Volatility_Investigative_Actions")

    return
def Clean_Up_Target_System(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
    """Run a cleanup script on the target, chained to the parent download action."""
    phantom.debug('Clean_Up_Target_System() called')

    #phantom.debug('Action: {0} {1}'.format(action['name'], ('SUCCEEDED' if success else 'FAILED')))

    parameters = []

    phantom.act(action="run script", parameters=parameters, name="Clean_Up_Target_System", parent_action=action)

    return
def Transform_Hosts_to_List(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
    """Custom-code block meant to turn host artifacts into a list, then prompt the analyst.

    NOTE(review): the custom-code section is empty and input_parameter_0 is
    never populated or used — currently this just chains to Prompt_select_host.
    """
    phantom.debug('Transform_Hosts_to_List() called')
    input_parameter_0 = ""

    ################################################################################
    ## Custom Code Start
    ################################################################################

    # Write your custom code here...

    ################################################################################
    ## Custom Code End
    ################################################################################

    Prompt_select_host(container=container)

    return
def Prompt_select_host(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
    """Ask the analyst a Yes/No question (30 minute timeout); decision_4 handles the answer.

    NOTE(review): user and message are empty strings — the prompt recipient
    and text still need to be filled in for this block to be useful.
    """
    phantom.debug('Prompt_select_host() called')

    # set user and message variables for phantom.prompt call
    user = ""
    message = """"""

    #responses:
    response_types = [
        {
            "prompt": "",
            "options": {
                "type": "list",
                "choices": [
                    "Yes",
                    "No",
                ]
            },
        },
    ]

    phantom.prompt2(container=container, user=user, message=message, respond_in_mins=30, name="Prompt_select_host", response_types=response_types, callback=decision_4)

    return
def decision_4(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
    """Decision on the analyst's prompt response.

    NOTE(review): only the 'else' path is present — it unconditionally routes
    to User_negated_operation; the 'Yes' branch appears to have been stripped.
    """
    phantom.debug('decision_4() called')

    # call connected blocks for 'else' condition 2
    User_negated_operation(action=action, success=success, container=container, results=results, handle=handle, custom_function=custom_function)

    return
def filter_1(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
    """Empty filter block (no conditions configured); currently a no-op."""
    phantom.debug('filter_1() called')

    return
def User_negated_operation(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
    """Terminal no-op block reached when the analyst declines the operation."""
    phantom.debug('User_negated_operation() called')

    return
def on_finish(container, summary):
    """Playbook teardown hook, invoked once every action has completed."""
    phantom.debug('on_finish() called')
    # This function is called after all actions are completed.
    # summary of all the action and/or all details of actions
    # can be collected here.

    # summary_json = phantom.get_summary()
    # if 'result' in summary_json:
    #     for action_result in summary_json['result']:
    #         if 'action_run_id' in action_result:
    #             action_results = phantom.get_action_results(action_run_id=action_result['action_run_id'], result_data=False, flatten=False)
    #             phantom.debug(action_results)

    return
from .source_target_data_processor import SourceTargetDataProcessor
from .data_processor import DataProcessor
import torch
import random
from torch import nn
from BFT.utils import cuda_variable
from DatasetManager.piano.piano_midi_dataset import PAD_SYMBOL, START_SYMBOL
class PianoDataProcessor(DataProcessor):
    """DataProcessor for piano MIDI token sequences.

    Adds per-channel PAD/START token bookkeeping so a loss mask can be
    computed alongside each preprocessed batch.
    """

    def __init__(self,
                 dataloader_generator,
                 embedding_size,
                 num_events,
                 num_tokens_per_channel,
                 add_mask_token=False):
        super(PianoDataProcessor,
              self).__init__(embedding_size=embedding_size,
                             num_events=num_events,
                             num_tokens_per_channel=num_tokens_per_channel,
                             add_mask_token=add_mask_token)
        # These are used to compute the loss_mask
        self.dataloader_generator = dataloader_generator
        # Per-channel PAD token indices, stored as non-trainable Parameters so
        # they move with the module across devices.
        self.pad_tokens = nn.Parameter(torch.LongTensor([
            self.dataloader_generator.dataset.value2index[feature][PAD_SYMBOL]
            for feature in self.dataloader_generator.features
        ]),
                                       requires_grad=False)
        # Per-channel START token indices (same storage rationale as above).
        self.start_tokens = nn.Parameter(torch.LongTensor([
            self.dataloader_generator.dataset.value2index[feature]
            [START_SYMBOL] for feature in self.dataloader_generator.features
        ]),
                                         requires_grad=False)

    def preprocess(self, x):
        """Move x to the GPU and compute the PAD/START loss mask.

        :param x: (batch_size, num_events, num_channels) integer tensor
        :return: (x, metadata_dict) where metadata_dict contains 'loss_mask'
            (True where the token is PAD or START) and 'original_sequence'.
        """
        x = cuda_variable(x.long())
        batch_size, num_events, _ = x.size()
        padding_mask = (
            x[:, :, :] == self.pad_tokens.unsqueeze(0).unsqueeze(0).repeat(
                batch_size, num_events, 1))
        start_mask = (
            x[:, :, :] == self.start_tokens.unsqueeze(0).unsqueeze(0).repeat(
                batch_size, num_events, 1))
        # Adding the two boolean masks acts as an elementwise OR.
        loss_mask = padding_mask + start_mask
        metadata_dict = dict(
            loss_mask=loss_mask,
            original_sequence=x
        )
        return x, metadata_dict

    def postprocess(self, reconstruction, original=None):
        """Optionally stack the first original sample with the reconstruction.

        When original is given, returns a tensor with an extra leading
        singleton dimension so everything is written into one score;
        otherwise returns the reconstruction unchanged.
        """
        if original is not None:
            # Just concatenate along batch dimension original and reconstruction
            original = original.long()
            tensor_score = torch.cat(
                [original[0].unsqueeze(0),
                 reconstruction.cpu()], dim=0)
            # Add a first empty dimension as everything will be written in one score
            return tensor_score.unsqueeze(0)
        else:
            return reconstruction
class MaskedPianoSourceTargetDataProcessor(SourceTargetDataProcessor):
    """Source/target processor where the encoder sees a randomly masked copy.

    The encoder-side PianoDataProcessor is built with an extra MASK token per
    channel; the decoder-side one is not. preprocess() yields
    (masked source, clean target, metadata) for masked-sequence training.
    """

    def __init__(self, dataloader_generator, embedding_size, num_events, num_tokens_per_channel):
        encoder_data_processor = PianoDataProcessor(
            dataloader_generator=dataloader_generator,
            embedding_size=embedding_size,
            num_events=num_events,
            num_tokens_per_channel=num_tokens_per_channel,
            add_mask_token=True)
        decoder_data_processor = PianoDataProcessor(
            dataloader_generator=dataloader_generator,
            embedding_size=embedding_size,
            num_events=num_events,
            num_tokens_per_channel=num_tokens_per_channel,
            add_mask_token=False)
        super(MaskedPianoSourceTargetDataProcessor,
              self).__init__(encoder_data_processor=encoder_data_processor,
                             decoder_data_processor=decoder_data_processor)

        # (num_channels, ) LongTensor
        # NOTE(review): each channel's MASK symbol equals that channel's vocab
        # size, i.e. the first index past the vocabulary — consistent with
        # add_mask_token=True extending each vocab by one; confirm upstream.
        self.mask_symbols = nn.Parameter(torch.LongTensor(
            self.encoder_data_processor.num_tokens_per_channel),
                                         requires_grad=False)

        # These are used to compute the loss_mask
        self.dataloader_generator = dataloader_generator
        # Per-channel PAD token indices.
        self.pad_tokens = nn.Parameter(torch.LongTensor([
            self.dataloader_generator.dataset.value2index[feature][PAD_SYMBOL]
            for feature in self.dataloader_generator.features
        ]),
                                       requires_grad=False)
        # Per-channel START token indices.
        self.start_tokens = nn.Parameter(torch.LongTensor([
            self.dataloader_generator.dataset.value2index[feature]
            [START_SYMBOL] for feature in self.dataloader_generator.features
        ]),
                                         requires_grad=False)

    def _mask_source(self, x, masked_positions=None):
        """Add a MASK symbol

        Args:
            x (batch_size, num_events, num_channels) LongTensor: non-embeded source input
            masked_positions ([type], optional): if None, masked_positions are sampled. Defaults to None.

        Returns:
            [type]: masked_x
        """
        batch_size, num_events, num_channels = x.size()
        if masked_positions is None:
            # Threshold p drawn once per call from [0, 0.5); positions with
            # rand > p are masked, so on average at least half the events are.
            p = random.random() * 0.5
            # independant masking:
            # masked_positions = (torch.rand_like(x.float()) > p)
            # event masking: one decision per event, broadcast to all channels.
            masked_positions = torch.rand_like(x[:, :, 0].float()) > p
            masked_positions = masked_positions.unsqueeze(2).repeat(
                1, 1, num_channels)
        masked_positions = masked_positions.long()
        mask_symbols = self.mask_symbols.unsqueeze(0).unsqueeze(0).repeat(
            batch_size, num_events, 1)
        # Keep unmasked entries; substitute the MASK symbol at masked ones.
        masked_x = (x * (1 - masked_positions) +
                    masked_positions * mask_symbols)
        return masked_x, masked_positions

    def postprocess(self, x):
        """Delegate postprocessing to the decoder-side data processor."""
        return self.decoder_data_processor.postprocess(x)

    def preprocess(self, x):
        """
        :param x: (batch_size, num_events, num_channels) integer tensor
        :return: tuple source, target, metadata_dict where
        - source is (batch_size, num_events_source, num_channels_source)
        - target is (batch_size, num_events_target, num_channels_target)
        - metadata_dict is a dictionary which contains the masked_positions tensor of size (batch_size, num_events_source, num_channels_source)
        """
        source = cuda_variable(x.long())
        target = cuda_variable(x.long())
        source, masked_positions = self._mask_source(source)

        # compute loss_mask
        batch_size, num_events, _ = target.size()
        padding_mask = (
            target[:, :, :] == self.pad_tokens.unsqueeze(0).unsqueeze(0).repeat(
                batch_size, num_events, 1))
        start_mask = (
            target[:, :, :] == self.start_tokens.unsqueeze(0).unsqueeze(0).repeat(
                batch_size, num_events, 1))
        # Boolean OR: exclude PAD and START positions from the loss.
        loss_mask = padding_mask + start_mask

        metadata_dict = dict(masked_positions=masked_positions,
                             original_sequence=target,
                             loss_mask=loss_mask)
        return source, target, metadata_dict
|
# coding: utf-8
# Copyright 2013 The Font Bakery Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# See AUTHORS.txt for the list of Authors and LICENSE.txt for the License.
from __future__ import print_function
import fontforge
from bakery_lint.base import BakeryTestCase as TestCase
def ufo_required(f):
    """Decorator: run the wrapped test only for UFO/SFD source fonts.

    If self.operator.path does not end in '.ufo' or '.sfd'
    (case-insensitive), the wrapped test is skipped by returning None.
    """
    import functools

    @functools.wraps(f)  # preserve the test's name/docstring for reporting
    def func(self, *args, **kwargs):
        # This test checks only UFO (or FontForge SFD) source fonts.
        if not self.operator.path.lower().endswith(('.ufo', '.sfd')):
            return
        return f(self, *args, **kwargs)
    return func
class TestUFOFontFamilyNamingTest(TestCase):
    "The font follows the font family naming recommendation"
    # See http://forum.fontlab.com/index.php?topic=313.0
    targets = ['upstream']
    # TODO use robofab to test this in UFOs
    tool = 'FontForge'
    name = __name__

    # NOTE(review): font.sfnt_names entries are indexed positionally below
    # (4 = Full Name, 6 = PostScript Name, 1 = Family, 2 = SubFamily/Style);
    # this assumes FontForge's default ordering — confirm for all inputs.

    @ufo_required
    def test_fullfontname_less_than_64_chars(self):
        """ <Full name> limitation is < 64 chars """
        font = fontforge.open(self.operator.path)
        length = len(font.sfnt_names[4][2])
        self.assertLess(length, 64,
                        msg=('`Full Font Name` limitation is less'
                             ' than 64 chars. Now: %s') % length)

    @ufo_required
    def test_postscriptname_less_than_30_chars(self):
        """ <Postscript name> limitation is < 30 chars """
        font = fontforge.open(self.operator.path)
        length = len(font.sfnt_names[6][2])
        self.assertLess(length, 30,
                        msg=('`PostScript Name` limitation is less'
                             ' than 30 chars. Now: %s') % length)

    @ufo_required
    def test_postscriptname_consistof_allowed_chars(self):
        """ <Postscript name> may contain only a-zA-Z0-9 and one hyphen """
        font = fontforge.open(self.operator.path)
        # assertRegexpMatches is the Python 2-era alias of assertRegex; kept
        # as-is for compatibility with the BakeryTestCase base class.
        self.assertRegexpMatches(font.sfnt_names[6][2],
                                 r'^[a-zA-Z0-9]+\-?[a-zA-Z0-9]+$',
                                 msg=('`PostScript Name` may contain'
                                      ' only a-zA-Z0-9 characters and'
                                      ' one hyphen'))

    @ufo_required
    def test_familyname_less_than_32_chars(self):
        """ <Family Name> limitation is 32 chars """
        font = fontforge.open(self.operator.path)
        length = len(font.sfnt_names[1][2])
        self.assertLess(length, 32,
                        msg=('`Family Name` limitation is < 32 chars.'
                             ' Now: %s') % length)

    @ufo_required
    def test_stylename_less_than_32_chars(self):
        """ <Style Name> limitation is 32 chars """
        font = fontforge.open(self.operator.path)
        length = len(font.sfnt_names[2][2])
        self.assertLess(length, 32,
                        msg=('`Style Name` limitation is < 32 chars.'
                             ' Now: %s') % length)

    @ufo_required
    def test_weight_value_range_between_250_and_900(self):
        """ <Weight> value >= 250 and <= 900 in steps of 50 """
        font = fontforge.open(self.operator.path)
        self.assertTrue(bool(font.os2_weight % 50 == 0),
                        msg=('Weight has to be in steps of 50.'
                             ' Now: %s') % font.os2_weight)
        self.assertGreaterEqual(font.os2_weight, 250)
        self.assertLessEqual(font.os2_weight, 900)
|
""" Wemakeprice Recommendation Project.
Authors:
- Hyunsik Jeon (jeon185@snu.ac.kr)
- Jaemin Yoo (jaeminyoo@snu.ac.kr)
- U Kang (ukang@snu.ac.kr)
- Data Mining Lab. at Seoul National University.
File: data/columns.py
- Constants of DataFrame column names.
Version: 1.0.0
"""
# ---- behavioral log columns ----
TIMESTAMP = 'timestamp'
ACTION_TYPE = 'action_type'
SEARCH_KEYWORD = 'search_keyword'

# ---- member (user) columns ----
MEMBER_ID = 'm_id'
COMP_MEMBER_ID = 'comp_mid'
AGE = 'age'
SEX = 'sex'

# ---- product columns ----
PRODUCT_NO = 'prod_no'
PRICE = 'sale_price'
SALE_START_DATE = 'sale_start_dt'
SALE_END_DATE = 'sale_end_dt'
PRODUCT_NAME = 'prod_nm'
DEAL_CONSTRUCTION = 'deal_construction'

# ---- category hierarchy (GNB depth 0..2) columns ----
DEPTH0_NAME = 'gnb_depth0_nm'
DEPTH1_NO = 'gnb_depth1_cate_id'
DEPTH1_NAME = 'gnb_depth1_nm'
DEPTH2_NO = 'gnb_depth2_cate_id'
DEPTH2_NAME = 'gnb_depth2_nm'
DEPTH0_DISPLAY_ORDER = 'display_order_depth0'
DEPTH1_DISPLAY_ORDER = 'display_order_depth1'
DEPTH2_DISPLAY_ORDER = 'display_order_depth2'
CATEGORY_NO = 'gnb_category_id'

# ---- deal / item identifiers ----
DEAL_NO = 'deal_no'
ITEM_NO = 'item_no'
|
from flask_wtf import FlaskForm
from wtforms import StringField, IntegerField, SubmitField
from wtforms.validators import InputRequired
from property_price_model.postcodes import pcode
class PropertyInputForm(FlaskForm):
    """Form collecting the inputs needed for a property price prediction."""

    # Postcode string, checked by the custom pcode() validator.
    pcode = StringField("Postcode", validators=[pcode(), InputRequired()])
    # Floor area. NOTE(review): the field is named `sqft` but labelled
    # "Square Metres" — confirm which unit the model actually expects.
    sqft = IntegerField("Square Metres", validators=[InputRequired()])
    submit = SubmitField("Submit data")
|
from random import seed, randint  # NOTE(review): seed is imported but never used

print("H A N G M A N")

# Outer menu loop: each iteration plays one full game.
while True:
    print('Type "play" to play the game, "exit" to quit:')
    prompt = input()
    if prompt == "play":
        pass
    elif prompt == "exit":
        break
    else:
        continue
    word_list = ['python', 'java', 'kotlin', 'javascript']
    w_number = randint(0, 3)
    word_hint = word_list[w_number]
    output = "-" * len(word_hint)      # masked word shown to the player
    temp_output_list = list(output)    # mutable copy used to reveal letters
    letter_set = set(word_hint)        # NOTE(review): computed but never used
    won = False
    health = 8                         # allowed wrong guesses
    entered_values = list()            # letters already tried
    while health > 0:
        print()
        print(output)
        guess = input("Input a letter: ")
        count = 0
        # Reject anything that is not exactly one character.
        if len(guess) >= 2 or len(guess) <= 0:
            print("You should print a single letter")
            continue
        # Only lowercase ASCII letters are accepted.
        if guess != guess.lower() or not guess.isalpha():
            print('It is not an ASCII lowercase letter.')
            continue
        # Repeated guesses don't cost health but are rejected.
        if guess not in entered_values:
            entered_values.append(guess)
        elif guess in entered_values:
            print("You already typed this letter")
            continue
        if guess not in word_hint:
            print("No such letter in the word")
            health -= 1
            continue
        # Reveal every occurrence of the guessed letter.
        for letter in word_hint:
            if guess == letter:
                temp_output_list[count] = word_hint[count]
            count += 1
        output = ''.join(temp_output_list)
        if output == word_hint:
            won = True
            print(output)
            print("You guessed the word!")
            break
    if won is True:
        print("You survived!")
    else:
        print("You are hanged!")
|
import os, time, multiprocessing
import numpy as np
import tensorflow as tf
import tensorlayer as tl
from config import FLAGS_CMNIST, FLAGS_CIFAR
from data import get_dataset_train, get_dataset_eval
from models import get_G, get_img_D, get_E, get_z_D
import random
import argparse
import math
import scipy.stats as stats
import sys
temp_out = sys.stdout  # remember the original stdout so logging can redirect back to the console


def _str2bool(value):
    """Parse a command-line boolean.

    BUG FIX: argparse's type=bool treats any non-empty string as True,
    so '--is_continue False' previously resolved to True.
    """
    return str(value).lower() in ('true', '1', 'yes')


parser = argparse.ArgumentParser()
parser.add_argument('--mode', type=str, default='DWGAN', help='train or eval')
parser.add_argument('--is_continue', type=_str2bool, default=False, help='load weights from checkpoints?')
parser.add_argument('--dataset', type=str, default='CIFAR_10', help='one of: CMNIST, CIFAR_10')
args = parser.parse_args()
def data_aug(images):
    """Augment a batch of images with additive Gaussian noise (sigma = 0.15).

    NOTE(review): the noise shape uses img_size_h for both spatial axes,
    matching the original — confirm this is intended for non-square images.
    """
    noise_shape = [flags.batch_size_train, flags.img_size_h, flags.img_size_h, flags.c_dim]
    noise = np.random.normal(loc=0.0, scale=0.15, size=noise_shape).astype(np.float32)
    return images + noise
def KStest(real_z, fake_z):
    """Per-sample two-sided Kolmogorov-Smirnov test between prior and encoded codes.

    Returns (min p-value, mean p-value) over the training batch.
    """
    p_values = [stats.ks_2samp(fake_z[idx], real_z[idx])[1]
                for idx in range(flags.batch_size_train)]
    return np.min(p_values), np.mean(p_values)
def train(con=False):
    """Train the DWGAN models: generator G, image discriminator D, encoder E,
    and latent discriminator D_z.

    :param con: resume from the weights in ./checkpoint/ when True.
    Relies on the module-level ``flags`` configuration created in __main__.
    """
    dataset, len_dataset = get_dataset_train()
    len_dataset = flags.len_dataset  # override the loader's count with the configured one
    G = get_G([None, flags.z_dim])
    D = get_img_D([None, flags.img_size_h, flags.img_size_w, flags.c_dim])
    E = get_E([None, flags.img_size_h, flags.img_size_w, flags.c_dim])
    D_z = get_z_D([None, flags.z_dim])
    if con:
        # NOTE(review): resume loads from ./checkpoint/ while saving below goes
        # to flags.checkpoint_dir/flags.param_dir — confirm the paths agree.
        G.load_weights('./checkpoint/G.npz')
        D.load_weights('./checkpoint/D.npz')
        E.load_weights('./checkpoint/E.npz')
        D_z.load_weights('./checkpoint/D_z.npz')
    G.train()
    D.train()
    E.train()
    D_z.train()
    n_step_epoch = int(len_dataset // flags.batch_size_train)
    n_epoch = flags.n_epoch
    # lr_G = flags.lr_G * flags.initial_scale
    # lr_E = flags.lr_E * flags.initial_scale
    # lr_D = flags.lr_D * flags.initial_scale
    # lr_Dz = flags.lr_Dz * flags.initial_scale
    lr_G = flags.lr_G
    lr_E = flags.lr_E
    lr_D = flags.lr_D
    lr_Dz = flags.lr_Dz
    # total_step = n_epoch * n_step_epoch
    # lr_decay_G = flags.lr_G * (flags.ending_scale - flags.initial_scale) / total_step
    # lr_decay_E = flags.lr_G * (flags.ending_scale - flags.initial_scale) / total_step
    # lr_decay_D = flags.lr_G * (flags.ending_scale - flags.initial_scale) / total_step
    # lr_decay_Dz = flags.lr_G * (flags.ending_scale - flags.initial_scale) / total_step
    d_optimizer = tf.optimizers.Adam(lr_D, beta_1=flags.beta1, beta_2=flags.beta2)
    g_optimizer = tf.optimizers.Adam(lr_G, beta_1=flags.beta1, beta_2=flags.beta2)
    e_optimizer = tf.optimizers.Adam(lr_E, beta_1=flags.beta1, beta_2=flags.beta2)
    dz_optimizer = tf.optimizers.Adam(lr_Dz, beta_1=flags.beta1, beta_2=flags.beta2)
    curr_lambda = flags.lambda_recon
    for step, batch_imgs_labels in enumerate(dataset):
        '''
        log = " ** new learning rate: %f (for GAN)" % (lr_v.tolist()[0])
        print(log)
        '''
        batch_imgs = batch_imgs_labels[0]
        # print("batch_imgs shape:")
        # print(batch_imgs.shape)  # (64, 64, 64, 3)
        batch_labels = batch_imgs_labels[1]
        # print("batch_labels shape:")
        # print(batch_labels.shape)  # (64,)
        epoch_num = step // n_step_epoch
        # for i in range(flags.batch_size_train):
        #     tl.visualize.save_image(batch_imgs[i].numpy(), 'train_{:02d}.png'.format(i))

        # # Updating recon lambda
        # if epoch_num <= 5:  # 50 --> 25
        #     curr_lambda -= 5
        # elif epoch_num <= 40:  # stay at 25
        #     curr_lambda = 25
        # else:  # 25 --> 10
        #     curr_lambda -= 0.25

        # One persistent tape so each model's gradients can be taken separately.
        with tf.GradientTape(persistent=True) as tape:
            # Prior sample: scaled Gaussian plus a Bernoulli(0.5) offset.
            z = flags.scale * np.random.normal(loc=0.0, scale=flags.sigma * math.sqrt(flags.z_dim),
                                               size=[flags.batch_size_train, flags.z_dim]).astype(np.float32)
            z += flags.scale * np.random.binomial(n=1, p=0.5,
                                                  size=[flags.batch_size_train, flags.z_dim]).astype(np.float32)
            fake_z = E(batch_imgs)
            fake_imgs = G(fake_z)
            fake_logits = D(fake_imgs)
            real_logits = D(batch_imgs)
            fake_logits_z = D(G(z))
            real_z_logits = D_z(z)
            fake_z_logits = D_z(fake_z)
            # Adversarial losses written as (CE toward ones - CE toward zeros).
            e_loss_z = - tl.cost.sigmoid_cross_entropy(fake_z_logits, tf.zeros_like(fake_z_logits)) + \
                       tl.cost.sigmoid_cross_entropy(fake_z_logits, tf.ones_like(fake_z_logits))
            recon_loss = curr_lambda * tl.cost.absolute_difference_error(batch_imgs, fake_imgs)
            g_loss_x = - tl.cost.sigmoid_cross_entropy(fake_logits, tf.zeros_like(fake_logits)) + \
                       tl.cost.sigmoid_cross_entropy(fake_logits, tf.ones_like(fake_logits))
            g_loss_z = - tl.cost.sigmoid_cross_entropy(fake_logits_z, tf.zeros_like(fake_logits_z)) + \
                       tl.cost.sigmoid_cross_entropy(fake_logits_z, tf.ones_like(fake_logits_z))
            e_loss = recon_loss + e_loss_z
            g_loss = recon_loss + g_loss_x + g_loss_z
            d_loss = tl.cost.sigmoid_cross_entropy(real_logits, tf.ones_like(real_logits)) + \
                     tl.cost.sigmoid_cross_entropy(fake_logits, tf.zeros_like(fake_logits)) + \
                     tl.cost.sigmoid_cross_entropy(fake_logits_z, tf.zeros_like(fake_logits_z))
            dz_loss = tl.cost.sigmoid_cross_entropy(fake_z_logits, tf.zeros_like(fake_z_logits)) + \
                      tl.cost.sigmoid_cross_entropy(real_z_logits, tf.ones_like(real_z_logits))

        # Updating Encoder
        grad = tape.gradient(e_loss, E.trainable_weights)
        e_optimizer.apply_gradients(zip(grad, E.trainable_weights))

        # Updating Generator
        grad = tape.gradient(g_loss, G.trainable_weights)
        g_optimizer.apply_gradients(zip(grad, G.trainable_weights))

        # Updating Discriminator
        grad = tape.gradient(d_loss, D.trainable_weights)
        d_optimizer.apply_gradients(zip(grad, D.trainable_weights))

        # Updating D_z & D_h
        grad = tape.gradient(dz_loss, D_z.trainable_weights)
        dz_optimizer.apply_gradients(zip(grad, D_z.trainable_weights))

        # # Updating lr
        # lr_G -= lr_decay_G
        # lr_E -= lr_decay_E
        # lr_D -= lr_decay_D
        # lr_Dz -= lr_decay_Dz

        # show current state: mirror the progress line into log.txt and console
        if np.mod(step, flags.show_every_step) == 0:
            with open("log.txt", "a+") as f:
                p_min, p_avg = KStest(z, fake_z)
                sys.stdout = f  # redirect stdout into the log file
                print("Epoch: [{}/{}] [{}/{}] curr_lambda: {:.5f}, recon_loss: {:.5f}, g_loss: {:.5f}, d_loss: {:.5f}, "
                      "e_loss: {:.5f}, dz_loss: {:.5f}, g_loss_x: {:.5f}, g_loss_z: {:.5f}, e_loss_z: {:.5f}".format
                      (epoch_num, flags.n_epoch, step - (epoch_num * n_step_epoch), n_step_epoch, curr_lambda,
                       recon_loss, g_loss, d_loss, e_loss, dz_loss, g_loss_x, g_loss_z, e_loss_z))
                print("kstest: min:{}, avg:{}".format(p_min, p_avg))
                sys.stdout = temp_out  # restore stdout to the console
            print("Epoch: [{}/{}] [{}/{}] curr_lambda: {:.5f}, recon_loss: {:.5f}, g_loss: {:.5f}, d_loss: {:.5f}, "
                  "e_loss: {:.5f}, dz_loss: {:.5f}, g_loss_x: {:.5f}, g_loss_z: {:.5f}, e_loss_z: {:.5f}".format
                  (epoch_num, flags.n_epoch, step - (epoch_num * n_step_epoch), n_step_epoch, curr_lambda,
                   recon_loss, g_loss, d_loss, e_loss, dz_loss, g_loss_x, g_loss_z, e_loss_z))
            print("kstest: min:{}, avg:{}".format(p_min, p_avg))

        # Checkpoint at every epoch boundary.
        if np.mod(step, n_step_epoch) == 0 and step != 0:
            G.save_weights('{}/{}/G.npz'.format(flags.checkpoint_dir, flags.param_dir), format='npz')
            D.save_weights('{}/{}/D.npz'.format(flags.checkpoint_dir, flags.param_dir), format='npz')
            E.save_weights('{}/{}/E.npz'.format(flags.checkpoint_dir, flags.param_dir), format='npz')
            D_z.save_weights('{}/{}/Dz.npz'.format(flags.checkpoint_dir, flags.param_dir), format='npz')
            # G.train()

        # Periodically render an 8x8 grid of generated samples.
        if np.mod(step, flags.eval_step) == 0:
            z = np.random.normal(loc=0.0, scale=1, size=[flags.batch_size_train, flags.z_dim]).astype(np.float32)
            G.eval()
            result = G(z)
            G.train()
            tl.visualize.save_images(result.numpy(), [8, 8],
                                     '{}/{}/train_{:02d}_{:04d}.png'.format(flags.sample_dir, flags.param_dir,
                                                                            step // n_step_epoch, step))
        del tape  # release the persistent tape's resources each iteration
class Retrival_Obj():
    """A gallery entry: a boolean hash code, its class label, and a mutable distance."""

    def __init__(self, hash, label):
        self.label = label
        self.dist = 0  # filled in later by hamming()
        # Store the 0/1 hash as a boolean numpy array (True where the bit is 1).
        self.hash = np.array([bit == 1 for bit in hash])

    def __repr__(self):
        return repr((self.hash, self.label, self.dist))
# to calculate the hamming dist between obj1 & obj2
def hamming(obj1, obj2):
    """Store in obj2.dist the Hamming distance between the two boolean hash codes.

    Replaces the original O(n) Python counting loop with a vectorized
    popcount; the side effect (writing obj2.dist) is unchanged.
    """
    obj2.dist = int(np.count_nonzero(obj1.hash ^ obj2.hash))
def take_ele(obj):
    """Sort key: the precomputed Hamming distance of a retrieval object."""
    return obj.dist
# to get 'nearest_num' nearest objs from 'image' in 'Gallery'
def get_nearest(image, Gallery, nearest_num):
    """Return the nearest_num gallery objects closest to image in Hamming distance.

    Side effects preserved from the original: every gallery object's .dist
    is refreshed by hamming() and Gallery itself is sorted in place.
    """
    for obj in Gallery:
        hamming(image, obj)
    Gallery.sort(key=take_ele)
    # Slicing replaces the original manual counting loop (same result,
    # including when Gallery is shorter than nearest_num).
    return Gallery[:nearest_num]
# given retrivial_set, calc AP w.r.t. given label
def calc_ap(retrivial_set, label):
    """Average precision of a ranked retrieval list w.r.t. the query label.

    Precision is recorded at each rank where a relevant item appears, and
    the mean of those precisions is returned. Returns 0.0 when nothing is
    relevant (the original returned NaN from np.mean of an empty list).
    """
    precisions = []
    hits = 0
    for rank, obj in enumerate(retrivial_set, start=1):
        if obj.label == label:
            hits += 1
            precisions.append(hits / rank)
    # The original also kept a running `ans` accumulator that was dead code
    # (immediately overwritten by np.mean); it is removed here.
    if not precisions:
        return 0.0
    return float(np.mean(precisions))
def Evaluate_mAP():
    """Evaluate image-retrieval mAP using the encoder's binarized outputs.

    Encodes the eval set into binary hashes, splits off a query set of
    flags.eval_sample items, retrieves the flags.nearest_num Hamming-nearest
    gallery entries per query, and reports the mean average precision over
    flags.eval_epoch_num evaluation passes. Relies on the module-level
    ``flags`` created in __main__.
    """
    print('Start Eval!')
    # load images & labels
    ds = get_dataset_eval()
    E = get_E([None, flags.img_size_h, flags.img_size_w, flags.c_dim])
    E.load_weights('./checkpoint/E.npz')
    E.eval()
    # create (hash,label) gallery
    Gallery = []
    cnt = 0
    step_time1 = time.time()
    for batch, label in ds:
        cnt += 1
        if cnt % flags.eval_print_freq == 0:
            step_time2 = time.time()
            print("Now {} Imgs done, takes {:.3f} sec".format(cnt, step_time2 - step_time1))
            step_time1 = time.time()
        # NOTE(review): E(batch) is unpacked as a pair here, while train()
        # uses its return value directly — confirm the encoder's signature.
        hash_fake, _ = E(batch)
        hash_fake = hash_fake.numpy()[0]
        # Binarize: sign(2h - 1) mapped from {-1, 1} to {0, 1}.
        hash_fake = ((tf.sign(hash_fake * 2 - 1, name=None) + 1) / 2).numpy()
        label = label.numpy()[0]
        Gallery.append(Retrival_Obj(hash_fake, label))
    print('Hash calc done, start split dataset')
    # shuffle, then take the first flags.eval_sample entries as the query set
    random.shuffle(Gallery)
    cnt = 0
    Queryset = []
    G = []
    for obj in Gallery:
        cnt += 1
        if cnt > flags.eval_sample:
            G.append(obj)
        else:
            Queryset.append(obj)
    Gallery = G
    print('split done, start eval')
    # Calculate mAP
    Final_mAP = 0
    step_time1 = time.time()
    for eval_epoch in range(flags.eval_epoch_num):
        result_list = []
        cnt = 0
        for obj in Queryset:
            cnt += 1
            if cnt % flags.retrieval_print_freq == 0:
                step_time2 = time.time()
                # BUG FIX: the original format string had two placeholders but
                # three arguments, so the elapsed time was never printed and
                # the epoch number was shown as the step count.
                print("Eval step {}: now {} queries done, takes {:.3f} sec".format(
                    eval_epoch, cnt, step_time2 - step_time1))
                step_time1 = time.time()
            retrivial_set = get_nearest(obj, Gallery, flags.nearest_num)
            result = calc_ap(retrivial_set, obj.label)
            result_list.append(result)
        result_list = np.array(result_list)
        temp_res = np.mean(result_list)
        print("Query_num:{}, Eval_step:{}, Top_k_num:{}, AP:{:.3f}".format(flags.eval_sample, eval_epoch,
                                                                           flags.nearest_num, temp_res))
        Final_mAP += temp_res / flags.eval_epoch_num
    print('')
    print("Query_num:{}, Eval_num:{}, Top_k_num:{}, mAP:{:.3f}".format(flags.eval_sample, flags.eval_epoch_num,
                                                                       flags.nearest_num, Final_mAP))
    print('')
def Evaluate_Cluster():
    # Placeholder: clustering evaluation is not implemented yet.
    return 0
if __name__ == '__main__':
    # Select the flag set matching the requested dataset.
    flags = FLAGS_CMNIST()
    if args.dataset == 'CMNIST':
        flags = FLAGS_CMNIST()
    elif args.dataset == 'CIFAR_10':
        # BUG FIX: the original called FLAGS_CIFAR_10(), which is undefined —
        # the imported name from config is FLAGS_CIFAR.
        flags = FLAGS_CIFAR()
    else:
        print('dataset error')
    # Make sure the output directories exist.
    tl.files.exists_or_mkdir(flags.checkpoint_dir + '/' + flags.param_dir)  # checkpoint path
    tl.files.exists_or_mkdir(flags.sample_dir + '/' + flags.param_dir)  # samples path
    tl.files.exists_or_mkdir(flags.checkpoint_dir)  # save model
    tl.files.exists_or_mkdir(flags.sample_dir)  # save generated image
    # Start training process
    train(con=args.is_continue)
|
import glob
import numpy as np
from kd_helpers import read_labels
# Checking the number of points in each model
max_pts = 0
for main_folder in ["./data/train_data/*", "./data/val_data/*", "./data/test_data/*"]:
    print(main_folder)
    folders = glob.glob(main_folder)
    model_files = []
    for folder in folders:
        model_files.extend(glob.glob(folder + '/*'))
    for model_file in model_files:
        with open(model_file, 'r') as myfile:
            num_pts = len(myfile.readlines())  # one point per line
        if num_pts > max_pts:
            max_pts = num_pts
        # Flag models outside the expected (512, 4096] point range.
        if num_pts > 4096 or num_pts <= 512:
            print(num_pts)
            print("Out of range")
    # BUG FIX: the original used a Python 2-style trailing comma
    # (print('Total models : '),), which in Python 3 prints a lone label
    # line and builds a throwaway tuple; print label and count together.
    print('Total models :', len(model_files))
print("Highest number of points: " + str(max_pts))

# Checking the number of parts in each class
folders = glob.glob("./data/train_label/*")
for folder in folders:
    label_files = glob.glob(folder + '/*')
    print(folder)
    max_part = 0
    for label_file in label_files:
        labels = read_labels(label_file)
        if max(labels) > max_part:
            max_part = max(labels)
    print(max_part)
|
def find(l=None, *args):
    """Print the number of elements in *l* (a list of names).

    The position-reporting loop from the original was left commented out:
    # for i, name in enumerate(l):
    #     print('the position of ' + name + ' is ' + str(i))

    Uses None instead of a mutable default list so calls don't share state.
    """
    if l is None:
        l = []
    print(len(l))


l = ['mi', 'sony', 'samsung']
a = 'sony'
# BUG FIX: the original called find(l) before the function was defined
# (NameError) and the def line was missing its trailing colon (SyntaxError).
find(l)
import os
import sys

# Make the project root (this script's parent directory) importable.
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))

from jimovpn import settings
from django.core.management import setup_environ

# NOTE(review): setup_environ was deprecated in Django 1.4 and removed in
# 1.6 — this bootstrap only works on very old Django versions.
setup_environ(settings)
|
from django.urls import path
from tutorials import views
# URL routes for the tutorials app.
urlpatterns = [
    path('', views.tutorial_list),                      # list/create tutorials
    path('<int:pk>', views.tutorial_detail),            # one tutorial by primary key
    path('published', views.tutorial_list_published)    # only published tutorials
]
# Read a count n and n integers, then repeatedly halve every value,
# printing how many full halving rounds happen before any value is
# odd or zero.
n = int(input())
values = list(map(int, input().split()))
result = 0
while (True):
    # Stop as soon as any of the first n values is odd or has reached zero.
    # (Renamed the original list from `all`, which shadowed the builtin.)
    if any(values[i] % 2 == 1 or values[i] == 0 for i in range(n)):
        print(result)
        exit()
    result += 1
    values = [v // 2 for v in values]
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 9 19:18:20 2017
@author: Gavrilov
"""
# Example: squaring a number by repeated addition.
x = 27          # the value to square
ans = 0         # running total; ends up holding x * x
itersLeft = x   # additions still to perform
while itersLeft != 0:
    ans += x          # add x one more time
    itersLeft -= 1    # one fewer addition remaining
print(str(x)+"*"+str(x)+"="+str(ans))
|
from decimal import Decimal
import Domoticz
from devices.device import Device
class TemperatureHumiditySensor(Device):
    """Domoticz device wrapper for a combined temperature + humidity sensor."""

    def create_device(self, unit, device_id, device_name):
        # Register a combined "Temp+Hum" device in Domoticz.
        return Domoticz.Device(Unit=unit, DeviceID=device_id, Name=device_name, TypeName="Temp+Hum").Create()

    def get_numeric_value(self, value, device):
        # Temp+Hum devices carry their state in the string value; nValue is unused.
        return 0

    def get_string_value(self, value, device):
        # Domoticz expects "<temperature>;<humidity>;<humidity status>".
        return ';'.join([
            str(round(Decimal(value['temperature']), 1)),  # one decimal place
            str(value['humidity']),
            '0'  # Humidity status (0 - Normal, 1 - Comfort, 2 - Dry, 3 - Wet)
        ])
|
#!/usr/local/bin/python
# coding: utf-8
import sys
import pkg_resources
from framgiaci.report_app import ReportApplication
from framgiaci.commands.run_finish import RunFinishCommand
from framgiaci.commands.run_report import RunReportCommand
from framgiaci.commands.run_test import RunTestCommand
from framgiaci.commands.init_template import InitTemplateCommand
from framgiaci.commands.run_all import RunAllCommand
from framgiaci.commands.check_config import CheckConfigCommand
from framgiaci.commands.show_config import ShowConfigCommand
# Project configuration file expected at the repository root.
YAML_CONFIGURE_FILE = '.framgia-ci.yml'
# Temporary file holding intermediate test results between commands.
RESULT_TEMP_FILE = '.framgia-ci-result.temp.yml'
# Installed package version, read from setuptools metadata.
VERSION = pkg_resources.require("framgia-ci")[0].version

# Every CLI sub-command registered with the application.
COMMANDS = [
    RunTestCommand, RunReportCommand, RunFinishCommand, InitTemplateCommand,
    CheckConfigCommand, ShowConfigCommand, RunAllCommand
]
def main():
    """Configure the report application, register every command, run it, and exit 0."""
    print('Framgia CI Report Tool', VERSION)
    application = ReportApplication()
    application.config(YAML_CONFIGURE_FILE, RESULT_TEMP_FILE)
    for command_cls in COMMANDS:
        application.register_command(command_cls)
    application.run()
    sys.exit(0)
# Script entry point.
if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
import sys
import tweepy
import couchdb
import json
import webbrowser
# Query terms
# CouchDB connection used to persist matching tweets (Python 2 script).
server = couchdb.Server('http://localhost:5984')
DB = 'database'

# Get these values from your application settings
# SECURITY NOTE(review): real-looking Twitter API credentials are hard-coded
# and committed here; they should be revoked and loaded from the environment
# or a config file excluded from version control.
CONSUMER_KEY = 'fFRVfkuoNyafZglDwDGKpWF1o'
CONSUMER_SECRET = 'lRMw1avbxoKF13eJ0CeF9WYC6jqMga4310mz6O3uyBQdOUDYkC'

# Get these values from the "My Access Token" link located in the
# margin of your application details, or perform the full OAuth
# dance
ACCESS_TOKEN = '978717116-53nmofGg5IrJoCSOaeeHp6eBWBkQFWqtRKtSDeK4'
ACCESS_TOKEN_SECRET = 'DMpeXk04NXoMKeZw6t7VXiOxc2k8vQjwkr8PPYZ79IXJn'

auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
# Note: Had you wanted to perform the full OAuth dance instead of using
# an access key and access secret, you could have uses the following
# four lines of code instead of the previous line that manually set the
# access token via auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
#
# auth_url = auth.get_authorization_url(signin_with_twitter=True)
# webbrowser.open(auth_url)
# verifier = raw_input('PIN: ').strip()
# auth.get_access_token(verifier)
class CustomStreamListener(tweepy.StreamListener):
    """Streaming listener that saves each matching status into CouchDB.

    NOTE(review): this is Python 2 code (old-style except clause and
    `print >>` statements below); it will not run under Python 3.
    """

    def on_status(self, status):
        # We'll simply print some values in a tab-delimited format
        # suitable for capturing to a flat file but you could opt
        # store them elsewhere, retweet select statuses, etc.
        data = json.loads(status)
        try:
            # Create the database on first use ...
            db = server.create(DB)
        except couchdb.http.PreconditionFailed, e:
            # ... or open it if it already exists.
            db = server[DB]
        db.save(data)
        return True

    def on_error(self, status_code):
        print >> sys.stderr, 'Encountered error with status code:', status_code
        return True  # Don't kill the stream

    def on_timeout(self):
        print >> sys.stderr, 'Timeout...'
        return True  # Don't kill the stream
# Create a streaming API and set a timeout value of 1 minute
streaming_api = tweepy.streaming.Stream(auth, CustomStreamListener(), timeout=60)

# Optionally filter the statuses you want to track by providing a list
# of users to "follow"
# NOTE(review): the call below actually filters by tracked keywords
# (power-outage phrases), not by followed users.
streaming_api.filter(track = ['power outage','power cut','power failure','power blackout', 'powers out', 'no electricity', 'flickering lights'])
|
class Person:
    """A simple person record: first/last name, eye color and age.

    All constructor arguments have neutral defaults so `Person()` builds
    an "empty" person.
    """

    def __init__(self, first='', last='', eye_color='', age=0):
        self.first = first
        self.last = last
        self.eye_color = eye_color
        self.age = age

    def __repr__(self):
        # Developer-facing form; previously present but commented out, which
        # made print()/repr() show the unhelpful default object repr.
        return (repr(self.first) + ' ' +
                repr(self.last) + ' ' +
                repr(self.eye_color) + ' ' +
                repr(self.age))

    def __str__(self):
        # Human-readable form used by print() and str().
        return str('Name: ' +
                   self.first + ' ' +
                   self.last +
                   ', Eye Color: ' +
                   self.eye_color +
                   ', Age: ' + str(self.age))
if __name__ == "__main__":
    # Demo: build one Person and print it via print(), repr() and str().
    person1 = Person('john', 'smith', 'brown', 50)
    print(person1)
    print(repr(person1))
    print(str(person1))
|
from math import *
def getCos30():
    """Return cos(30 degrees).

    The original returned the truncated constant 0.866; sqrt(3)/2 is the
    exact value (~0.8660254), available via the module's `from math import *`.
    """
    return sqrt(3) / 2
def getSin30():
    """Return sin(30 degrees), which is exactly one half."""
    return 0.5
class Vector2():
    """A mutable 2-D vector with accessor methods plus static vector math."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def getX(self):
        return self.x

    def getY(self):
        return self.y

    def get(self):
        # Both components as a tuple.
        return (self.x, self.y)

    def setX(self, x):
        self.x = x

    def setY(self, y):
        self.y = y

    def set(self, x, y):
        self.x = x
        self.y = y

    @staticmethod
    def Zero():
        """The origin vector (0, 0)."""
        return Vector2(0, 0)

    @staticmethod
    def Plus(vec1, vec2):
        """Component-wise sum of two vectors."""
        ax, ay = vec1.get()
        bx, by = vec2.get()
        return Vector2(ax + bx, ay + by)

    @staticmethod
    def Minus(vec1, vec2):
        """Component-wise difference vec1 - vec2."""
        ax, ay = vec1.get()
        bx, by = vec2.get()
        return Vector2(ax - bx, ay - by)

    @staticmethod
    def Distance(vec1, vec2):
        """Euclidean distance between two points."""
        dx = vec2.getX() - vec1.getX()
        dy = vec2.getY() - vec1.getY()
        return sqrt(dx ** 2 + dy ** 2)

    @staticmethod
    def DistanceFromOrigin(vec):
        """Euclidean length of *vec*."""
        return sqrt(vec.getX() ** 2 + vec.getY() ** 2)

    @staticmethod
    def Lerp(vec1, vec2, percent):
        """Move *percent* of the way from vec1 toward vec2.

        Snaps to vec2 once the remaining step is negligible (<= 0.1 units),
        so repeated calls terminate instead of approaching asymptotically.
        """
        step = Vector2.Minus(vec2, vec1)
        scaled = Vector2(step.getX() * percent, step.getY() * percent)
        if abs(Vector2.DistanceFromOrigin(scaled)) <= 0.1:
            return vec2
        return Vector2.Plus(vec1, scaled)
def IsoToScreen(iso_vec ,width, height):
    """Convert isometric grid coordinates to screen pixel coordinates.

    Uses the standard 2:1 diamond projection for a tile of size
    (width x height): screen_x = (ix - iy) * width/2,
    screen_y = (ix + iy) * height/2.
    """
    return Vector2((iso_vec.getX() - iso_vec.getY()) * int(width)/2
    , (iso_vec.getX() + iso_vec.getY()) * int(height)/2)
def ScreenToIso(screen_vec, width, height):
    """Convert screen pixel coordinates back to isometric grid coordinates.

    NOTE(review): this does not look like the algebraic inverse of
    IsoToScreen -- here `/ int(width)/2` divides by width and then by 2
    (i.e. x/width/2), and the second component mixes `height` and `width`.
    Confirm against IsoToScreen before relying on round-tripping.
    """
    return Vector2((screen_vec.getX() / int(width)/2 + screen_vec.getY() / int(width)/2) /2
    , (screen_vec.getY() / int(height)/2 -(screen_vec.getX() / int(width)/2)) /2)
if __name__=='__main__':
    # Manual check left over from development (Python 2 print syntax).
    #print IsoToScreen(Vector2(2,1) , 128, 64).getX()
    pass
#!/usr/bin/python3
# Exercise driver: the module name starts with a digit ("5-square"), so it
# cannot be imported with a plain `import` statement; __import__ is used.
Square = __import__('5-square').Square
my_square = Square(3)
my_square.my_print()
print("--")
# Resize and print again.
my_square.size = 10
my_square.my_print()
print("--")
# A size of 0 exercises the empty-square edge case.
my_square.size = 0
my_square.my_print()
print("--")
|
import argparse
import torch
import torch.onnx
import onnx
from model.dataset.dataloader import make_dataloader
from model.model.make_model import make_model
from model.config import cfg
import onnxruntime as ort
import numpy as np
import os
import onnxruntime
# Run from the repository root (one level above this script's directory).
current_dir = os.getcwd()
parent_dir = os.path.abspath(os.path.join(current_dir, os.pardir))
os.chdir(parent_dir)
print(os.getcwd())

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="")
    parser.add_argument(
        "--config_file", default="", help="path to config file", type=str
    )
    parser.add_argument("opts", help="Modify config options using the command-line", default=None,
                        nargs=argparse.REMAINDER)
    parser.add_argument("--local_rank", default=0, type=int)
    args = parser.parse_args()

    # Overlay the config file and command-line overrides, then lock the config.
    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    # Bug fix: the original literal mixed escaped and unescaped backslashes
    # ("F:\\nis\Codes\...") -- sequences like "\C" are invalid escape
    # sequences (DeprecationWarning today, slated to become an error in
    # future Python versions). A raw string encodes the same path safely.
    PATH = r"F:\nis\Codes\ANDROID PROJECTs\PlantAi\model\log\plant_ai_resnet18_96\best_model.pth"
    # _, _, _, _, num_classes = make_dataloader(cfg)
    num_classes = 39  # hard-coded to avoid building the full dataloader
    model = make_model(cfg, num_classes)
    model.load_state_dict(torch.load(PATH))
    # model = model.float()
    # Export with a dummy (1, 3, 256, 256) input; pin opset_version=12 here
    # if a specific opset is required.
    torch.onnx.export(model, torch.zeros((1, 3, 256, 256)), "model.onnx")

    # Sanity-check the exported model with ONNX Runtime.
    session = onnxruntime.InferenceSession("model.onnx")
    input_name = session.get_inputs()[0].name
    output_name = session.get_outputs()[0].name
    x = np.zeros((1, 3, 256, 256), dtype=np.float32)
    output_data = session.run([output_name], {input_name: x})[0]
|
# Yahoo Financeからリアルタイムデータを取得する
import yfinance as yf
def main(code):
    """Print every Yahoo Finance info field for ticker *code*, one per line.

    Keys are left-aligned in a 40-character column, followed by ':' and
    the raw value.
    """
    info = yf.Ticker(code).info
    for key, value in info.items():
        print("{:<40}:{}".format(key, value))
if __name__ == '__main__':
    import sys
    # Expect exactly one argument: the ticker code (e.g. "7203.T").
    if len(sys.argv) != 2:
        # (Japanese) usage message: "please enter a ticker code".
        print('銘柄コードを入力して下さい')
        print('python {} コード'.format(__file__))
        sys.exit()
    main(sys.argv[1])
|
from tensorflow.keras.applications import VGG16
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten
vgg16 = VGG16(weights='imagenet', include_top=False, input_shape=(32, 32, 3))
# include_top=False is required to be able to choose a custom input_shape.
print(vgg16.weights)
# Freeze the backbone: its weights will not be updated during training.
vgg16.trainable = False
vgg16.summary()
print(len(vgg16.weights))            # total weight tensors
print(len(vgg16.trainable_weights))  # 0 while frozen
'''
Total params: 14,714,688
Trainable params: 0
Non-trainable params: 14,714,688
'''
# Unfreeze: every layer becomes trainable again.
vgg16.trainable = True
vgg16.summary()
print(len(vgg16.weights))
print(len(vgg16.trainable_weights))
'''
Total params: 14,714,688
Trainable params: 14,714,688
Non-trainable params: 0
'''
import res_partner
import marketing_campaign
|
import csv
import xlwt
import json
from books.models import Book, Author, Log, RequestBook
from books.forms import BookForm, AuthorForm
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse_lazy
from django.views.generic import (
CreateView, ListView, UpdateView, DeleteView, TemplateView, View, DetailView,
)
from django.http import HttpResponse
from books.utils import display
from django.shortcuts import redirect, get_object_or_404
from books import model_choices as mch
from django.contrib import messages
# from pdb import set_trace; set_trace()
class FormUserKwargMixin:
    """Mixin for form views: passes the requesting user to the form.

    The form class must accept a ``user`` keyword argument.
    """

    def get_form_kwargs(self):
        form_kwargs = super().get_form_kwargs()
        form_kwargs['user'] = self.request.user
        return form_kwargs
class Index(TemplateView):
    """Landing page."""
    template_name = 'index.html'


class BookCreate(FormUserKwargMixin, CreateView):
    """Create a Book owned by the current user, then flash a success message."""
    model = Book
    success_url = reverse_lazy('books:my-books')
    form_class = BookForm

    def get_success_url(self):
        messages.success(self.request, 'Book was created!')
        return super().get_success_url()


class BookViewing(DetailView):
    """Read-only detail page for a single Book."""
    model = Book
    template_name = 'books/book_viewing.html'
    queryset = Book.objects.all()

    def get_queryset(self):
        # NOTE(review): this override returns the queryset unchanged and can
        # likely be removed.
        queryset = super().get_queryset()
        return queryset
class BookList(ListView):
    """All books except the current user's own."""
    queryset = Book.objects.all().select_related('author')

    def get_queryset(self):
        queryset = super().get_queryset()
        # NOTE(review): request.user may be anonymous here; confirm whether
        # this view is reachable without authentication.
        return queryset.exclude(user=self.request.user)


class MyBooksList(LoginRequiredMixin, ListView):
    """Books owned by the current user."""
    queryset = Book.objects.all().select_related('author')
    template_name = 'books/my_books.html'

    def get_queryset(self):
        queryset = super().get_queryset()
        return queryset.filter(user=self.request.user)


class MyRequestedBooks(LoginRequiredMixin, ListView):
    """Borrow requests made by the current user."""
    queryset = RequestBook.objects.all()

    def get_queryset(self):
        queryset = super().get_queryset()
        return queryset.filter(recipient=self.request.user)


class RequestedBooks(LoginRequiredMixin, ListView):
    """Borrow requests for books owned by the current user."""
    queryset = RequestBook.objects.all()
    template_name = 'books/requested_book_list.html'

    def get_queryset(self):
        queryset = super().get_queryset()
        return queryset.filter(book__user=self.request.user)
class BookUpdate(FormUserKwargMixin, UpdateView):
    """Edit an existing Book, then flash a success message."""
    model = Book
    success_url = reverse_lazy('books:my-books')
    form_class = BookForm

    def get_success_url(self):
        messages.success(self.request, 'Book was updated!')
        return super().get_success_url()


class BookDelete(DeleteView):
    """Delete a Book and return to the public book list."""
    model = Book
    success_url = reverse_lazy('books:list')

    def get_success_url(self):
        messages.success(self.request, 'Book was deleted!')
        return super().get_success_url()
class RequestBookCreate(LoginRequiredMixin, View):
    """Create a borrow request for a book on behalf of the logged-in user."""

    def get(self, request, book_id):
        book = get_object_or_404(Book, pk=book_id)
        # get_or_create avoids the check-then-create race of the original
        # filter(...).exists() + create() pair: two concurrent requests could
        # both pass the exists() check and create duplicate rows.
        RequestBook.objects.get_or_create(
            book=book, recipient=request.user, status=mch.STATUS_IN_PROGRESS,
        )
        return redirect('books:list')
class _ChangeRequestBaseView(LoginRequiredMixin, View):
    """Template for one RequestBook status transition.

    Subclasses set CURRENT_STATUS (required current value, enforced by the
    404 lookup), NEW_STATUS (value written), REDIRECT_NAME (where to go
    afterwards) and optionally MESSAGE (flash message shown to the user).
    """
    CURRENT_STATUS = None
    NEW_STATUS = None
    REDIRECT_NAME = None
    MESSAGE = None

    def get(self, request, request_id):
        # 404 unless the request exists AND is in the expected state.
        request_obj = get_object_or_404(RequestBook, pk=request_id, status=self.CURRENT_STATUS)
        request_obj.status = self.NEW_STATUS
        request_obj.save(update_fields=('status',))
        if self.MESSAGE:
            messages.add_message(request, messages.INFO, self.MESSAGE)
        return redirect(self.REDIRECT_NAME)
# Concrete status transitions for the book-lending workflow: each subclass
# pins the expected current status, the status to move to, and where to
# redirect the user afterwards.
class RequestBookConfirm(_ChangeRequestBaseView):
    CURRENT_STATUS = mch.STATUS_IN_PROGRESS
    NEW_STATUS = mch.STATUS_CONFIRMED
    REDIRECT_NAME = 'books:requested-books'
    MESSAGE = 'Book Request Was Confirmed!'


class RequestBookReject(_ChangeRequestBaseView):
    CURRENT_STATUS = mch.STATUS_IN_PROGRESS
    NEW_STATUS = mch.STATUS_REJECT
    REDIRECT_NAME = 'books:requested-books'
    MESSAGE = 'Book Request Was Rejected!'


class RequestBookSentViaEmail(_ChangeRequestBaseView):
    CURRENT_STATUS = mch.STATUS_CONFIRMED
    NEW_STATUS = mch.STATUS_SENT_TO_RECIPIENT
    REDIRECT_NAME = 'books:requested-books'


class RequestBookReceivedBook(_ChangeRequestBaseView):
    CURRENT_STATUS = mch.STATUS_SENT_TO_RECIPIENT
    NEW_STATUS = mch.STATUS_RECIPIENT_RECEIVED_BOOK
    REDIRECT_NAME = 'books:my-requested-books'


class RequestBookSentBackToOwner(_ChangeRequestBaseView):
    CURRENT_STATUS = mch.STATUS_RECIPIENT_RECEIVED_BOOK
    NEW_STATUS = mch.STATUS_SENT_BACK_TO_OWNER
    REDIRECT_NAME = 'books:my-requested-books'


class RequestBookOwnerReceivedBack(_ChangeRequestBaseView):
    CURRENT_STATUS = mch.STATUS_SENT_BACK_TO_OWNER
    NEW_STATUS = mch.STATUS_OWNER_RECEIVED_BACK
    REDIRECT_NAME = 'books:requested-books'
class AuthorCreate(FormUserKwargMixin, CreateView):
    """Create an Author owned by the current user."""
    model = Author
    success_url = reverse_lazy('books:my-authors')
    form_class = AuthorForm

    def get_success_url(self):
        messages.success(self.request, 'Author was created!')
        return super().get_success_url()


class AuthorList(ListView):
    """All authors."""
    queryset = Author.objects.all()


class MyAuthorsList(LoginRequiredMixin, ListView):
    """Authors created by the current user."""
    queryset = Author.objects.all()
    template_name = 'books/my_authors.html'

    def get_queryset(self):
        queryset = super().get_queryset()
        return queryset.filter(user=self.request.user)


class AuthorUpdate(FormUserKwargMixin, UpdateView):
    """Edit an existing Author."""
    model = Author
    success_url = reverse_lazy('books:my-authors')
    form_class = AuthorForm

    def get_success_url(self):
        messages.success(self.request, 'Author was updated!')
        return super().get_success_url()
class AuthorDelete(DeleteView):
    """Delete an Author, then redirect to the author list."""
    model = Author
    success_url = reverse_lazy('books:authors-list')

    def get_success_url(self):
        # Bug fix: the original flashed 'Author was updated!' after a delete
        # (copy-paste from AuthorUpdate).
        messages.success(self.request, 'Author was deleted!')
        return super().get_success_url()
class LogList(ListView):
    """Plain listing of Log entries."""
    queryset = Log.objects.all()


class DownloadCSVBookView(View):
    """Stream all books as a semicolon-delimited CSV attachment."""

    # Column spec: each entry is an attribute path resolved by books.utils.display.
    HEADERS = (
        'id',
        'title',
        'author.full_name',
        'author.get_full_name',
        'publish_year',
        'condition',
    )

    def get(self, request):
        # Create the HttpResponse object with the appropriate CSV header.
        response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = 'attachment; filename="books.csv"'
        writer = csv.writer(response, delimiter=';')
        writer.writerow(self.HEADERS)
        # iterator() keeps memory bounded for large tables.
        for book in Book.objects.all().select_related('author').iterator():
            writer.writerow([
                display(book, header)
                for header in self.HEADERS
            ])
        return response
class DownloadXLSXBookView(View):
    """Stream all books as a legacy Excel (.xls) attachment built with xlwt."""

    # Column spec: each entry is an attribute path resolved by books.utils.display.
    HEADERS = (
        'id',
        'title',
        'author.full_name',
        'author.get_full_name',
        'publish_year',
        'condition',
    )

    def get(self, request):
        response = HttpResponse(content_type='application/ms-excel')
        response['Content-Disposition'] = 'attachment; filename="books.xls"'
        wb = xlwt.Workbook(encoding='utf-8')
        ws = wb.add_sheet("sheet1")
        row_num = 0
        # Bold style for the header row only.
        font_style = xlwt.XFStyle()
        font_style.font.bold = True
        for col_num in range(len(self.HEADERS)):
            ws.write(row_num, col_num, self.HEADERS[col_num], font_style)
        # Regular style for data rows.
        font_style = xlwt.XFStyle()
        for book in Book.objects.all().select_related('author').iterator():
            row_num = row_num + 1
            for col_num in range(len(self.HEADERS)):
                ws.write(row_num, col_num, display(book, self.HEADERS[col_num]), font_style)
        wb.save(response)
        return response
class DownloadCSVAuthorView(View):
    """Stream all authors as a semicolon-delimited CSV attachment."""

    # Column spec resolved by books.utils.display.
    HEADERS = (
        'id',
        'full_name',
        'date_of_birth',
        'date_of_death',
        'country',
        'gender',
        'language',
    )

    def get(self, request):
        # Create the HttpResponse object with the appropriate CSV header.
        response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = 'attachment; filename="authors.csv"'
        writer = csv.writer(response, delimiter=';')
        writer.writerow(self.HEADERS)
        for author in Author.objects.all().iterator():
            writer.writerow([
                display(author, header)
                for header in self.HEADERS
            ])
        return response
class DownloadXLSXAuthorView(View):
    """Stream all authors as a legacy Excel (.xls) attachment built with xlwt."""

    # Column spec resolved by books.utils.display.
    HEADERS = (
        'id',
        'full_name',
        'date_of_birth',
        'date_of_death',
        'country',
        'gender',
        'language',
    )

    def get(self, request):
        response = HttpResponse(content_type='application/ms-excel')
        # Bug fix: xlwt produces the legacy BIFF (.xls) format, but the file
        # was advertised as "authors.xlsx"; Excel warns/refuses on such
        # mislabelled files. Use .xls, matching DownloadXLSXBookView.
        response['Content-Disposition'] = 'attachment; filename="authors.xls"'
        wb = xlwt.Workbook(encoding='utf-8')
        ws = wb.add_sheet("sheet1")
        row_num = 0
        # Bold style for the header row only.
        font_style = xlwt.XFStyle()
        font_style.font.bold = True
        for col_num in range(len(self.HEADERS)):
            ws.write(row_num, col_num, self.HEADERS[col_num], font_style)
        # Regular style for data rows.
        font_style = xlwt.XFStyle()
        for author in Author.objects.all().iterator():
            row_num = row_num + 1
            for col_num in range(len(self.HEADERS)):
                ws.write(row_num, col_num, display(author, self.HEADERS[col_num]), font_style)
        wb.save(response)
        return response
class BookApiList(View):
    """Tiny JSON API: id and title of every book."""

    def get(self, request):
        queryset = Book.objects.all()
        results = [
            {'id': book.id, 'title': book.title}
            for book in queryset
        ]
        data = {
            'results': results
        }
        return HttpResponse(json.dumps(data), content_type="application/json")
|
# -*- coding: utf-8 -*-
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.views import APIView
from src.api.serializers.user_serializer import CitySerializer
from src.users.models import City
class ApiUserDetailView(APIView):
    # NOTE(review): `user_cities` is written as a function-based view -- it is
    # decorated with @api_view and takes `request` as its first positional
    # argument instead of `self` -- yet it is nested inside an APIView class.
    # Confirm whether it should be a module-level function routed directly.
    @api_view(['GET'])
    def user_cities(request):
        """
        Return the list of cities registered in the system, ordered by id.
        """
        cities = City.objects.all().order_by('id')
        serializer = CitySerializer(cities, many=True)
        return Response(serializer.data)
|
import sys
# Classic "Weird or Not Weird" exercise.
# NOTE(review): Python 2 code (raw_input, print statements); it will not
# parse under Python 3.
N = int(raw_input().strip())
if N % 2 != 0:
    # Odd numbers are always weird.
    print 'Weird'
elif N >= 2 and N <= 5:
    print 'Not Weird'
elif N >= 6 and N <= 20:
    print 'Weird'
else:
    print 'Not Weird'
|
# MEG object class
# %% Importing
# System
import os
import sys
import pickle
# Computing
import mne
import sklearn
import numpy as np
# Private settings
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'private')) # noqa
from dir_settings import RAW_DIR, MEMORY_DIR
from parameter_settings import PARAMETERS
# Local tools
sys.path.append(os.path.dirname(__file__)) # noqa
from file_tools import read_raw_fif, find_files
from remove_button_effect import Button_Effect_Remover
# %% Local settings
def prompt(msg):
    """Print [msg] with a '>> ' prefix so pipeline output is easy to spot.

    Args:
        msg ({obj}): Message to be printed
    """
    print('>> {}'.format(msg))
def relabel(events, sfreq):
    """Re-label 2-> 4 when 2 is near to 1
    Arguments:
        events {array} -- The events array, [[idx], 0, [label]],
        assume the [idx] column has been sorted.
        sfreq {float} -- The sample frequency
    Returns:
        {array} -- The re-labeled events array (modified IN PLACE; the same
        array object is returned for convenience).
    """
    # Init the pointer [j]
    # NOTE: [j] is deliberately NOT reset between '1' events -- the scan is a
    # single forward pass shared by all '1' events, which is valid because
    # the [idx] column is sorted.
    j = 0
    # Repeat for every '1' event, remember as [a]
    for a in events[events[:, -1] == 1]:
        # Do until...
        while True:
            # Break,
            # if [j] is far enough from latest '2' event,
            # it should jump to next [a]
            if events[j, 0] > a[0] + sfreq:
                break
            # Switch '2' into '4' event if it is near enough to the [a] event
            if all([events[j, -1] == 2,
                    abs(events[j, 0] - a[0]) < sfreq]):
                events[j, -1] = 4
            # Add [j]
            j += 1
            # If [j] is out of range of events,
            # break out the 'while True' loop.
            if j == events.shape[0]:
                break
    # Return re-labeled [events]
    return events
# %% Class
class MEG_Worker():
    """Pipeline wrapper for one MEG recording session.

    Stages: load raw .fif files -> band-pass filter -> epoch -> Xdawn
    denoise -> remove button-press artifacts. Each stage's result is cached
    under MEMORY_DIR and recalled on later runs when [use_memory] is True.
    """

    def __init__(self, running_name,
                 parameters=PARAMETERS,
                 use_memory=True):
        # [running_name] selects the RAW_DIR subfolder and names cache files.
        self.running_name = running_name
        self.parameters = parameters
        self.use_memory = use_memory

    def _get_raw(self, dir, ext):
        # Get concatenated raws from [dir]
        # Get good files path
        files_path = find_files(dir, ext=ext)
        # Get concatenated raw from files_path
        raw = mne.concatenate_raws([read_raw_fif(path)
                                    for path in files_path])
        # Report and return
        prompt(f'Got {raw} from {files_path}')
        return raw

    def _filter_raw(self, l_freq, h_freq):
        # Filter self.raw by [l_freq, h_freq],
        # IN-PLACE
        self.raw.load_data()
        self.raw.filter(l_freq=l_freq, h_freq=h_freq)
        # Report
        prompt(f'Filtered {self.raw} using ({l_freq}, {h_freq})')

    def _get_epochs(self, stim_channel):
        # Get epochs from [raw] as [stim_channel]
        # Read and relabel events
        events = mne.find_events(self.raw,
                                 stim_channel=stim_channel)
        sfreq = self.raw.info['sfreq']
        events = relabel(events, sfreq)
        # Get Epochs
        epochs = mne.Epochs(self.raw,
                            events=events,
                            picks=self.parameters['picks'],
                            tmin=self.parameters['tmin'],
                            tmax=self.parameters['tmax'],
                            decim=self.parameters['decim'],
                            detrend=self.parameters['detrend'],
                            baseline=None)
        # Keep only the event names configured in parameters['events'].
        epochs = epochs[self.parameters['events']]
        # Report and return
        prompt(f'Got {epochs}')
        return epochs

    def _denoise(self, epochs, use_xdawn=True):
        # Denoise [epochs] using Xdawn
        # Prepare epochs
        epochs.load_data()
        epochs.apply_baseline(self.parameters['baseline'])
        # Init and fit xdawn (fitted once and reused across calls).
        if hasattr(self, 'xdawn'):
            xdawn = self.xdawn
        else:
            xdawn = mne.preprocessing.Xdawn(
                n_components=self.parameters['n_components'])
            # NOTE(review): Xdawn is fitted on self.epochs rather than on the
            # [epochs] argument passed in -- confirm this is intentional.
            xdawn.fit(self.epochs)
            self.xdawn = xdawn
        # Apply Xdawn and return
        return xdawn.apply(epochs)[self.parameters['event']]

    def _remove_button_effect(self, e1, e3):
        """Remove button effect from target epochs
        Args:
            e1 ({str}): Name of event 1
            e3 ({str}): Name of event 3
        Returns:
            Clean epochs with button effect removed,
            Paired lags of samples and estimated button effect timeline.
        """
        remover = Button_Effect_Remover(self.denoise_epochs,
                                        sfreq=self.raw.info['sfreq'])
        clean_epochs, paired_lags_timelines = remover.zero_out_button(e1=e1,
                                                                      e3=e3)
        prompt(f'Removed button effect from target epochs')
        return clean_epochs, paired_lags_timelines

    def pipeline(self, band_name,
                 ext='_ica-raw.fif',
                 stim_channel='UPPT001'):
        """Pipeline of standard operations
        Args:
            band_name ({str}): Band name of filter raw
            ext ({str}, optional): The extend name of interest files. Defaults to '_ica-raw.fif'.
            stim_channel ({str}, optional): The channel name of stimuli. Defaults to 'UPPT001'.
        """
        # Prepare memory stuffs ------------------------------------------------------
        # Raw name
        memory_name = f'{self.running_name}-{band_name}-epo.fif'
        memory_path = os.path.join(MEMORY_DIR, memory_name)
        # Denoise name
        memory_denoise_name = f'{self.running_name}-{band_name}-denoise-epo.fif'
        memory_denoise_path = os.path.join(MEMORY_DIR, memory_denoise_name)
        # Clean name
        memory_clean_name = [f'{self.running_name}-{band_name}-clean-epo.fif',
                             f'{self.running_name}-{band_name}-clean-lags.pkl']
        memory_clean_path = [os.path.join(MEMORY_DIR, memory_clean_name[0]),
                             os.path.join(MEMORY_DIR, memory_clean_name[1])]
        # Get raw -------------------------------------------------------------------
        raw_dir = os.path.join(RAW_DIR, self.running_name)
        self.raw = self._get_raw(raw_dir, ext)
        # Raw epochs ----------------------------------------------------------------
        # NOTE(review): the try/except-on-assert pattern below uses a bare
        # except, which also hides genuine read errors, not just cache misses.
        try:
            assert(self.use_memory)
            # Recall epochs from memory
            self.epochs = mne.read_epochs(memory_path)
            prompt(f'Raw epochs are recalled from memory: {self.epochs}')
        except:
            # Filter raw
            l_freq, h_freq = self.parameters['bands'][band_name]
            self._filter_raw(l_freq=l_freq, h_freq=h_freq)
            # Get epochs
            self.epochs = self._get_epochs(stim_channel)
            # Remember if [use_memory]
            if self.use_memory:
                self.epochs.save(memory_path)
        # Denoise epochs ------------------------------------------------------------
        try:
            assert(self.use_memory)
            # Recall denoise epochs from memory
            self.denoise_epochs = mne.read_epochs(memory_denoise_path)
            prompt(
                f'Denoise epochs are recalled from memory: {self.denoise_epochs}')
        except:
            # Denoise epoch
            self.denoise_epochs = self._denoise(self.epochs.copy())
            # Remember if [use_memory]
            if self.use_memory:
                self.denoise_epochs.save(memory_denoise_path)
        # Remove button effect ------------------------------------------------------
        try:
            assert(self.use_memory)
            # Recall clean epochs and lags from memory
            self.clean_epochs = mne.read_epochs(memory_clean_path[0])
            with open(memory_clean_path[1], 'rb') as f:
                self.paired_lags_timelines = pickle.load(f)
            prompt(
                f'Clean epochs are recalled from memory: {self.clean_epochs}')
        except:
            # Remove button effect
            # NOTE(review): _remove_button_effect() requires arguments [e1]
            # and [e3] but is called here without them -- this branch raises
            # TypeError whenever the cache is missing. Confirm the intended
            # event names and pass them explicitly.
            clean_epochs, paired_lags_timelines = self._remove_button_effect()
            self.clean_epochs = clean_epochs
            self.paired_lags_timelines = paired_lags_timelines
            # Remember if [use_memory]
            if self.use_memory:
                self.clean_epochs.save(memory_clean_path[0])
                with open(memory_clean_path[1], 'wb') as f:
                    pickle.dump(self.paired_lags_timelines, f)
# %%
# running_name = 'MEG_S03'
# band_name = 'U07'
# worker = MEG_Worker(running_name=running_name)
# worker.pipeline(band_name=band_name)
# %%
|
from adb import adb
import time
a = adb()
# Ring buffer of the last 5 battery readings; None marks "no reading yet".
# Bug fixes vs. the original: it indexed into an EMPTY list
# (levels[current] raised IndexError on the very first iteration), logged an
# undefined name `level`, and compared against slots that had never been
# filled during the first lap around the buffer.
levels = [None] * 5
current = 0
with open('battery_stats.log', 'a') as log:
    while True:
        levels[current] = a.battery_level()
        log.write("{} - {}\n".format(time.strftime("%H:%M"), levels[current]))
        old = (current - 1) % 5
        # Only warn once the previous slot holds a real reading.
        if levels[old] is not None and levels[current] < levels[old]:
            log.write(" - Better check dat batt, girl.\n")
        current = (current + 1) % 5
        time.sleep(60)
|
import cv2
import numpy as np
from PIL import ImageGrab
def screenrecorder():
    """Record the primary screen at ~5 FPS into output.mp4 until Esc is pressed.

    Shows a live preview window while recording.
    """
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    # NOTE(review): XVID is an AVI-era codec; pairing it with a .mp4
    # container may yield an unplayable file on some players -- confirm, or
    # switch to 'mp4v'. The frame size must match the actual grab size.
    out = cv2.VideoWriter("output.mp4", fourcc, 5.0, (1920, 1080))
    while True:
        img = ImageGrab.grab()
        img_np = np.array(img)
        frame = cv2.cvtColor(img_np, cv2.COLOR_BGR2RGB)
        cv2.imshow("Screen Recorder", frame)
        out.write(frame)
        if cv2.waitKey(1) == 27:  # Esc key
            break
    out.release()
    # Bug fix: cv2.destroyWindow() requires a window-name argument and the
    # original call raised TypeError; destroyAllWindows() closes everything.
    cv2.destroyAllWindows()
screenrecorder()
|
#import sys
#input = sys.stdin.readline
def main():
    """Read "sx sy gx gy" from stdin and print the crossing x-coordinate.

    When the two x values coincide the answer is that integer x; otherwise
    it is the y-weighted average (sx*gy + gx*sy) / (gy + sy).
    """
    start_x, start_y, goal_x, goal_y = map(int, input().split())
    if start_x == goal_x:
        answer = start_x
    else:
        answer = (start_x * goal_y + goal_x * start_y) / (goal_y + start_y)
    print(answer)
if __name__ == '__main__':
    main()
|
import json
# Load the thermostat configuration/state from its JSON file.
with open("termostato.json", "r") as termostato_config:
    termostato = json.load(termostato_config)
# Show the battery proxy reading.
print(termostato["proxy_bateria"])
class Solution(object):
    """LeetCode 55 "Jump Game" -- greedy reachability check."""

    def canJump(self, nums):
        """Return True if the last index is reachable from index 0.

        Greedily tracks the farthest index reachable so far; if the scan
        ever visits an index beyond that bound, the end is unreachable.
        Runs in O(n) time and O(1) space.
        """
        farthest = 0
        for idx, jump in enumerate(nums):
            if idx > farthest:
                return False
            farthest = max(farthest, idx + jump)
        return True
|
from summertime.model.base_model import SummModel
class SingleDocSummModel(SummModel):
    """Base class for single-document summarization models.

    Adds input validation on top of SummModel: the corpus must be a
    ``List[str]`` and, for query-based models, the query must be too.
    """

    def __init__(
        self,
        trained_domain: str = None,
        max_input_length: int = None,
        max_output_length: int = None,
    ):
        # Forward everything to the SummModel base constructor.
        super(SingleDocSummModel, self).__init__(
            trained_domain=trained_domain,
            max_input_length=max_input_length,
            max_output_length=max_output_length,
        )

    @classmethod
    def assert_summ_input_type(cls, corpus, query):
        """Validate the types of [corpus] and [query]; return a language code.

        Raises TypeError when corpus (or query, if given) is not List[str].
        Prints a warning when non-ASCII text is detected and always returns
        "en" (English is assumed; only a warning is issued otherwise).
        """
        if not isinstance(corpus, list):
            raise TypeError(
                "Single-document summarization requires corpus of `List[str]`."
            )
        if not all([isinstance(ins, str) for ins in corpus]):
            raise TypeError(
                "Single-document summarization requires corpus of `List[str]`."
            )
        if query is not None:
            if not isinstance(query, list):
                raise TypeError(
                    "Query-based single-document summarization requires query of `List[str]`."
                )
            if not all([isinstance(q, str) for q in query]):
                raise TypeError(
                    "Query-based single-document summarization requires query of `List[str]`."
                )
        warning = "Warning: non-ASCII input corpus detected!\n\
    If this is not English, consider using \
    one of our multilingual models such as summertime.model.multilingual.MBartModel ."
        # python 3.6 does not have string.ascii() functionality, so we use this instead
        try:
            if all([isinstance(ins, list) for ins in corpus]):
                [ins.encode("ascii") for batch in corpus for ins in batch]
            elif isinstance(corpus, list):
                [ins.encode("ascii") for ins in corpus]
        except UnicodeEncodeError:
            print(warning)
        return "en"  # ISO-639-1 code for English
    # @classmethod
    # def show_supported_languages(cls) -> str:
    #     return "english"
|
import unittest
import fraction
class FractionClassTests(unittest.TestCase):
    """Unit tests for the project-local fraction.Fraction class."""

    def setUp(self):
        # Fixtures shared by the tests below.
        self.such_number = fraction.Fraction(3, 4)
        self.such_number2 = fraction.Fraction(5, 4)
        self.simplify_number = fraction.Fraction(6, 3)

    def test_init_(self):
        self.assertEqual(self.such_number.denominator, 4)
        self.assertEqual(self.such_number.nominator, 3)

    def test_add_(self):
        # 3/4 + 5/4 == 2/1
        self.such_number3 = fraction.Fraction(2, 1)
        self.assertEqual(
            self.such_number + self.such_number2, self.such_number3)

    def test_sub_(self):
        # 3/4 - 5/4 == -1/2
        self.such_number4 = fraction.Fraction(-1, 2)
        self.assertEqual(
            self.such_number - self.such_number2, self.such_number4)

    def test_simplify(self):
        # 6/3 simplifies (in place) to 2/1.
        self.simplified_number = fraction.Fraction(2, 1)
        self.simplify_number.simplify_fraction()
        self.assertEqual(self.simplified_number, self.simplify_number)

    def test_eq_(self):
        self.equality_number = fraction.Fraction(5, 4)
        self.assertTrue(self.equality_number == self.such_number2)
        self.assertFalse(self.equality_number == self.such_number)

    def test_lt_(self):
        self.assertTrue(self.such_number < self.such_number2)
        self.assertFalse(self.such_number2 < self.such_number)

    def test_gt_(self):
        self.assertFalse(self.such_number > self.such_number2)
        self.assertTrue(self.such_number2 > self.such_number)


if __name__ == '__main__':
    unittest.main()
|
# class Foo:
# def __init__(self):
# print('hahah')
#
# def __call__(self, *args, **kwargs):
# print('call')
#
# foo = Foo() # 执行init方法
# foo() # 执行call python特殊方法,实例加()自动执行call方法
#
# class Foo:
# def __init__(self):
# pass
# def __int__(self):
# return 111
# def __str__(self):
# return 'qmy'
#
# obj = Foo()
# print(obj) # print默认掉用对象的str方法 print(str(obj))
# print(obj,type(obj))
#
#
# # int,对象,自动执行对象的__int__方法
# r = int(obj)
# print(r)
#
# s = str(obj) # str对象,自动执行对象的__str__方法
# print(s)
# print(obj)
#
# class F:
# def __init__(self,name,age):
# self.name = name
# self.age = age
#
# def __add__(self, other):
# # return "123"
# # self = obj1(alex,19)
# # other = obj2(erio,66)
# # return self.name+other.name,self.age + other.age
# return F(self.age,other.age) # <__main__.F object at 0x104ab80f0>
#
# def __del__(self): # 析构方法 对象被销毁时自动执行,由python内部触发
# print('对象被销毁')
#
#
# obj1 = F('qmy',123)
# obj2 = F('lyx',456)
# obj3 = F('zhangsan',123)
# # 两个对象相加时,自动执行第一个对象的__add__方法,并且将第二个对象作为参数传递进入
# a = obj1 + obj2
# print(a,type(a)) # ('qmylyx', 579) <class 'tuple'>
# class Foo():
# '''
# 注释
# '''
# def __init__(self,name,age):
# self.name = name
# self.age = age
# self.gender = 'nv'
#
# print(Foo.__dict__)
# obj = Foo('qmy',18)
# d = obj.__dict__
# print(d)
# li = [11,22,33,44]
# r1 = li[3]
# li[3] = 666
# del li[2]
#
# class Foo:
# def __init__(self):
# pass
# def __getitem__(self, item):
# return item+10
# def __setitem__(self, key, value):
# print(key,value)
# def __delitem__(self, key):
# print(key)
#
#
# li = Foo()
# print(li[1]) # 自动执行li对象中的__getitem__方法
# li[10] = 123 # 自动执行li对象中的__setitem__方法
# del li[10] # 自动执行li对象中的__delitem__方法
# class Foo():
# def __init__(self,name,age):
# self.name = name
# self.age = age
# print(self.age)
# def __str__(self):
# return '111'
# a = Foo('qm',11) # 执行Foo('qm',11)获取到的是__str__方法的返回值
# class Foo:
# def __init__(self):
# pass
# def __iter__(self):
# return iter([1,2,3,2])
# li = Foo()
#
# # 如果类中有__iter__方法,则创建的对象为可迭代对象
# # 可迭代对象.__iter__()的返回值:迭代器
# # for循环,遇到迭代器,直接执行next方法
# # for循环,遇到可迭代对象,先执行__iter__方法,并获取返回值,然后在执行next方法
# # 1、执行li对象对应的类中的__iter__方法,并获取其返回值
# # 2、循环上一步中的返回值
# for i in li:
# print(i)
#
# class Foo(object):
# def func(self):
# print('hello')
class Mytype(type):
    """Metaclass demo: intercepts instance creation via __call__."""

    def __init__(self, what, bases=None, dict=None):
        # Here `self` is the class being created (e.g. Foo below).
        super(Mytype, self).__init__(what, bases, dict)

    def __call__(self, *args, **kwargs):
        # Bug fixes vs. the original: `**args` -> `**kwargs` (NameError),
        # the constructor arguments are forwarded to __init__, and the new
        # instance is returned (the original returned None, so `Foo(...)`
        # produced nothing).
        obj = self.__new__(self, *args, **kwargs)
        self.__init__(obj, *args, **kwargs)
        return obj


class Foo(object, metaclass=Mytype):  # Foo is an instance of Mytype: class creation runs Mytype.__init__, and Foo(...) runs Mytype.__call__
    def __init__(self, name):
        self.name = name

    def __new__(cls, *args, **kwargs):
        # object.__new__ accepts no extra arguments in Python 3 -- pass only
        # the class (the original forwarded *args and also had the `**args`
        # typo).
        return object.__new__(cls)


obj = Foo('qmy')  # the original called Foo() without the required `name`
# class Mytype(type):
# def __init__(self,what,bases=None,dict=None):
# # self = Foo
# print(123)
# def __call__(self, *args, **kwargs):
# # self是Mytype的对象,即Foo类
# print(456)
# r = self.__new__(*args, **kwargs)
# class Foo(object,metaclass=Mytype): # 类Foo,是type类的对象,创建Foo类的时候默认就会执行type类的构造方法
# def __init__(self):
# pass
# def __new__(cls, *args, **kwargs):
# # 在new里面才是真正的创建obj,即Foo类的对象
# return '对象'
# obj = Foo() # 执行Mytype的call方法 对象加(),执行call方法
# 第一阶段:解释器从上到下执行代码创建Foo类 即执行type类的构造方法
# 第二阶段:通过Foo类创建obj对象
# class Foo:
# def func(self):
# print(111)
# Foo().func()
#
# def func():
# print(11)
# Foo = type('Foo',(object,),{'func':func}) # Foo也是对象,是type的对象
# Foo.func() |
# Simple equality check: varx is 177 (300 - 123); compare user input to it.
varx=300-123
number=int(input("enter value"))
if(number==varx):
    # "barabar hai" (Hindi): "it is equal".
    print("barabar hai")
else:
    # "nahi hai" (Hindi): "it is not".
    print("nahi hai")
# NLP "100 knocks" pi-mnemonic exercise: the length of each word in this
# famous sentence spells successive digits of pi (3.14159265358979...).
s = "Now I need a drink, alcoholic of course, after the heavy lectures involving quantum mechanics."
# Bug fix: punctuation must be stripped before measuring, otherwise
# "drink," counts as 6 instead of 5 and the pi digits are wrong.
lengths = [len(word.strip(".,")) for word in s.split(" ")]
print(lengths)
|
#
# Copyright 2015-2016 Bleemeo
#
# bleemeo.com an infrastructure monitoring solution in the Cloud
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=too-many-lines
import datetime
import json
import logging
import os
import random
import re
import shlex
import socket
import subprocess
import sys
import threading
import time
import jinja2
import psutil
import requests
from six.moves import urllib_parse
import bleemeo_agent
try:
import docker
except ImportError:
docker = None
# With generate_password, taken from Django project
# Use the system PRNG if possible
# Rebinds the module name `random` to a SystemRandom instance so that
# generate_password() below draws from the OS CSPRNG when available.
try:
    random = random.SystemRandom()  # pylint: disable=invalid-name
except NotImplementedError:
    import warnings
    warnings.warn('A secure pseudo-random number generator is not available '
                  'on your system. Falling back to Mersenne Twister.')
# NOTE(review): this try/except duplicates the identical docker import at
# the top of the file; one of the two can be removed.
try:
    import docker
except ImportError:
    docker = None
# Matches one line of /proc/<pid>/cgroup and captures the container ID for
# both plain Docker layouts ("/docker/<id>", "docker-<id>.scope") and
# Kubernetes pod layouts ("/kubepods/...pod<uid>/<id>").
DOCKER_CGROUP_RE = re.compile(
    r'^\d+:[^:]+:'
    r'(/kubepods/.*pod[0-9a-fA-F-]+/|.*/docker[-/])'
    r'(?P<docker_id>[0-9a-fA-F]+)'
    r'(\.scope)?$',
    re.MULTILINE,
)


def get_docker_id_from_cgroup(cgroup_data):
    """Return the set of container IDs found in *cgroup_data*.

    *cgroup_data* is the full text of a /proc/<pid>/cgroup file; lines that
    do not match any known container layout are ignored.
    """
    return {
        match.group('docker_id')
        for match in DOCKER_CGROUP_RE.finditer(cgroup_data)
    }
def decode_docker_top(docker_top):
    # pylint: disable=too-many-branches
    # pylint: disable=too-many-statements
    """ Return a list of process dict from docker_client.top()
    Result of docker_client.top() is not always the same. On boot2docker,
    on first boot docker will use ps from busybox which output only few
    column.
    In addition we first try with a "ps waux" and then with default
    ps ("ps -ef").
    The process dict is a dictonary with the same key as the one generated
    by get_top_info from psutil data. Field not found in Docker ps are
    omitted.
    All process will at least have pid, cmdline and name key.
    """
    result = []
    container_process = docker_top.get('Processes')
    # Map each known ps column title to its index; None means the column is
    # absent in this docker/ps variant.
    user_index = None
    pid_index = None
    pcpu_index = None
    rss_index = None
    time_index = None
    cmdline_index = None
    stat_index = None
    ppid_index = None
    for (index, name) in enumerate(docker_top.get('Titles', [])):
        if name == 'PID':
            pid_index = index
        elif name in ('CMD', 'COMMAND'):
            cmdline_index = index
        elif name in ('UID', 'USER'):
            user_index = index
        elif name == '%CPU':
            pcpu_index = index
        elif name == 'RSS':
            rss_index = index
        elif name == 'TIME':
            time_index = index
        elif name == 'STAT':
            stat_index = index
        elif name == 'PPID':
            ppid_index = index
    # Without at least a PID and a command line no entry can be built.
    if pid_index is None or cmdline_index is None:
        return result
    # In some case Docker return None instead of process list. Make
    # sure container_process is an iterable
    container_process = container_process or []
    for row in container_process:
        # The PID is from the point-of-view of root pid namespace.
        process = {
            'pid': int(row[pid_index]),
            'cmdline': row[cmdline_index],
            'name': os.path.basename(row[cmdline_index].split()[0]),
        }
        if user_index is not None:
            process['username'] = row[user_index]
        # Optional numeric fields: silently skip anything this ps variant
        # did not provide (index is None -> TypeError) or cannot parse.
        try:
            process['cpu_percent'] = float(row[pcpu_index])
        except (TypeError, ValueError):
            pass
        try:
            process['memory_rss'] = int(row[rss_index])
        except (TypeError, ValueError):
            pass
        try:
            # pstime_to_second / psstat_to_status are helpers defined
            # elsewhere in this module.
            process['cpu_times'] = pstime_to_second(row[time_index])
        except (TypeError, ValueError):
            pass
        if stat_index is not None:
            process['status'] = psstat_to_status(row[stat_index])
        if ppid_index is not None:
            try:
                process['ppid'] = int(row[ppid_index])
            except (TypeError, ValueError):
                pass
        result.append(process)
    return result
# Taken from Django project
def generate_password(length=10,
allowed_chars='abcdefghjkmnpqrstuvwxyz'
'ABCDEFGHJKLMNPQRSTUVWXYZ'
'23456789'):
"""
Generates a random password with the given length and given
allowed_chars. Note that the default value of allowed_chars does not
have "I" or "O" or letters and digits that look similar -- just to
avoid confusion.
"""
return ''.join(random.choice(allowed_chars) for i in range(length))
def get_uptime():
    """Return the system uptime in seconds (time elapsed since boot)."""
    return time.time() - psutil.boot_time()
def get_loadavg(core):
    """Return the system load averages over the last 1, 5 and 15 minutes.

    Values are read back from metrics already gathered by the agent core;
    0.0 is used when a metric is not (yet) available.
    """
    return [
        core.get_last_metric_value('system_load%s' % minutes, '', 0.0)
        for minutes in (1, 5, 15)
    ]
def get_clock():
    """ Return a number of second since a unspecified point in time

        It will use CLOCK_MONOTONIC (time.monotonic, Python 3.3+) if
        available or fallback to time.time()

        It's useful to know if some event occurred before/after another one.
        It could be also useful to run an action every N seconds (note that
        system suspend might stop that clock).
    """
    # Feature-detect instead of comparing version numbers: the previous
    # check (major >= 3 AND minor >= 3) would wrongly reject e.g. a
    # hypothetical Python 4.0 which does provide time.monotonic.
    if hasattr(time, 'monotonic'):
        return time.monotonic()
    return time.time()
def is_port_used(address, port, protocol):
    """ Return True if the port is known to be used.

        Return False in other case (including error)

        :param address: IP or hostname the service is expected to listen on.
        :param port: TCP/UDP port number.
        :param protocol: socket.SOCK_STREAM or socket.SOCK_DGRAM.
    """
    try:
        address = socket.gethostbyname(address)
    except (socket.gaierror, TypeError, KeyError):
        # gaierror => unable to resolv name
        # TypeError => service_info['address'] is None (happen when
        # service is on a stopped container)
        # KeyError => no 'address' in service_info (happen when service
        # is a customer defined using Nagios check).
        pass
    try:
        for conn in psutil.net_connections():
            # Keep only connections of the requested protocol...
            if protocol != conn.type:
                continue
            # ...restricted to TCP/UDP over IPv4/IPv6.
            if (conn.type not in (socket.SOCK_STREAM, socket.SOCK_DGRAM)
                    or conn.family not in (socket.AF_INET, socket.AF_INET6)):
                continue
            # A TCP socket only "uses" its port while listening.
            if (conn.type == socket.SOCK_STREAM
                    and conn.status != psutil.CONN_LISTEN):
                continue
            # A bound UDP socket has no connection status (CONN_NONE).
            if (conn.type == socket.SOCK_DGRAM
                    and conn.status != psutil.CONN_NONE):
                continue
            (other_address, other_port) = conn.laddr
            if other_port != port:
                continue
            # A wildcard address on either side matches any address.
            if address in ('0.0.0.0', '::'):
                return True
            if other_address in ('0.0.0.0', '::'):
                return True
            if address == other_address:
                return True
    except OSError:
        pass
    return False
def is_process_running(process_name, top_info):
    """Return True if the given process appears in top_info.

    The search is done on the structure returned by get_top_info.
    On Windows, a name with the ".exe" suffix added also counts as a
    match.
    """
    if not top_info:
        return False
    candidates = {process_name}
    if os.name == "nt":
        candidates.add(process_name + ".exe")
    return any(
        process['name'] in candidates
        for process in top_info['processes']
    )
def psstat_to_status(psstat):
    """Convert a ps STAT field to a psutil-like status string.

    Only "ps waux" returns this field; values look like "S", "Ss" or
    "R+". Only the first character is meaningful here: trailing
    characters carry extra flags that we don't display. Unknown codes
    map to "?".
    """
    status_by_char = {
        'D': 'disk-sleep',
        'R': 'running',
        'S': 'sleeping',
        'T': 'stopped',
        't': 'tracing-stop',
        'X': 'dead',
        'Z': 'zombie',
    }
    return status_by_char.get(psstat[0], '?')
def pstime_to_second(pstime):
    """Convert a ps CPU time field to a number of seconds.

    Only time formats from "ps -ef" or "ps waux" are handled, e.g.:

    * 00:16:42   => 1002
    * 16:42      => 1002
    * 1-02:27:14 => 95234
    * 1587:14    => 95234

    Raise ValueError for any other format.
    """
    colon_count = pstime.count(':')
    if colon_count == 1:
        # MM:SS
        (minute, second) = pstime.split(':')
        return int(minute) * 60 + int(second)
    if colon_count == 2:
        # HH:MM:SS, optionally prefixed by a day count: DD-HH:MM:SS
        days = 0
        remainder = pstime
        if '-' in pstime:
            (days, remainder) = pstime.split('-')
        (hour, minute, second) = remainder.split(':')
        return (
            int(days) * 86400
            + int(hour) * 3600
            + int(minute) * 60
            + int(second)
        )
    if 'h' in pstime:
        # HHhMM
        (hour, minute) = pstime.split('h')
        return int(hour) * 3600 + int(minute) * 60
    if 'd' in pstime:
        # DDdHH
        (days, hour) = pstime.split('d')
        return int(days) * 86400 + int(hour) * 3600
    raise ValueError('Unknown pstime format "%s"' % pstime)
def format_uptime(uptime_seconds):
    """Format an uptime (in seconds) as a human readable string.

    Output is something like "1 hour" or "3 days, 7 hours". Minutes are
    only shown when the uptime is under one hour.
    """
    days = int(uptime_seconds / 86400)
    hours = int((uptime_seconds % 86400) / 3600)
    minutes = int((uptime_seconds % 3600) / 60)

    def with_unit(value, unit):
        # Pluralize for values above one ("0 minute" matches the
        # historical output).
        suffix = 's' if value > 1 else ''
        return '%s %s%s' % (value, unit, suffix)

    if days == 0 and hours == 0:
        return with_unit(minutes, 'minute')
    if days == 0:
        return with_unit(hours, 'hour')
    return '%s, %s' % (with_unit(days, 'day'), with_unit(hours, 'hour'))
def format_cpu_time(cpu_time):
    """Format a CPU time (in seconds) like top does.

    Output is "7:29.31" (7 minutes, 29.31 seconds). Once the minutes
    part reaches 4 digits, seconds are shown without decimals to keep
    the field short.
    """
    minutes = int(cpu_time / 60)
    seconds = cpu_time % 60
    fmt = '%s:%.0f' if minutes > 999 else '%s:%.2f'
    return fmt % (minutes, seconds)
def run_command_timeout(command, timeout=10):
    """ Run a command and wait at most timeout seconds

        Both stdout and stderr and captured and returned.

        Returns (return_code, output). Return code 127 means the command
        could not be started; return code 2 means the command was killed
        after the timeout expired.
    """
    def _kill_proc(proc, wait_event, timeout):
        """ function used in a separate thread to kill process """
        if not wait_event.wait(timeout):
            # event is not set, so process didn't finished itself
            proc.terminate()
    try:
        proc = subprocess.Popen(
            command,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
        )
    except OSError:
        # Most probably: command not found
        return (127, b"Unable to run command")
    proc_finished = threading.Event()
    killer_thread = threading.Thread(
        target=_kill_proc, args=(proc, proc_finished, timeout))
    killer_thread.start()
    (output, _) = proc.communicate()
    # Tell the killer thread the process exited by itself, then wait for
    # the thread to finish.
    proc_finished.set()
    killer_thread.join()
    returncode = proc.returncode
    if returncode == -15:
        # code -15 means the process died from SIGTERM, which is what
        # proc.terminate() (used by the _kill_proc thread to implement
        # the timeout) sends.
        # Change returncode from timeout to a critical status
        returncode = 2
    return (returncode, output)
def clean_cmdline(cmdline):
    """Return cmdline with troublesome characters escaped.

    Known problem: literal new-lines break the InfluxDB line-protocol.
    """
    for (char, escaped) in (('\r', '\\r'), ('\n', '\\n')):
        cmdline = cmdline.replace(char, escaped)
    return cmdline
def get_pending_update(core):
    # pylint: disable=too-many-locals
    # pylint: disable=too-many-return-statements
    # pylint: disable=too-many-branches
    # pylint: disable=too-many-statements
    """ Returns the number of pending update for this system

        It return a couple (update_count, security_update_count).

        update_count include any security update.

        Both counter could be None. It means that this method could
        not retrieve the value.

        Strategies are tried in order: update-notifier state file (when
        running in a container), apt-check, "apt-get --simulate", dnf,
        then yum; the first one that works wins.
    """
    # If running inside a Docker container, it can't run commands
    if core.container is not None:
        if not core.config['df.host_mount_point']:
            return (None, None)
        # Read the counters from update-notifier's state file on the
        # mounted host filesystem instead of running commands.
        updates_file_name = os.path.join(
            core.config['df.host_mount_point'],
            'var/lib/update-notifier/updates-available',
        )
        update_count = None
        security_count = None
        try:
            update_file = open(updates_file_name, 'rb')
        except (OSError, IOError):
            # File does not exists or permission denied
            return (None, None)
        else:
            with update_file:
                data = update_file.read().decode('utf-8')
            first_match = True
            for line in data.splitlines():
                # The RE can't contain exact string like
                # "(\d+) packages can be updated" because this
                # string get localized.
                match = re.search(
                    r'^(\d+) [\w\s]+.$',
                    line,
                )
                # The first matching line holds the total update count,
                # the second one the security update count.
                if match and first_match:
                    update_count = int(match.group(1))
                    first_match = False
                elif match:
                    security_count = int(match.group(1))
            return (update_count, security_count)
    # At the point, agent is not running inside a container, it can
    # use commands
    env = os.environ.copy()
    # Drop LANG so command output is not localized.
    if 'LANG' in env:
        del env['LANG']
    # Strategy 1: update-notifier's apt-check prints
    # "updates;security_updates".
    try:
        proc = subprocess.Popen(
            ['/usr/lib/update-notifier/apt-check'],
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            env=env,
        )
        (output, _) = proc.communicate()
        (update_count, security_count) = output.split(b';')
        return (int(update_count), int(security_count))
    except (OSError, ValueError):
        pass
    # Strategy 2: simulate a dist-upgrade and count the "Inst ..." lines.
    try:
        proc = subprocess.Popen(
            [
                'apt-get',
                '--simulate',
                '-o', 'Debug::NoLocking=true',
                '--quiet', '--quiet',
                'dist-upgrade',
            ],
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            env=env,
        )
        update_count = 0
        security_count = 0
        security_re = re.compile(
            b'[^\\(]*\\(.* (Debian-Security|Ubuntu:[^/]*/[^-]*-security)'
        )
        (output, _) = proc.communicate()
        for line in output.splitlines():
            if not line.startswith(b'Inst'):
                continue
            update_count += 1
            # Updates coming from a security repository count as
            # security updates.
            if security_re.match(line):
                security_count += 1
        return (update_count, security_count)
    except OSError:
        pass
    # Strategy 3: dnf "updateinfo" summary.
    try:
        proc = subprocess.Popen(
            [
                'dnf',
                '--cacheonly',
                '--quiet',
                'updateinfo',
            ],
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            env=env,
        )
        (output, _) = proc.communicate()
        update_count = 0
        security_count = 0
        match = re.search(
            b'^\\s+(\\d+) Security notice\\(s\\)$',
            output,
            re.MULTILINE,
        )
        if match is not None:
            security_count = int(match.group(1))
        # The total is the sum over every kind of notice (security,
        # bugfix, enhancement, ...).
        results = re.findall(
            b'^\\s+(\\d+) \\w+ notice\\(s\\)$',
            output,
            re.MULTILINE,
        )
        update_count = sum(int(x) for x in results)
        return (update_count, security_count)
    except OSError:
        pass
    # Strategy 4: yum, run twice (all updates, then security-only).
    try:
        proc = subprocess.Popen(
            [
                'yum',
                '--cacheonly',
                '--quiet',
                'list', 'updates',
            ],
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            env=env,
        )
        (output, _) = proc.communicate()
        update_count = 0
        for line in output.splitlines():
            if line == b'Updated Packages':
                continue
            if line.startswith(b'Repo '):
                continue
            # yum list could add newline when package name is too long,
            # in this case the next line with version will start with
            # few whitespace.
            if line.startswith(b' '):
                continue
            update_count += 1
        proc = subprocess.Popen(
            [
                'yum',
                '--cacheonly',
                '--quiet',
                '--security',
                'list', 'updates',
            ],
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            env=env,
        )
        (output, _) = proc.communicate()
        security_count = 0
        for line in output.splitlines():
            if line == b'Updated Packages':
                continue
            if line.startswith(b'Repo '):
                continue
            # yum list could add newline when package name is too long,
            # in this case the next line with version will start with
            # few whitespace.
            if line.startswith(b' '):
                continue
            security_count += 1
        return (update_count, security_count)
    except OSError:
        pass
    return (None, None)
def get_top_info(core, gather_started_at=None, for_discovery=False):
    # pylint: disable=too-many-branches
    """ Return informations needed to build a "top" view.

        :param core: agent core (provides docker_client, config, ...).
        :param gather_started_at: timestamp; processes created after it
            are ignored. Defaults to now.
        :param for_discovery: when True, do extra (cgroup based) checks
            to attribute processes to their Docker container.
    """
    if not gather_started_at:
        gather_started_at = time.time()
    processes = {}
    # Read of (single) attribute is atomic, no lock needed
    docker_client = core.docker_client
    if docker_client is not None:
        processes = _get_docker_process(docker_client)
    # psutil only sees host processes when the agent runs outside a
    # container or shares the host PID namespace.
    if (core.container is None
            or core.config['container.pid_namespace_host']):
        if for_discovery:
            # When used for services discovery, do additional check to ensure
            # process belong or not to a containers.
            _update_process_psutil(
                processes, gather_started_at, core.docker_containers,
            )
        else:
            _update_process_psutil(processes, gather_started_at)
    now = time.time()
    cpu_usage = psutil.cpu_times_percent()
    memory_usage = psutil.virtual_memory()
    swap_usage = psutil.swap_memory()
    # Memory/swap values are converted from bytes to kilobytes. CPU
    # fields not provided on every platform default through getattr.
    result = {
        'time': now,
        'uptime': get_uptime(),
        'loads': get_loadavg(core),
        'users': len(psutil.users()),
        'processes': list(processes.values()),
        'cpu': {
            'user': cpu_usage.user,
            'nice': getattr(cpu_usage, 'nice', 0.0),
            'system': cpu_usage.system,
            'idle': cpu_usage.idle,
            'iowait': getattr(cpu_usage, 'iowait', 0.0),
            'guest': getattr(cpu_usage, 'guest', None),
            'guest_nice': getattr(cpu_usage, 'guest_nice', None),
            'irq': getattr(cpu_usage, 'irq', None),
            'softirq': getattr(cpu_usage, 'softirq', None),
            'steal': getattr(cpu_usage, 'steal', None),
        },
        'memory': {
            'total': memory_usage.total / 1024,
            'used': memory_usage.used / 1024,
            'free': memory_usage.free / 1024,
            'buffers': getattr(memory_usage, 'buffers', 0.0) / 1024,
            'cached': getattr(memory_usage, 'cached', 0.0) / 1024,
        },
        'swap': {
            'total': swap_usage.total / 1024,
            'used': swap_usage.used / 1024,
            'free': swap_usage.free / 1024,
        }
    }
    # Before psutil 4.4, "used" included buffers and cache; subtract
    # them to stay consistent across psutil versions.
    if psutil.version_info < (4, 4):
        result['memory']['used'] -= result['memory']['buffers']
        result['memory']['used'] -= result['memory']['cached']
    return result
def get_top_output(top_info):
    """ Return a top-like output

        Renders top_info (as produced by get_top_info) through the
        "top.txt" Jinja2 template; returns a placeholder string when
        top_info is None.
    """
    env = jinja2.Environment(
        loader=jinja2.PackageLoader('bleemeo_agent', 'templates'),
        autoescape=True)
    template = env.get_template('top.txt')
    if top_info is None:
        return 'top - waiting for metrics...'
    memory_total = top_info['memory']['total']
    processes = []
    # Sort process by CPU consumption (then PID, when cpu % is the same)
    # Since we want a descending order for CPU usage, we have
    # reverse=True... but for PID we want a ascending order. That's why we
    # use a negation for the PID.
    sorted_process = sorted(
        top_info['processes'],
        key=lambda x: (x.get('cpu_percent', 0), -int(x['pid'])),
        reverse=True)
    # Only the 25 busiest processes are rendered.
    for metric in sorted_process[:25]:
        # convert status (like "sleeping", "running") to one char status
        status = {
            psutil.STATUS_RUNNING: 'R',
            psutil.STATUS_SLEEPING: 'S',
            psutil.STATUS_DISK_SLEEP: 'D',
            psutil.STATUS_STOPPED: 'T',
            psutil.STATUS_TRACING_STOP: 'T',
            psutil.STATUS_ZOMBIE: 'Z',
        }.get(metric.get('status'), '?')
        processes.append(
            ('%(pid)5s %(user)-9.9s %(res)6d %(status)s '
             '%(cpu)5.1f %(mem)4.1f %(time)9s %(cmd)s') %
            {
                'pid': metric['pid'],
                'user': metric.get('username', ''),
                'res': metric.get('memory_rss', 0),
                'status': status,
                'cpu': metric.get('cpu_percent', 0),
                'mem':
                    float(metric.get('memory_rss', 0)) / memory_total * 100,
                'time': format_cpu_time(metric.get('cpu_times', 0)),
                'cmd': metric['name'],
            })
    # Counters for the "Tasks: N total, N running, ..." header line.
    process_total = len(top_info['processes'])
    process_running = len([
        x for x in top_info['processes']
        if x.get('status') == psutil.STATUS_RUNNING
    ])
    process_sleeping = len([
        x for x in top_info['processes']
        if x.get('status') == psutil.STATUS_SLEEPING
    ])
    process_stopped = len([
        x for x in top_info['processes']
        if x.get('status') == psutil.STATUS_STOPPED
    ])
    process_zombie = len([
        x for x in top_info['processes']
        if x.get('status') == psutil.STATUS_ZOMBIE
    ])
    date_top = datetime.datetime.fromtimestamp(top_info['time'])
    time_top = date_top.time().replace(microsecond=0)
    return template.render(
        time_top=time_top,
        uptime=bleemeo_agent.util.format_uptime(top_info['uptime']),
        top_info=top_info,
        loads=', '.join('%.2f' % x for x in top_info['loads']),
        process_total='%3d' % process_total,
        process_running='%3d' % process_running,
        process_sleeping='%3d' % process_sleeping,
        process_stopped='%3d' % process_stopped,
        process_zombie='%3d' % process_zombie,
        cpu_user='%5.1f' % top_info['cpu']['user'],
        cpu_system='%5.1f' % top_info['cpu']['system'],
        cpu_nice='%5.1f' % top_info['cpu']['nice'],
        cpu_idle='%5.1f' % top_info['cpu']['idle'],
        cpu_wait='%5.1f' % top_info['cpu']['iowait'],
        mem_total='%8d' % top_info['memory']['total'],
        mem_used='%8d' % top_info['memory']['used'],
        mem_free='%8d' % top_info['memory']['free'],
        mem_buffered='%8d' % top_info['memory']['buffers'],
        mem_cached='%8d' % top_info['memory']['cached'],
        swap_total='%8d' % top_info['swap']['total'],
        swap_used='%8d' % top_info['swap']['used'],
        swap_free='%8d' % top_info['swap']['free'],
        processes=processes,
    )
def _get_url(core, name, metric_config):
url = metric_config['url']
url_parsed = urllib_parse.urlparse(url)
if url_parsed.scheme == '' or url_parsed.scheme == 'file':
try:
with open(url_parsed.path) as file_obj:
return file_obj.read()
except (IOError, OSError) as exc:
logging.warning(
'Failed to retrive metric %s: %s',
name,
exc,
)
return None
args = {
'verify': metric_config.get('ssl_check', True),
'timeout': 3.0,
'headers': {'User-Agent': core.http_user_agent},
}
if metric_config.get('username') is not None:
args['auth'] = (
metric_config.get('username'),
metric_config.get('password', '')
)
try:
response = requests.get(
url,
**args
)
except requests.exceptions.ConnectionError as exc:
logging.warning(
'Failed to retrieve metric %s: '
'failed to establish connection to %s: %s',
name,
url,
exc
)
return None
except requests.exceptions.RequestException as exc:
logging.warning(
'Failed to retrieve metric %s: %s',
name,
exc,
)
return None
return response.content
def pull_raw_metric(core, name):
    """ Pull a metrics (on HTTP(s)) in "raw" format.

        "raw" format means that the URL must return one number in plain/text.

        We expect to have the following configuration key under
        section "metric.pull.$NAME.":

        * url: where to fetch the metric [mandatory]
        * item: item to add on your metric [default: '' - no item]
        * interval: retrieve the metric every interval seconds [default: 10s]
        * username: username used for basic authentication [default: no auth]
        * password: password used for basic authentication [default: ""]
        * ssl_check: should we check that SSL certificate are valid
          [default: yes]
    """
    metric_config = core.config['metric.pull.%s' % name]
    if 'url' not in metric_config:
        logging.warning('Missing URL for metric %s. Ignoring it', name)
        return
    response = _get_url(core, name, metric_config)
    if response is None:
        # _get_url already logged the failure.
        return
    try:
        value = float(response)
    except ValueError:
        # Log message typos fixed: "retrive" -> "retrieve",
        # "it not" -> "is not".
        logging.warning(
            'Failed to retrieve metric %s: response is not a number',
            name)
        return
    item = metric_config.get('item', '')
    labels = {}
    if item:
        labels['item'] = item
    metric_point = bleemeo_agent.type.DEFAULT_METRICPOINT._replace(
        label=name,
        labels=labels,
        time=time.time(),
        value=value,
    )
    core.emit_metric(metric_point)
def docker_exec(docker_client, container_name, command):
    """ Run a command on given container and return output.

        On error, returns an empty string

        :param docker_client: a docker-py client, or None when Docker is
            unreachable.
        :param container_name: name of the target container.
        :param command: the command to run.
    """
    # Log messages fixed: they were copy-pasted from Telegraf-specific
    # code ("Unable to get Telegraf version") although this helper is
    # generic.
    if docker is None and docker_client is None:
        logging.debug(
            'Unable to run command in container: '
            'missing docker-py dependencies'
        )
        return ''
    if docker_client is None:
        logging.debug(
            'Unable to run command in container: '
            'unable to communicate with Docker'
        )
        return ''
    try:
        result = docker_client.exec_create(
            container_name,
            command,
        )
        output = docker_client.exec_start(result['Id'])
    except (docker.errors.APIError,
            requests.exceptions.RequestException):
        logging.debug(
            'Unable to run docker_exec on %s:', container_name, exc_info=True
        )
        return ''
    try:
        return output.decode('utf-8')
    except UnicodeDecodeError:
        # Binary output can't be represented; treat it as no output.
        return ''
def docker_restart(docker_client, container_name):
    """ Restart a Docker container

        Stop the container, wait up to 2 seconds for it to actually
        stop, then start it again. Used to restart the Telegraf
        container.
    """
    if docker is None and docker_client is None:
        logging.warning(
            "Failed to restart Telegraf: missing docker-py dependencies"
        )
        return
    if docker_client is None:
        logging.warning(
            "Failed to restart Telegraf: unable to communicate with Docker"
        )
        return
    docker_client.stop(container_name)
    # Poll up to 10 times (0.2s apart) for the container to stop.
    for _ in range(10):
        time.sleep(0.2)
        container_info = docker_client.inspect_container(container_name)
        running = container_info['State']['Running']
        if not running:
            break
    if running:
        logging.info(
            'container "%s" still running... restart may fail',
            container_name
        )
    docker_client.start(container_name)
def windows_instdir():
    """Return the Windows installation directory of the agent.

    The bleemeo_agent package lives in ``$INSTDIR\\pkgs\\bleemeo_agent``,
    so the install dir is two levels above this package's directory.
    """
    package_dir = os.path.dirname(__file__)
    return os.path.dirname(os.path.dirname(package_dir))
def windows_telegraf_path(default="telegraf"):
    """Return the path to telegraf.exe, or ``default`` when not found.

    On Windows, when installed, telegraf lives at $INSTDIR\\telegraf.exe.
    """
    candidate = os.path.join(windows_instdir(), "telegraf.exe")
    return candidate if os.path.exists(candidate) else default
class JSONEncoder(json.JSONEncoder):
    """JSON encoder that also serializes sets (as lists)."""

    def default(self, o):  # pylint: disable=method-hidden
        # frozenset is NOT a subclass of set, so both must be tested
        # explicitly (the original only handled plain set).
        if isinstance(o, (set, frozenset)):
            return list(o)
        return super().default(o)
def _get_docker_process(docker_client):
    """Return a {pid: process_info} dict for processes running in Docker
    containers, gathered with "docker top".

    Returns an empty dict when docker-py is not installed or the Docker
    API is unreachable.
    """
    if docker is None:
        return {}
    processes = {}
    try:
        for container in docker_client.containers():
            # container has... nameS
            # Also name start with "/". I think it may have mulitple name
            # and/or other "/" with docker-in-docker.
            container_name = container['Names'][0].lstrip('/')
            docker_id = container['Id']
            try:
                try:
                    # "ps waux" provides more columns (user, %CPU, RSS,
                    # STAT, ...) than the default ps.
                    docker_top_waux = (
                        docker_client.top(container_name, ps_args="waux")
                    )
                except TypeError:
                    # Older version of Docker-py don't support ps_args option
                    docker_top_waux = None
                docker_top = (
                    docker_client.top(container_name)
                )
            except (docker.errors.APIError,
                    requests.exceptions.RequestException):
                # most probably container is restarting or just stopped
                continue
            for process in decode_docker_top(docker_top):
                pid = process['pid']
                processes[pid] = process
                processes[pid]['instance'] = container_name
                processes[pid]['docker_id'] = docker_id
            if docker_top_waux:
                # Merge information coming from docker_top_waux
                for process in decode_docker_top(docker_top_waux):
                    pid = process['pid']
                    if pid not in processes:
                        processes[pid] = process
                    else:
                        processes[pid].update(process)
    except (docker.errors.APIError,
            requests.exceptions.RequestException) as exc:
        logging.info('Failed to get Docker containers list: %s', exc)
    return processes
def _update_process_psutil(
        processes, only_started_before, docker_containers=None):
    """ If docker_containers is not None, try to use cgroup to ensure process
        without container are really without containers.

        :param processes: {pid: process_info} dict (e.g. from
            _get_docker_process), updated in place with psutil data.
        :param only_started_before: timestamp; processes created after it
            are skipped.
        :param docker_containers: optional {docker_id: container_info} dict
            used for the cgroup double-check.
        :return: the (updated) processes dict.
    """
    # pylint: disable=too-many-branches
    # pylint: disable=too-many-locals
    # pylint: disable=too-many-statements
    # Process creation time is accurate up to 1/SC_CLK_TCK seconds,
    # usually 1/100th of seconds.
    # Process must be started at least 1/100th before only_started_before.
    # Keep some additional margin by doubling this value.
    only_started_before -= 2/100
    for process in psutil.process_iter():
        try:
            if process.pid == 0:
                # Windows uses PID 0 for the "System Idle Process";
                # Linux doesn't use PID 0 at all.
                # Other system are currently not supported.
                continue
            create_time = process.create_time()
            if create_time > only_started_before:
                # Ignore process created very recently. This is done to avoid
                # issue with process created in a container between the listing
                # of container process and this update from psutil. Such
                # process would be marked as running outside any container
                # could lead to discovery error.
                continue
            try:
                username = process.username()
            except (KeyError, psutil.AccessDenied):
                # the uid can't be resolved by the system
                if os.name == 'nt':
                    username = ''
                else:
                    username = str(process.uids().real)
            # Cmdline may be unavailable (permission issue ?)
            # When unavailable, depending on psutil version, it returns
            # either [] or ['']
            try:
                cmdline = process.cmdline()
                if cmdline and cmdline[0]:
                    # Remove empty argument. This is usually generated by
                    # processes which alter their name and result in
                    # npm '' '' '' '' '' '' '' '' '' '' '' ''
                    cmdline = [x for x in cmdline if x]
                    # shlex.quote is needed if the program path has space in
                    # the name. This is usually true under Windows but Windows
                    # has shlex.quote (Python 3.3+).
                    if hasattr(shlex, 'quote'):
                        cmdline = ' '.join(shlex.quote(x) for x in cmdline)
                    else:
                        cmdline = ' '.join(cmdline)
                    name = process.name()
                else:
                    cmdline = process.name()
                    name = cmdline
            except psutil.AccessDenied:
                cmdline = process.name()
                name = cmdline
            cpu_times = process.cpu_times()
            # Merge with any existing (Docker-provided) entry for this PID.
            process_info = processes.get(process.pid, {})
            process_info.update({
                'pid': process.pid,
                'ppid': process.ppid(),
                'create_time': create_time,
                'cmdline': cmdline,
                'name': name,
                'memory_rss': process.memory_info().rss / 1024,
                'cpu_percent': process.cpu_percent(),
                'cpu_times':
                    cpu_times.user + cpu_times.system,
                'status': process.status(),
                'username': username,
                '_psutil': True,
            })
            try:
                process_info['exe'] = process.exe()
            except psutil.AccessDenied:
                process_info['exe'] = ''
            process_info.setdefault('instance', '')
            if docker_containers is not None:
                # Check /proc/pid/cgroup to be double sure that this process
                # run outside any container.
                docker_id = None
                try:
                    with open('/proc/%d/cgroup' % process.pid) as fileobj:
                        cgroup_data = fileobj.read()
                    docker_ids = get_docker_id_from_cgroup(cgroup_data)
                    if len(docker_ids) == 1:
                        docker_id = docker_ids.pop()
                except (OSError, IOError):
                    pass
                if docker_id and docker_id in docker_containers:
                    container = docker_containers[docker_id]
                    container_name = container['Name'].lstrip('/')
                    process_info['instance'] = container_name
                    logging.debug(
                        'Base on cgroup, process %d (%s) belong to '
                        'container %r',
                        process.pid,
                        name,
                        container_name,
                    )
                elif docker_id and create_time > time.time() - 3:
                    logging.debug(
                        'Skipping process %d (%s) created recently and seems '
                        'to belong to a container',
                        process.pid,
                        name,
                    )
                    continue
            processes[process.pid] = process_info
        except psutil.NoSuchProcess:
            continue
    return processes
|
from .canny_detector import canny_detector |
# -*- coding:utf-8 -*-
# Printing and arithmetic exercise (comments translated to English).
print("i will now count my chickens:")
print("hens",25+30/6)
#
print("roosters",100-25*3%4)
# Print a sentence
print("now i will count the eggs:")
# Print the value of 3+2+1-5+4%2-1/4+6
print(3.0+2.0+1.0-5.0+4.0%2.0-1.0/4.0+6.0)
# Print a sentence
print("is it true that 3+2<5-7?")
# Print the computed truth value
print(3+2<5-7)
# Print a sentence along with the value of 3+2
print("what is 3+2?",3+2)
# Print a sentence along with the value of 5-7
print("what is 5-7?",5-7)
# Print a sentence
print("oh,that's why it's False.")
# Print a sentence
print("how about some more.")
# Print a sentence and whether 5 > -2
print("is it greater?",5>-2)
# Print a sentence and whether 5 >= -2
print("is it greaer or equal?",5>=-2)
# Print a sentence and whether 5 <= -2
print("is it less or equal?",5<=-2)
'''
A Pythagorean triplet is a set of three natural numbers, a < b < c, for which,
a^2 + b^2 = c^2

For example, 3^2 + 4^2 = 9 + 16 = 25 = 5^2.

There exists exactly one Pythagorean triplet for which
a + b + c = 1000.
Find the product abc.
'''
from math import sqrt

target_sum = 1000

# a is fully determined by b, c and the sum constraint, so the original
# innermost loop (which could only ever match on its first iteration) is
# replaced by a direct computation: O(n^2) instead of O(n^3), with
# identical output.
try:
    for c in range(target_sum):
        for b in range(target_sum - c, 1, -1):
            a = target_sum - c - b
            # a >= 2 mirrors the old range(..., 1, -1) lower bound.
            if a >= 2 and (a * b * c != 0) and (a ** 2 + b ** 2 - c ** 2) == 0:
                print("a,b,c", a, b, c)
                print("a*b*c", a * b * c)
                print("a^2 + b^2 - c^2 = ", a ** 2 + b ** 2 - c ** 2)
                # Break out of both loops; caught and reported below.
                raise StopIteration("Problem solution found.")
except StopIteration as e:
    print(e)
#-*- coding:utf-8 -*-
'''
描述:
现有从2002年1月到3月收集的调查数据(url为http://112.124.1.3:8050/getData/101)
每条数据包括 caseid(标识符), prglength(婴儿第几周出生), outcome(怀孕结果,1表示活产),
totalwgt_oz(婴儿出生重量,单位盎司), birthord(第几胎,1表示第一胎), agepreg(怀孕时年龄),
finalwgt(被调查者的统计权重,表明这名调查者所代表的人群在美国总人口中的比例。过采样人群的权重偏低)等信息
另据某研究显示,婴儿出生周数符合方差为16的正态分布,试写函数solve估计婴儿平均出生周数的置信区间(置信水平为95%)。
输入:调查样本数据,格式为
{“status”:"ok","data":[[1, 1, 39, 1, 141, 1, 33.16, 6448.271111704751], [1, 2, 39, 1, 126, 2, 39.25, 6448.271111704751], ...]}
输出:[lower,upper]分别代表平均出生周数的估计下限与上限
注意:
(1)婴儿第几周出生数据由于被调查人选填错误等原因出现了一些不合理数据,
比如错填了月份(5<prglength<=10),其他错填(prglength<=5, 10<prglength<=25, prglength>=49),
对于错填月份的情况,将月份*4.33作为其周数,对于其他错填情况则舍弃此条数据
'''
import urllib2 as url
import scipy.stats as sci
import json
class Solution:
    """Estimate a 95% confidence interval for the mean pregnancy length.

    Survey rows are fetched as JSON from a remote endpoint; the pregnancy
    length in weeks is row[2]. Per the problem statement, the length is
    assumed normally distributed with known variance 16 (sigma = 4).

    NOTE(review): Python 2 only (uses urllib2, aliased as "url").
    """
    def solve(self):
        """Return [lower, upper] bounds of the 95% confidence interval."""
        webpage=url.urlopen("http://112.124.1.3:8050/getData/101")
        oriData=json.load(webpage)["data"]
        data=[]
        for row in oriData:
            # Discard implausible (mis-filled) values.
            if row[2]<=5 or (row[2]>10 and row[2]<=25) or row[2]>=49:
                continue
            elif row[2]>5 and row[2]<=10:
                # Entry was filled as months: convert to weeks
                # (~4.33 weeks per month).
                row[2]=row[2]*4.33
                data.append(row[2])
            else:
                data.append(row[2])
        # NOTE(review): "sum" shadows the builtin of the same name.
        sum=0.0
        n=len(data)
        for t in data:
            sum=sum+t
        mean=sum/n
        # Two-sided z value for a 95% confidence level.
        z=sci.norm.ppf(0.975)
        # Standard error of the mean with known sigma = 4.
        zs=4/(n**0.5)
        return [mean-z*zs,mean+z*zs]
# Run the solver once at import time (performs a network request).
a=Solution()
a.solve()
import pygame
from os.path import join
import tile
colors = { #integer values of common colors (0xRRGGBB)
    "white" : 0xFFFFFF,
    "black" : 0,
    "red" : 0xFF0000,
    # Fixed: the "blue" and "green" values were swapped
    # (0x00FF00 is green, 0x0000FF is blue). Only black/white/red are
    # used by tiletype(), so this does not change level loading.
    "green" : 0x00FF00,
    "blue" : 0x0000FF
}
class Chunk():
    """A 5x5-tile square section of the level built from the level image.

    Each pixel of the source image maps to one 32x32-pixel tile; the
    pixel's color selects the tile type.

    NOTE(review): Python 2 code (print statement in tiletype).
    """
    def __init__(self, a, b, image, width = 5, height = 5):
        # width/height are in tiles (= pixels of the source image)
        self.width = width
        self.height = height
        # x and y are in chunk coordinate form
        self.x = a
        self.y = b
        # Crop this chunk's 5x5 pixel region out of the level image.
        cropped_image = pygame.Surface((self.width,self.height))
        cropped_image.blit(image, (0, 0), ((self.x * 5), (self.y * 5), width, height))
        pxarray = pygame.PixelArray(cropped_image)
        self.chunk_group = pygame.sprite.RenderUpdates()
        for x in range(self.width):
            for y in range(self.height):
                t_type = self.tiletype(pxarray[x,y])
                # Make a tile given the coordinates it deserves and the chunk group name.
                new_tile = tile.Tile(t_type, self.chunk_group, ((self.x * 5 * 32) + (x * 32)), \
                    ((self.y * 5 * 32) + (y * 32)))
    def tiletype(self, color):
        """Map a pixel color value to a tile type string.

        NOTE(review): if color matches none of the known colors, only an
        error is printed and the final return raises UnboundLocalError —
        confirm whether level images may contain other colors.
        """
        if color == colors["black"]:
            tiletype = "wall"
        elif color == colors["white"]:
            tiletype = "empty"
        elif color == colors["red"]:
            tiletype = "light"
        else:
            print "Wrong Color Error. Color code was " + str(color)
        return tiletype
class Level():
    """A tile-based level loaded from an image file.

    The level is split in 5x5-tile chunks which are created and dropped
    on the fly as the camera moves, so only tiles around the visible
    area are kept in memory.

    NOTE(review): Python 2 code ("/" below is integer division).
    """
    def __init__(self, filename, level_num):
        self.img = pygame.image.load(join("data", "levels", filename)).convert()
        self.level_num = level_num
        # Get the dimensions of the level in tiles
        self.tiles_width = self.img.get_width()
        self.tiles_height = self.img.get_height()
        # Chunk creation preparation
        self.num_chunks_x = self.tiles_width / 5
        self.num_chunks_y = self.tiles_height / 5
        self.chunk_list = [ ]
        self.chunk_tuple_list = [ ] # List of tuples of loaded chunks eg. (3, 5) for quick checking.
    def update(self, surface, camera):
        """Load/unload chunks around the camera and draw visible tiles."""
        # Decide what chunks to keep in vision, and add the tuples of those chunks to a list.
        # Load all chunks in vision or just outside of vision (buffer zone).
        visible_list = [ ]
        left_tile_coord = int((camera.rect.left / 32)) - 5 # -5 because of buffer zone
        if left_tile_coord < 0:
            left_tile_coord = 0
        right_tile_coord = int((camera.rect.right / 32) + 1) + 5 # +1 to cope w/ integer division
        if right_tile_coord < 0:
            right_tile_coord = 0
        top_tile_coord = int((camera.rect.top / 32)) - 5
        if top_tile_coord < 0:
            top_tile_coord = 0
        bottom_tile_coord = int((camera.rect.bottom / 32) + 1) + 5
        if bottom_tile_coord < 0:
            bottom_tile_coord = 0
        # The above values rounded down to chunk boundaries
        x_chunk_bound = (self.rdtn(left_tile_coord, 5) / 5, self.rdtn(right_tile_coord, 5) / 5)
        # (min, max), eg. (3, 10)
        y_chunk_bound = (self.rdtn(top_tile_coord, 5) / 5, self.rdtn(bottom_tile_coord, 5) / 5)
        # Add visible chunk tuples to visible_list
        for x in range(x_chunk_bound[0], x_chunk_bound[1] + 1):
            for y in range(y_chunk_bound[0], y_chunk_bound[1] + 1):
                visible_list.append((x, y))
        # Remove invisible chunks
        # NOTE(review): removing from chunk_tuple_list while iterating
        # over it can skip entries, leaving stale chunks loaded until a
        # later frame — confirm whether this is acceptable.
        for item in self.chunk_tuple_list:
            if not (item in visible_list):
                self.chunk_tuple_list.remove(item)
                for chunk in self.chunk_list:
                    if chunk.x == item[0]:
                        if chunk.y == item[1]:
                            self.chunk_list.remove(chunk)
                            break
        # Add new visible tuples to chunk_tuple_list and create the new chunks
        for item in visible_list:
            if not (item in self.chunk_tuple_list):
                self.chunk_tuple_list.append(item)
                chunk = Chunk(item[0], item[1], self.img)
                self.chunk_list.append(chunk)
        # Draw all of the tiles in visible chunks
        for chunk in self.chunk_list:
            for tile in chunk.chunk_group:
                tile.update(surface, camera)
    def rdtn(self, num, divisor):
        #round down to nearest divisor
        return num - (num % divisor)
|
# Generated by Django 2.1.5 on 2019-02-07 14:35
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the BookLend model (auto-generated by Django 2.1.5)."""

    initial = True

    dependencies = [
        ('shelf', '0004_remove_book_authors'),
    ]

    operations = [
        migrations.CreateModel(
            name='BookLend',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # SET_NULL keeps the lend row when the referenced book
                # is deleted.
                ('current_book', models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, to='shelf.Book')),
            ],
        ),
    ]
|
# ==================================================================================================
# Copyright 2015 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
from __future__ import print_function
from abc import abstractmethod
import sys
import threading
import time
import traceback
from twitter.common.lang import Interface
class ClockInterface(Interface):
    """Abstract interface shared by real and simulated clocks."""

    @abstractmethod
    def time(self):
        """The current time.

        :rtype: float
        """
        pass

    @abstractmethod
    def tick(self, amount):
        """Advance the clock by `amount`.

        :param amount: The amount to advance the clock.
        :type amount: Anything that can be coerced to a float.
        """
        pass

    @abstractmethod
    def sleep(self, amount):
        """Block until the clock time is >= clock.time() + amount.

        :param amount: The amount of time to wait.
        :type amount: Anything that can be coerced to a float.
        """
        pass
class _Waiter(object):
def __init__(self, wait_amount, wait_until):
self.wait_amount = wait_amount # the amount that this waiter is waiting
self.wait_until = wait_until # the time at which this waiter expires
self.thread = threading.current_thread() # the waiting thread
self._syn_event = threading.Event()
self._ack_event = threading.Event()
def __lt__(self, other):
if not isinstance(other, _Waiter):
raise TypeError('Can only compare two Waiter objects.')
return self.wait_until < other.wait_until
def syn(self):
self._syn_event.wait()
self._ack_event.set()
def ack(self):
self._syn_event.set()
self._ack_event.wait()
class ThreadedClock(ClockInterface):
    """A simulated clock for deterministic multi-threaded tests.

    Worker threads block in ``sleep`` on simulated time; the test driver
    advances time with ``tick`` (waking expired sleepers one by one) and uses
    ``converge`` to wait until every worker is finished or parked here.
    """

    # Real wall-clock seconds slept each time we yield to other threads.
    THREAD_YIELD_TIMEOUT = 0.1

    @classmethod
    def thread_yield(cls):
        """Sleep briefly on the *real* clock and return the amount slept."""
        time.sleep(cls.THREAD_YIELD_TIMEOUT)
        return cls.THREAD_YIELD_TIMEOUT

    def __init__(self, initial_value=0, log=None):
        """Construct a ThreadedClock.

        :keyword initial_value: The initial value of the clock.  Defaults to 0.
        :keyword log: A callable for accepting log messages.  Defaults to writing to sys.stderr.
        """
        self._time = float(initial_value)
        self._waiters = []  # queue of Waiters, kept sorted by wait_until (see sleep)
        self._log = log or (lambda msg: sys.stderr.write(msg + '\n'))

    def converged(self, threads):
        """Determine whether supplied threads are either finished or sleeping on this clock.

        :param threads: An iterable of :class:`threading.Thread` objects to test for blocking.
        :returns: True if all threads are finished or sleeping on this clock, otherwise False.
        """
        thread_ids = set(thread.ident for thread in threads if thread.is_alive())
        waiting_ids = set(waiter.thread.ident for waiter in self._waiters
                          if waiter.thread.is_alive() and waiter.wait_until > self._time)
        return thread_ids == waiting_ids

    def converge(self, threads, timeout=None):
        """Wait until the supplied threads are finished or sleeping on this clock.

        This method should be called to ensure deterministic tests.

        :param threads: An iterable of :class:`threading.Thread` objects to test for blocking.
        :keyword timeout: A *real wall clock* timeout to wait for the threads to converge.  If
            timeout is None, wait forever.
        :returns: True once all threads are finished or sleeping, False if the timeout expires
            without convergence.
        """
        # flush the queue at the current timeslice
        self.tick(0)
        total_time = 0
        while not self.converged(threads):
            total_time += self.thread_yield()
            if timeout and total_time >= timeout:
                return False
        return True

    def assert_waiting(self, thread, amount=None):
        """Make an assertion that `thread` is waiting, possibly for a specific `amount`.

        :param thread: A :class:`threading.Thread` object.
        :param amount: The amount that the thread should be waiting, if specified.
        """
        waiters = [waiter for waiter in self._waiters if waiter.thread == thread]
        if len(waiters) != 1:
            assert False, 'Thread %s is not currently sleeping.' % thread
        if amount is not None and waiters[0].wait_amount != amount:
            assert False, 'Thread %s is sleeping %s, expected %s.' % (
                thread, waiters[0].wait_amount, amount)

    def assert_not_waiting(self, thread):
        """Make an assertion that `thread` is not waiting.

        :param thread: A :class:`threading.Thread` object.
        """
        assert not any(waiter for waiter in self._waiters if waiter.thread == thread), (
            'Thread %s is unexpectedly waiting.' % thread)

    # --- Rest of the ClockInterface implementation.

    def time(self):
        """Return the current simulated time."""
        return self._time

    def _pop_waiter(self, end):
        # Pop the earliest waiter iff it expires within this tick; returns
        # None otherwise (the list is kept sorted by wait_until).
        if self._waiters and self._waiters[0].wait_until <= end:
            return self._waiters.pop(0)

    def tick(self, amount):
        """Advance simulated time by `amount`, waking expired sleepers in order."""
        now = self._time
        end = now + amount
        while True:
            waiter = self._pop_waiter(end)
            if not waiter:
                break
            if self._log:
                self._log('[%r] Time now: %s' % (self, self._time))
            # Jump time to the waiter's deadline, then handshake so the
            # sleeping thread observes the updated time before we continue.
            self._time = waiter.wait_until
            waiter.ack()
        if self._log:
            self._log('[%r] Time now: %s' % (self, self._time))
        self._time = end

    def sleep(self, amount):
        """Register as a waiter and block until tick() passes now + amount."""
        if amount < 0:
            # mirror time.time semantics.
            raise IOError('Cannot sleep < 0.')
        waiter = _Waiter(amount, self._time + amount)
        self._waiters.append(waiter)
        self._waiters.sort()
        waiter.syn()
|
#!/usr/bin/env python
"""This is an attempt to drive the gait2d model with a controller derived
from real data."""
# standard library
import os
# external
import numpy as np
from scipy.integrate import odeint
from scipy.interpolate import interp1d
from pydy.codegen.code import generate_ode_function
from pydy.viz import Scene
import pygait2d
from pygait2d import derive, simulate
from dtk import process
# local
import utils
import simulation
from grf_landmark_settings import settings
# load the data and find a controller
trial_number = '068'
trial = utils.Trial(trial_number)
trial._write_event_data_frame_to_disk('Longitudinal Perturbation')
event_data_frame = trial.event_data_frames['Longitudinal Perturbation']
event_data_frame = simulation.estimate_trunk_somersault_angle(event_data_frame)
# TODO : will likely need to low pass filter the two time derivatives
event_data_frame['Trunk.Somersault.Rate'] = \
    process.derivative(event_data_frame.index.values.astype(float),
                       event_data_frame['Trunk.Somersault.Angle'],
                       method='combination')
event_data_frame['RGTRO.VelY'] = \
    process.derivative(event_data_frame.index.values.astype(float),
                       event_data_frame['RGTRO.PosY'], method='combination')
# TODO : Ensure that the modified data frame is infact the one in the Trial
# object.
trial._write_inverse_dynamics_to_disk('Longitudinal Perturbation', force=True)
trial._section_into_gait_cycles('Longitudinal Perturbation', force=True)
gait_data = trial.gait_data_objs['Longitudinal Perturbation']
sensor_labels, control_labels, result, solver = \
    trial.identification_results('Longitudinal Perturbation', 'joint isolated')
# Define a simulation controller based off of the results of the
# identification.
# TODO : I likely need to take the mean of the left at right gains so things are
# symmetric.
mean_cycle_time = gait_data.gait_cycle_stats['Stride Duration'].mean()
percent_gait_cycle = gait_data.gait_cycles.iloc[0].index.values.astype(float)  # n
m_stars = result[1]  # n x q
gain_matrices = result[0]  # n x q x p
state_indices = simulation.state_indices_for_controller()
control_indices = simulation.control_indices_for_specified()
# TODO : This is a hack to get the right state signs when computing the
# controller.
# Sign flips for selected entries of the 18-element state vector; the
# index-to-joint mapping is assumed to follow the initial_conditions
# ordering further below -- confirm against simulation.state_indices_for_controller.
state_sign = np.ones(18)
state_sign[4] = -1.0
state_sign[5] = -1.0
state_sign[6] = -1.0
state_sign[7] = -1.0
state_sign[13] = -1.0
state_sign[14] = -1.0
state_sign[16] = -1.0
state_sign[17] = -1.0
# This is a cheap trick to set the correct signs for the moments.
specified_sign = np.array([1.0, 1.0, 1.0, 1.0, -1.0, -1.0, 1.0, -1.0, -1.0])
def controller(x, t):
    """Closed-loop joint-torque controller built from the identified gains.

    x : ndarray -- presumably the full 18-element state vector; TODO confirm
    t : float -- simulation time [s]
    """
    # this will need extrapolation (the first answer doesn't work for
    # interpolating N dimenaional arrays).
    # http://stackoverflow.com/questions/2745329/how-to-make-scipy-interpolate-give-an-extrapolated-result-beyond-the-input-range
    x = state_sign * x
    # NOTE(review): this is a time [s] modulo the stride duration, while the
    # interpolation grid below is in percent gait cycle -- confirm units agree.
    current_percent_gait_cycle = t % mean_cycle_time
    # NOTE(review): both interpolators depend only on module-level data and
    # could be hoisted out of the function (rebuilt on every call here).
    f_gains = interp1d(percent_gait_cycle, gain_matrices, axis=0,
                       bounds_error=False, fill_value=0.0)
    f_m_stars = interp1d(percent_gait_cycle, m_stars, axis=0,
                         bounds_error=False, fill_value=0.0)
    current_gain = f_gains(current_percent_gait_cycle)
    current_m_star = f_m_stars(current_percent_gait_cycle)
    # 6 joint torques in the order of the control identifier
    joint_torques = current_m_star - np.dot(current_gain, x[state_indices])
    lift_force = 9.81 * trial.meta_data['subject']['mass']
    # NOTE(review): the assignment below immediately overrides the computed
    # lift force, so it is effectively disabled (dead code above).
    lift_force = 0.0
    return specified_sign * np.hstack(([0.0, lift_force, 0.0], 0.1 * joint_torques[control_indices]))
# This loads an open loop control solution that was precomputed with Ton's
# Matlab code.
open_loop_states, open_loop_specified, open_loop_duration = simulation.load_open_loop_trajectories()
# Percent-gait-cycle grid matching the columns of the open loop trajectories.
open_loop_percent_gait_cycle = np.linspace(0.0, 100.0, num=open_loop_states.shape[1])
def open_loop_controller(x, t):
    """Return the precomputed open-loop specified inputs at time ``t``.

    The state ``x`` is ignored; inputs are interpolated from the
    module-level open-loop trajectory, zero outside its range.
    """
    cycle_position = t % open_loop_duration
    interpolator = interp1d(open_loop_percent_gait_cycle,
                            open_loop_specified, axis=1,
                            bounds_error=False, fill_value=0.0)
    return np.squeeze(interpolator(cycle_position))
def combined_controller(x, t):
    """Open-loop torques plus state feedback toward the open-loop trajectory.

    x : ndarray, shape(18,)
    t : float
    """
    # NOTE(review): `t % open_loop_duration` is a time [s] but the
    # interpolation grids are in percent gait cycle (0-100) -- confirm units.
    current_percent_gait_cycle = t % open_loop_duration
    f_m0 = interp1d(open_loop_percent_gait_cycle,
                    open_loop_specified, axis=1,
                    bounds_error=False, fill_value=0.0)
    m0 = np.squeeze(f_m0(current_percent_gait_cycle))  # shape(9,)
    f_s0 = interp1d(open_loop_percent_gait_cycle,
                    open_loop_states, axis=1,
                    bounds_error=False, fill_value=0.0)
    s0 = np.squeeze(f_s0(current_percent_gait_cycle))  # shape(18,)
    f_gains = interp1d(percent_gait_cycle, gain_matrices, axis=0,
                       bounds_error=False, fill_value=0.0)
    current_gain = f_gains(current_percent_gait_cycle)  # shape(6, 12)
    # Apply the sign-convention fixes before forming the feedback error.
    x = state_sign * x
    s0 = state_sign * s0
    joint_torques = np.squeeze(np.dot(current_gain, (s0[state_indices] - x[state_indices])))
    return m0 + specified_sign * np.hstack(([0.0, 0.0, 0.0], joint_torques[control_indices]))
# Generate the system.
(mass_matrix, forcing_vector, kane, constants, coordinates, speeds,
 specified, visualization_frames, ground, origin) = derive.derive_equations_of_motion()
rhs = generate_ode_function(mass_matrix, forcing_vector,
                            constants, coordinates, speeds,
                            specified=specified, generator='cython')
# Get all simulation and model parameters.
model_constants_path = os.path.join(os.path.split(pygait2d.__file__)[0], '../data/example_constants.yml')
constant_values = simulate.load_constants(model_constants_path)
args = {'constants': np.array([constant_values[c] for c in constants]),
        'specified': combined_controller}
time_vector = np.linspace(0.0, 0.5, num=1000)
# Build initial conditions from the mean measured gait cycle.
# NOTE(review): axis='items' is the old pandas Panel API -- verify the
# installed pandas still supports it.
mean_of_gait_cycles = gait_data.gait_cycles.mean(axis='items')
initial_conditions = np.zeros(18)
initial_conditions[0] = 0.0
initial_conditions[1] = mean_of_gait_cycles['RGTRO.PosY'][0]
initial_conditions[2] = -mean_of_gait_cycles['Trunk.Somersault.Angle'][0]  # not sure why I had to set this to negative
initial_conditions[3] = mean_of_gait_cycles['Right.Hip.Flexion.Angle'][0]
initial_conditions[4] = -mean_of_gait_cycles['Right.Knee.Flexion.Angle'][0]
initial_conditions[5] = -mean_of_gait_cycles['Right.Ankle.PlantarFlexion.Angle'][0] - np.pi / 2.0  # seems like the inverse dynamics angles for ankle are not based on nominal position, thus the pi/2
initial_conditions[6] = mean_of_gait_cycles['Left.Hip.Flexion.Angle'][0]
initial_conditions[7] = -mean_of_gait_cycles['Left.Knee.Flexion.Angle'][0]
initial_conditions[8] = -mean_of_gait_cycles['Left.Ankle.PlantarFlexion.Angle'][0] - np.pi / 2.0
initial_conditions[9] = mean_of_gait_cycles['RightBeltSpeed'][0]
initial_conditions[10] = mean_of_gait_cycles['RGTRO.VelY'][0]
initial_conditions[11] = mean_of_gait_cycles['Trunk.Somersault.Rate'][0]
initial_conditions[12] = mean_of_gait_cycles['Right.Hip.Flexion.Rate'][0]
initial_conditions[13] = -mean_of_gait_cycles['Right.Knee.Flexion.Rate'][0]
initial_conditions[14] = -mean_of_gait_cycles['Right.Ankle.PlantarFlexion.Rate'][0]
initial_conditions[15] = mean_of_gait_cycles['Left.Hip.Flexion.Rate'][0]
initial_conditions[16] = -mean_of_gait_cycles['Left.Knee.Flexion.Rate'][0]
initial_conditions[17] = -mean_of_gait_cycles['Left.Ankle.PlantarFlexion.Rate'][0]
# NOTE(review): this line discards ALL of the measurement-based initial
# conditions assembled above -- intentional only if starting from the open
# loop trajectory's first state is desired.
initial_conditions = open_loop_states[:, 0]
# Integrate the equations of motion
trajectories = odeint(rhs, initial_conditions, time_vector, args=(args,))
# Visualize
scene = Scene(ground, origin, *visualization_frames)
scene.generate_visualization_json(coordinates + speeds, constants,
                                  trajectories, args['constants'])
scene.display()
|
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 25 14:57:36 2019
@author: zhangtong
"""
from sklearn.metrics import *
from sklearn.linear_model import LinearRegression
from al_data_tool import *
import numpy as np
import pandas as pd
import math
def to_score(x):
    """Map a probability-like value ``x`` to an integer credit score.

    ``x`` is clamped to [0.001, 0.999] so the log-odds is finite, mapped
    through ``A - B * log(x / (1 - x))`` and rounded, then clamped to the
    conventional [300, 900] score range.

    :param x: probability-like value (any real number; clamped internally)
    :return: int score in [300, 900]
    """
    # Scorecard scaling constants (offset / factor).
    A = 404.65547022
    B = 72.1347520444
    # Clamp the input away from 0 and 1 so the log-odds is finite.
    # (Replaces the if/elif chain; also drops the redundant inner
    # `import math` -- the module already imports math.)
    x = min(max(x, 0.001), 0.999)
    result = int(round(A - B * math.log(x / (1 - x))))
    # Clamp to the conventional score range.
    return min(max(result, 300), 900)
# 单列woe值
def cal_woe(Xvar, Yvar):
    """Compute per-bin WOE and IV for one binned feature.

    :param Xvar: binned feature column (pd.Series)
    :param Yvar: binary label column (0 = good, 1 = bad)
    :return: DataFrame with columns bin, woe, iv, feature
    """
    counts = Yvar.value_counts()
    total_good = counts[0]
    total_bad = counts[1]

    def _bin_stats(grp):
        # Per-bin good/bad counts; a missing class is smoothed to 10% of the
        # other class so the log never sees a zero.
        dist = pd.Series(grp['y'].value_counts(), index=[0, 1]).fillna(0)
        good = dist[0] if dist[0] > 0 else dist[1] * 0.1
        bad = dist[1] if dist[1] > 0 else dist[0] * 0.1
        woe_val = math.log((bad / total_bad) * (total_good / good))
        iv_val = (bad / total_bad - good / total_good) * woe_val
        return pd.Series({'woe': woe_val, 'iv': iv_val})

    frame = pd.DataFrame({'bin': Xvar, 'y': Yvar})
    result = frame.groupby('bin').apply(lambda g: _bin_stats(g)).reset_index()
    result['feature'] = Xvar.name
    return result
# 单列iv值
def cal_iv(Xvar, Yvar):
    """Return the total IV (sum over bins) of one binned feature.

    :param Xvar: binned feature column
    :param Yvar: binary label column
    :return: float
    """
    woe_table = cal_woe(Xvar, Yvar)
    return woe_table['iv'].sum()
# 批量计算woe值
def cal_woe_batch(df, X, y, n_jobs=10):
    """Compute WOE tables for many binned columns in parallel.

    :param df: DataFrame whose columns are already binned
    :param X: list of column names to compute WOE for
    :param y: name of the label column
    :param n_jobs: maximum number of parallel jobs
    :return: one concatenated DataFrame of per-bin WOE rows
    """
    tables = Parallel(n_jobs=n_jobs)(delayed(cal_woe)(df[col], df[y]) for col in X)
    return pd.concat(tables)
# 批量计算iv值
def cal_iv_batch(df, X, y, n_jobs=10):
    """Compute total IV per column in parallel.

    :param df: DataFrame whose columns are already binned
    :param X: list of column names to compute IV for
    :param y: name of the label column
    :param n_jobs: maximum number of parallel jobs
    :return: DataFrame with columns feature, iv
    """
    iv_values = Parallel(n_jobs=n_jobs)(delayed(cal_iv)(df[col], df[y]) for col in X)
    return pd.DataFrame({'feature': X, 'iv': iv_values})
# 等距分箱的iv值
def get_df_iv_equidistance(df_src, X, y, bins, n_jobs):
    """IV per feature after equal-width (equidistant) binning."""
    return cal_iv_batch(cut_batch(df_src, X, bins, n_jobs), X, y, n_jobs)
# 等频分箱的iv值
def get_df_iv_equifrequency(df_src, X, y, bins, n_jobs):
    """IV per feature after equal-frequency (quantile) binning."""
    return cal_iv_batch(qcut_batch(df_src, X, bins, n_jobs), X, y, n_jobs)
# 等距分箱的woe值
def get_df_woe_equidistance(df_src, X, y, bins, n_jobs):
    """WOE tables per feature after equal-width (equidistant) binning."""
    return cal_woe_batch(cut_batch(df_src, X, bins, n_jobs), X, y, n_jobs)
# 等频分箱的woe值
def get_df_woe_equifrequency(df_src, X, y, bins, n_jobs):
    """WOE tables per feature after equal-frequency (quantile) binning."""
    return cal_woe_batch(qcut_batch(df_src, X, bins, n_jobs), X, y, n_jobs)
# 卡方分箱的woe值
def get_df_woe_chi_merge(df, X, flag, confidenceVal=3.841, bin=10, sample_rate=0.03, sample=None, n_jobs=10):
    """WOE tables for features binned with chi-square (ChiMerge) binning.

    :param df: dataset containing the features to bin plus the binary label
        (positive samples are 1, negative samples are 0)
    :param X: feature names to bin (list)
    :param flag: name of the binary label column (str)
    :param confidenceVal: chi-square merge threshold (default 3.841, i.e. 95%)
    :param bin: maximum number of bins
    :param sample_rate: bins holding a smaller share of samples get merged
    :param sample: optional sample size (None = no sampling); sampling speeds
        things up when there are many observations
    :param n_jobs: maximum number of parallel jobs
    :return: DataFrame
    """
    binned = chi_merge_cut_batch(df, X, flag, confidenceVal, bin, sample_rate, sample, n_jobs)
    return cal_woe_batch(binned, X, flag, n_jobs=n_jobs)
# 卡方分箱的iv值
def get_df_iv_chi_merge(df, X, flag, confidenceVal=3.841, bin=10, sample_rate=0.03, sample=None, n_jobs=10):
    """Total IV per feature after chi-square (ChiMerge) binning.

    :param df: dataset containing the features to bin plus the binary label
        (positive samples are 1, negative samples are 0)
    :param X: feature names to bin (list)
    :param flag: name of the binary label column (str)
    :param confidenceVal: chi-square merge threshold (default 3.841, i.e. 95%)
    :param bin: maximum number of bins
    :param sample_rate: bins holding a smaller share of samples get merged
    :param sample: optional sample size (None = no sampling); sampling speeds
        things up when there are many observations
    :param n_jobs: maximum number of parallel jobs
    :return: DataFrame
    """
    binned = chi_merge_cut_batch(df, X, flag, confidenceVal, bin, sample_rate, sample, n_jobs)
    return cal_iv_batch(binned, X, flag, n_jobs)
# auc
def get_roc_auc_score(y_true, y_pred):
    """ROC AUC, or NaN when the labels are not binary."""
    return roc_auc_score(y_true, y_pred) if y_true.nunique() == 2 else np.nan
# ks
def get_ks(y_true, y_pred):
    """KS statistic: the maximum |FPR - TPR| across ROC thresholds."""
    fpr, tpr, _thresholds = roc_curve(y_true, y_pred, pos_label=1)
    separation = abs(fpr - tpr)
    return separation.max()
def get_classifier_ks(y_true, y_pred):
    """Alias of :func:`get_ks`, kept for backward compatibility.

    The original body was a byte-for-byte duplicate of get_ks; delegating
    keeps the two definitions from drifting apart.
    """
    return get_ks(y_true, y_pred)
# corr
def get_corr(df):
    """Return the pairwise correlation matrix of df's columns."""
    return df.corr()
# accuracy
def get_accuracy(y_true, y_pred):
    """Return classification accuracy (fraction of exact label matches)."""
    return accuracy_score(y_true, y_pred)
# precision
def get_precision(y_true, y_pred):
    """Precision; sklearn's default (binary) average for 2-class labels,
    macro average otherwise."""
    if len(np.unique(y_true)) == 2:
        return precision_score(y_true, y_pred)
    return precision_score(y_true, y_pred, average='macro')
# recall
def get_recall(y_true, y_pred):
    """Recall; sklearn's default (binary) average for 2-class labels,
    macro average otherwise.

    BUG FIX: the multi-class branch previously called precision_score,
    returning macro precision instead of macro recall.
    """
    if len(np.unique(y_true)) == 2:
        return recall_score(y_true, y_pred)
    return recall_score(y_true, y_pred, average='macro')
# f1
def get_f1(y_true, y_pred):
    """F1 score; sklearn's default (binary) average for 2-class labels,
    macro average otherwise."""
    if len(np.unique(y_true)) == 2:
        return f1_score(y_true, y_pred)
    return f1_score(y_true, y_pred, average='macro')
def get_vif(X: pd.DataFrame, y: pd.Series):
    """Variance inflation factor of y regressed on X: 1 / (1 - R^2)."""
    model = LinearRegression().fit(X, y)
    r2 = r2_score(y, model.predict(X))
    return 1 / (1 - r2)
|
# Python 2 tutorial script (print-statement syntax).
print "Welcome, to the real world, Neo"
def matrix():
    """Ask the user to pick a pill; recurse until 'red' or 'blue' is typed."""
    print "You take the blue pill, the story ends. You wake up in your bed and believe whatever you want to believe. You take the red pill, you stay in Wonderland, and I show you how deep the rabbit hole goes"
    # Normalize the answer so RED/Red/red all match.
    answer = raw_input("Type blue or red").lower()
    if answer == "red":
        print "Let\'s see how deep the rabit hole goes"
    elif answer == "blue":
        print "You were never really the one"
    else:
        # Invalid input: prompt again recursively.
        print "You didn't pick red or blue! Let's try again"
        matrix()
matrix()
# comparators:
# ==  equal
# !=  not equal
# <   less than
# <=  less than or equal to
# >   greater than
# >=  greater than or equal to

# 'and' is not capitalized (and not & as in other languages).
# the below will return false
print 1 == 1 and 2 > 2
# the below will return true
print 1 == 1 and 2 == 2
# 'or' replaces || from other languages
# the below will return false
print 100 ** 0.5 >= 50 or 1 == 2
# the below will return true
print 2 ** 3 == 108 % 100 or 'Brian' == 'Bryan'
# 'not' is like using !
# the below will return false
print not True
# the below will return true
print not 3 ** 2 + 4 ** 2 != 5 ** 2
# operators are ordered as follows: not, and, or. For example, the below will return true
print True or not False and False
#if, elif, else example with a function:
def my_cool_function(argument):
    """Return a Dragon Ball-flavoured verdict for a power level."""
    if argument > 9000:
        return "Over 9000! WHAAAA"
    if 1000 < argument < 9000:
        return "Easy fight for One Punch Man... errr I mean Vageta"
    return "You can't just go around destroying planets cuz you're bored"
# Exercise the function across all three branches.
print my_cool_function(10000)
print my_cool_function(2000)
print my_cool_function(25)
|
# -*- coding: utf-8 -*-
#import is_pricelist
|
from abc import ABCMeta, abstractmethod
class Sequence(metaclass=ABCMeta):
    """Abstract base class providing sequence behaviour on top of
    ``__len__`` and ``__getitem__``, which subclasses must supply."""

    @abstractmethod
    def __len__(self):
        """Return the length."""

    @abstractmethod
    def __getitem__(self, i):
        """Return the element at index ``i``."""

    def __contains__(self, val):
        """Return True if the sequence contains ``val``."""
        return any(self[j] == val for j in range(len(self)))

    def index(self, val):
        """Return the leftmost index >= 0 at which ``val`` is found,
        or raise ValueError."""
        for j in range(len(self)):
            if self[j] == val:
                return j
        raise ValueError("value not in sequence")

    def count(self, val):
        """Return the number of elements equal to ``val``."""
        return sum(1 for j in range(len(self)) if self[j] == val)
|
import numpy as np
# Build a 2x3 matrix, show it, then resize it in place to 3x2.
a1 = np.array([[1, 2, 3], [4, 5, 6]])
print('matrix a1 dengan ukuran:', a1.shape)
print(a1)
# resize matrix (in place; same 6 elements reflowed row-major)
print("resize matrix a1:")
a1.resize(3, 2)
print(a1)
print('matrix a dengan ukuran:', a1.shape)
def Punctuation(string):
    """Print `string` with every punctuation character removed.

    :param string: text to strip (name shadows the stdlib module but is
        kept for interface compatibility)
    """
    punctuations = '''!()-[]{};:'"\,<>./?@#$%^&*_~'''
    # One C-level translate pass instead of a lowercase scan plus repeated
    # str.replace calls (same output: punctuation is case-insensitive).
    print(string.translate(str.maketrans('', '', punctuations)))
# Script entry point: read a line from stdin and print it without punctuation.
string = input('Enter the string')
Punctuation(string)
"""
Python Wechaty - https://github.com/wechaty/python-wechaty
Authors: Huan LI (李卓桓) <https://github.com/huan>
Jingjing WU (吴京京) <https://github.com/wj-Mcat>
2020-now @ Copyright Wechaty
Licensed under the Apache License, Version 2.0 (the 'License');
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an 'AS IS' BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import annotations
from typing import (
TYPE_CHECKING,
Any,
List,
)
from wechaty_puppet import get_logger
if TYPE_CHECKING:
from .tag import Tag
log = get_logger('Favorite')
# pylint: disable=R
class Favorite:
    """A saved/favorited item, addressed by its id.

    Currently a thin shell: tag retrieval is not implemented yet.
    """

    def __init__(self, favorite_id: str):
        # Identifier of this favorite entry.
        self.favorite_id = favorite_id

    def get_id(self) -> str:
        """Return this favorite's id (the access is logged)."""
        log.info('get_id() <%s>', self)
        return self.favorite_id

    async def tags(self) -> List[Tag]:
        """Return the tags attached to this favorite.

        Not implemented yet; always returns an empty list.
        """
        # TODO -> favorite tags
        return []

    async def find_all(self) -> Any:
        """Return all favorite tags.

        Not implemented yet; always returns None.
        """
        # TODO -> find_all
        return None
|
import sys
import urllib2
import urllib
import json
url = 'http://localhost:8083/parse?'
# Python 2 client: for every sentence on stdin, query the local parse
# service and print one tab-separated row per answer.
for line in sys.stdin:
    sent = line.strip()
    data = urllib.urlencode(dict(sent=sent))
    resp = urllib.urlopen(url + data)
    results = json.loads(resp.read())
    for x in results['result']:
        score = x['score']
        answers = x['answers']
        query = x['query']
        for a in answers:
            print '%s\t%s\t%s\t%s' % (sent, a, score, query)
|
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 22 11:59:36 2014
@author: toli
"""
import maya.OpenMayaMPx as OpenMayaMPx
import maya.OpenMaya as OpenMaya
class DoublerNode(OpenMayaMPx.MPxNode):
    """Maya DG node: outputs (translationA + translationB) * percent
    as a float vector on outputTranslate."""

    kPlugNodeId = OpenMaya.MTypeId(0x00047251)

    # Attribute handles; populated by initialize().
    aInputA = OpenMaya.MObject()
    aInputB = OpenMaya.MObject()
    aOutput = OpenMaya.MObject()
    aPercent = OpenMaya.MObject()

    def __init__(self):
        OpenMayaMPx.MPxNode.__init__(self)

    def compute(self, plug, data):
        """Recompute aOutput from the two input world matrices and percent."""
        if plug != DoublerNode.aOutput:
            # BUG FIX: OpenMaya.MStatus does not exist in the Python API;
            # signal "not my plug" with OpenMaya.kUnknownParameter instead.
            return OpenMaya.kUnknownParameter
        worldMatrixA = data.inputValue(DoublerNode.aInputA).asMatrix()
        worldMatrixB = data.inputValue(DoublerNode.aInputB).asMatrix()
        multi = data.inputValue(DoublerNode.aPercent).asFloat()
        # Extract the translation components from the world matrices.
        transA = OpenMaya.MTransformationMatrix(worldMatrixA).getTranslation(OpenMaya.MSpace.kTransform)
        transB = OpenMaya.MTransformationMatrix(worldMatrixB).getTranslation(OpenMaya.MSpace.kTransform)
        hOutput = data.outputValue(DoublerNode.aOutput)
        resultTrans = OpenMaya.MFloatVector((transA.x + transB.x) * multi,
                                            (transA.y + transB.y) * multi,
                                            (transA.z + transB.z) * multi)
        # BUG FIX: the method is setMFloatVector (was misspelled
        # setMfFloatVector, which raised AttributeError).
        hOutput.setMFloatVector(resultTrans)
        data.setClean(plug)
def creator():
    """Node creator callback passed to Maya's registerNode."""
    return OpenMayaMPx.asMPxPtr(DoublerNode())
#define new attributes here
def initialize():
    """Create the node's attributes and declare the dependency graph edges."""
    nAttr = OpenMaya.MFnNumericAttribute()
    nMAttr = OpenMaya.MFnMatrixAttribute()

    DoublerNode.aPercent = nAttr.create('percent', 'per', OpenMaya.MFnNumericData.kFloat, 0.5)
    nAttr.setWritable(True)
    nAttr.setStorable(True)  # BUG FIX: was setSortable (no such MFnAttribute method)
    nAttr.setReadable(True)
    nAttr.setKeyable(True)

    # BUG FIX: was assigned to the misspelled DoublerNode.aInpuitB, leaving
    # aInputB an empty MObject so addAttribute/attributeAffects failed.
    DoublerNode.aInputB = nMAttr.create('inMatrixB', 'inB', OpenMaya.MFnMatrixAttribute.kDouble)
    nMAttr.setWritable(True)
    nMAttr.setStorable(True)
    nMAttr.setReadable(True)
    # BUG FIX: keyable was set through nAttr (i.e. on percent) by mistake.
    nMAttr.setKeyable(True)

    DoublerNode.aInputA = nMAttr.create('inMatrixA', 'inA', OpenMaya.MFnMatrixAttribute.kDouble)
    nMAttr.setWritable(True)
    nMAttr.setStorable(True)
    nMAttr.setReadable(True)
    nMAttr.setKeyable(True)

    DoublerNode.aOutput = nAttr.createPoint('outputTranslate', 'ot')
    # BUG FIX: these flags were applied through nMAttr (i.e. to aInputA).
    nAttr.setWritable(True)
    nAttr.setStorable(True)
    nAttr.setReadable(True)

    DoublerNode.addAttribute(DoublerNode.aPercent)
    DoublerNode.addAttribute(DoublerNode.aOutput)
    DoublerNode.addAttribute(DoublerNode.aInputB)
    DoublerNode.addAttribute(DoublerNode.aInputA)

    # BUG FIX: inputs must affect the computed output.  The original wired
    # aInputB <-> aInputA (two plain inputs), so compute() never triggered
    # when an input matrix changed.
    DoublerNode.attributeAffects(DoublerNode.aPercent, DoublerNode.aOutput)
    DoublerNode.attributeAffects(DoublerNode.aInputB, DoublerNode.aOutput)
    DoublerNode.attributeAffects(DoublerNode.aInputA, DoublerNode.aOutput)
def initializePlugin(obj):
    """Plug-in entry point: register the DoublerNode node type with Maya."""
    # BUG FIX: the class is MFnPlugin (was misspelled MPnPlugin).
    plugin = OpenMayaMPx.MFnPlugin(obj, 'Asim', '1.0', 'Any')
    try:
        # BUG FIX: registerNode requires the type name, type id, creator and
        # initializer; the original called it with no arguments.
        plugin.registerNode('doublerNode', DoublerNode.kPlugNodeId, creator, initialize)
    except Exception:
        # Narrowed from a bare except; also replaces the Python-2-only
        # `raise RuntimeError, '...'` statement with the call form.
        raise RuntimeError('Failed to register node')
|
# ==================================================================================================
# Copyright 2014 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
# TODO(wickman)
#
# 1. open(foo) should always be done in a with context.
#
# 2. if you see acquire/release on the same variable in a particular ast
# body, warn about context manager use.
import ast
from ..common import CheckstylePlugin
class MissingContextManager(CheckstylePlugin):
  """Recommend the use of contextmanagers when it seems appropriate."""

  def nits(self):
    # Collect every call expression that already appears as a `with` context.
    # NOTE(review): `node.context_expr` is the pre-3.3 AST layout; newer
    # Pythons expose with-items via node.items -- confirm the target version.
    managed_calls = set()
    for with_node in self.iter_ast_types(ast.With):
      context = with_node.context_expr
      if isinstance(context, ast.Call):
        managed_calls.add(context)
    for call in self.iter_ast_types(ast.Call):
      if not isinstance(call.func, ast.Name) or call.func.id != 'open':
        continue
      if call in managed_calls:
        continue
      yield self.warning('T802', 'open() calls should be made within a contextmanager.', call)
|
from django.apps import AppConfig
class EducacaoConfig(AppConfig):
    """Django application configuration for the `educacao` app."""
    name = 'educacao'
|
# Crie um algoritmo que leia um número e mostre o seu dobro, triplo e raiz quadrada.
# Read an integer and report its double, triple and square root.
n = int(input('Digite um número: '))
print(f'O dobro de {n} vale {n * 2}.')
print(f'O triplo de {n} vale {n * 3}.')
print(f'A raiz quadrada de {n} é {n ** (1 / 2):.2f}')
|
# apply stack DS
import ch1_Stack.stack as stack
# Reverse a string by pushing every character onto a stack and popping
# them back off (LIFO order reverses the sequence).
string = "gninraeL nIdekniL htiw tol a nraeL"
reversed_string = ""
s = stack.stack()
for c in string:
    s.push(c)
while not s.is_empty():
    reversed_string += s.pop()
print(reversed_string)
from django.conf.urls import url
from . import views
from . import cart
from django.conf import settings
from django.conf.urls.static import static
from django.conf.urls import include, url
from django.contrib import admin
from orders.views import Home, success, failure
app_name = 'orders'
urlpatterns = [
    # url(r'^make_purchase/(?P<model_id>\d+)/(?P<model_slug>[-\w]+)/$', views.order_details, name='user_cart'),
    # NOTE(review): this routes to an unbound method on the Cart class rather
    # than a view function -- confirm it resolves as intended.
    url(r'^net_amount/$', cart.Cart.get_total_price, name='total_price'),
    url(r'^orders/$', views.cart_detail, name='cart_detail'),
    url(r'^make_purchase/add/(?P<model_id>\d+)/(?P<model_slug>[-\w]+)/$', views.cart_add, name='cart_add'),
    url(r'^remove/(?P<model_id>\d+)/$', views.cart_remove, name='cart_remove'),
    url(r'^delivery/apply/(?P<model_id>\d+)/(?P<delivery_value>[-\w]+)/$', views.delivery_charge, name='delivery_charge'),
    # -----------------------Payments urls begin----------------------------#
    # url(r'^admin/', include(admin.site.urls)),
    url(r'^redirect_to_payment/', Home),
    url(r'^success/', success),
    url(r'^failure/', failure),
    # -----------------------Payments urls ends----------------------------#
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render,HttpResponse
from rest_framework.generics import RetrieveUpdateDestroyAPIView,CreateAPIView,ListAPIView,ListCreateAPIView
from rest_framework.authentication import SessionAuthentication, BasicAuthentication
from django.utils.datastructures import MultiValueDictKeyError
from appointments.models import Appointment
from .models import Cvxcodes,Icd10problemcodes,Lioniccode,Cptcodes,Icd10pcscodes,Icdsymtomscodes,Hspccodes,Medicalcodesfordrug
from django.db.models import Q
from django.views.decorators.csrf import csrf_exempt
#from rest_framework.permissions import IsAuthenticated
from rest_framework.views import APIView
from .serializers import *
from .models import *
import jwt,json
from rest_framework import status
from rest_framework import views
from rest_framework.response import Response
from django.http import JsonResponse
#from usermanagement.customerpermissionsehr import Isauthorizedonpatientsdata
from django.core.exceptions import ValidationError
from django.http import Http404
from django.shortcuts import get_object_or_404 as _get_object_or_404
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
#rom usermanagement.models import Patientpersonalinfo,Patientcontactinfo
from rest_framework.parsers import MultiPartParser, FormParser,FileUploadParser
from pharmacy.models import Medicine
from django.db.models import OuterRef, Subquery
# Create your views here.
def get_object_or_404(queryset, *filter_args, **filter_kwargs):
    """
    Same as Django's standard shortcut, but make sure to also raise 404
    if the filter_kwargs don't match the required types.
    """
    try:
        return _get_object_or_404(queryset, *filter_args, **filter_kwargs)
    except (TypeError, ValueError, ValidationError):
        # Malformed lookup values (e.g. a non-numeric pk) become 404s
        # instead of surfacing as 500s.
        raise Http404
# class Ehrlistview(ListAPIView):
class Cutomecrateview(ListCreateAPIView):
    """Shared create/list base view for the EHR record endpoints.

    Subclasses only need to set ``serializer_class`` and ``queryset``.
    """
    # authentication_classes = (JSONWebTokenAuthentication,)
    # permission_classes = (Isauthorizedonpatientsdata,)

    def create(self, request, *args, **kwargs):
        """Validate the posted payload and persist it; respond 201 with the
        serialized record on success (validation errors raise a 400)."""
        newdata = request.data
        serializer = self.get_serializer(data=newdata)
        serializer.is_valid(raise_exception=True)
        self.perform_create(serializer)
        headers = self.get_success_headers(serializer.data)
        return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
# Thin create/list endpoints: each subclass binds one serializer and one
# queryset to the shared Cutomecrateview behaviour.

class Createvitalview(Cutomecrateview):
    serializer_class = vitalsserializer
    queryset = vitals.objects.all()

class Createallergiesview(Cutomecrateview):
    # NOTE(review): queryset points at vitals rather than an allergies
    # model -- looks like a copy/paste slip; confirm the intended model.
    serializer_class = Allerirsserializer
    queryset = vitals.objects.all()

class CreateSocialhistoryview(Cutomecrateview):
    serializer_class = Socialhistoryserializer
    queryset = Socialhistory.objects.all()

class CreateFamilyhistoryview(Cutomecrateview):
    serializer_class = Familyhistoryserializer
    queryset = Familyhistory.objects.all()

class CreateHealthhistoryview(Cutomecrateview):
    serializer_class = Healthhistoryserializer
    queryset = Healthhistory.objects.all()

class CreatePrescriptionview(Cutomecrateview):
    serializer_class = Prescriptionserializer
    queryset = Prescription.objects.all()

class CreateInpatientdetailsview(Cutomecrateview):
    serializer_class = Inpatientdetailsserializer
    queryset = Inpatientdetails.objects.all()

class Createtestsresultsview(Cutomecrateview):
    serializer_class = testsresultsserializer
    queryset = testsresults.objects.all()

class Createvisitreasonview(Cutomecrateview):
    serializer_class = Visitresonserializer
    queryset = Visitreson.objects.all()

class Createattachfileview(Cutomecrateview):
    # File uploads need multipart/file parsers.
    parser_classes = (MultiPartParser, FileUploadParser,)
    serializer_class = Reportfilesserializer
    queryset = Reportfiles.objects.all()

class Createproceduresview(Cutomecrateview):
    serializer_class = Procedureserializer
    queryset = Procedure.objects.all()

class Createvaccineview(Cutomecrateview):
    serializer_class = Vaccinesserializer
    queryset = Vaccines.objects.all()

class Createderivativesview(Cutomecrateview):
    # NOTE(review): duplicates CreatePrescriptionview's serializer and
    # queryset -- confirm this endpoint is meant to share the model.
    serializer_class = Prescriptionserializer
    queryset = Prescription.objects.all()

class Createnoteview(Cutomecrateview):
    serializer_class = Doctornoteserializer
    queryset = Doctornote.objects.all()

class Creategoalview(Cutomecrateview):
    serializer_class = Goalsserializer
    queryset = Goals.objects.all()

class Createsymtomsview(Cutomecrateview):
    serializer_class = Symtomesserializer
    queryset = Symtomes.objects.all()

class Createrefferalview(Cutomecrateview):
    serializer_class = Referallsserializer
    queryset = Referalls.objects.all()

class Createstatusview(Cutomecrateview):
    serializer_class = PatientStatusserializer
    queryset = PatientStatus.objects.all()

class Createproblemsview(Cutomecrateview):
    serializer_class = Patientproblemsserializer
    queryset = Patientproblems.objects.all()

class Createalertview(Cutomecrateview):
    serializer_class = PatientAllertserializer
    queryset = Alert.objects.all()

class Createlabtestdetailsview(Cutomecrateview):
    # NOTE(review): serializer targets Inpatientdetails but the queryset is
    # vitals -- mismatch looks unintended; confirm the intended model.
    serializer_class = Inpatientdetailsserializer
    queryset = vitals.objects.all()

class CreateAmendmentsview(Cutomecrateview):
    serializer_class = Amendmentsserializer
    queryset = Amendments.objects.all()

class CreateAdvancederivativeview(Cutomecrateview):
    serializer_class = Advancederivativesserializer
    queryset = Advancederivatives.objects.all()
#Get methods fromhere om
class Customelistapiview(ListAPIView):
    """Base list view that narrows the class queryset to one appointment,
    identified by the `appoint_id` URL keyword argument."""
    lookup_field = 'appoint_id'
    # permission_classes = (Isauthorizedonpatientsdata,)
    def get_queryset(self):
        appointment_pk = self.kwargs['appoint_id']
        return self.queryset.filter(appointment_id=appointment_pk)
class Customevitallistapiview(ListAPIView):
    """Base list view that returns only the most recent record for the patient
    in the URL (at most one element)."""
    lookup_field = 'patient_id'
    #permission_classes = (Isauthorizedonpatientsdata,)
    def get_queryset(self):
        patient_id = self.kwargs['patient_id']
        rows = list(self.queryset.filter(patient_id=patient_id))
        # Take the last row directly; the old code reversed the whole result
        # set first, building an extra copy just to slice one element.
        # NOTE(review): this still loads every row — an order_by(...).last()
        # would be cheaper once the intended ordering column is confirmed.
        return rows[-1:]
# Read endpoints: latest vital for a patient, the full vital history, and
# per-appointment lists for allergies / social history / family history.
class Getvitalview(Customevitallistapiview):
    # permission_classes = (Isauthorizedonpatientsdata,)
    queryset = vitals.objects.all()
    serializer_class = vitalsserializer
class Getvitallistview(ListAPIView):
    # permission_classes = (Isauthorizedonpatientsdata,)
    lookup_field = 'patient_id'
    queryset = vitals.objects.all()
    serializer_class = vitalsserializer
    # permission_classes = (Isauthorizedonpatientsdata,)
    def get_queryset(self):
        # All vitals rows for the patient in the URL.
        patient_id = self.kwargs['patient_id']
        queryset = self.queryset.filter(patient_id=patient_id)
        return queryset
class Getallergiesview(Customelistapiview):
    # permission_classes = (Isauthorizedonpatientsdata,)
    queryset = Allerirs.objects.all()
    serializer_class = Allerirsserializer
class GetSocialhistoryview(Customelistapiview):
    queryset = Socialhistory.objects.all()
    serializer_class = Socialhistoryserializer
class GetFamilyhistoryview(Customelistapiview):
    queryset = Familyhistory.objects.all()
    serializer_class = Familyhistoryserializer
class GetHealthhistoryview(Customelistapiview):
    serializer_class = Healthhistoryserializer
    # Fixed: no queryset was defined, but the inherited get_queryset reads
    # self.queryset, so every request raised AttributeError.
    queryset = Healthhistory.objects.all()
# Per-appointment read endpoints for prescriptions and inpatient details.
class GetMedicationsview(Customelistapiview):
    queryset = Prescription.objects.all()
    serializer_class = Prescriptionserializer
class GetInpatientdetailsview(Customelistapiview):
    queryset = Inpatientdetails.objects.all()
    serializer_class = Inpatientdetailsserializer
class Gettestsresultsview1(ListAPIView):
    queryset = testsresults.objects.all()
    serializer_class = testsresultsserializer1
    def get_queryset(self):
        """Return the most recent test result for every distinct patient."""
        all_rows = testsresults.objects.all()
        latest_per_patient = []
        # One `.last()` lookup per distinct patient id.
        for row in testsresults.objects.values('patient_id').distinct():
            latest_per_patient.append(all_rows.filter(patient_id=row['patient_id']).last())
        return latest_per_patient
# Fetch all rows to limit num of queries
class Gettestsresultsview(ListAPIView):
    """List test results for one appointment: either the appointment given as a
    query parameter, or the latest appointment between the URL's patient and
    the requesting doctor."""
    lookup_field ="patient_id"
    queryset = testsresults.objects.all()
    serializer_class = testsresultsserializer1
    def get_queryset(self):
        appointment = self.request.GET.get("appointment")
        if appointment:
            objdic = Appointment.objects.get(id=appointment)
        else:
            # Fall back to the latest appointment between this patient and the
            # logged-in doctor. (The old code left `objdic` unbound when the
            # "appointment" parameter was present but empty, raising NameError.)
            doctor = self.request.user.id
            objdic = Appointment.objects.filter(patient=self.kwargs[self.lookup_field], doctor=doctor).last()
        # NOTE(review): objdic may be None when no appointment matches —
        # confirm callers guarantee one exists, otherwise this raises
        # AttributeError on `.id`.
        return self.queryset.filter(appointment=objdic.id)
# Per-appointment read endpoints, one per patient-record model.
class Getvisitreasonview(Customelistapiview):
    queryset = Visitreson.objects.all()
    serializer_class = Visitresonserializer
class Getproceduresview(Customelistapiview):
    queryset = Procedure.objects.all()
    serializer_class = Procedureserializer
class Getvaccinesview(Customelistapiview):
    queryset = Vaccines.objects.all()
    serializer_class = Vaccinesserializer
class getmedicationview(Customelistapiview):
    serializer_class = getmedicationsserializer
    queryset = Prescription.objects.all()
class Getnoteview(Customelistapiview):
    queryset =Doctornote.objects.all()
    serializer_class = Doctornoteserializer
class Getgoalview(Customelistapiview):
    queryset = Goals.objects.all()
    serializer_class = Goalsserializer
class Getsymtomsview(Customelistapiview):
    queryset = Symtomes.objects.all()
    serializer_class = Symtomesserializer
class Getrefferalview(Customelistapiview):
    #permission_classes = (Isauthorizedonpatientsdata,)
    queryset =Referalls.objects.all()
    serializer_class = Referallsserializer
class Getattachfileview(Customelistapiview):
    queryset = Reportfiles.objects.all()
    serializer_class = Reportfilesserializer
class Getlabtestdetailsview(Customelistapiview):
    #permission_classes = (Isauthorizedonpatientsdata,)
    # NOTE(review): no queryset defined — the inherited get_queryset reads
    # self.queryset and will raise AttributeError; confirm the intended model.
    serializer_class = Inpatientdetailsserializer
class Getstatusview(Customelistapiview):
    #permission_classes = (Isauthorizedonpatientsdata,)
    queryset =PatientStatus.objects.all()
    serializer_class = PatientStatusserializer
class Getproblemssview(Customelistapiview):
    #permission_classes = (Isauthorizedonpatientsdata,)
    queryset =Patientproblems.objects.all()
    serializer_class = Patientproblemsserializer
class Getalertview(Customelistapiview):
    #permission_classes = (Isauthorizedonpatientsdata,)
    queryset =PatientAllert.objects.all()
    serializer_class = PatientAllertserializer
class Getderivativesview(Customelistapiview):
    #permission_classes = (Isauthorizedonpatientsdata,)
    # NOTE(review): no queryset defined — the inherited get_queryset will fail;
    # the create-side view uses Prescription, confirm and add it here.
    serializer_class = Prescriptionserializer
class GetAmendmentsview(Customelistapiview):
    #permission_classes = (Isauthorizedonpatientsdata,)
    queryset = Amendments.objects.all()
    serializer_class = Amendmentsserializer
class GetAdvancederivativesview(Customelistapiview):
    #permission_classes = (Isauthorizedonpatientsdata,)
    queryset = Advancederivatives.objects.all()
    serializer_class = Advancederivativesserializer
# class CreateHealthhistoryview(CreateAPIView):
# from here on we have update and delete views
class CustomeRetrieveUpdateDestroyAPIView(RetrieveUpdateDestroyAPIView):
    """Retrieve/update/destroy base view with an explicit get_object."""
    # authentication_classes = (JSONWebTokenAuthentication,)
    #permission_classes = (Isauthorizedonpatientsdata,)
    def get_object(self):
        """
        Returns the object the view is displaying.
        You may want to override this if you need to provide non-standard
        queryset lookups. Eg if objects are referenced using multiple
        keyword arguments in the url conf.
        """
        queryset = self.filter_queryset(self.get_queryset())
        # Resolve which URL kwarg identifies the object.
        lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field
        assert lookup_url_kwarg in self.kwargs, (
            'Expected view %s to be called with a URL keyword argument '
            'named "%s". Fix your URL conf, or set the `.lookup_field` '
            'attribute on the view correctly.' %
            (self.__class__.__name__, lookup_url_kwarg)
        )
        obj = get_object_or_404(queryset, **{self.lookup_field: self.kwargs[lookup_url_kwarg]})
        # May raise a permission denied
        self.check_object_permissions(self.request, obj)
        return obj
# Retrieve/update/destroy endpoints, one per patient-record model, all keyed
# by primary key via lookup_field = 'id'.
class RUDvitalview(CustomeRetrieveUpdateDestroyAPIView):
    # authentication_classes = (BasicAuthentication,)
    lookup_field = 'id'
    queryset = vitals.objects.all()
    serializer_class = vitalsserializer
class RUDallergiesview(CustomeRetrieveUpdateDestroyAPIView):
    lookup_field = 'id'
    queryset = Allerirs.objects.all()
    serializer_class = Allerirsserializer
class RUDSocialhistoryview(CustomeRetrieveUpdateDestroyAPIView):
    lookup_field = 'id'
    queryset = Socialhistory.objects.all()
    serializer_class = Socialhistoryserializer
class RUDFamilyhistoryview(CustomeRetrieveUpdateDestroyAPIView):
    lookup_field = 'id'
    queryset = Familyhistory.objects.all()
    serializer_class = Familyhistoryserializer
class RUDHealthhistoryview(CustomeRetrieveUpdateDestroyAPIView):
    lookup_field = 'id'
    queryset = Healthhistory.objects.all()
    serializer_class = Healthhistoryserializer
class RUDproblemssview(CustomeRetrieveUpdateDestroyAPIView):
    lookup_field = 'id'
    queryset = Patientproblems.objects.all()
    serializer_class = Patientproblemsserializer
class RUDMedicationsview(CustomeRetrieveUpdateDestroyAPIView):
    lookup_field = 'id'
    queryset = Prescription.objects.all()
    serializer_class = Prescriptionserializer
class RUDInpatientdetailsview(CustomeRetrieveUpdateDestroyAPIView):
    # authentication_classes = (JSONWebTokenAuthentication, )
    lookup_field = 'id'
    queryset = Inpatientdetails.objects.all()
    serializer_class = Inpatientdetailsserializer
class RUDtestsresultsview(CustomeRetrieveUpdateDestroyAPIView):
    lookup_field = 'id'
    queryset = testsresults.objects.all()
    serializer_class = testsresultsserializer
class RUDvisitreasonview(CustomeRetrieveUpdateDestroyAPIView):
    lookup_field = 'id'
    #permission_classes = (Isauthorizedonpatientsdata,)
    queryset = Visitreson.objects.all()
    serializer_class = Visitresonserializer
class RUDproceduresview(CustomeRetrieveUpdateDestroyAPIView):
    lookup_field = 'id'
    #permission_classes = (Isauthorizedonpatientsdata,)
    queryset = Procedure.objects.all()
    serializer_class = Procedureserializer
class RUDvaccineview(CustomeRetrieveUpdateDestroyAPIView):
    lookup_field = 'id'
    #permission_classes = (Isauthorizedonpatientsdata,)
    queryset = Vaccines.objects.all()
    serializer_class = Vaccinesserializer
class RUDnoteview(CustomeRetrieveUpdateDestroyAPIView):
    lookup_field = 'id'
    #permission_classes = (Isauthorizedonpatientsdata,)
    queryset = Doctornote.objects.all()
    serializer_class = Doctornoteserializer
class RUDgoalview(CustomeRetrieveUpdateDestroyAPIView):
    lookup_field = 'id'
    #permission_classes = (Isauthorizedonpatientsdata,)
    queryset = Goals.objects.all()
    serializer_class = Goalsserializer
class RUDsymtomsview(CustomeRetrieveUpdateDestroyAPIView):
    lookup_field = 'id'
    #permission_classes = (Isauthorizedonpatientsdata,)
    queryset = Symtomes.objects.all()
    serializer_class = Symtomesserializer
class RUDrefferalview(CustomeRetrieveUpdateDestroyAPIView):
    lookup_field = 'id'
    #permission_classes = (Isauthorizedonpatientsdata,)
    queryset = Referalls.objects.all()
    serializer_class = Referallsserializer
class RUDattachfileview(CustomeRetrieveUpdateDestroyAPIView):
    lookup_field = 'id'
    #permission_classes = (Isauthorizedonpatientsdata,)
    queryset = Reportfiles.objects.all()
    serializer_class = Reportfilesserializer
class RUDlabtestdetailsview(CustomeRetrieveUpdateDestroyAPIView):
    lookup_field = 'id'
    #permission_classes = (Isauthorizedonpatientsdata,)
    # NOTE(review): queryset is testsresults but the serializer is for
    # Inpatientdetails — this mismatch looks accidental; verify.
    queryset = testsresults.objects.all()
    serializer_class = Inpatientdetailsserializer
class RUDstatusview(CustomeRetrieveUpdateDestroyAPIView):
    lookup_field = 'id'
    #permission_classes = (Isauthorizedonpatientsdata,)
    queryset = PatientStatus.objects.all()
    serializer_class = PatientStatusserializer
class RUDalertview(CustomeRetrieveUpdateDestroyAPIView):
    lookup_field = 'id'
    #permission_classes = (Isauthorizedonpatientsdata,)
    queryset = PatientAllert.objects.all()
    serializer_class = PatientAllertserializer
class RUDderivativesview(CustomeRetrieveUpdateDestroyAPIView):
    #permission_classes = (Isauthorizedonpatientsdata,)
    # NOTE(review): no queryset or lookup_field defined — get_object will fail;
    # the sibling views use Prescription with lookup_field 'id', confirm.
    serializer_class = Prescriptionserializer
class RUDAmendmentsview(CustomeRetrieveUpdateDestroyAPIView):
    lookup_field = 'id'
    queryset = Amendments.objects.all()
    serializer_class = Amendmentsserializer
class RUDAdvancederivativesview(CustomeRetrieveUpdateDestroyAPIView):
    lookup_field = 'id'
    queryset = Advancederivatives.objects.all()
    serializer_class = Advancederivativesserializer
# Plain template views: each renders one page, passing through the URL/user
# identifiers the template needs.
def homepage(request,patient_id):
    return render(request,'extend_ehr.html' ,{"patient_id":patient_id})
def ehrpatient(request):
    return render(request, 'extend_ehrpatient.html')
def manageuser(request):
    # Exposes the logged-in user's organisation id to the template.
    return render(request, 'extend_manageuser.html',{"orgnastion_id":request.user.orgnastion_id})
def patientpart(request,patient_id):
    return render(request, 'extend_patientparts.html',{"patient_id":patient_id})
def userprofile(request,user_id):
    return render(request, 'extend_userprofile.html',{"user_id":user_id})
def vitalsummaryview(request,pat_id):
    # Looks up the patient so the template can show the UHID alongside the id.
    patient = Patient.objects.get(pat_id=pat_id)
    return render(request, 'vital-summary.html',{"patient":pat_id,"UHID":patient.UHID})
from django.core.mail import get_connection, send_mail
from django.core.mail.message import EmailMessage
from django.template.loader import render_to_string
from django.core.mail import EmailMultiAlternatives
from django.utils.html import strip_tags
class Referalview(APIView):
    """Email a patient referral (rendered from email.html) to another doctor."""
    def post(self, request, *args, **kwargs):
        # Fixed: this lookup was commented out while `patientdata` was still
        # used in the template context below, so every request raised NameError.
        patientdata = Patientpersonalinfo.objects.get(id=self.request.data.get("patient"))
        subject, from_email, to = 'Patient referal', 'mailauthentication@cygengroup.com', self.request.data.get("doctoremail")
        html_content = render_to_string('email.html', {'user': request.user, "referedto": request.data, "client": patientdata})
        text_content = strip_tags(html_content)
        msg = EmailMultiAlternatives(subject, text_content, from_email, [to])
        msg.attach_alternative(html_content, "text/html")
        msg.send()
        # NOTE(review): this second mail passes the raw request payload as the
        # recipient list, not an email address — confirm it is still needed.
        send_mail(
            "sample test results email",
            "sent the email but content is missing",
            'mailauthentication@cygengroup.com',
            [self.request.data],
            fail_silently=True
        )
        return Response({"sucess": "email sent"}, status=status.HTTP_201_CREATED)
class Testshaeview(APIView):
    """Email test results (rendered from lab.html) to a given address."""
    def post(self, request, *args, **kwargs):
        # Fixed: this lookup was commented out while `patientdata` was still
        # used in the template context below, so every request raised NameError.
        patientdata = Patientpersonalinfo.objects.get(id=self.request.data.get("patient_id"))
        subject, from_email, to = 'test referal', 'mailauthentication@cygengroup.com', self.request.data.get("email")
        html_content = render_to_string('lab.html', {'user': request.user, "referedto": request.data, "client": patientdata})
        text_content = strip_tags(html_content)
        msg = EmailMultiAlternatives(subject, text_content, from_email, [to])
        msg.attach_alternative(html_content, "text/html")
        msg.send()
        # NOTE(review): this second mail passes the raw request payload as the
        # recipient list, not an email address — confirm it is still needed.
        send_mail(
            "sample test results email",
            "sent the email but content is missing",
            'mailauthentication@cygengroup.com',
            [self.request.data],
            fail_silently=True
        )
        return Response({"sucess": "email sent"}, status=status.HTTP_201_CREATED)
class Pharmacyview(APIView):
    """Email a prescription (rendered from pharmacy.html) to a pharmacy/doctor."""
    def post(self, request, *args, **kwargs):
        # Fixed: this lookup was commented out while `patientdata` was still
        # used in the template context below, so every request raised NameError.
        patientdata = Patientpersonalinfo.objects.get(id=self.request.data.get("patient_id"))
        subject, from_email, to = 'test referal', 'mailauthentication@cygengroup.com', self.request.data.get("doctoremail")
        html_content = render_to_string('pharmacy.html', {'user': request.user, "referedto": request.data, "client": patientdata})
        text_content = strip_tags(html_content)
        msg = EmailMultiAlternatives(subject, text_content, from_email, [to])
        msg.attach_alternative(html_content, "text/html")
        msg.send()
        # NOTE(review): this second mail passes the raw request payload as the
        # recipient list, not an email address — confirm it is still needed.
        send_mail(
            "sample test results email",
            "sent the email but content is missing",
            'mailauthentication@cygengroup.com',
            [self.request.data],
            fail_silently=True
        )
        return Response({"sucess": "email sent"}, status=status.HTTP_201_CREATED)
# Medical-code lookup endpoints. Each filters its code table by the
# `searchText` query parameter (matching description OR code) and returns a
# truncated JSON list.
@csrf_exempt
def Cvxcodesview(request):
    # Returns every CVX row; no search filtering on this one.
    data=list(Cvxcodes.objects.all().values())
    return JsonResponse({"data":data})
def Systomsview(request):
    data=list(Icdsymtomscodes.objects.all().values())
    return JsonResponse({"data":data})
def Problemview(request):
    key = request.GET['searchText']
    # Union of description and code matches, capped at 50 rows.
    data=list(Icd10problemcodes.objects.filter(description__icontains=key).values()| Icd10problemcodes.objects.filter(code__icontains=key).values())[:50]
    return JsonResponse({"data":data})
def Testorderview(request):
    key = request.GET['searchText']
    data=list(LabAssigntest2.objects.filter(description__icontains=key).values()| LabAssigntest2.objects.filter(codes__icontains=key).values())[:20]
    return JsonResponse({"data":data})
def icd10Procedureview(request):
    key = request.GET['searchText']
    data=list(Icd10pcscodes.objects.filter(description__icontains=key).values()| Icd10pcscodes.objects.filter(code__icontains=key).values())[:20]
    return JsonResponse({"data":data})
def CptProcedureview(request):
    key = request.GET['searchText']
    data=list(Cptcodes.objects.filter(description__icontains=key).values()| Cptcodes.objects.filter(coodes__icontains=key).values())[:20]
    return JsonResponse({"data":data})
def HspcsProcedureview(request):
    key = request.GET['searchText']
    data=list(Hspccodes.objects.filter(description__icontains=key).values()| Hspccodes.objects.filter(code__icontains=key).values())[:20]
    return JsonResponse({"data":data})
def medicineview(request):
    key = request.GET['searchText']
    # Matches on id or name for medicines.
    data=list(Medicine.objects.filter(id__icontains=key).values()| Medicine.objects.filter(name__icontains=key).values())[:20]
    return JsonResponse({"data":data})
|
# Convert `a` to the given base, printing each division step and the final
# digit string (most-significant digit first).
a = 37
print('num a convertir:', a)
i = 0       # current power of the base, advanced by printWithPower
lst = ''    # digits accumulated least-significant first
base = 2

def printWithPower(var, val):
    """Print one conversion step: digit x base^i (= remaining value)."""
    global i
    if val != 0:
        print(var, 'x '+str(int(base))+'^'+str(i), ' = ', int(val))
    else:
        print(var, 'x '+str(int(base))+'^'+str(i))
    i = i + 1

while a > 0:
    # Fixed: the old code used float division (a = a/base), which loses
    # precision for large inputs; divmod keeps everything in exact integers.
    a, r = divmod(a, base)
    printWithPower(int(r), a)
    lst += str(int(r))

# Digits were collected least-significant first, so reverse for display.
tsl = reversed(lst)
print('\nTermino la division papa')
print('resutado:', list(tsl))
from game.scrabble_box import Rulebook
from game.scrabble_players import ComputerPlayer
from unittest import TestCase
import os
import string
# Move up to the parent directory so that we can access the correct ground files.
os.chdir("../..")
rb = Rulebook()
class TestComputerPlayer(TestCase):
    # Exercises ComputerPlayer word finding and move-parameter discovery
    # against a shared Rulebook. Assertion values depend on the game's
    # dictionary, so keep the tile racks in sync with the expected words.
    def test_find_words(self):
        player = ComputerPlayer(id=1, rulebook=rb, init_tiles=['A', 'P', 'P', 'L', 'E', '?', 'Z'], name='test1')
        """
        Tests using player's tiles
        """
        # Test basic anagram of tiles
        found_words = set(player.find_words())
        self.assertTrue('APPLE' in found_words and 'APE' in found_words and 'PLEA' in found_words)
        # Test that blank words are being created appropriately.
        # (lowercase letters denote the '?' blank tile)
        found_words = set(player.find_words())
        self.assertTrue('APPLEs' in found_words)
        """
        Tests using tiles on board, and with meeting the restrictions of given move parameters.
        """
        # Test using mandated tiles on the board
        found_words = set(player.find_words(fixed_tiles=[('M', 0)]))
        self.assertTrue('MAPLE' in found_words and 'MAZE' in found_words and 'APPLE' not in found_words)
        # Assert empty sets are returned when confronted with impossible demands
        found_words = set(player.find_words(fixed_tiles=[('M', 0), ('Z', 1)]))
        self.assertEqual(found_words, set([]))
        """
        Check that correct words are being generated with the tighter constraints of minimum and maximum word
        lengths.
        """
        found_words = set(player.find_words(min_length=4, max_length=9, fixed_tiles=[('D', 4), ('S', 5)]))
        self.assertTrue('PLEADS' in found_words)
        self.assertTrue('SPA' not in found_words)
        """
        Check that all words meet the minimum length for move legality, and that for the words which surpass that
        length have the required tiles in the correct positions.
        (Some of the words found here will be invalid, but that's checked at the next position)
        """
        self.assertTrue(all([
            (len(word) == 5 and word[4] == 'D') or
            (len(word) == 6 and word[4] == 'D' and word[5] == 'S')
            for word in found_words
        ]))
        """
        Similar check, but with the starting words.
        """
        found_words = set(player.find_words(min_length=1, max_length=8, fixed_tiles=[('S', 0)]))
        self.assertTrue('SAP' in found_words)
        self.assertTrue('SPAcE' in found_words)
        self.assertTrue(all([
            word[0] == 'S'
            for word in found_words
        ]))
        """
        Check that if a word terminates one tile behind a required tile, that word is not considered valid.
        """
        found_words = set(player.find_words(min_length=1, max_length=8, fixed_tiles=[('S', 4)]))
        self.assertTrue('PLEA' not in found_words and 'PLEAS' in found_words)
    def test_get_move_params(self):
        player = ComputerPlayer(id=1, rulebook=rb, init_tiles=['A', 'P', 'P', 'L', 'E', '?', 'Z'], name='test1')
        # Empty 15x15 board represented as 15 strings of 15 spaces.
        board_state = [' '*15 for _ in range(15)]
        """
        We'll place an S tile in the center of the board. This is an illegal board position, but we'll overlook that
        for testing as it makes for a valuable test case.
        """
        board_state[7] = ' '*7 + 'S' + ' '*7
        """
        We check that it's discovered that at least two tiles must be played in order to meet legality, and that the
        third tile in the eventual word must be S if we start from the provided coordinates.
        """
        move_param = player.get_move_params((7, 5), 'R', board_state)
        self.assertEqual(move_param, (2, [('S', 2)]))
        """
        We now check that if we are starting our move from this center tile with an S on it, it is correctly labeled
        as the first letter of the board, this first tile asserts the game validity, and the maximum length of any
        played word is 8 tiles.
        """
        move_param = player.get_move_params((7, 7), 'R', board_state)
        self.assertEqual(move_param, (1, [('S', 0)]))
        """
        Check that if the board letters trail the seven played tiles, we correctly assert not only the validity of this
        move, but that the maximum length of a word resulting from this move is 8 tiles.
        """
        move_param = player.get_move_params((7, 0), 'R', board_state)
        self.assertEqual(move_param, (7, [('S', 7)]))
        """
        Having asserted that this works for single letters, we'll provide and additional check for longer strings.
        """
        board_state[7] = ' ' + ''.join([string.ascii_uppercase[i] for i in range(14)])
        assert(len(board_state[7]) == 15)
        presumed_fixed = [(letter, index+1) for index, letter in enumerate(string.ascii_uppercase[:14])]
        move_param = player.get_move_params((7, 0), 'R', board_state)
        self.assertEqual(move_param, (1, presumed_fixed))
        """
        We also check for interspaced tiles, such as a board with " X A B ...".
        """
        board_state[7] = ' ' + ''.join([string.ascii_uppercase[i] if i%2 == 0 else ' ' for i in range(14)])
        presumed_fixed = [(letter, index+1) for index, letter in enumerate(string.ascii_uppercase[:14])
                          if index % 2 == 0]
        move_param = player.get_move_params((7, 0), 'R', board_state)
        self.assertEqual(move_param, (1, presumed_fixed))
        """
        Check that words neighboring, but not intersecting, existing words are treated as valid.
        """
        board_state[7] = ' '*7 + 'S' + ' '*7
        move_param = player.get_move_params((6, 7), 'R', board_state)
        self.assertEqual(move_param, (1, []))
        """
        Lastly, check for both cases where early legality is determined by a move neighboring an existing tile,
        but also saves the correct information regarding maximum length and fixed tiles due to later intersectionality.
        """
        board_state[7] = ' '*7 + 'S' + ' '*7
        board_state[6] = ' '*13 + 'NG'
        move_param = player.get_move_params((6, 7), 'R', board_state)
        self.assertEqual(move_param, (1, [('N', 6), ('G', 7)]))
|
from django.urls import path
from rtmeli.accounts.views import (
login_view,
logout_view,
authorize_view,
signout_view
)
app_name = "accounts"

# Authentication routes for the accounts app.
urlpatterns = [
    path("login/", view=login_view, name="login"),
    path("logout/", view=logout_view, name="logout"),
    path("signout/", view=signout_view, name="signout"),
    path("authorize/", view=authorize_view, name="authorize"),
]
|
from django.urls import path
from .views import (
ProductFilterListAPIView,
ProductImageListAPIView,
all_product,
ProductMarkaListAPIView,
SearchListAPIView,
)
app_name = 'product_apis'

# Product API routes. Only filter and search are currently active; the other
# imported views are kept for the disabled routes below.
urlpatterns = [
    path('filter-api-product/', ProductFilterListAPIView.as_view(), name='filter_api_product'),
    # path('filter-api-product-images/', ProductImageListAPIView.as_view(), name='filter_api_product_images'),
    # path('filter-api-product-markas/', ProductMarkaListAPIView.as_view(), name='filter_api_product_markas'),
    # path('products/<str:value>/', all_product, name='all_product'),
    # path('search/<str:title>/', SearchListAPIView.as_view(), name='all_product'),
    path('search/', SearchListAPIView.as_view(), name='all_product')
]
|
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Everything below exists only for static type checking; none of it is
    # evaluated at runtime.
    from decimal import Decimal
    from typing import Literal, Protocol
    from typing_extensions import TypedDict

    # Accepted payment flows and payment lifecycle states.
    PaymentMethod = Literal['free', 'cc', 'manual']
    PaymentState = Literal['open', 'paid', 'failed', 'cancelled']

    class PriceDict(TypedDict):
        # A single price: amount plus currency/fee metadata.
        amount: float
        currency: str | None
        fee: float
        credit_card_payment: bool

    class FeePolicy(Protocol):
        # Structural interface for fee calculation. `compensate` presumably
        # applies the inverse adjustment — confirm against implementations.
        def from_amount(self, __amount: Decimal | float) -> Decimal | float:
            ...
        def compensate(self, __amount: Decimal | float) -> Decimal | float: ...
|
# Product catalog: maps an order code to its display name and unit price.
products = {
    "americano":{"name":"Americano","price":150.00},
    "brewedcoffee":{"name":"Brewed Coffee","price":110.00},
    "cappuccino":{"name":"Cappuccino","price":170.00},
    "dalgona":{"name":"Dalgona","price":170.00},
    "espresso":{"name":"Espresso","price":140.00},
    "frappuccino":{"name":"Frappuccino","price":170.00},
}
#problem 1
def get_product(code):
return products[code]
#problem 2
def get_property(code,property):
return products[code][property]
#problem 3
def main():
i=0
raw_orders = []
while True:
clerk = input("Input customer's orders: ")
if clerk =="/":
break
else:
order_list = clerk.split(",")
raw_orders += [order_list]
i+=1
a_qty=0
b_qty=0
c_qty=0
d_qty=0
e_qty=0
f_qty=0
total =0
for j in raw_orders:
order_code = j[0]
order_qty = j[1]
details = get_product(order_code)
price = get_property(order_code,"price")
name = details["name"]
if name == "Americano":
a_qty += int(order_qty)
a_subtotal = str(price * a_qty)
elif name == "Brewed Coffee":
b_qty += int(order_qty)
b_subtotal = str(price * b_qty)
elif name == "Cappuccino":
c_qty += int(order_qty)
c_subtotal = str(price * c_qty)
elif name == "Dalgona":
d_qty += int(order_qty)
d_subtotal = str(price * d_qty)
elif name == "Espresso":
e_qty += int(order_qty)
e_subtotal = str(price * e_qty)
elif name == "Frappuccino":
f_qty += int(order_qty)
f_subtotal = str(price * f_qty)
with open("receipt.txt","w") as f:
f.write("CODE\t\t\t\t\t\t\tNAME\t\t\t\t\t\tQUANTITY\t\t\t\t\tSUBTOTAL")
with open("receipt.txt","a+") as f:
if a_qty>0:
f.write("\namericano\t\t\t\t\tAmericano\t\t\t\t"+str(a_qty)+"\t\t\t\t\t\t\t\t\t"+a_subtotal)
total+=float(a_subtotal)
if b_qty>0:
f.write("\nbrewedcoffee\t\t\tBrewed Coffee\t\t"+str(b_qty)+"\t\t\t\t\t\t\t\t\t"+b_subtotal)
total+=float(b_subtotal)
if c_qty>0:
f.write("\ncappuccino\t\t\t\tCappuccino\t\t\t"+str(c_qty)+"\t\t\t\t\t\t\t\t\t"+c_subtotal)
total+=float(c_subtotal)
if d_qty>0:
f.write("\ndalgona\t\t\t\t\t\tDalgona\t\t\t\t\t"+str(d_qty)+"\t\t\t\t\t\t\t\t\t"+d_subtotal)
total+=float(d_subtotal)
if e_qty>0:
f.write("\nespresso\t\t\t\t\tEspresso\t\t\t\t"+str(e_qty)+"\t\t\t\t\t\t\t\t\t"+e_subtotal)
total+=float(e_subtotal)
if f_qty>0:
f.write("\nfrappuccino\t\t\t\tFrappuccino\t\t\t"+str(f_qty)+"\t\t\t\t\t\t\t\t\t"+f_subtotal)
total+=float(f_subtotal)
f.write("\n")
f.write("\nTotal:\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t"+str(total))
main()
|
#!/usr/bin/env python
# coding: utf-8
import os
import numpy as np
import argparse
from preprocess import rotate
from utils import for_each_sample, isCancerSample
# Globals
rotated_num = 0  # number of samples rotated during this run (reported by main)
def handle_sample(sample, patient, patientDir):
    """Save 90/180/270-degree rotated copies of a cancer sample's array.

    Samples whose name contains "R" are skipped — presumably already-rotated
    outputs from a previous run (their filenames get an "R<deg>-" prefix below).
    Increments the global rotated_num counter for reporting.
    """
    global rotated_num
    # Fixed idiom: `"R" not in sample` instead of `not "R" in sample`.
    if isCancerSample(sample) and "R" not in sample:
        print("Rotating: {} {}".format(patient, sample))
        rotated_num += 1
        sampleArray = np.load(os.path.join(patientDir, sample))
        for deg in [90, 180, 270]:
            rotated = rotate(sampleArray, deg)
            # e.g. "abc-def.npy" -> "abcR90-def.npy"
            fileName = "R{}-".format(deg).join(sample.split("-"))
            np.save(os.path.join(patientDir, fileName), rotated)
def main():
    """Parse the data-directory argument, rotate every matching sample, and
    report how many samples were rotated."""
    parser = argparse.ArgumentParser(description="Rotate images.")
    parser.add_argument(
        "relativePath",
        type=str,
        default=os.path.join("data", "preprocessed"),
        nargs="?",
        # Fixed "proprocessed" typo in the user-facing help text.
        help="Relative path to preprocessed data",
    )
    args = parser.parse_args()
    for_each_sample(args.relativePath, handle_sample)
    print("Rotated: {}".format(rotated_num))
# Entry point when run as a script.
if __name__ == "__main__":
    main()
|
import os
import matplotlib
if os.name == 'posix':
matplotlib.use('Qt4Agg') # Force Mac users to use this backend
import tkinter as tk
from tkinter import ttk
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import __version__ as pltVersion
from matplotlib.patches import Rectangle
from astropy.io import fits
import warnings
import time
import csv
from spectrum import *
import pdb
from gui_utils import *
class Eyecheck(object):
    def __init__(self, specObj, options):
        """Build the Eyecheck session: load in/out files, restore the user's
        progress, read the starting spectrum, create the plot, and launch the
        tkinter GUI main loop (this call blocks until the GUI closes)."""
        # *** Store input variables ***
        self.specObj = specObj  # The Spectrum object created in the pyhammer script
        self.options = options  # The list of options input by the user in the pyhammer script
        # *** Define useful information ***
        self.specType = ['O', 'B', 'A', 'F', 'G', 'K', 'M', 'L']
        self.subType = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
        self.metalType = ['-2.0', '-1.5', '-1.0', '-0.5', '+0.0', '+0.5', '+1.0']
        self.templateDir = os.path.join(os.path.split(__file__)[0], 'resources', 'templates')
        # *** Read the infile ***
        # Each entry becomes [basename, type]; commas are normalized to spaces
        # before splitting off the last whitespace-separated field.
        self.inData = []
        with open(self.options['infile'], 'r') as file:
            for line in file:
                line = line.strip()
                if line.find(',') > 0: line = line.replace(',',' ')
                self.inData.append(' '.join(line.split()).rsplit(' ',1))
                self.inData[-1][0] = os.path.basename(self.inData[-1][0])
        self.inData = np.asarray(self.inData)
        # *** Read the outfile ***
        with open(self.options['outfile'], 'r') as file:
            reader = csv.reader(file)
            self.outData = np.asarray(list(reader)[1:]) # Ignore the header line
        # *** Define user's spectrum ***
        self.specIndex = 0 # The index in the inData where we should start from
        # Loop through the outData and see if some classification has occured already
        # (columns 4 and 5 hold the user's eyecheck results; 'nan' means unset).
        for i in range(len(self.outData)):
            # If classification by the user has already occured for the
            # current spectrum, then move to the next one.
            if self.outData[self.specIndex,4] != 'nan' or self.outData[self.specIndex,5] != 'nan':
                self.specIndex += 1
            else:
                # Break out if we can get to a spectrum that hasn't been
                # classified by the user yet
                break
        # If the outfile already has eyecheck results, ask if they want
        # to start where they left off
        # NOTE(review): `i` is only defined if outData is non-empty; this is
        # only reached when specIndex != 0, which implies the loop ran.
        if self.specIndex != 0:
            # If every outfile spectra already has an eyecheck result
            # they've classified everything and ask them if they want
            # to start over instead.
            if self.specIndex == i+1:
                modal = ModalWindow('Every spectrum in the output file already has\nan eyecheck result. Do you want to start over?')
                if modal.choice == 'yes':
                    self.specIndex = 0
                else:
                    return
            else:
                modal = ModalWindow('The output file already has eyecheck results. The\n'
                                    'eyecheck will start with the next unclassified\n'
                                    'spectrum. Do you want to start over instead?')
                if modal.choice == 'yes':
                    self.specIndex = 0
        # Now use the Spectrum object to read in the user's appropriate starting spectrum
        fname = self.outData[self.specIndex,0]
        ftype = np.extract(self.inData[:,0] == os.path.basename(fname), self.inData[:,1])[0]
        self.specObj.readFile(self.options['spectraPath']+fname, ftype) # Ignore returned values
        self.specObj.normalizeFlux()
        # *** Define GUI variables ***
        self.root = tk.Tk() # Create the root GUI window
        # Define String and Int variables to keep track of widget states
        self.spectrumEntry = tk.StringVar(value = os.path.basename(self.outData[self.specIndex,0]))
        self.specState = tk.IntVar(value = self.specType.index(self.outData[self.specIndex,2][0]))
        self.subState = tk.IntVar(value = int(self.outData[self.specIndex,2][1]))
        self.metalState = tk.IntVar(value = self.metalType.index(self.outData[self.specIndex,3]))
        self.subButtons = []   # We keep track of these radio buttons so
        self.metalButtons = [] # they can be enabled and disabled if need be
        self.smoothState = tk.BooleanVar(value = False)
        self.lockSmooth = tk.BooleanVar(value = False)
        self.showTemplateError = tk.BooleanVar(value = True)
        self.removeSdssSpikeState = tk.BooleanVar(value = False)
        # *** Define figure variables ***
        plt.ion()               # Turn on plot interactivity
        plt.style.use('ggplot') # Makes the plot look nice
        self.full_xlim = None   # +--
        self.full_ylim = None   # | Store these to keep track
        self.zoomed_xlim = None # | of zoom states on the plot
        self.zoomed_ylim = None # |
        self.zoomed = False     # +--
        self.updatePlot()       # Create the plot
        # Other variables (track repeated "p" key presses for the easter egg /
        # shortcut handling elsewhere in the class)
        self.pPressNum = 0
        self.pPressTime = 0
        # *** Initialize the GUI
        self.setupGUI()      # Call the method for setting up the GUI layout
        self.root.mainloop() # Run the GUI
###
# Setup/Close Methods
#
def areYouSure(self):
"""
Description:
In some cases, if the user wants to quit, we should ask if they're
sure they want to quit before moving on to the _exit function.
"""
modal = ModalWindow('Are you sure you want to quit?', parent = self.root, title = 'Quit')
self.root.wait_window(modal.modalWindow)
if modal.choice == 'yes':
self._exit()
    def _exit(self):
        """
        Description:
            This method is called anytime the user wants to quit the program.
            It will be called if the "X" out of the GUI window, if they choose
            quit from the menu, or if they finish classifying and say they're
            done. This function will first write the self.outData variable
            contents to their outfile, then clean up the GUI and matplotlib
            window.
        """
        # Write the outData to the output file
        with open(self.options['outfile'], 'w') as outfile:
            outfile.write('#Filename,Radial Velocity (km/s),Guessed Spectral Type,Guessed [Fe/H],User Spectral Type,User [Fe/H]\n')
            for i, spectra in enumerate(self.outData):
                for j, col in enumerate(spectra):
                    outfile.write(col)
                    # Comma after every column but the last of the six (see header line above)
                    if j < 5: outfile.write(',')
                # Newline between rows, but no trailing newline after the last row
                if i < len(self.outData)-1: outfile.write('\n')
        # Close down the GUI and plot window
        self.root.destroy()
        plt.close('all')
def setupGUI(self):
"""
Description:
This handles setting up all the widgets on the main GUI window and
defining the initial state of the GUI.
"""
# *** Set root window properties ***
self.root.title('PyHammer Eyecheck')
if os.name == 'nt': self.root.iconbitmap(os.path.join(os.path.split(__file__)[0],'resources','sun.ico'))
self.root.resizable(False, False)
self.root.geometry('+100+100')
# Set the close protocol to call this class' personal exit function
self.root.protocol('WM_DELETE_WINDOW', self.areYouSure)
# *** Define menubar ***
menubar = tk.Menu() # Create the overall menubar
# Define the options menu
optionsMenu = tk.Menu(menubar, tearoff = 1)
optionsMenu.add_checkbutton(label = 'Show Template Error', variable = self.showTemplateError, command = self.updatePlot)
optionsMenu.add_checkbutton(label = 'Smooth Spectrum', variable = self.smoothState, command = self.callback_smooth)
optionsMenu.add_checkbutton(label = 'Lock Smooth State', variable = self.lockSmooth)
optionsMenu.add_checkbutton(label = 'Remove SDSS Stitch Spike', variable = self.removeSdssSpikeState, command = self.updatePlot)
optionsMenu.add_separator()
optionsMenu.add_command(label = 'Quit', command = self.areYouSure)
# Define the about menu
helpMenu = tk.Menu(menubar, tearoff = 0)
helpMenu.add_command(label = 'Help', command = self.callback_help)
helpMenu.add_command(label = 'About', command = self.callback_about)
# Put all menus together
menubar.add_cascade(label = 'Options', menu = optionsMenu)
menubar.add_cascade(label = 'Help', menu = helpMenu)
self.root.config(menu = menubar)
# *** Define labels ***
for i, name in enumerate(['Spectrum', 'Type', 'Subtype', '[Fe/H]', 'Switch Type', 'Switch [Fe/H]', 'Spectrum Choices']):
ttk.Label(self.root, text = name).grid(row = i, column = 0, columnspan = 1+(i>3),
stick = 'e', pady = (10*(i==4),10*(i==0)))
# *** Define entry box ***
# This defines the entry box and relevant widgets for indicating the spectrum
ttk.Entry(self.root, textvariable = self.spectrumEntry).grid(row = 0, column = 1, columnspan = 9, pady = (0,10), sticky = 'nesw')
but = ttk.Button(self.root, text = 'Go', width = 3, command = self.jumpToSpectrum)
but.grid(row = 0, column = 10, pady = (0,10))
ToolTip(but, 'Enter a new spectrum file name in the entry box\nand hit Go to skip directly to that spectrum.')
# *** Define radio buttons ***
# First the radio buttons for the spectral type
for ind, spec in enumerate(self.specType):
temp = ttk.Radiobutton(self.root, text = spec, variable = self.specState, value = ind,
command = lambda: self.callback_specRadioChange(True))
temp.grid(row = 1, column = ind+1, sticky = 'nesw')
# Now the sub spectral type radio buttons
for ind, sub in enumerate(self.subType):
self.subButtons.append(ttk.Radiobutton(self.root, text = sub, variable = self.subState, value = ind,
command = lambda: self.callback_subRadioChange(True)))
self.subButtons[-1].grid(row = 2, column = ind+1, sticky = 'nesw')
# Finally the radio buttons for the metallicities
for ind, metal in enumerate(self.metalType):
self.metalButtons.append(ttk.Radiobutton(self.root, text = metal, variable = self.metalState, value = ind,
command = lambda: self.callback_metalRadioChange(True)))
self.metalButtons[-1].grid(row = 3, column = ind+1, sticky = 'nesw')
# *** Define buttons ***
# These will be the buttons for interacting with the data (e.g., smooth it, next, back)
ttk.Button(self.root, text = 'Earlier', command = self.callback_earlier).grid(row = 4, column = 2, columnspan = 4, sticky = 'nesw', pady = (10,0))
ttk.Button(self.root, text = 'Later', command = self.callback_later).grid(row = 4, column = 6, columnspan = 4, sticky = 'nesw', pady = (10,0))
ttk.Button(self.root, text = 'Lower', command = self.callback_lower).grid(row = 5, column = 2, columnspan = 4, sticky = 'nesw')
ttk.Button(self.root, text = 'Higher', command = self.callback_higher).grid(row = 5, column = 6, columnspan = 4, sticky = 'nesw')
ttk.Button(self.root, text = 'Odd', underline = 0, command = self.callback_odd).grid(row = 6, column = 2, columnspan = 2, sticky = 'nesw')
ttk.Button(self.root, text = 'Bad', underline = 0, command = self.callback_bad).grid(row = 6, column = 4, columnspan = 2, sticky = 'nesw')
ttk.Button(self.root, text = 'Back', underline = 3, command = self.callback_back).grid(row = 6, column = 6, columnspan = 2, sticky = 'nesw')
ttk.Button(self.root, text = 'Next', command = self.callback_next).grid(row = 6, column = 8, columnspan = 2, sticky = 'nesw')
# *** Set key bindings ***
self.root.bind('<Control-o>', lambda event: self.callback_odd())
self.root.bind('<Control-b>', lambda event: self.callback_bad())
self.root.bind('<Control-s>', lambda event: self.callback_smooth(toggle = True))
self.root.bind('<Control-e>', lambda event: self.showTemplateError.set(not self.showTemplateError.get()))
self.root.bind('<Control-e>', lambda event: self.updatePlot(), add = '+')
self.root.bind('<Control-l>', lambda event: self.lockSmooth.set(not self.lockSmooth.get()))
self.root.bind('<Control-r>', lambda event: self.removeSdssSpikeState.set(not self.removeSdssSpikeState.get()))
self.root.bind('<Control-r>', lambda event: self.updatePlot(), add = '+')
self.root.bind('<Return>', lambda event: self.callback_next())
self.root.bind('<Control-k>', lambda event: self.callback_back())
self.root.bind('<Left>', lambda event: self.callback_earlier())
self.root.bind('<Right>', lambda event: self.callback_later())
self.root.bind('<Down>', lambda event: self.callback_lower())
self.root.bind('<Up>', lambda event: self.callback_higher())
self.root.bind('<Control-p>', lambda event: self.callback_hammer_time())
# Force the GUI to appear as a top level window, on top of all other windows
self.root.lift()
self.root.call('wm', 'attributes', '.', '-topmost', True)
self.root.after_idle(self.root.call, 'wm', 'attributes', '.', '-topmost', False)
    def updatePlot(self):
        """
        Description:
            This is the method which handles all the plotting on the matplotlib
            window. It will plot the template (if it exists), the user's spectrum
            and do things like control the zoom level on the plot.
        """
        # Before updating the plot, check the current axis limits. If they're
        # set to the full limit values, then the plot wasn't zoomed in on when
        # they moved to a new plot. If the limits are different, they've zoomed
        # in and we should store the current plot limits so we can set them
        # to these limits at the end.
        if self.full_xlim is not None and self.full_ylim is not None:
            if (self.full_xlim == plt.gca().get_xlim() and
                self.full_ylim == plt.gca().get_ylim()):
                self.zoomed = False
            else:
                self.zoomed = True
                self.zoomed_xlim = plt.gca().get_xlim()
                self.zoomed_ylim = plt.gca().get_ylim()
        # *** Define Initial Figure ***
        fig = plt.figure('Pyhammer Spectrum Matching', figsize = (12,6))
        plt.cla() # Clear the plot
        # NOTE(review): toolbar._active is a private matplotlib attribute; this
        # check assumes the classic toolbar implementation — confirm on upgrade.
        if plt.get_current_fig_manager().toolbar._active != 'ZOOM':
            # Make it so the zoom button is selected by default
            plt.get_current_fig_manager().toolbar.zoom()
        # *** Plot the template ***
        # Determine which, if any, template file to load
        templateFile = self.getTemplateFile()
        if templateFile is not None:
            # Load in template data
            with warnings.catch_warnings():
                # Ignore a very particular warning from some versions of astropy.io.fits
                # that is a known bug and causes no problems with loading fits data.
                warnings.filterwarnings('ignore', message = 'Could not find appropriate MS Visual C Runtime ')
                hdulist = fits.open(templateFile)
            # Every 10th point is kept to keep the plot light-weight
            lam = np.power(10,hdulist[1].data['loglam'][::10])
            flux = hdulist[1].data['flux'][::10]
            std = hdulist[1].data['std'][::10]
            # The templates are all normalized to 8000 Angstroms. The loaded spectrum
            # are normalized to this by default as well, but if they're not defined at 8000 Angstroms,
            # it is normalized to a different value that the template needs to be normalized to
            if self.specObj.normWavelength != 8000:
                flux = Spectrum.normalize(lam, self.specObj.normWavelength, flux)
            # Plot template error bars and spectrum line
            plt.plot(lam, flux, '-k', label = 'Template')
            if self.showTemplateError.get(): # Only plot template error if option is selected to do so
                plt.fill_between(lam, flux+std, flux-std, color = 'b', edgecolor = 'None', alpha = 0.1, label = 'Template RMS')
            # Determine and format the template name for the title, from the filename
            templateName = os.path.basename(os.path.splitext(templateFile)[0])
            if '_' in templateName:
                ii = templateName.find('_')+1 # Index of first underscore, before metallicity
                templateName = templateName[:ii] + '[Fe/H] = ' + templateName[ii:]
                templateName = templateName.replace('_',',\;')
        else:
            # No template exists, plot nothing
            templateName = 'Not\;Available'
        # *** Plot the user's data ***
        # Get the flux and fix it as the user requested
        if self.smoothState.get() == False:
            flux = self.specObj.flux
        else:
            flux = self.specObj.smoothFlux
        if self.removeSdssSpikeState.get() == True:
            flux = Spectrum.removeSdssStitchSpike(self.specObj.wavelength, flux)
        # Plot it all up and define the title name
        plt.plot(self.specObj.wavelength, flux, '-r', alpha = 0.75, label = 'Your Spectrum')
        spectraName = os.path.basename(os.path.splitext(self.outData[self.specIndex,0])[0])
        # *** Set Plot Labels ***
        plt.xlabel(r'$\mathrm{Wavelength\;[\AA]}$', fontsize = 16)
        plt.ylabel(r'$\mathrm{Normalized\;Flux}$', fontsize = 16)
        plt.title(r'$\mathrm{Template:\;' + templateName + '}$\n$\mathrm{Spectrum:\;' + spectraName.replace('_','\_') + '}$', fontsize = 16)
        # *** Set Legend Settings ***
        handles, labels = plt.gca().get_legend_handles_labels()
        # In matplotlib versions before 1.5, the fill_between plot command above
        # does not appear in the legend. In those cases, we will fake it out by
        # putting in a fake legend entry to match the fill_between plot.
        if pltVersion < '1.5' and self.showTemplateError.get() and templateFile is not None:
            labels.append('Template RMS')
            handles.append(Rectangle((0,0),0,0, color = 'b', ec = 'None', alpha = 0.1))
        # Sort legend entries alphabetically by label so colors below line up
        labels, handles = zip(*sorted(zip(labels, handles), key=lambda t: t[0]))
        leg = plt.legend(handles, labels, loc = 0)
        leg.get_frame().set_alpha(0)
        # Set legend text colors to match plot line colors
        if templateFile is not None:
            # Don't adjust the template error if it wasn't plotted
            if self.showTemplateError.get():
                plt.setp(leg.get_texts()[1], color = 'b', alpha = 0.5) # Adjust template error, alpha is higher to make more readable
                plt.setp(leg.get_texts()[2], color = 'r', alpha = 0.75) # Adjust spectrum label
            else:
                plt.setp(leg.get_texts()[1], color = 'r', alpha = 0.75) # Adjust spectrum table
        else:
            plt.setp(leg.get_texts()[0], color = 'r', alpha = 0.6)
        # *** Set Plot Spacing ***
        plt.subplots_adjust(left = 0.075, right = 0.975, top = 0.9, bottom = 0.15)
        # *** Set Plot Limits ***
        plt.xlim([3000,11000])    # Set x axis limits to constant value
        fig.canvas.toolbar.update()    # Clears out view stack
        self.full_xlim = plt.gca().get_xlim()    # Pull out default, current x-axis limit
        self.full_ylim = plt.gca().get_ylim()    # Pull out default, current y-axis limit
        fig.canvas.toolbar.push_current()    # Push the current full zoom level to the view stack
        if self.zoomed:    # If the previous plot was zoomed in, we should zoom this too
            plt.xlim(self.zoomed_xlim)    # Set to the previous plot's zoom level
            plt.ylim(self.zoomed_ylim)    # Set to the previous plot's zoom level
            fig.canvas.toolbar.push_current()    # Push the current, zoomed level to the view stack so it shows up first
        # *** Draw the Plot ***
        plt.draw()
##
# Menu Item Callbacks
#
def callback_help(self):
mainStr = (
'Welcome to the main GUI for spectral typing your spectra.\n\n'
'Each spectra in your spectra list file will be loaded in '
'sequence and shown on top of the template it was matched '
'to. From here, fine tune the spectral type by comparing '
'your spectrum to the templates and choose "Next" when '
"you've landed on the correct choice. Continue through each "
'spectrum until finished.')
buttonStr = (
'Upon opening the Eyecheck program, the first spectrum in your '
'list will be loaded and displayed on top of the template determined '
'by the spectral type guesser.\n\n'
'Use the "Earlier" and "Later" buttons to change the spectrum '
'templates. Note that not all templates exist for all spectral '
'types. This program specifically disallows choosing K8 and K9 '
'spectral types as well.\n\n'
'The "Higher" and "Lower" buttons change the metallicity. Again, '
'not all metallicities exist as templates.\n\n'
'The "Odd" button allows you to mark a spectrum as something other '
'than a standard classification, such as a white dwarf or galaxy.\n\n'
'The "Bad" button simply marks the spectrum as BAD in the output '
'file, indicating it is not able to be classified.\n\n'
'You can cycle between your spectra using the "Back" and "Next" buttons. '
'Note that hitting "Next" will save the currently selected state as '
'the classification for that spectra.')
keyStr = (
'The following keys are mapped to specific actions.\n\n'
'<Left>\tEarlier spectral type button\n'
'<Right>\tLater spectral type button\n'
'<Up>\tHigher metallicity button\n'
'<Down>\tLower metallicity button\n'
'<Enter>\tAccept spectral classification\n'
'<Ctrl-K>\tMove to previous spectrum\n'
'<Ctrl-O>\tClassify spectrum as odd\n'
'<Ctrl-B>\tClassify spectrum as bad\n'
'<Ctrl-E>\tToggle the template error\n'
'<Ctrl-S>\tSmooth/Unsmooth the spectrum\n'
'<Ctrl-L>\tLock the smooth state between spectra\n'
'<Ctrl-R>\tToggle removing the stiching spike in SDSS spectra\n'
'<Ctrl-P>')
tipStr = (
'The following are a set of tips for useful features of the '
'program.\n\n'
'Any zoom applied to the plot is held constant between switching '
'templates. This makes it easy to compare templates around specific '
'features or spectral lines. Hit the home button on the plot '
'to return to the original zoom level.\n\n'
'The entry field on the GUI will display the currently plotted '
'spectrum. You can choose to enter one of the spectra in your list'
'and hit the "Go" button to automatically jump to that spectrum.\n\n'
'The smooth menu option will allow you to smooth or unsmooth '
'your spectra in the event that it is noisy. This simply applies '
'a boxcar convolution across your spectrum, leaving the edges unsmoothed.\n\n'
'By default, every new, loaded spectrum will be unsmoothed and '
'the smooth button state reset. You can choose to keep the smooth '
'button state between loading spectrum by selecting the menu option '
'"Lock Smooth State".\n\n'
'In SDSS spectra, there is a spike that occurs between 5569 and 5588'
'angstroms caused by stitching together the results from both detectors.'
'You can choose to artificially remove this spike for easier viewing by'
'selecting the "Remove SDSS Stitch Spike" from the Options menu.\n\n'
'Some keys may need to be hit rapidly.')
contactStr = (
'Aurora Kesseli\n'
'aurorak@bu.edu')
InfoWindow(('Main', mainStr),
('Buttons', buttonStr),
('Keys', keyStr),
('Tips', tipStr),
('Contact', contactStr), parent = self.root, title = 'PyHammer Help', height = 8)
def callback_about(self):
aboutStr = (
'This project was developed by a select group of graduate students '
'at the Department of Astronomy at Boston University. The project '
'was lead by Aurora Kesseli with development help and advice provided '
'by Andrew West, Mark Veyette, Brandon Harrison, and Dan Feldman. '
'Contributions were further provided by Dylan Morgan and Chris Theissan.\n\n'
'See the acompanying paper Kesseli et al. (2016) for further details.')
InfoWindow(aboutStr, parent = self.root, title = 'PyHammer About')
##
# Button and Key Press Callbacks
#
    def callback_odd(self):
        """Ask which odd (non-standard) type applies and record it for this spectrum."""
        # Open an OptionWindow object and wait for a user response
        choice = OptionWindow(['Wd', 'Wdm', 'Carbon', 'Gal', 'Unknown'], parent = self.root, title = 'PyHammer', instruction = 'Pick an odd type')
        self.root.wait_window(choice.optionWindow)
        # choice.name is None when the dialog was cancelled; do nothing in that case
        if choice.name is not None:
            # Store the user's response in the outData
            self.outData[self.specIndex,4] = choice.name
            self.outData[self.specIndex,5] = 'nan'
            # Move to the next spectra
            self.moveToNextSpectrum()
def callback_bad(self):
# Store BAD as the user's choices
self.outData[self.specIndex,4] = 'BAD'
self.outData[self.specIndex,5] = 'BAD'
# Move to the next spectra
self.moveToNextSpectrum()
def callback_smooth(self, toggle = False):
if toggle:
self.smoothState.set(not self.smoothState.get())
# Update the plot now
self.updatePlot()
    def callback_next(self):
        """Save the currently selected classification and advance to the next spectrum."""
        # Store the choice for the current spectra
        # (user spectral type is e.g. 'K5': type letter plus sub type digit)
        self.outData[self.specIndex,4] = self.specType[self.specState.get()] + str(self.subState.get())
        self.outData[self.specIndex,5] = self.metalType[self.metalState.get()]
        # Move to the next spectra
        self.moveToNextSpectrum()
def callback_back(self):
self.moveToPreviousSpectrum()
    def callback_earlier(self):
        """Step the template one spectral sub type earlier, wrapping across
        spectral type boundaries and skipping the nonexistent K8/K9 templates."""
        curSub = self.subState.get()
        curSpec = self.specState.get()
        # If user hasn't selected "O" spectral type and they're
        # currently selected zero sub type, we need to loop around
        # to the previous spectral type
        if curSpec != 0 and curSub == 0:
            # Set the sub spectral type, skipping over K8 and K9
            # since they don't exist.
            # (curSpec == 6 means we are wrapping back into the K range,
            #  whose highest allowed sub type is 7; otherwise use 9)
            self.subState.set(7 if curSpec == 6 else 9)
            # Decrease the spectral type
            self.specState.set(curSpec - 1)
        else:
            # Just decrease sub spectral type
            self.subState.set(curSub - 1)
        # Call the spectral and sub spectral type radio
        # button change callbacks as if the user changed
        # the buttons themselves
        self.callback_specRadioChange(True)
        self.callback_subRadioChange(False)
    def callback_later(self):
        """Step the template one spectral sub type later, wrapping across
        spectral type boundaries and skipping the nonexistent K8/K9 templates."""
        curSub = self.subState.get()
        curSpec = self.specState.get()
        # If the user hasn't selected "L" spectral type and
        # they're currently selecting "9" spectral sub type
        # (or 7 if spec type is "K"), we need to loop around
        # to the next spectral type
        # (index 7 is the last spectral type; index 5 is "K")
        if curSpec != 7 and (curSub == 9 or (curSpec == 5 and curSub == 7)):
            self.specState.set(curSpec + 1)
            self.subState.set(0)
        else:
            # Just increase the sub spectral type
            self.subState.set(curSub + 1)
        # Call the spectral and sub spectral type radio
        # button change callbacks as if the user changed
        # the buttons themselves.
        self.callback_specRadioChange(True)
        self.callback_subRadioChange(False)
def callback_higher(self):
curMetal = self.metalState.get()
# If the user isn't at the max metallicity option,
# then increase the metallicity
if curMetal != 6:
self.metalState.set(curMetal + 1)
# Call the metallicity radio button change callback
# as if the user changed the button themselves.
self.callback_metalRadioChange(True)
def callback_lower(self):
curMetal = self.metalState.get()
# If the user isn't at the min mellaticity option,
# then decrease the metallicity
if curMetal != 0:
self.metalState.set(curMetal - 1)
# Call the metallicity radio button change callback
# as if the user changed the button themselves.
self.callback_metalRadioChange(True)
    def callback_hammer_time(self):
        """Easter egg: pressing Ctrl-P five times within 1.5s of the first press
        pops up an ASCII-art window (the run-length encoded art in chrList)."""
        timeCalled = time.time()
        # Restart the count if this is the first press or the window expired
        if self.pPressTime == 0 or timeCalled - self.pPressTime > 1.5:
            # Reset
            self.pPressTime = timeCalled
            self.pPressNum = 1
            return
        else:
            self.pPressNum += 1
        if self.pPressNum == 5:
            # Run-length encoded ASCII art: each (charCode, repeatCount) pair
            chrList = [(10,1),(32,18),(46,1),(39,1),(47,1),(32,26),(10,1),(32,1),(42,1),(32,3),(39,1),(42,1),(32,10),(47,1),(32,1),(40,1),(95,11),
                       (46,1),(45,12),(46,1),(32,2),(10,1),(32,9),(42,1),(32,7),(91,1),(32,1),(93,1),(95,11),(124,1),(47,2),(80,1),(121,1),(72,1),
                       (97,1),(109,2),(101,1),(114,1),(47,2),(124,1),(32,2),(10,1),(32,14),(42,1),(32,2),(41,1),(32,1),(40,1),(32,11),(39,1),(45,12),
                       (39,1),(32,2),(10,1),(32,17),(39,1),(45,1),(39,1),(32,1),(42,1),(32,25),(10,1),(32,13),(42,1),(32,33),(10,1),(32,19),(42,1),(32,27)]
            InfoWindow(''.join([chr(c[0])*c[1] for c in chrList]), parent = self.root, height = 9, fontFamily = 'Courier')
##
# Radiobutton Selection Change Callbacks
#
    def callback_specRadioChange(self, callUpdatePlot):
        """React to a spectral type radio button change, disabling K8/K9 when
        needed, and optionally redraw the plot."""
        # If the spectral type radio button has changed,
        # check to see if the user switched to a "K" type.
        # If they have, turn off the option to pick K8 and K9
        # Since those don't exist. Otherwise, just turn those
        # sub spectral type buttons on.
        if self.specState.get() == 5:
            # Index 5 is "K": grey out the last two sub type buttons (8 and 9)
            self.subButtons[-1].configure(state = 'disabled')
            self.subButtons[-2].configure(state = 'disabled')
            # If an invalid sub type was selected, clamp it to K7
            if self.subState.get() == 8 or self.subState.get() == 9:
                self.subState.set(7)
        else:
            self.subButtons[-1].configure(state = 'normal')
            self.subButtons[-2].configure(state = 'normal')
        if callUpdatePlot: self.updatePlot()
def callback_subRadioChange(self, callUpdatePlot):
if callUpdatePlot: self.updatePlot()
def callback_metalRadioChange(self, callUpdatePlot):
if callUpdatePlot: self.updatePlot()
##
# Utility Methods
#
    def moveToNextSpectrum(self):
        """
        Description:
            This method handles moving to the next spectrum. All it really
            does is determines if the user is at the end of the list of
            spectrum, and, if so, asks if they're done. If they aren't at
            the end, it moves to the next spectrum (by incrementing self.specIndex)
            and calling self.getUserSpectrum.
        """
        if self.specIndex+1 >= len(self.outData):
            # Already on the last spectrum: chime and ask whether to finish
            self.root.bell()
            modal = ModalWindow("You've classified all the spectra. Are you finished?", parent = self.root)
            self.root.wait_window(modal.modalWindow)
            if modal.choice == 'yes':
                self._exit()
        else:
            self.specIndex += 1
            self.getUserSpectrum()
def moveToPreviousSpectrum(self):
"""
Description:
This method handles moving to the previous spectrum. It will simply
decrement the self.specIndex variable if they're not already on
the first index and call self.getUserSpectrum.
"""
if self.specIndex > 0:
self.specIndex -= 1
self.getUserSpectrum()
def jumpToSpectrum(self):
"""
Description:
This method handles moving to a different spectrum in the list which
may or may not be before or after the current spectrum. This is handled
by looking for a spectrum in the inData list which matches the spectrum
in the Entry field in the GUI. If a match is found, that spectrum is
loaded, otherwise they're informed that spectrum could not be found.
"""
spectrumFound = False
# Get the user's input spectrum name and make sure it has an extension
userInput = os.path.basename(self.spectrumEntry.get())
if userInput.find('.') == -1:
message = 'Make sure to provide a file extension in your name.'
InfoWindow(message, parent = self.root, title = 'PyHammer Error')
return
# Scan through the outData file and try to find a matching spectrum
for i, spectrum in enumerate(self.outData):
if userInput == os.path.basename(spectrum[0]):
self.specIndex = i
spectrumFound = True
break
# If one wasn't found, inform the user of a problem
if not spectrumFound:
message = ('The spectrum you input could not be matched '
'to any of the spectrum in your output file '
'list. Check your input and try again.')
InfoWindow(message, parent = self.root, title = 'PyHammer Error')
else:
# If a match was found, load that spectrum
self.getUserSpectrum()
    def getUserSpectrum(self):
        """
        Description:
            This handles loading a new spectrum based on the self.specIndex variable
            and updates the GUI and plot window accordingly.
        """
        # Read in the next spectrum file indicated by self.specIndex
        fname = self.outData[self.specIndex,0]
        # Look up the file type recorded for this spectrum in the input data
        ftype = np.extract(self.inData[:,0] == os.path.basename(fname), self.inData[:,1])[0]
        self.specObj.readFile(self.options['spectraPath']+fname, ftype) # Ignore returned values
        self.specObj.normalizeFlux()
        # Set the spectrum entry field to the new spectrum name
        self.spectrumEntry.set(os.path.basename(self.outData[self.specIndex,0]))
        # Set the radio button selections to the new spectrum's guessed classifcation
        # (column 2 holds e.g. 'K5': type letter at [0], sub type digit at [1])
        self.specState.set(self.specType.index(self.outData[self.specIndex,2][0]))
        self.subState.set(int(self.outData[self.specIndex,2][1]))
        self.metalState.set(self.metalType.index(self.outData[self.specIndex,3]))
        # Reset the indicator for whether the plot is zoomed. It should only stay zoomed
        # between loading templates, not between switching spectra.
        self.full_xlim = None
        self.full_ylim = None
        self.zoomed = False
        # Reset the smooth state to be unsmoothed, unless the user chose to lock the state
        if not self.lockSmooth.get():
            self.smoothState.set(False)
        # Update the plot
        self.updatePlot()
def getTemplateFile(self, specState = None, subState = None, metalState = None):
"""
Description:
This will determine the filename for the template which matches the
current template selection. Either that selection will come from
whichever radio buttons are selected, or else from input to this
function. This will search for filenames matching a specific format.
The first attempt will be to look for a filename of the format
"SS_+M.M_Dwarf.fits", where the SS is the spectral type and subtype
and +/-M.M is the [Fe/H] metallicity. The next next format it will
try (if the first doesn't exist) is "SS_+M.M.fits". After that it
will try "SS.fits".
"""
# If values weren't passed in for certain states, assume we should
# use what is chosen on the GUI
if specState is None: specState = self.specState.get()
if subState is None: subState = self.subState.get()
if metalState is None: metalState = self.metalState.get()
# Try using the full name
filename = self.specType[specState] + str(subState) + '_' + self.metalType[metalState] + '_Dwarf'
fullPath = os.path.join(self.templateDir, filename + '.fits')
if os.path.isfile(fullPath):
return fullPath
# Try using only the spectra and metallicity in the name
filename = filename[:7]
fullPath = os.path.join(self.templateDir, filename + '.fits')
if os.path.isfile(fullPath):
return fullPath
# Try to use just the spectral type
filename = filename[:2]
fullPath = os.path.join(self.templateDir, filename + '.fits')
if os.path.isfile(fullPath):
return fullPath
# Return None if file could not be found
return None
|
import errno
import os
import re
import shutil
import sys
import yaml
import json
import tempfile
def expand_at_params(s, fn, listfn=None):
	"""Expand '@name@' / '@name:arg@' placeholders in a string or list of strings.

	fn receives the text between the '@' signs and returns the replacement text;
	a None return raises RuntimeError. For list input, listfn (if given) may
	expand an item consisting solely of '@name@' into multiple items.
	"""
	pattern = r'@(\w+(:[^@]+)?)@'

	def replace(match):
		expansion = fn(match.group(1))
		if expansion is None:
			raise RuntimeError("Unexpected @-parameter '{}' in {}".format(match.group(1), s))
		return expansion

	if not isinstance(s, list):
		assert isinstance(s, str)
		return re.sub(pattern, replace, s)

	expanded = []
	for item in s:
		if listfn is not None:
			# An item that is nothing but a single placeholder may be
			# replaced by a whole sequence of items.
			whole = re.fullmatch(r'@(\w+)@', item)
			if whole:
				replacement = listfn(whole.group(1))
				if replacement is not None:
					expanded.extend(replacement)
					continue
		expanded.append(re.sub(pattern, replace, item))
	return expanded
def try_mkdir(path):
	"""Create the directory at path; silently succeed if it already exists."""
	try:
		os.mkdir(path)
	except OSError as error:
		# An already-existing directory is fine; anything else is re-raised.
		if error.errno == errno.EEXIST:
			return
		raise
def try_rmfile(path):
	"""Remove the file at path; silently succeed if it does not exist."""
	try:
		os.remove(path)
	except OSError as error:
		# A missing file is fine; anything else is re-raised.
		if error.errno == errno.ENOENT:
			return
		raise
def try_rmtree(path):
	"""Recursively remove the directory at path; silently succeed if absent."""
	try:
		shutil.rmtree(path)
	except OSError as error:
		# A missing tree is fine; anything else is re-raised.
		if error.errno == errno.ENOENT:
			return
		raise
def touch(path):
	"""Create the file at path if missing; truncate it if it already exists."""
	# Opening in 'w' mode creates-or-truncates; nothing is written.
	open(path, 'w').close()
def yaml_to_string(yml):
	"""Serialize the given object to a YAML string using the safe dumper."""
	return yaml.dump(yml, Dumper=yaml.SafeDumper)
def write_yaml_file(f, yml):
	"""Serialize the given object as YAML into the open file object f."""
	return yaml.dump(yml, f, Dumper=yaml.SafeDumper)
def yaml_from_string(string):
	"""Parse a YAML string using simexpal's custom loader."""
	# Imported lazily to avoid a circular import with simexpal.base
	from simexpal.base import YmlLoader
	return yaml.load(string, Loader=YmlLoader)
def read_yaml_file(f):
	"""Parse YAML from the open file object f using simexpal's custom loader."""
	# Imported lazily to avoid a circular import with simexpal.base
	from simexpal.base import YmlLoader
	return yaml.load(f, Loader=YmlLoader)
def read_setup_file(setup_file):
	"""Parse the YAML setup file at setup_file.

	Returns (parsed dict, last-modified timestamp). The mtime is taken from
	the open file descriptor so it corresponds to the contents just read.
	"""
	# Imported lazily to avoid a circular import with simexpal.base
	from simexpal.base import YmlLoader
	with open(setup_file, 'r') as f:
		setup_dict = yaml.load(f, Loader=YmlLoader)
		last_mod = os.fstat(f.fileno()).st_mtime
	return setup_dict, last_mod
def read_json_file(json_file):
	"""Parse the JSON file at json_file.

	Returns (parsed object, last-modified timestamp). The mtime is taken from
	the open file descriptor so it corresponds to the contents just read.
	"""
	with open(json_file, 'r') as f:
		parsed = json.load(f)
		modified = os.fstat(f.fileno()).st_mtime
	return parsed, modified
def validate_setup_file(basedir, setup_file, setup_file_schema_name):
	""" Reads, validates and sanitizes the setup file

	Validates the project's setup file and (if present) the user's global
	~/.simexpal/launchers.yml against their JSON schemas. Results are cached
	in <basedir>/validation.cache keyed by file mtimes so unchanged files are
	not re-validated. Exits the process when any file is invalid; otherwise
	returns the parsed setup dict.

	Fixes: the refreshed cache was renamed to a CWD-relative
	'validation.cache' although it is read from <basedir>; and a partial
	cache (entry for 'launchers.yml' but not 'launchers.json') raised
	KeyError instead of triggering re-validation.
	"""
	def _validate_dict(dictionary, source, schema):
		# Returns True when dictionary conforms to schema; otherwise prints
		# every validation error (including anyOf/oneOf/allOf subschema
		# details) to stderr and returns False.
		from jsonschema import Draft7Validator

		validator = Draft7Validator(schema)
		validation_errors = list(validator.iter_errors(dictionary))
		if len(validation_errors) > 0:
			for err in validation_errors:
				err_source = "[{}]".format("][".join(str(x) for x in err.absolute_path))
				print("simexpal: Validation error in {} at {}:\n{}\n{}".format(
					source, err_source, err.instance, err.message), file=sys.stderr, end="\n\n")

				# The error comes from subschemas in anyOf, oneOf or allOf.
				if err.context:
					print("Below are the validation errors of each respective subschema:")
					schema_index = None
					for sub_error in sorted(err.context, key=lambda e: e.schema_path[0]):
						cur_schema_index = sub_error.schema_path[0]
						if cur_schema_index != schema_index:
							schema_index = cur_schema_index
							print("\nValidation errors in subschema [{}]:".format(cur_schema_index))
						print(sub_error.message)
					print()
			return False
		return True

	cur_file_path = os.path.abspath(os.path.dirname(__file__))

	# Load the previous validation results, if any.
	validation_cache_dict = {}
	try:
		with open(os.path.join(basedir, 'validation.cache'), 'r') as f:
			validation_cache_dict = json.load(f)
	except FileNotFoundError:
		pass

	# Validate setup file and potentially cache results.
	setup_file_path = os.path.join(basedir, setup_file)
	setup_file_dict, setup_file_last_mod = read_setup_file(setup_file_path)

	setup_file_schema_path = os.path.join(cur_file_path, 'schemes', setup_file_schema_name)
	setup_file_schema, setup_file_schema_last_mod = read_json_file(setup_file_schema_path)

	setup_file_is_valid = None
	if (setup_file not in validation_cache_dict
			or setup_file_last_mod != validation_cache_dict[setup_file]
			or setup_file_schema_name not in validation_cache_dict
			or setup_file_schema_last_mod != validation_cache_dict[setup_file_schema_name]):
		setup_file_is_valid = _validate_dict(setup_file_dict, setup_file, setup_file_schema)

		if setup_file_is_valid:
			validation_cache_dict[setup_file] = setup_file_last_mod
			validation_cache_dict[setup_file_schema_name] = setup_file_schema_last_mod

	# Validate launchers.yml file and potentially cache results.
	launchers_yml_is_valid = None
	try:
		launchers_yml_dict, launchers_yml_last_mod = read_setup_file(os.path.expanduser('~/.simexpal/launchers.yml'))

		launchers_yml_schema_path = os.path.join(cur_file_path, 'schemes', 'launchers.json')
		launchers_yml_schema, launchers_yml_schema_last_mod = read_json_file(launchers_yml_schema_path)

		# Bugfix: guard the 'launchers.json' key like the setup-file branch
		# guards its schema key, instead of risking a KeyError on a cache
		# written by an older version.
		if ('launchers.yml' not in validation_cache_dict
				or launchers_yml_last_mod != validation_cache_dict['launchers.yml']
				or 'launchers.json' not in validation_cache_dict
				or launchers_yml_schema_last_mod != validation_cache_dict['launchers.json']):
			launchers_yml_is_valid = _validate_dict(launchers_yml_dict, 'launchers.yml', launchers_yml_schema)

			if launchers_yml_is_valid:
				validation_cache_dict['launchers.yml'] = launchers_yml_last_mod
				validation_cache_dict['launchers.json'] = launchers_yml_schema_last_mod
	except FileNotFoundError:
		pass

	writeback_cache = False
	do_exit = False
	if setup_file_is_valid is not None:
		if setup_file_is_valid:
			writeback_cache = True
		else:
			do_exit = True
	if launchers_yml_is_valid is not None:
		if launchers_yml_is_valid:
			writeback_cache = True
		else:
			do_exit = True

	if writeback_cache:
		# Write atomically: dump into a temp file in basedir, then rename.
		fd, path = tempfile.mkstemp(dir=basedir)
		with os.fdopen(fd, 'w') as tmp:
			json.dump(validation_cache_dict, tmp)
		# Bugfix: the cache is read from <basedir>/validation.cache above,
		# so rename it there rather than into the current working directory.
		os.rename(path, os.path.join(basedir, 'validation.cache'))

	if do_exit:
		sys.exit(1)

	return setup_file_dict
def compute_network_size(path, out):
	"""Read the graph at path and dump its node/edge counts as YAML to out.

	Files that cannot be read as an edge list are silently skipped.
	"""
	import networkit as nk
	try:
		g = nk.readGraph(path, nk.Format.EdgeList,
				separator=' ', firstNode=0, continuous=False, directed=False)
	except Exception: # Exception due the attempt of reading a non-network file
		return
	# n = number of nodes, m = number of edges (standard graph notation)
	data = {
		'n': g.numberOfNodes(),
		'm': g.numberOfEdges()
	}
	yaml.dump(data, out, default_flow_style=False)
def ensure_list_type(arg):
    """Return *arg* unchanged if it is already a list; otherwise wrap the string in a list."""
    if not isinstance(arg, list):
        assert isinstance(arg, str)
        return [arg]
    return arg
def read_file(path):
    """Return the entire contents of *path*, or '' when the file does not exist."""
    try:
        with open(path, 'r') as handle:
            return handle.read()
    except FileNotFoundError:
        return ''
def extract_file_prefix_from_path(file_path, suffix=None):
    """
    :param file_path: absolute file path
    :param suffix: (optional) suffix which will be removed from the extensionless basename of the file
    :return: prefix of file
    """
    base_name, _ = os.path.splitext(os.path.basename(file_path))
    if suffix is None:
        return base_name
    # Everything from the first occurrence of *suffix* onwards is dropped.
    return base_name.split(suffix)[0]
|
# coding: utf-8
# Print every palindromic number below 1000 using filter().
def is_palindrome(n):
    """Return True if the decimal representation of *n* reads the same backwards."""
    digits = str(n)
    return digits == digits[::-1]

for value in filter(is_palindrome, range(0, 1000)):
    print(value)
|
#!/usr/bin/python
class Node:
    """A weighted tree node holding links to its parent and children.

    The original class also declared class-level attributes (weight, name,
    parent, children) that were immediately shadowed by the instance
    attributes set in __init__; they served no purpose and were removed.
    """

    def __init__(self, weight, name, parent, children):
        self.weight = weight      # numeric weight of this node
        self.name = name          # node identifier
        self.parent = parent      # parent reference ("" when unknown/root)
        self.children = children  # children reference(s) ("" when unknown)
def recursiveCircus(file):
    """Read the puzzle input at *file*, print its stripped lines.

    BUG FIX: the original did not parse -- the per-line loop had no colon and
    no body, and used a Python-2 print statement.  The parsing loop is kept
    as a stub because the original logic was never written.
    """
    with open(file) as f:
        content = [line.strip('\n') for line in f.readlines()]
    nodes = []
    print(content)
    for i in range(0, len(content)):
        # TODO: parse content[i] into a Node (weight, name, parent, children).
        pass

if __name__ == '__main__':
    # Guarded so importing this module no longer crashes when the sample
    # input file is absent.
    recursiveCircus("input/day7_sample.txt")
# simple console calculator v2 (colorama-colored prompts, Russian UI strings)
from colorama import init
from colorama import Fore, Back, Style

init()
print( Fore.GREEN )
what = input ("что делаем (+,-,)")
print( Fore.CYAN )
a = float( input("Веди первое число: ") )
b = float( input ("Введи второе число: ") )
print( Fore.YELLOW )
if what == "+":
    c = a + b
    print("Результат: " + str(c))
elif what == "-":
    c = a - b
    print("Результат: " + str(c))
else:
    # BUG FIX: this "invalid operation" message used to print on EVERY run,
    # because the else-branch it belonged to was commented out and the line
    # was left at top level.  It now fires only for unsupported operators.
    print(" Выбрана неверная операция! ")
# Koch snowflake curve drawn with turtle graphics.
# BUG FIX: the module began with "mport turtle" (typo), so it never ran.
import turtle

tr = turtle.getturtle()

def koch(n, len):
    """Draw one Koch-curve segment of recursion depth *n* and total length *len*."""
    if (n == 0):
        tr.forward(len)
    elif (n == 1):
        # Base motif: four sub-segments with 60/−120/60 degree turns.
        tr.forward(len / 3.0)
        tr.left(60)
        tr.forward(len / 3.0)
        tr.right(120)
        tr.forward(len / 3.0)
        tr.left(60)
        tr.forward(len / 3.0)
    else:
        # Recurse: apply the motif to each third of the segment.
        koch(n - 1, len / 3.0)
        tr.left(60)
        koch(n - 1, len / 3.0)
        tr.right(120)
        koch(n - 1, len / 3.0)
        tr.left(60)
        koch(n - 1, len / 3.0)

koch(4, 300)
# Two-dimensional random walk restricted to a finite grid.
from __future__ import print_function
from __future__ import division
import random

# Grid dimensions (cells 0..MAX_ROW-1 x 0..MAX_COL-1) and number of steps.
MAX_COL = 9
MAX_ROW = 9
maxcycle = 10000
# Unit steps expressed as (row_delta, col_delta).
left = (0, -1)
right = (0, 1)
up = (-1, 0)
down = (1, 0)
# matrix[r][c] counts how often the walker visited cell (r, c).
matrix = [[0] * MAX_COL for _ in range(MAX_ROW)]
row = 5
col = 5
matrix[row][col] = 1
for _ in range(maxcycle):
    # Exclude directions that would step off the grid boundary.
    direct = [up, down, left, right]
    if col == 0:
        direct.remove(left)
    elif col == MAX_COL - 1:
        direct.remove(right)
    if row == 0:
        direct.remove(up)
    elif row == MAX_ROW - 1:
        direct.remove(down)
    di = random.choice(direct)
    row += di[0]
    col += di[1]
    matrix[row][col] += 1
# Print the visit counts row by row, then show them as a heat map.
for row in range(MAX_ROW):
    print(matrix[row])
from matplotlib import pyplot
pyplot.matshow(matrix)
pyplot.show()
# One-dimensional random walk: three independent 100-step walkers.
import numpy as np
import pylab as plt
from math import *
import random

# x holds one walker's positions; index i is the step number (0..100).
x=[0 for i in range(101)]
t=np.linspace(0,100,101)
n=100
for i in range(n):
    # Step +1 or -1 with equal probability.
    x0=random.random()
    if x0<0.5:
        x[i+1]=x[i]+1
    if x0>=0.5:
        x[i+1]=x[i]-1
# Second independent walker.
x1=[0 for i in range(101)]
for i in range(n):
    x2=random.random()
    if x2<0.5:
        x1[i+1]=x1[i]+1
    if x2>=0.5:
        x1[i+1]=x1[i]-1
# Third independent walker.
x4=[0 for i in range(101)]
for i in range(n):
    x3=random.random()
    if x3<0.5:
        x4[i+1]=x4[i]+1
    if x3>=0.5:
        x4[i+1]=x4[i]-1
# Zero baseline used as a visual reference line.
o=np.linspace(0,0,101)
def plot():
    """Plot the three walks (green/red/black dots) around the zero baseline."""
    plt.plot(t,o)
    plt.plot(t,x,'go')
    plt.plot(t,x1,'ro')
    plt.plot(t,x4,'ko')
    plt.xlim(0,100)
    plt.xlabel('step number')
    plt.ylabel('x')
    plt.title('random walk in one dimension')
    plt.show()
plot()
# Average displacement <x> of 10000 independent random walkers over 100 steps.
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
steps = np.linspace(0, 100, 101)
x_ave = np.zeros(101)    # <x> recorded after each step
x_y0 = np.zeros(101)     # zero reference line
x_now = np.zeros(10000)  # current position of every walker
for i in range(100):
    # Advance every walker by +/-1 with equal probability.
    for j in range(10000):
        ruler = np.random.rand()
        if ruler<=0.5:
            x_now[j] = x_now[j] + 1
        else:
            x_now[j] = x_now[j] - 1
    average = sum(x_now)/10000
    x_ave[i+1] = average
# Diffusion process: for an unbiased walk <x> stays near zero.
plt.scatter(steps, x_ave)
plt.plot(steps, x_y0)
plt.xlim(0,100)
plt.ylim(-1,1)
plt.grid(True)
plt.xlabel('step number(= time)')
plt.ylabel('<x>')
plt.title('<x> of 10000 walkers')
plt.show()
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import matplotlib.pyplot as plt
import numpy as np
import math

# 2-D diffusion solved with an explicit finite-difference scheme.
# r0[k][j][i] is the density at time step k, row j, column i (21x21 grid).
r0=[[[0 for i in range(21)]for j in range(21)]for k in range(100)]
for j in range(5,16):
    for i in range(5,16):
        r0[0][j][i]=1  # initial condition: unit density on the central square
dx=1
dt=0.25
D=1
k=0
while True:
    # Interior update; the boundary (j,i in {0,20}) stays at zero.
    for j in range(1,20):
        for i in range(1,20):
            r0[k+1][j][i]=r0[k][j][i]+D*dt/(dx)**2*(r0[k][j][i+1]+r0[k][j][i-1]+r0[k][j+1][i]+r0[k][j-1][i]-4*r0[k][j][i])
    k=k+1
    if k>98:
        break

x=np.linspace(-10,10,21)
y=np.linspace(-10,10,21)
X,Y=np.meshgrid(x,y)

def _plot_density(frame, label, zlim=None):
    """Render one time frame of the density as a 3-D surface.

    BUG FIX: the original called bare figure()/title()/show(), which are
    undefined under "import matplotlib.pyplot as plt"; all calls are now
    qualified with plt.  The three copy-pasted plotting stanzas were also
    factored into this helper.
    """
    fig = plt.figure(figsize=[8,8])
    ax = fig.add_subplot(111, projection='3d')
    ax.plot_surface(X, Y, np.array(frame), rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0)
    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_zlabel('density')
    if zlim is not None:
        ax.set_zlim(*zlim)
    plt.title(label)

_plot_density(r0[0], 't=0')
_plot_density(r0[10], 't=10', zlim=(0, 1))
_plot_density(r0[90], 't=90', zlim=(0, 1))
plt.show()
|
# Launch the WSGI application defined in web_app with the waitress server.
from waitress import serve
import web_app
serve(web_app.app, port=9000, threads=6)
|
#!/usr/bin/python
#############################################################################
### ###
### Antisense Project - 2018 ###
### Density Plots From Output of PeakCaller_mNET-Seq (Rui Luis) ###
### Author: Rui Luis (MCFonsecaLab) ###
### ###
#############################################################################
#Import Packages
import argparse
import pandas as pd
import matplotlib.pyplot as plt
#Input Arguments
parser = argparse.ArgumentParser(description='DensityPlot mNET-Seq')
parser.add_argument('PeakCallingFiles', nargs='+', help='Output file from PeakCalling Script for mNEt-seq (Rui Luis, 2018). For more than one replicate '
'It will merge the results, according to the given function.')
parser.add_argument('--outNameFile', dest='outNameFile', default= None, help='Base bamFile where peaks are investigated')
args = parser.parse_args()
# Concatenate every replicate's peak table into a single frame
# (Python-2 script; uses the old pandas DataFrame.append API).
globalDT = pd.DataFrame()
for replicate in args.PeakCallingFiles:
    print replicate
    x = pd.read_csv(replicate, header=None, delimiter="\t", names =["chr","start","end","FeatureID",
    "Nline","Strand","DistFromTSS",
    "FeatureSize","Extrainfo","Coverage"])
    x.sort_values(["Nline"], inplace=True,ascending=True)
    x.reset_index(drop=True, inplace=True)
    globalDT = globalDT.append(x)
globalDT.sort_values(["Nline"],inplace=True)
# Figure 1: peaks conserved across ALL replicates (same Nline appears once
# per input file), restricted to <= 200 bp from the TSS.
plt.figure(1)
plt.title('Conserved peaks between replicates per Feature')
plt.xlabel("Distance From TSS")
counts = globalDT.groupby("Nline").filter(lambda x: len(x) == len(args.PeakCallingFiles)).drop_duplicates(subset=["chr","start","end","FeatureID",
"Nline","Strand","DistFromTSS",
"FeatureSize","Extrainfo"])
counts.sort_values(["Nline"],inplace=True)
try:
    # "DistFromTSS=<n>" strings are stripped to their integer values.
    counts["DistFromTSS"].str.strip("DistFromTSS=").astype('int64').where(counts["DistFromTSS"].str.strip("DistFromTSS=").astype('int64') <= 200)\
    .plot.hist(stacked=True, alpha=0.5, bins=200, xlim=[0,200])
    plt.show(1)
except:
    # NOTE(review): bare except silently skips this plot when no conserved
    # peaks exist -- or on any other error; consider narrowing it.
    pass
# Figure 2: all peaks within 1000 bp of the TSS.
plt.figure(2)
plt.title('All peaks per Feature')
plt.xlabel("Distance From TSS")
globalDT["DistFromTSS"].str.strip("DistFromTSS=").astype('int64').where(globalDT["DistFromTSS"].str.strip("DistFromTSS=").astype('int64') <= 1000)\
.plot.hist(stacked=True, alpha=0.5, bins=200, xlim=[0,1000])
plt.show(2)
# Figure 3: all peaks within 200 bp of the TSS (coarser bins).
plt.figure(3)
plt.title('All peaks per Feature')
plt.xlabel("Distance From TSS")
globalDT["DistFromTSS"].str.strip("DistFromTSS=").astype('int64').where(globalDT["DistFromTSS"].str.strip("DistFromTSS=").astype('int64') <= 200)\
.plot.hist(stacked=True, alpha=0.5, bins=75, xlim=[0,200])
plt.show(3)
########################################################
## Highest peak per Gene
plt.figure(4)
plt.title('Highest peak per Feature')
plt.xlabel("Distance From TSS")
globalDT["DistFromTSS"] = globalDT["DistFromTSS"].str.strip("DistFromTSS=")
globalDT["DistFromTSS"] = globalDT["DistFromTSS"].astype('int64')
globalDT = globalDT.reset_index(drop=True)
# Keep only the row with the maximum Coverage for each FeatureID.
CoverageDataMAX = globalDT.loc[globalDT.reset_index().groupby(['FeatureID'])['Coverage'].idxmax()]
CoverageDataMAX = CoverageDataMAX[CoverageDataMAX['DistFromTSS'] < 1200]
CoverageDataMAX['DistFromTSS'].plot.hist(stacked=True, alpha=0.5, bins=200, xlim=[0,1200])
# NOTE(review): 65.00 looks like a hard-coded (total features / 100) used to
# turn counts into percentages -- confirm it matches the dataset size.
print "0-200: {} %".format(str(sum((CoverageDataMAX['DistFromTSS'] < 200) & (CoverageDataMAX['DistFromTSS'] > 0))/65.00))
print "200-850: {} %".format(str(sum((CoverageDataMAX['DistFromTSS'] < 850) & (CoverageDataMAX['DistFromTSS'] > 200))/65.00))
print "850-1200: {} %".format(str(sum((CoverageDataMAX['DistFromTSS'] < 1200) & (CoverageDataMAX['DistFromTSS'] > 850))/65.00))
print "1200-inf: {} %".format(str(sum(CoverageDataMAX['DistFromTSS'] > 1200)/65.00))
plt.show(4)
# Figure 5: highest peak per feature within 200 bp.
plt.figure(5)
plt.title('Highest peak per Feature')
plt.xlabel("Distance From TSS")
CoverageDataMAX = CoverageDataMAX[CoverageDataMAX['DistFromTSS'] < 200]
CoverageDataMAX['DistFromTSS'].plot.hist(stacked=True, alpha=0.5, bins=75, xlim=[0,200])
plt.show(5)
|
from objects.RandomObject import RandomObject
# Dungeon is a blank object used for flavor at the moment.
class Dungeon(RandomObject):
    # No extra random parameters beyond those of RandomObject.
    parameter_types = []
    def describe(self,from_perspective=None):
        # NOTE(review): prints the parent's description and returns None;
        # from_perspective is accepted but unused -- confirm callers only
        # want the print side effect, not a returned string.
        print(super().describe())
|
from intent_handling.signal import Signal
class ClassTitleIntent:
    """Intent that answers with the full (pretty) name of a course."""
    NAME = 'CLASS_TITLE'
    def __init__(self, parameters):
        # parameters carries the recognized slots (class_name is read below).
        self.parameters = parameters
    def execute(self, db):
        # Resolve the course code from the spoken class name; the last
        # space-separated token of the code is used as the lookup key.
        code = db.course_code(self.parameters.class_name)
        # NOTE(review): the code is interpolated directly into the SQL string.
        # If class_name can come from untrusted input this is an injection
        # risk -- prefer a parameterized query if db.call supports one.
        sql = 'SELECT pretty_name ' \
              'FROM main_courses ' \
              'WHERE code="{}"'.format(code.split(' ')[-1])
        result = db.call(sql)
        output = "{}'s course name is {}".format(code, result[0][0])
        return Signal.NORMAL, output
|
def add(x, y):
    """Return the sum of *x* and *y*."""
    total = x + y
    return total

print(add(5, 7))
|
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 29 23:15:55 2018
无线网工程相关的操作
@author: lenovo
"""
from project.jntele_sap import OperateLteSAP
from project.jntele_zaijian import OperateZaijian
import os
import warnings
class OperateLteProject(object):
    '''Aggregation class for wireless-network (LTE) engineering operations.'''
    def __init__(self):
        # Working directories (relative, Windows-style separators).
        self.dir_base = 'gongcheng\\'
        self.dir_log = 'log\\'
    # Fetch SAP-related data, write the result workbook and open it in the shell.
    def getLteSapData(self,file_in,zhucai_file='lte_zhucai.xlsx',dir_in='sap_old\\',dir_out='sap_ok\\'):
        op = OperateLteSAP(self.dir_base,self.dir_log)
        file_out = op.getLteSapData(file_in,zhucai_file,dir_in,dir_out)
        # Warnings are suppressed only around the shell launch of the output file.
        warnings.filterwarnings('ignore')
        os.popen(file_out)
        warnings.filterwarnings('default')
    # Update main-material unit prices from the SAP base-information sheet.
    def updateZhucaiPrice(self,file_in,zhucai_file='lte_zhucai.xlsx',dir_in='sap_old\\'):
        op = OperateLteSAP(self.dir_base,self.dir_log)
        file_out = op.updateZhucaiPrice(file_in,zhucai_file,dir_in)
        warnings.filterwarnings('ignore')
        os.popen(file_out)
        warnings.filterwarnings('default')
    # Refresh the base table from the projects-under-construction sheet.
    def updateProjectBase(self,file_in,dir_in='zaijian_in\\',file_base = 'base\\wbs-ok.xlsx'):
        op = OperateZaijian(self.dir_base,self.dir_log)
        return op.updateProjectBase(file_in,dir_in,file_base)
if __name__ == '__main__':
    # Ad-hoc driver: run the SAP extraction for one workbook; the other
    # operations are kept commented out for manual use.
    sap_file_in = 'CDMA-ALL-0517.xlsx'
    # zaijian_file_in = '在建工程明细总表(实时)导出(0504).xlsx'
    op = OperateLteProject()
    op.getLteSapData(sap_file_in)
    # op.updateZhucaiPrice(sap_file_in)
# op.updateProjectBase(zaijian_file_in) |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import webapp2
import cgi
from caesar import encrypt
from helpers import alphabet_position, rotate_character
# HTML template for the rotation form; %(rotation)s and %(message)s are
# substituted with pre-escaped user values in MainHandler.write_form.
form = """
<form method="post" action="/formhandler">
<label> Rotate by:
<br>
<input type="text" name="rotation" value="%(rotation)s">
</label>
<br>
<br>
<label> Enter text to encrypt below:
<br>
<textarea type="text" name="message" style="height: 100px; width: 400px;">%(message)s</textarea>
</label>
<br>
<br>
<input type="submit">
</form>
"""
def html_escape(s):
    # NOTE(review): cgi.escape is deprecated (removed in Python 3.8); it is
    # fine for this Python-2 webapp2 app, but use html.escape on Python 3.
    return cgi.escape(s, quote = True)
class MainHandler(webapp2.RequestHandler):
    """Renders the Caesar-cipher form."""
    def write_form(self, rotation="0", message=""):
        # Escape user-supplied values before substituting into the template.
        self.response.out.write(form % {"rotation": html_escape(rotation), "message": html_escape(message)})
    def get(self):
        self.write_form()
class FormHandler(MainHandler):
    """Handles the form POST: encrypts the message and re-renders the form."""
    def post(self):
        user_rotation = self.request.get('rotation')
        try:
            int_rotation = int(user_rotation)
        except ValueError:
            # ROBUSTNESS FIX: a non-numeric rotation used to raise from int()
            # and surface as a 500; re-render the form with the user's input
            # instead.
            self.write_form(user_rotation, self.request.get('message'))
            return
        encrypt_message = encrypt(self.request.get('message'), int_rotation)
        self.write_form("0", encrypt_message)
# Route table: GET / shows the form, POST /formhandler encrypts the message.
app = webapp2.WSGIApplication([
    ('/', MainHandler),
    ('/formhandler', FormHandler)
], debug=True)
|
import os
import sys
import argparse
import datetime
import time
import re
import numpy as np
import pandas as pd
import ee
import geopandas as gpd
import rasterio
import rioxarray
import shapely
import requests
import lxml.html
import urllib
from dateutil.relativedelta import *
from osgeo import ogr, osr, gdal
from rasterio.crs import CRS
from rasterio.warp import reproject, Resampling
from subprocess import Popen
from getpass import getpass
from netrc import netrc
from data_processing.knmi_interpolation import Knmi_Interpolator
class ModisRetrieval(Knmi_Interpolator):
    """Fetches MODIS data through the Google Earth Engine Python API."""
    # Earth Engine band names to select.
    vars_names = []
    # Geographic bounds used for sampling.
    bounds = []
    def __init__(self, parent_folder='./data'):
        # NOTE(review): the base initializer runs twice here (explicit call
        # followed by super().__init__) -- likely only one is intended.
        Knmi_Interpolator.__init__(self, parent_folder)
        super().__init__(parent_folder)
        try:
            ee.Initialize()
            print('Google Earth Engine has initialized successfully!')
        except ee.EEException as e:
            print('Google Earth Engine has failed to initialize!')
        except:
            print("Unexpected error:", sys.exc_info()[0])
            raise
    def get_crs_image_collection(self, image_collection):
        # CRS of the first image in the collection.
        return ee.Image(image_collection.first()).projection().crs()
    def set_bounds(self, bounds):
        self.bounds = bounds
    def set_vars_names(self, vars_names):
        self.vars_names = vars_names
    def get_ee_data(self, dataset_name):
        """Mean-composite *dataset_name* between start_date and end_date and sample it over the AOI."""
        days_timedelta = self.end_date - self.start_date
        days_int = days_timedelta.days
        if days_int < 0:
            raise Exception('Start date should be set before end date')
        start_date_filter = self.start_date.strftime('%Y-%m-%d')
        end_date_filter = self.end_date.strftime('%Y-%m-%d')
        self.set_interpolation_boundaries()
        try:
            data_ee = ee.ImageCollection(dataset_name).filterDate(start_date_filter, end_date_filter).select(self.vars_names)
            mosaic = data_ee.mean()
            proj = ee.Projection(self.interpolation_output_crs);
            ext_factor = 1  # boundary extension factor (currently a no-op)
            # Rectangle built from self.boundaries indices 0..3 -- the exact
            # (lat/lon min/max) ordering is defined by the parent class; confirm there.
            aoi = ee.Geometry.Polygon(
                [[[self.boundaries[1] * ext_factor, self.boundaries[2] * ext_factor],
                  [self.boundaries[1] * ext_factor, self.boundaries[0] * ext_factor],
                  [self.boundaries[3] * ext_factor, self.boundaries[0] * ext_factor],
                  [self.boundaries[3] * ext_factor, self.boundaries[2] * ext_factor]]], proj, False)
            reprojected = mosaic.reproject(proj, None, 1);
            band_arrs = reprojected.sampleRectangle(region=aoi).getInfo()
            return band_arrs
        except ee.EEException as e:
            print('error', e)
        except:
            print("Unexpected error:", sys.exc_info()[0])
            raise
raise
class NASARetrieval(Knmi_Interpolator):
    """Downloads NASA LP DAAC HDF granules over HTTPS and converts them to GeoTIFF."""

    fileList = None    # list of granule download URLs
    urs = None         # Earthdata URS host name
    netrcDir = None    # expanded path of the .netrc credentials file
    vars_names = []    # HDF layer names to extract
    tiles = None       # tile identifier used to filter granule listings
    product = None     # product directory name on e4ftl01.cr.usgs.gov

    def __init__(self, parent_folder='./data'):
        # BUG FIX: the original called Knmi_Interpolator.__init__ explicitly
        # and then super().__init__ as well, running the base initializer twice.
        super().__init__(parent_folder)

    def set_urs(self, urs):
        """Set the Earthdata URS host name."""
        self.urs = urs

    def set_file_list(self, files):
        """Store the list of granule URLs to download."""
        self.fileList = list(files)

    def set_netrcDir(self, netrc_name):
        """Expand and store the path of the netrc credentials file."""
        self.netrcDir = os.path.expanduser(netrc_name)

    def set_vars_names(self, vars_names):
        """Set the HDF layer names to extract."""
        self.vars_names = vars_names

    def set_type_data(self, type_data):
        """Set the data-pool directory name ('MOLT' or 'MOTA')."""
        self.type_data = type_data

    def set_tiles(self, tiles):
        """Set the tile identifier substring used to filter directory listings."""
        self.tiles = tiles

    def set_product(self, product):
        """Set the product directory name used when building download URLs."""
        self.product = product

    def download_list(self, nmonths):
        """Build one granule URL per month, starting at self.start_date."""
        fileList_2 = []
        for i in range(1, nmonths + 1):
            # First day of the i-th month of the requested range.
            d = self.start_date.replace(day=1) + relativedelta(months=i - 1)
            dt = d.strftime('%Y.%m.%d')
            url = "https://e4ftl01.cr.usgs.gov/{2}/{0}/{1}/".format(self.product, dt, self.type_data)
            # Scrape the directory listing and keep .hdf links for our tile.
            f = requests.get(url)
            element_tree = lxml.html.fromstring(f.text)
            hrefs = np.array(element_tree.xpath('//a/@href'))
            mask = [self.tiles in href and href.endswith('.hdf') for href in hrefs]
            filename_hrz = hrefs[mask][0]  # first matching granule
            fileList_2.append(url + filename_hrz)
        self.set_file_list(fileList_2)

    def derive_file_list(self):
        """Derive the monthly download list from [start_date, end_date]."""
        days_timedelta = self.end_date - self.start_date
        if days_timedelta.days < 0:
            raise Exception('Start date should be set before end date')
        start_date_filter = self.start_date.strftime('%Y/%m/%d')
        end_date_filter = self.end_date.strftime('%Y/%m/%d')
        # One entry per month start in the range.
        range_dates = pd.date_range(start_date_filter, end_date_filter, freq="MS")
        self.download_list(len(range_dates))

    def set_up_credentials(self):
        """Ensure a .netrc entry with Earthdata credentials exists for self.urs."""
        prompts = [
            'Enter NASA Earthdata Login Username \n(or create an account at urs.earthdata.nasa.gov): ',
            'Enter NASA Earthdata Login Password: '
        ]
        try:
            self.set_netrcDir("~/.netrc")
            netrc(self.netrcDir).authenticators(self.urs)[0]
        except FileNotFoundError:
            # No .netrc yet: create it and append machine/login/password lines.
            homeDir = os.path.expanduser("~")
            Popen('touch {0}.netrc | chmod og-rw {0}.netrc | echo machine {1} >> {0}.netrc'.\
                  format(homeDir + os.sep, self.urs), shell=True)
            Popen('echo login {} >> {}.netrc'.format(getpass(prompt=prompts[0]), homeDir + os.sep), shell=True)
            Popen('echo password {} >> {}.netrc'.format(getpass(prompt=prompts[1]), homeDir + os.sep), shell=True)
        except TypeError:
            # .netrc exists but has no entry for this host: append one.
            homeDir = os.path.expanduser("~")
            Popen('echo machine {1} >> {0}.netrc'.format(homeDir + os.sep, self.urs), shell=True)
            Popen('echo login {} >> {}.netrc'.format(getpass(prompt=prompts[0]), homeDir + os.sep), shell=True)
            Popen('echo password {} >> {}.netrc'.format(getpass(prompt=prompts[1]), homeDir + os.sep), shell=True)
        # Poll (up to ~60 s) until the asynchronous shell writes are readable.
        tries = 0
        while tries < 30:
            try:
                # BUG FIX: the original referenced bare names netrcDir/urs
                # here (a NameError); use the instance attributes.
                netrc(self.netrcDir).authenticators(self.urs)[2]
            except:
                time.sleep(2.0)
                tries += 1
            else:
                # BUG FIX: stop polling once the entry is readable; without
                # the break, the corrected lookup would loop forever.
                break

    def download_files(self):
        """Stream every URL in self.fileList to disk in 16 KiB chunks."""
        for f in self.fileList:
            self.set_filename_output(f.split('/')[5] + '_' + self.product + '.hdf')
            with requests.get(f.strip(),
                              verify=False,
                              stream=True,
                              auth=(netrc(self.netrcDir).authenticators(self.urs)[0],
                                    netrc(self.netrcDir).authenticators(self.urs)[2])) as response:
                if response.status_code != 200:
                    print("{} not downloaded. Verify that your username and password are correct in {}".
                          format(f.split('/')[-1].strip(), self.netrcDir))
                else:
                    response.raw.decode_content = True
                    content = response.raw
                    with open(self.output_filename, 'wb') as d:
                        while True:
                            chunk = content.read(16 * 1024)
                            if not chunk:
                                break
                            d.write(chunk)
                    print('Downloaded file: {}'.format(self.output_filename))

    def array_to_tif(self, data_to_save):
        """Write one extracted layer (dict: band_array/template/name/layer) as an LZW GeoTIFF."""
        band_array = data_to_save['band_array']
        band_ds = data_to_save['template']
        band_path = data_to_save['name']
        band_array[band_array == -3000] = -32768  # map fill value to output nodata
        scale_raster_values = 1.0000
        if data_to_save['layer'] == 'NVDI':
            scale_raster_values = 0.0001
        elif data_to_save['layer'] == 'EVI':
            scale_raster_values = 1.0000
        try:
            band_array = np.round(band_array * scale_raster_values, decimals=4)
            out_ds = gdal.GetDriverByName('GTiff').Create(band_path,
                                                          band_array.shape[0],
                                                          band_array.shape[1],
                                                          1,
                                                          gdal.GDT_Int16,
                                                          ['COMPRESS=LZW', 'TILED=YES'])
            out_ds.SetGeoTransform(band_ds.GetGeoTransform())
            out_ds.SetProjection(band_ds.GetProjection())
            out_ds.GetRasterBand(1).SetNoDataValue(-32768 * scale_raster_values)
            out_ds.GetRasterBand(1).WriteArray(band_array)
            print('file {0} created successfully'.format(band_path))
        except Exception as e:
            print(e)

    def __print_layers_subdatasets(self, subdatasets):
        """Print (name, descriptor) for every HDF sub-dataset."""
        for fname, name in subdatasets:
            print(name, "------", fname)

    def read_sub_datasets(self, filename_raster):
        """Extract each layer in self.vars_names from an HDF file and write it as GeoTIFF.

        Returns the dict describing the last layer written (matches the
        original behavior; undefined when vars_names is empty).
        """
        filename_raster_no_folder = filename_raster.split('/')[-1]
        nasa_raster = gdal.Open(filename_raster)
        if nasa_raster is None:
            print("Problem opening file!")
            return None
        subdatasets = nasa_raster.GetSubDatasets()
        self.__print_layers_subdatasets(subdatasets)
        # The GDAL sub-dataset template depends on the data pool.
        if self.type_data == 'MOLT':
            file_template = 'HDF4_EOS:EOS_GRID:"%s":MOD_Grid_monthly_1km_VI:%s'
        elif self.type_data == 'MOTA':
            file_template = 'HDF4_EOS:EOS_GRID:"%s":MOD_Grid_BRDF:%s'
        for layer in self.vars_names:
            data = {}
            this_file = file_template % (filename_raster, layer)
            g = gdal.Open(this_file)
            if g is None:
                raise IOError
            data['band_array'] = g.ReadAsArray().astype(np.int16)
            data['name'] = '.'.join(filename_raster_no_folder.split('.')[:-1]) + '_' + layer + '.tiff'
            data['template'] = g
            data['layer'] = layer
            self.array_to_tif(data)
        return data

    def hdf_to_tiff(self):
        """Convert every downloaded '<product>.hdf' in the target folder to GeoTIFFs."""
        directory = self.parent_folder + self.target_folder
        for filename in os.listdir(directory):
            if filename.endswith(self.product + ".hdf"):
                self.read_sub_datasets(os.path.join(directory, filename))
|
# if in dict -> apply its stored value
# if not in dict -> do not apply
# NOTE(review): this script was left unfinished -- the trailing loops did not
# parse (a dangling "for i in" and an if without a colon).  The file is made
# syntactically valid here; the missing logic is preserved as TODO comments.
word_n = int(input())
words = []
words_dict = {}
max_length = 0
answer = 0
alph_value = 9
for _ in range(word_n):
    word = input()
    if len(word) > max_length:  # max_length tracks the longest word length
        max_length = len(word)
    words.append(word)  # words holds the raw strings
words.sort(key=len, reverse=True)
print(words)
print(max_length)
for l in range(max_length, 0, -1):  # lengths max_length .. 1
    current_i = max_length - l      # corresponding character index
    savepoints = []
    for word in words:
        if len(word) >= l:
            # BUG FIX: the original tested membership of the CHARACTER
            # word[current_i] but then indexed words_dict with the integer
            # position current_i (a KeyError); use the character as the key.
            if word[current_i] in words_dict:
                answer += words_dict[word[current_i]]
            else:
                savepoints.append(word[current_i])
    # TODO: unfinished -- the author sketched a check of whether each saved
    # character appears at the next index of some word, roughly:
    #   for each in savepoints:
    #       for word in words:
    #           if len(word) >= l and word[current_i + 1] == each: ...
|
# Dictionaries use {} with key: value pairs.
a_dict = {'apple': 'a', 1: 1}
print(a_dict)
print(a_dict['apple'])   # look up by key
del a_dict['apple']      # remove the 'apple' entry
print(a_dict)
a_dict['pear'] = 'p'     # insert key 'pear' with value 'p'
print(a_dict)
|
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 6 15:19:25 2015
Homework 2: Implement perceptron training using data given in
/u/cs448/data/pos. What is your accuracy on the test
file when training on the train file? Plot a graph of accuracy vs iteration
@author: Md Iftekhar Tanveer (itanveer@cs.rochester.edu)
"""
import numpy as np
import random
import matplotlib.pyplot as plt
# Load the tag inventory: one stripped tag per line of *tagsetfile*.
def readalltags(tagsetfile):
    with open(tagsetfile) as tagfile:
        return [line.strip() for line in tagfile]
# Applies dynamic programming to find the best tag sequence
def viterbi(line,E,T,tags):
    """Return (word_list, tag_list): the maximum-score tag sequence for *line*.

    E maps (tag, lowercased_word) -> emission weight and T maps
    (prev_tag, tag) -> transition weight; missing pairs fall back to large
    negative defaults.  NOTE: Python-2 code (uses xrange).
    """
    wrdlist = line.split(' ')
    # x[tag, i]: best log-score ending at word i with this tag; b: back pointers.
    x = np.ones((len(tags),len(wrdlist)))*-1*np.inf
    b = np.zeros((len(tags),len(wrdlist)))
    for i,aword in enumerate(wrdlist):
        # As I didn't see any start or end tag in the tagset, I am assuming
        # all the weights for transition from the start tag to any other tag
        # is zero (which is not true in reality).
        # So for the first word, I don't consider the transition prob
        if i==0:
            for tagid,atag in enumerate(tags):
                x[tagid,i] = E.get((atag,aword.lower()),-1*np.inf)
                b[tagid,i] = -1 # Means this is the first word
            continue
        # if not the first word, consider both transition and emission prob
        for atagid,atag in enumerate(tags):
            # theoretically, the weights should be -ve inf if a specific
            # pair is not found in the corpus. However, something didn't
            # appear in the corpus doesn't mean that its probability is
            # totally zero. So, I am assigning a small value instead of
            # -ve inf.
            emmval = E.get((atag,aword.lower()),-1*1e8) #emission prob
            for atagid_prev,atag_prev in enumerate(tags):
                trval = T.get((atag_prev,atag),-1*1e8) #transition prob
                total = x[atagid_prev,i-1]+emmval+trval
                # Debug
                # print 'currtag',atag+'('+str(atagid)+')','prevtag',atag_prev+\
                # '('+str(atagid_prev)+')','i',str(i),'word',aword,\
                # 'emm',emmval,'trans',trval,'tot',total
                if total>x[atagid,i]:
                    x[atagid,i] = total # Take the maximum logprob
                    b[atagid,i] = atagid_prev # keep a backward pointer
    # Start from the best final state and follow the back pointers.
    idx = np.argmax(x[:,-1])
    annot=[]
    # Trace back the sequence using the back pointer
    for idx_ in xrange(np.size(b,axis=1),0,-1):
        annot.append(tags[int(idx)])
        idx = b[idx,idx_-1]
    annot.reverse()
    return wrdlist,annot
# Calculate the accuracy of viterbi over a given test file
def calcaccuracy(file,E,T,tags):
    """Return (accuracy, correct_count, total_count) of Viterbi tagging on *file*.

    Each line is expected as: <id> word1 tag1 word2 tag2 ...
    """
    with open(file) as f:
        totalWords=0.
        countCorrect=0.
        for aline in f:
            # Drop the leading index token; even positions are words,
            # odd positions are the gold tags.
            data = [item.strip() for index, item in \
                    enumerate(aline.strip().split(' ')) if not index==0]
            testline = ' '.join(data[0::2])
            annotGT = data[1::2]
            wrdlst,annt=viterbi(testline,E,T,tags)
            countCorrect=countCorrect+sum([a1==a2 for a1,a2 in zip(annotGT,annt)])
            totalWords=totalWords+len(annotGT)
        return float(countCorrect)/totalWords,countCorrect,totalWords
# Learn weights of POS tagger using perceptron
def perceptron(aline,E,T,tags,countCorrect,totalWords):
    """Run one structured-perceptron update on a single training line.

    Decodes with the current weights, then rewards the gold (emission and
    transition) features and penalizes the predicted ones when the decoded
    sequence differs.  Returns (accuracy, countCorrect, totalWords, E, T).
    """
    # reading words and tags from training file
    data = [item.strip() for index, item in \
            enumerate(aline.strip().split(' ')) if not index==0]
    words = data[0::2]
    annotGT = data[1::2]
    # Applying viterbi decoding
    testline = ' '.join(words)
    wrdlst,annt=viterbi(testline,E,T,tags)
    # Using perceptron to modify weights
    if not annotGT==annt:
        for i,(tag_pred,tag_GT) in enumerate(zip(annt,annotGT)):
            # Modify the emission weights:
            #Add true value
            if not (tag_GT,words[i].lower()) in E:
                E[tag_GT,words[i].lower()] = 1
            else:
                E[tag_GT,words[i].lower()] += 1
            # Subtract predicted value
            if not (tag_pred,words[i].lower()) in E:
                E[tag_pred,words[i].lower()] = -1
            else:
                E[tag_pred,words[i].lower()] -= 1
            # Modify the transition weights if it is not the first tag
            if i>0:
                # add true value
                if not (annotGT[i-1],tag_GT) in T:
                    T[annotGT[i-1],tag_GT] = 1
                else:
                    T[annotGT[i-1],tag_GT] += 1
                # subtract predicted value
                if not (annt[i-1],tag_pred) in T:
                    T[(annt[i-1],tag_pred)] = -1
                else:
                    T[(annt[i-1],tag_pred)] -= 1
    # Counting total words, correct words and accuracy
    countCorrect=countCorrect+sum([a1==a2 \
            for a1,a2 in zip(annotGT,annt)])
    totalWords=totalWords+len(annotGT)
    acc = float(countCorrect)/totalWords
    # print accuracy
    #print 'acc=',acc,'correct=',countCorrect,\
    #'total=',totalWords
    return acc,countCorrect,totalWords,E,T
def saveweights(filename,E,T):
    """Dump emission ('E_tag_word value') and transition ('T_prev_tag value')
    weights to *filename*, one entry per line (Python-2 print>> syntax)."""
    with open(filename,'w') as f:
        for item in E.items():
            print>>f,'E_'+item[0][0]+'_'+item[0][1]+' '+str(item[1])
        for item in T.items():
            print>>f,'T_'+item[0][0]+'_'+item[0][1]+' '+str(item[1])
# Main method
def main():
    """Train the perceptron tagger for a fixed number of passes, plotting
    train/dev accuracy each iteration, then evaluate on the test file."""
    # Assuming 5 pass over the data
    iterations = 5
    # Initialize the emission and transition weights as blank hash maps
    E={}
    T={}
    tags = readalltags('./alltags') # read all the tags
    accperit =[] # list to record accuracy in each iteration
    accperit_dev = [] # list of accuracy for dev set
    plt.figure(1)
    # The perceptron will run for a constant number of iterations
    for it_ in xrange(iterations):
        totalWords=0.
        countCorrect=0.
        # Read training files and shuffle
        f=open('./train')
        alldata = f.readlines();
        random.shuffle(alldata)
        # run perceptron
        print 'training perceptron. Please wait ...'
        count=0.
        # Read each line and run perceptron command
        for aline in alldata:
            count+=1
            acc,countCorrect,totalWords,E,T = perceptron(aline,E,T,tags,\
                    countCorrect,totalWords)
            # Progress indicator (percentage of lines processed).
            if((round(count/len(alldata)*100.0)%1)==0):
                print int(count/len(alldata)*100.0),'%'
        # Calculate accuracy on dev set
        devacc = calcaccuracy('./dev',E,T,tags)[0]
        accperit.append(acc)
        accperit_dev.append(devacc)
        # Print status
        print 'Iteration:',it_,'acc_train:',acc,'acc_dev:',devacc
        # plot accuracy vs iteration
        plt.plot(np.array(accperit)*100,'r-')
        plt.hold(True)
        plt.plot(np.array(accperit_dev)*100,'b-')
        plt.hold(False)
        plt.xlabel('Iteration')
        plt.ylabel('Accuracy, %')
        plt.legend(['acc_train','acc_dev'])
        plt.draw()
        plt.pause(1)
    plt.savefig('output_plot.png',dpi=300)
    # Save the weights and display results on test data
    saveweights('weights',E,T)
    testacc,correctword,totalword = calcaccuracy('./test',E,T,tags)
    print 'Result on test data:'
    print 'Total Words =',totalword
    print 'Correctly tagged =',correctword
    print 'accuracy=',testacc*100,'%'
if __name__=='__main__':
    main()
|
#!/usr/bin/python
#\file ros_wait_srvp.py
#\brief Test wait service.
#\author Akihiko Yamaguchi, info@akihikoy.net
#\version 0.1
#\date Aug.30, 2015
import roslib; roslib.load_manifest('std_srvs')
import rospy
import std_srvs.srv
#import rospy_tutorials.srv
def SetupServiceProxy(name, srv_type, persistent=False, time_out=None):
  """Wait up to *time_out* seconds for ROS service *name* and return a
  ServiceProxy for it, or None when the wait times out (Python-2/ROS)."""
  print 'Waiting for %s... (t/o: %r)' % (name, time_out)
  try:
    rospy.wait_for_service(name, time_out)
  except rospy.exceptions.ROSException as e:
    print 'Failed to connect the service %s' % name
    print ' Error:',str(e)
    return None
  srvp= rospy.ServiceProxy(name, srv_type, persistent=persistent)
  return srvp
if __name__=='__main__':
  rospy.init_node('ros_wait_srvp')
  # Wait up to 1 s for /test_srv (std_srvs/Empty) and call it if found.
  servp= SetupServiceProxy('/test_srv', std_srvs.srv.Empty, time_out=1.0)
  '''NOTE: This (service type is wrong) works, but when using servp()
  it fails with an exception:
  ospy.service.ServiceException: unable to connect to service: remote error reported: request from [/ros_wait_srvp]: md5sums do not match: [6a2e34150c00229791cc89ff309fff21] vs. [d41d8cd98f00b204e9800998ecf8427e]'''
  #servp= SetupServiceProxy('/test_srv', rospy_tutorials.srv.AddTwoInts, time_out=1.0)
  print 'Got the service:',servp
  if servp!=None: servp()
|
from django.db import models
from django.utils.encoding import smart_text
from django.utils import timezone
from django.utils.timesince import timesince
from django.utils.text import slugify
from django.db.models.signals import pre_save,post_save,pre_delete, post_delete
import datetime
from datetime import timedelta
# Create your models here.
class Author(models.Model):
    """Minimal author model referenced by Post."""
    name = models.CharField(max_length=250)
    # age is optional (NULL allowed)
    age = models.IntegerField(null=True)
class PostQuerySet(models.QuerySet):
    """Chainable Post filters."""

    def published(self):
        """Posts whose status is 'published'."""
        return self.filter(status='published')

    def featured(self):
        """Featured posts only."""
        return self.filter(featured=True)

    def starts_with(self, start):
        """Posts whose title starts with *start* (case-insensitive)."""
        return self.filter(title__istartswith=start)

    def time_frame(self, start_date=datetime.datetime(2015,1,1), end_date=datetime.datetime(2017,11,10)):
        """Posts published within [start_date, end_date] (range is inclusive).

        BUG FIX: the original discarded its arguments and rebuilt the same
        hard-coded dates; the given dates are now used, made UTC-aware when
        passed naive.  Defaults are unchanged, so existing callers behave
        identically.
        """
        if start_date.tzinfo is None:
            start_date = start_date.replace(tzinfo=timezone.utc)
        if end_date.tzinfo is None:
            end_date = end_date.replace(tzinfo=timezone.utc)
        return self.filter(publish_date__range=(start_date, end_date))
class PostManager(models.Manager):
    """Manager exposing PostQuerySet's chainable filters on Post.objects."""
    def get_queryset(self):
        return PostQuerySet(self.model, using=self._db)
        # return super().get_queryset().filter(status='published')
    def all(self):
        # Delegates to get_queryset so .all() returns the custom queryset.
        return self.get_queryset()
class Post(models.Model):
    """Blog post with publication status, optional author and timestamps."""
    # objects = models.Manager()
    objects = PostManager()
    # (stored value, human-readable label) pairs for the status field.
    statuses = (
        ('draft','Draft'),
        ('published','Published'),
        ('active','Active')
    )
    id = models.BigAutoField(primary_key=True, auto_created=True)
    title = models.CharField(max_length=250,unique=True, error_messages={
        "unique": "The post with this title already exists, pick another",
        "required": "You have to fill this field"
    }, help_text="Must be unique value")
    # slug is auto-filled from the title by the pre_save signal handler.
    slug = models.CharField(max_length=250,null=True,blank=True)
    featured = models.BooleanField(default=True)
    # BUG FIX: the default used to be statuses[1] -- the whole
    # ('published', 'Published') tuple -- instead of the stored value.
    status = models.CharField(max_length=10,choices=statuses,default=statuses[1][0])
    body = models.TextField(blank=True)
    author = models.ForeignKey(Author, on_delete=models.CASCADE, null=True, blank=True)
    author_email = models.CharField(editable=False, max_length=240, null=True,blank=True, validators=[])
    publish_date = models.DateTimeField(auto_now=False, auto_now_add=False, default=timezone.now)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    def save(self, *args,**kwargs):
        super().save(*args,**kwargs)

    class Meta:
        verbose_name = "Post"
        verbose_name_plural = "Posts"

    def __str__(self):
        return smart_text(self.title)

    @property
    def age(self):
        """Human-readable age of the post ('just now' within one minute)."""
        now = timezone.now()  # timezone-aware current time (Django)
        difference = now - self.publish_date
        if difference <= timedelta(minutes=1):
            return "just now"
        return "{} ago".format(timesince(self.publish_date))
def before_save(sender, instance, *args,**kwargs):
    """pre_save hook: derive the slug from the title when it is missing."""
    print("before save")
    if not instance.slug:
        instance.slug = slugify(instance.title)
def after_save(sender, instance, created, *args,**kwargs):
    """post_save hook: log the saved instance's slug."""
    print("after save")
    print(instance.slug)
    # if not instance.slug:
    #     instance.slug = slugify(instance.title)
    #     instance.save()
# events
pre_save.connect(before_save,sender=Post)
post_save.connect(after_save,sender=Post)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.