#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 8/23/2017 3:28 PM
# @Author : Winnichen
# @File : settings_global.py
import pytest
testcase_path="./autocase/testCase.xlsx"
browser=pytest.config.getoption('--browser').lower()
if browser not in ("chrome","ie","firefox"):
browser="chrome"
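# Note: pytest.config.getoption('--browser') assumes a '--browser' option has been
# registered, typically in a conftest.py next to this module. A minimal sketch of
# that hook (the option name and default mirror the usage above; the file itself is
# an assumption):
#
# def pytest_addoption(parser):
#     parser.addoption("--browser", action="store", default="chrome",
#                      help="browser used to run the UI test cases")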
|
from .verb import Verb
from .exceptions import WorkloadExceededError
from .interpreter import Interpreter, build_node_tree
|
import RPi.GPIO as GPIO
import time
class Flash:
def __init__(self, BCM_NUM=19):
self.bcm_num = BCM_NUM
GPIO.setmode(GPIO.BCM) # Address GPIO pins by their BCM numbers.
GPIO.setup(self.bcm_num, GPIO.OUT) # Configure BCM pin {BCM_NUM} as an output.
def flash(self, COUNT):
try:
for _i in range(COUNT):
GPIO.output(self.bcm_num, GPIO.HIGH)
time.sleep(0.1)
GPIO.output(self.bcm_num, GPIO.LOW)
time.sleep(0.1)
except KeyboardInterrupt:
GPIO.output(self.bcm_num, GPIO.LOW)
def __del__(self):
GPIO.cleanup(self.bcm_num)
if __name__ == '__main__':
blue = Flash(BCM_NUM=13)
blue.flash(15)
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
MIT License
Copyright (c) [2019] [Orlin Dimitrov]
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import math
class Kinematics():
# Page 67, table 190
__H = 190
# Origin unknown.
# This constant needs to be revised!!!
__C = 20
# Origin unknown.
# This constant needs to be revised!!!
__LG = 30
# Origin unknown.
# This constant needs to be revised!!!
__R1 = 40
# Page 80, figure 4.12
# This constant needs to be revised!!!
__L1 = 40
# Page 80, figure 4.12
# This constant needs to be revised!!!
__L2 = 40
def __sign(self, value):
sign = 1.0
if(value > 0):
sign = 1.0
elif(value < 0):
sign = -1.0
else:
sign = 0.0
return sign
def inverse_kinematic(self, X0, Y0, Z0, P0, R0):
RR = 0.0
LF = 0.0
RM = 0.0
GA = 0.0
AL = 0.0
if (Z0 < 0):
Z0 = 0
if (Z0 < 300 and X0 < 0):
X0 = 100
RR = math.sqrt(X0 * X0 + Y0 * Y0)
LF = 2 * self.__L1 + self.__LG
if (Z0 == self.__H):
RM = LF
elif (Z0 == 0):
RM = math.sqrt((LF * LF) - (self.__H * self.__H))
else:
RM = math.sqrt((LF * LF) - ((self.__H - Z0) * (self.__H - Z0)))
if (RR > RM):
RR = RM
P0 = P0 / self.__C
R0 = R0 / self.__C
R0 = RR - self.__LG * math.cos(P0)
Z0 = self.__H - Z0 - self.__LG * math.sin(P0)
if (R0 == 0):
GA = self.__sign(Z0) * math.pi / 2
else:
GA = math.atan(Z0 / R0)
AL = math.sqrt((R0 * R0) + (Z0 * Z0)) / 2
AL = math.atan(math.sqrt((self.__L1 * self.__L1) - (AL * AL)) / AL)
if (X0 == 0):
T1 = self.__sign(Y0) * math.pi / 2
else:
T1 = math.atan(Y0 / X0)
T2 = GA - AL
T3 = GA + AL
T4 = P0 + R0 + self.__R1 * T1
T5 = P0 - R0 - self.__R1 * T1
return (T1, T2, T3, T4, T5)
def rights_kinematic(self, T1, T2, T3, T4, T5):
RP = 0.0
PP = (T4 + T5) / 2
RR = (T4 - T5) / 2 - self.__R1 * T1
RP = self.__L1 * math.cos(T2) + self.__L2 * math.cos(T3) + self.__LG * math.cos(PP)
XX = RP * math.cos(T1)
YY = RP * math.sin(T1)
ZZ = self.__H - self.__L1 * math.sin(T2) - self.__L2 * math.sin(T3) - self.__LG * math.sin(PP)
PP = PP * self.__C
RR = RR * self.__C
return (XX, YY, ZZ, PP, RR)
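# A minimal usage sketch (the pose values below are purely illustrative): run the
# inverse kinematics for a reachable pose, then feed the joint values back through
# rights_kinematic to sanity-check the round trip.
if __name__ == '__main__':
    kin = Kinematics()
    joints = kin.inverse_kinematic(60, 20, 150, 0, 0)
    print("Joint values:", joints)
    print("Recovered pose:", kin.rights_kinematic(*joints))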
|
from django.urls import path
from . import views
app_name='survey'
urlpatterns = [
path('', views.notice, name='notice'),
path('recognize/', views.recognize, name='recognize'),
path('temperature/', views.temperature, name='temperature'),
path('<int:student_id>/<str:temp>/survey/', views.survey, name='survey'),
path('<int:student_id>/<str:temp>/submit/', views.submit, name='submit'),
path('stream/',views.streaming, name='streaming'),
path('recognize2/',views.recognize2, name='recognize2'),
path('temperature2/',views.temperature2, name='temperature2')
]
|
from base64 import b64encode, b64decode
from json import dump, load
from os.path import isfile
from security.encryption import Encryption
class Database:
def __init__(self, filename):
self.filename = filename
self.PWs = {}
self.hashedPW = None
self.salt = None
self.crypto = Encryption()
def verifyPW(self, inputPW):
if self.salt is None:
self.crypto.verifyPW(inputPW, self.hashedPW, self.salt, isCreateInitial=True)
self.hashedPW = self.crypto.hashing(inputPW)
self.salt = self.crypto.salt
else:
self.crypto.verifyPW(inputPW, self.hashedPW, self.salt, isCreateInitial=False)
def loadFile(self):
if isfile(self.filename) is False:
print('There is no file named as ' + self.filename)
raise FileNotFoundError
with open(self.filename, 'r', encoding='utf-8') as dataFile:
temp = load(dataFile)
if 'HPW' in temp:
self.hashedPW = temp['HPW']
if 'SALT' in temp:
self.salt = b64decode(temp['SALT'])
if 'PW' in temp:
self.PWs = temp['PW']
def saveFile(self):
with open(self.filename, 'w', encoding='utf-8') as dataFile:
totalData = {'HPW': self.hashedPW, 'SALT': b64encode(self.salt).decode('utf-8'), 'PW': self.PWs}
dump(totalData, dataFile, indent=4)
def delSite(self, siteName):
site = self.findSiteName(siteName)
if site is not False:
self.PWs.pop(site)
else:
raise AttributeError
def delID(self, siteName, ID):
temp = self.find(siteName, ID)
site = temp['siteName']
i = temp['ID']
if site is not None and i is not None:
self.PWs[site].pop(i)
def addSite(self, siteName):
if self.findSiteName(siteName) is False:
encryptedSiteName = self.crypto.encryptPW(siteName)
self.PWs[encryptedSiteName] = {}
else:
raise AttributeError
def addID(self, siteName, ID):
try:
i = self.findSiteID(siteName, ID)
if i is False:
# If the ID does not exist yet, store a new (empty) entry for it.
self.PWs[self.findSiteName(siteName)][self.crypto.encryptPW(ID)] = ''
else:
# If it already exists, do nothing and return.
print('ID ' + ID + ' already exists in the database.')
return
except TypeError:
raise
def addPW(self, siteName, ID, PW):
temp = self.find(siteName, ID)
if temp['siteName'] is not None and temp['ID'] is not None:
self.PWs[temp['siteName']][temp['ID']] = self.crypto.encryptPW(PW)
else:
raise AttributeError
def findSiteName(self, siteName):
for i in self.PWs:
if self.crypto.decryptPW(i) == siteName:
return i
return False
def findSiteID(self, siteName, ID):
for i in self.PWs[self.findSiteName(siteName)]:
if self.crypto.decryptPW(i) == ID:
return i
return False
def find(self, siteName, ID):
if self.findSiteName(siteName) is not False:
if self.findSiteID(siteName, ID) is not False:
return {'siteName': self.findSiteName(siteName), 'ID': self.findSiteID(siteName, ID)}
else:
return {'siteName': self.findSiteName(siteName), 'ID': None}
return {'siteName': None, 'ID': None}
def getSiteList(self):
result = []
for i in self.PWs:
result.append(self.crypto.decryptPW(i))
return result
def getIDList(self, siteName):
site = self.findSiteName(siteName)
if site is not False:
result = []
for i in self.PWs[site]:
result.append(self.crypto.decryptPW(i))
return result
else:
raise AttributeError
def getPW(self, siteName, ID):
temp = self.find(siteName, ID)
site = temp['siteName']
i = temp['ID']
if site is not None and i is not None:
return self.crypto.decryptPW(self.PWs[site][i])
else:
raise AttributeError
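# A minimal usage sketch, assuming security.encryption.Encryption provides the
# hashing/verifyPW/encryptPW/decryptPW behaviour relied on above; the file name and
# the credentials below are purely illustrative.
if __name__ == '__main__':
    db = Database('vault.json')
    db.verifyPW('master-password')  # first call derives and stores the hash and salt
    db.addSite('example.com')
    db.addID('example.com', 'alice')
    db.addPW('example.com', 'alice', 's3cret')
    db.saveFile()
    print(db.getPW('example.com', 'alice'))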
|
import cv2
video_capture = cv2.VideoCapture("nvcamerasrc ! video/x-raw(memory:NVMM), width=(int)640, height=(int)480, format=(string)I420, framerate=(fraction)30/1 ! nvvidconv ! video/x-raw, format=(string)BGRx ! videoconvert ! video/x-raw, format=(string)BGR ! appsink")
while True:
video_capture_result, frame = video_capture.read()
if video_capture_result == False:
raise ValueError('Error reading the frame from camera')
cv2.imshow('Input', frame)
if cv2.waitKey(1) == 27:
break
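# Standard OpenCV teardown once the loop exits: release the camera and close the
# preview window. (For testing without the Jetson camera pipeline above,
# cv2.VideoCapture(0) could be substituted; the device index is an assumption.)
video_capture.release()
cv2.destroyAllWindows()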
|
from django.shortcuts import render
from django.http import HttpResponse
from django.shortcuts import redirect
from datetime import datetime
from crawler.algo import *
from crawler.crawl import *
def main_page(request):
projected_likes = 0
projected_days = 0
mean_like=0
mean_days=0
print "LLL"
# company_url = "https://graph.facebook.com/antaragni.iitk/posts/?key=value&access_token=176565146081574|9f55220446aa4c2d44560f2ebde2430b"
# crawl(company_url)
list_companies = ["antaragni","ritambhara"]
company_a = request.POST.get('company',False)
hashtag = request.POST.get('hashtag',False)
if company_a != False:
print "post successful"
nominalized_likes = nominalize(company_a,list_companies)
mean_like,mean_days = predict(nominalized_likes,hashtag,list_companies)
projected_likes = int(mean_like)
projected_days = int(mean_days)
return render(request,'index.html',{'projected_likes':projected_likes})
|
def calculate_discount(item_cost, relative_discount, absolute_discount):
""" Caluclate the sale price"""
percent = float(relative_discount * 0.01)
temp = item_cost - (item_cost * percent)
if temp <= 0:
raise ValueError("relative_discount too large")
print temp
final = temp - absolute_discount
if final <= 0:
raise ValueError("Overall Discount is too large")
return final
def main():
register_says = calculate_discount(100, 10, 30)
print "This item costs ${:.2f}".format(register_says)
if __name__ == "__main__":
main()
|
# class Pessoa:
# def _init_(self, nome, telefone):
# self.nome = nome
# self.telefone = telefone
# -------------------------------------------
# class Queue:
# def _int_(self):
# self.q = []
#
# def isEmpty(self):
# return (len(self.q)) == 0)
#
# def enqueue(self, item):
# return self.q.append(item)
#
# def dequeue(self):
# return self.q.pop(0)
class Conta:
def __init__(self, nome, numero):
self.cliente = nome
self.num = numero
self.saldo = 0.0
def Saldo(self):
return self.saldo
def getCliente(self):
return self.cliente
def Depositar(self, valor):
self.saldo += valor
conta1 = Conta('Joao', 1)
conta1.Depositar(100.0)
print(conta1.saldo)
print(conta1.getCliente())
conta2 = Conta('Maria', 2)
conta2.Depositar(200.0)
print(conta2.Saldo())
print(conta2.getCliente())
|
from classes.background import Background
import pygame
class Game:
def __init__(self):
self.clock = pygame.time.Clock()
self.playing = True
self.screen = pygame.display.set_mode((500, 500))
self.__background = Background()
def flip(self):
"""
Push the back buffer to the screen (pygame double-buffer flip).
"""
pygame.display.flip()
def __tick(self):
"""
Cap the game loop at 60 FPS.
"""
self.clock.tick(60)
def start_hearthbeat(self):
"""
At the start of each loop iteration, clear the screen by redrawing the background.
"""
self.__background.blit(self.screen)
def end_hearthbeat(self):
"""
After each pass of the main loop ("heartbeat"), flip the display and tick the clock.
"""
self.flip()
self.__tick()
def trigger_quit_game(self):
"""
Check whether the quit event has been fired.
"""
for event in pygame.event.get():
self.playing = self.playing and event.type != pygame.QUIT
def quit_game(self):
"""
Shut down pygame.
"""
pygame.quit()
|
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
import numpy
ext_modules=[ Extension("QF_utilities",
["QF_utilities.pyx"],
libraries=["m"],
extra_compile_args = ["-ffast-math"])]
setup(
name='Utilities for Quadrant-Folding',
ext_modules=ext_modules,
cmdclass = {"build_ext": build_ext},
include_dirs=[numpy.get_include()]
)
#
# from distutils.core import setup
# from Cython.Build import cythonize
# import numpy
#
# setup(
# name='Utilities for Quadrant-Folding',
# ext_modules=cythonize("QF_utilities.pyx"),
# include_dirs=[numpy.get_include()]
# )
|
# Split the collected mouse files into windows of duration T
# Run this before base_data_save
import ai_pred_pattern as ap
import os
import pandas as pd
# Splits a mouse file into chunks of window size T based on the client timestamp and saves each chunk
# file: file info dictionary
def mouse_time_split_save(file):
T = 300000 # window size
df = pd.read_csv(file['path'], engine='python')
new_filename = file['filename']+"_"
write_file_path = file['upper_dir']+'/'+new_filename
c = 0 # window index
start_i = 0 # first row index of the current window
for i in range(df.shape[0]):
q = df.iloc[i]['client timestamp']//T
if(q != c or i == df.shape[0]-1):
data = df.iloc[start_i:i,:]
if(i == df.shape[0]-1):
data = df.iloc[start_i:,:]
if (i-start_i == 1):
data = pd.DataFrame(columns=['client timestamp', 'button','state','x','y'])
data.to_csv(write_file_path +str(int(c))+'.csv', header=True, index=False)
start_i = i
c = q # to include idle windows, set c = c + 1 instead
os.remove(file['path']) # delete the original file
if __name__ == "__main__":
m_data_dir = './data/mouse'
user_list = os.listdir(m_data_dir)
m_files = []
for user in user_list:
m_files.extend(ap.find_files(m_data_dir, user))
for file in m_files:
mouse_time_split_save(file)
print("Done!")
|
'''
Created on Mar 5, 2013
@author: pvicente
'''
from astar_exceptions import FromCityNotFound, GoalCityNotFound
from Queue import PriorityQueue
class Cities_AStarSolver(object):
def __init__(self, city_map):
self._cities = city_map
@property
def cities(self):
return self._cities
def route(self, from_city, target_city):
'''
A-Star algorithm returns (route, cost) tuple
route: [cityname1, cityname2 ... citynamen]
cost: cost over route
'''
if self.cities[from_city] is None:
raise FromCityNotFound(from_city)
if self.cities[target_city] is None:
raise GoalCityNotFound(target_city)
from_city = self.cities[from_city]
goal_city = self.cities[target_city]
#If either the start or goal city has no links to other cities, there is no route
if len(from_city.links) == 0 or len(goal_city.links) == 0:
return None, 0
#Helper functions to encapsulate common tasks
def add_open(city, f_score):
'''
Add a city to the open priority queue and to open_set, keyed by its f_score
'''
priority_open_set.put_nowait((f_score, city))
open_set.add(city)
def get_open():
'''
Retrieve the city with the lowest f_score from the priority queue and remove it from open_set
'''
_, city=priority_open_set.get_nowait()
open_set.remove(city)
return city
def reconstruct_path(from_city):
'''
Returns a tuple (route, cost)
route: list of city names
cost: cost to go to from city
'''
ret = [from_city.name]
current_city = came_from.get(from_city.name)
while not current_city is None:
ret.insert(0, current_city)
current_city = came_from.get(current_city)
return ret, g_score[from_city.name]
def heuristic_cost(from_city, to_city):
'''
Heuristic function guiding the path finding. Currently it is the Euclidean distance between the two cities.
'''
return from_city.distance(to_city)
###Initialize lookup structures
priority_open_set = PriorityQueue()
open_set = set()
closed_set = set()
came_from = {}
current_fscore = 0 + heuristic_cost(from_city, goal_city)
add_open(from_city, current_fscore)
g_score={from_city.name: 0}
f_score={from_city.name: current_fscore}
while len(open_set) > 0:
current = get_open()
if current == goal_city:
return reconstruct_path(current)
closed_set.add(current)
for neighbor in current.links:
tentative_g_score = g_score[current.name] + current.distance(neighbor)
if neighbor in closed_set:
if tentative_g_score >= g_score[neighbor.name]:
continue
if not neighbor in open_set or tentative_g_score < g_score[neighbor.name]:
came_from[neighbor.name] = current.name
g_score[neighbor.name] = tentative_g_score
current_f_score = tentative_g_score + heuristic_cost(neighbor, goal_city)
f_score[neighbor.name] = current_f_score
if not neighbor in open_set:
add_open(neighbor, current_f_score)
return None, 0
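# A minimal usage sketch. The solver only assumes that city_map[name] yields a city
# object (or None) and that cities expose .name, .links and .distance(); the tiny
# City class and coordinates below are illustrative assumptions, not the real map
# implementation.
if __name__ == '__main__':
    import math

    class City(object):
        def __init__(self, name, x, y):
            self.name, self.x, self.y, self.links = name, x, y, []

        def distance(self, other):
            return math.hypot(self.x - other.x, self.y - other.y)

    a, b, c = City('A', 0, 0), City('B', 1, 0), City('C', 2, 0)
    a.links, b.links, c.links = [b], [a, c], [b]
    solver = Cities_AStarSolver({'A': a, 'B': b, 'C': c})
    print(solver.route('A', 'C'))  # expected: (['A', 'B', 'C'], 2.0)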
|
import time
import numpy
from smqtk.representation import DescriptorElement
# Try to import required module
try:
import solr
except ImportError:
solr = None
class SolrDescriptorElement (DescriptorElement):
"""
Descriptor element that uses a Solr instance as the backend storage medium.
Fields where data is stored in the Solr documents are specified at
construction time. We additionally set the ``id`` field to a string UUID.
``id`` is set because it is a common, required field for unique
identification of documents. The value set to the ``id`` field is
reproducible from this object's key attributes.
"""
@classmethod
def is_usable(cls):
return solr is not None
def __init__(self, type_str, uuid, solr_conn_addr,
type_field, uuid_field, vector_field, timestamp_field,
timeout=10, persistent_connection=False, commit_on_set=True):
"""
Initialize a new Solr-stored descriptor element.
:param type_str: Type of descriptor. This is usually the name of the
content descriptor that generated this vector.
:type type_str: str
:param uuid: Unique ID reference of the descriptor.
:type uuid: collections.Hashable
:param solr_conn_addr: HTTP(S) address for the Solr index to use
:type solr_conn_addr: str
:param type_field: Solr index field to store descriptor type string
value.
:type type_field: str
:param uuid_field: Solr index field to store descriptor UUID string
value in.
:type uuid_field: str
:param vector_field: Solr index field to store the descriptor vector of
floats in.
:type vector_field: str
:param timestamp_field: Solr index field to store floating-point UNIX
timestamps.
:type timestamp_field: str
:param timeout: Timeout, in seconds, for requests made against the Solr
index.
:type timeout: int
:param persistent_connection: Maintain a connection between Solr index
interactions.
:type persistent_connection: bool
:param commit_on_set: Immediately commit changes when a vector is set.
:type commit_on_set: bool
"""
super(SolrDescriptorElement, self).__init__(type_str, uuid)
self.type_field = type_field
self.uuid_field = uuid_field
self.vector_field = vector_field
self.timestamp_field = timestamp_field
self.solr_conn_addr = solr_conn_addr
self.solr_timeout = timeout
self.solr_persistent_connection = persistent_connection
self.solr_commit_on_set = commit_on_set
self.solr = self._make_solr_inst()
def __getstate__(self):
state = super(SolrDescriptorElement, self).__getstate__()
state.update({
"type_field": self.type_field,
"uuid_field": self.uuid_field,
"vector_field": self.vector_field,
"timestamp_field": self.timestamp_field,
"solr_conn_addr": self.solr_conn_addr,
"solr_persistent_connection": self.solr_persistent_connection,
"solr_timeout": self.solr_timeout,
"solr_commit_on_set": self.solr_commit_on_set,
})
return state
def __setstate__(self, state):
# Support older version of serialization
if 'type_label' in state:
self._type_label = state['type_label']
self._uuid = state['uuid']
else:
super(SolrDescriptorElement, self).__setstate__(state)
self.type_field = state['type_field']
self.uuid_field = state['uuid_field']
self.vector_field = state['vector_field']
self.timestamp_field = state['timestamp_field']
self.solr_conn_addr = state['solr_conn_addr']
self.solr_timeout = state['solr_timeout']
self.solr_persistent_connection = state['solr_persistent_connection']
self.solr_commit_on_set = state['solr_commit_on_set']
self.solr = self._make_solr_inst()
def __repr__(self):
return super(SolrDescriptorElement, self).__repr__() + \
'[url: %s, timeout: %d, ' \
'persistent: %s]' \
% (self.solr.url, self.solr.timeout, self.solr.persistent)
def _make_solr_inst(self):
return solr.Solr(self.solr_conn_addr,
persistent=self.solr_persistent_connection,
timeout=self.solr_timeout,
# debug=True # This makes things pretty verbose
)
def _base_doc(self):
t = self.type()
suuid = str(self.uuid())
return {
'id': '-'.join([t, suuid]),
self.type_field: t,
self.uuid_field: suuid,
}
def _get_existing_doc(self):
"""
:return: An existing document dict. If there isn't one for our type/uuid
we return None.
:rtype: None | dict
"""
b_doc = self._base_doc()
r = self.solr.select("id:%s AND %s:%s AND %s:%s"
% (b_doc['id'],
self.type_field, b_doc[self.type_field],
self.uuid_field, b_doc[self.uuid_field]))
if r.numFound == 1:
return r.results[0]
else:
return None
def get_config(self):
return {
"solr_conn_addr": self.solr_conn_addr,
"type_field": self.type_field,
"uuid_field": self.uuid_field,
"vector_field": self.vector_field,
"timestamp_field": self.timestamp_field,
"timeout": self.solr_timeout,
"persistent_connection": self.solr_persistent_connection,
"commit_on_set": self.solr_commit_on_set,
}
def has_vector(self):
return bool(self._get_existing_doc())
def set_vector(self, new_vec):
"""
Set the contained vector.
If this container already stores a descriptor vector, this will
overwrite it.
:param new_vec: New vector to contain.
:type new_vec: numpy.core.multiarray.ndarray
:returns: Self.
:rtype: SolrDescriptorElement
"""
doc = self._base_doc()
doc[self.vector_field] = new_vec.tolist()
doc[self.timestamp_field] = time.time()
self.solr.add(doc, commit=self.solr_commit_on_set)
return self
def vector(self):
doc = self._get_existing_doc()
if doc is None:
return None
# Vectors stored as lists in solr doc
return numpy.array(doc[self.vector_field])
DESCRIPTOR_ELEMENT_CLASS = SolrDescriptorElement
|
from argparse import Namespace
from collections import defaultdict
from dataclasses import dataclass
from typing import List, Dict, Tuple, Callable, Union, Optional
import torch
from parseridge.corpus.corpus import CorpusIterator, Corpus
from parseridge.corpus.sentence import Sentence
from parseridge.corpus.treebank import Treebank
from parseridge.parser.configuration import Configuration
from parseridge.parser.evaluation.callbacks.base_eval_callback import EvalCallback
from parseridge.parser.evaluation.callbacks.handler import EvalCallbackHandler
from parseridge.parser.evaluation.conll_eval import CoNLLEvaluationScript
from parseridge.parser.modules.data_parallel import Module
from parseridge.parser.training.dynamic_trainer import DynamicTrainer
from parseridge.utils.helpers import T
from parseridge.utils.logger import LoggerMixin
SCORES = Dict[str, Union[float, Dict[str, Dict[str, float]]]]
@dataclass
class Evaluator(LoggerMixin):
model: Module
treebank: Treebank
callbacks: Optional[List[EvalCallback]] = None
cli_args: Optional[Namespace] = None
batch_size: int = 64
eval_function: Callable = CoNLLEvaluationScript().get_las_score_for_sentences
def __post_init__(self) -> None:
self.callback_handler = EvalCallbackHandler(callbacks=self.callbacks or [])
self.callback_handler.on_initialization(
model=self.model, treebank=self.treebank, cli_args=self.cli_args
)
def shutdown(self):
self.callback_handler.on_shutdown()
def evaluate(self, epoch: int = -1, loss: float = 0.0) -> Dict[str, Dict[str, float]]:
self.model.eval()
self.callback_handler.on_eval_begin(epoch=epoch)
train_scores = self._evaluate_corpus(
self.treebank.train_corpus, corpus_type="train"
)
dev_scores = self._evaluate_corpus(self.treebank.dev_corpus, corpus_type="dev")
test_scores = defaultdict(float)
test_scores["all"] = defaultdict(float)
if self.treebank.test_corpus:
test_scores = self._evaluate_corpus(
self.treebank.test_corpus, corpus_type="test"
)
scores = {
"train": {
"las": train_scores["las"],
"uas": train_scores["uas"],
"all": train_scores["all"],
},
"dev": {
"las": dev_scores["las"],
"uas": dev_scores["uas"],
"all": dev_scores["all"],
},
"test": {
"las": test_scores["las"] if test_scores else None,
"uas": test_scores["uas"] if test_scores else None,
"all": test_scores["all"] if test_scores else None,
},
}
self.callback_handler.on_eval_end(scores=scores, loss=loss, epoch=epoch)
return scores
def _evaluate_corpus(self, corpus: Corpus, corpus_type: str) -> SCORES:
self.callback_handler.on_epoch_begin(dataset=corpus, corpus_type=corpus_type)
gold_sentences: List[Sentence] = []
pred_sentences: List[Sentence] = []
iterator = CorpusIterator(corpus, batch_size=self.batch_size, train=False)
for i, batch in enumerate(iterator):
self.callback_handler.on_batch_begin(
batch=i, batch_data=batch, corpus_type=corpus_type
)
pred, gold = self._run_prediction_batch(batch)
pred_sentences += pred
gold_sentences += gold
self.callback_handler.on_batch_end(
batch=i,
batch_data=batch,
gold_sentences=gold,
pred_sentences=pred,
corpus_type=corpus_type,
)
serialized_gold = [
sentence.to_conllu().serialize()
for sentence in sorted(gold_sentences, key=lambda s: s.id)
]
serialized_pred = [
sentence.to_conllu().serialize()
for sentence in sorted(pred_sentences, key=lambda s: s.id)
]
scores = self.eval_function(serialized_gold, serialized_pred)
self.callback_handler.on_epoch_end(
scores=scores,
gold_sentences=gold_sentences,
pred_sentences=pred_sentences,
gold_sentences_serialized=serialized_gold,
pred_sentences_serialized=serialized_pred,
corpus_type=corpus_type,
)
return scores
def _run_prediction_batch(self, batch) -> Tuple[List[Sentence], List[Sentence]]:
pred_sentences = []
gold_sentences = []
sentence_features, sentences = batch
token_sequences = sentence_features[:, 0, :]
sentence_lengths = torch.tensor(
data=[len(sentence) for sentence in sentences],
dtype=torch.int64,
device=self.model.device,
)
contextualized_tokens_batch = self.model.get_contextualized_input(
token_sequences, sentence_lengths
)
configurations = [
Configuration(
sentence,
contextualized_input,
self.model,
sentence_features=sentence_feature,
)
for contextualized_input, sentence, sentence_feature in zip(
contextualized_tokens_batch, sentences, sentence_features
)
]
while configurations:
# Pass the stacks and buffers through the MLPs in one batch
configurations = DynamicTrainer.predict_logits(configurations, self.model)
# The actual computation of the loss must be done sequentially
for configuration in configurations:
# Predict a list of possible actions: Transitions, their
# label (if the transition is LEFT/ RIGHT_ARC) and the
# score of the action based on the MLP output.
actions = configuration.predict_actions()
if not configuration.swap_possible:
# Exclude swap options
actions = [action for action in actions if action.transition != T.SWAP]
assert actions
best_action = Configuration.get_best_action(actions)
if best_action.transition == T.SWAP:
configuration.num_swap += 1
configuration.apply_transition(best_action)
if configuration.is_terminal:
pred_sentences.append(configuration.predicted_sentence)
gold_sentences.append(configuration.sentence)
# Remove all finished configurations
configurations = [c for c in configurations if not c.is_terminal]
return pred_sentences, gold_sentences
|
# -*- coding: utf-8 -*-
import psycopg2
from model.objectView import ObjectView
class InstitutionalMail:
def persistMail(self,con,user):
if (self.findMail(con,user['id'])) == None:
params = (user['id'],)
cur = con.cursor()
cur.execute('insert into mail.users (id) values (%s)',params)
def deleteMail(self,con,id):
cur = con.cursor()
cur.execute('delete from mail.users where id = %s',(id,))
def findMail(self,con,id):
cur = con.cursor()
cur.execute('select id from mail.users where id = %s',(id,))
m = cur.fetchone()
if m:
return self.convertToDict(m)
else:
return None
def convertToDict(self,m):
userMail = {
'id':m[0]
}
return userMail
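# A minimal usage sketch, assuming a reachable PostgreSQL database containing the
# mail.users table; the connection parameters and the user id are placeholders.
if __name__ == '__main__':
    con = psycopg2.connect(host='localhost', dbname='institutional', user='postgres', password='postgres')
    mail = InstitutionalMail()
    mail.persistMail(con, {'id': 'some-user-id'})
    con.commit()
    print(mail.findMail(con, 'some-user-id'))
    con.close()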
|
#coding=utf-8
from sqlalchemy import Column,Integer,String,DATETIME
from models import Model,CRUD
import datetime
from enum import Enum
class Log(Model,CRUD):
def __init__(self,usr_id,name,text,target):
self.usr_id=usr_id
self.displayName=name
self.text=text
self.target=target
__tablename__='log'
__table_args__ = {
"mysql_charset" : "utf8mb4"}
id=Column(Integer,primary_key=True,autoincrement=True)
usr_id=Column(String(100))
displayName=Column(String(100))
text=Column(String(255))
target=Column(String(255))
time=Column(DATETIME,default=datetime.datetime.now)
class StatusText(Enum):
Online="上线了"
Offline="下线了"
ChangeWorld="进入了世界"
ChangeAvatar="更换了角色"
ChangeStatus="更改了个人状态"
ChangeDescription="更改了个人描述"
OfflineText=''
if __name__ == '__main__':
#create table
from models import engine
Model.metadata.create_all(bind=engine)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django import forms
from .models import *
from django.contrib.auth.forms import UserCreationForm
class BusquedaForm(forms.Form):
e = forms.CharField(label='e', max_length=20)
class RegistroForm(UserCreationForm):
class Meta:
model = usuarios
fields = [
'username',
'first_name',
'last_name',
'email',
'grado',
]
labels = {
'username': 'Nombre de usuario',
'first_name': 'Nombre',
'last_name': 'Apellido',
'email': "Correo electronico",
'grado': "grado",
}
class EditUsu(forms.ModelForm):
class Meta:
model = usuarios
exclude = ['username',]
fields = [
'first_name',
'last_name',
'email',
'grado',
]
labels = {
'first_name': "Nombre",
'last_name': "Apellido",
'email': "Correo electronico",
'grado': 'grado',
}
class NuevaEmpresaform(forms.ModelForm):
class Meta:
model = empresas
fields = ('nombre_empresa',
'departamento',
'ubicacion',
'tutor',
)
labels = {
'nombre_empresa': 'Nombre de la empresa',
'departamento' : 'Departamento',
'ubicacion': 'Ubicacion',
'tutor': 'Tutor',
}
class PreguntasFundamentalesForm(forms.Form):
OpcionesPf1 = (
('Programacion Web','Programacion web'),
('Redes','Redes'),
('Big Data','Big Data'),
('Programacion de escritorio','Programacion de escritorio'),
('Gestion','Gestion'),
('Economia','Economia'),
('Electronica','Electronica'),
)
pf1 = forms.MultipleChoiceField(label = '*Temáticas',
widget = forms.CheckboxSelectMultiple,
choices=OpcionesPf1,
)
pf11 = forms.CharField(label = 'Otras tematicas', initial="Ninguna")
OpcionesPf2 = (
('Transporte','Transporte'),
('Economica','Economica'),
('Bono comida','Bono comida'),
('Nada','Nada'),
)
pf2 = forms.MultipleChoiceField(label = '*Ayudas',
widget = forms.CheckboxSelectMultiple,
choices=OpcionesPf2
)
pf22 = forms.CharField(label = 'Otras ayudas', initial="Ninguna")
OpcionesPf3 = [(1 , 'Muy mala'),(2 , 'Mala'),(3 , 'Normal'),(4 , 'Buena' ), (5 , 'Muy buena')]
nota = forms.ChoiceField(label = '*Opinión general sobre la empresa',
choices = OpcionesPf3,
widget = forms.RadioSelect(),
)
class PreguntasBasicasForm(forms.Form):
OpcionesPb1 = [('Si','Si'),('No','No')]
pb1 = forms.ChoiceField(label = 'Practicas remuneradas',
choices = OpcionesPb1,
widget = forms.RadioSelect(),
required=False)
OpcionesPb2 = [('Si','Si'),('No','No'),('Tal vez','Tal vez')]
pb2 = forms.ChoiceField(label = 'Posibilidad de posterior contratacion',
choices = OpcionesPb2,
widget = forms.RadioSelect(),
required=False)
OpcionesPb3= [('Malo','Malo'),('Normal','Normal'),('Bueno','Bueno')]
pb3 = forms.ChoiceField(label = 'Relacion personal y laboral con el tutor',
choices = OpcionesPb3,
widget = forms.RadioSelect(),
required=False)
pb4 = forms.CharField(label = 'Requisitos previos',
required=False)
class PreguntasOpcionalesForm(forms.Form):
OpcionesPs1= [('Grupal','Grupal'),('Individual','Individual')]
ps1 = forms.ChoiceField(label = 'Forma de trabajo',
choices = OpcionesPs1,
widget = forms.RadioSelect(),
required=False)
OpcionesPs2 = [('Jornada completa (40h)','Jornada completa (40h)'),
('Media jornada (20h)','Media jornada (20h)'),
('Jornada intensiva (35h)','Jornada intensiva (35h)')]
ps2 = forms.ChoiceField(label = 'Tipo de jornada',
choices = OpcionesPs2,
widget = forms.RadioSelect(),
required=False)
OpcionesPs3 = [('Si','Si'),('No','No')]
ps3 = forms.ChoiceField(label = 'Flexibilidad de la jornada',
choices = OpcionesPs3,
widget = forms.RadioSelect(),
required=False)
OpcionesPs4 = [('Si','Si'),('No','No')]
ps4 = forms.ChoiceField(label = 'Periodo Vacacional',
choices = OpcionesPs4,
widget = forms.RadioSelect(),
required=False)
OpcionesPs5 = [('6 meses','6 meses'),('3 meses','3 meses')]
ps5 = forms.ChoiceField(label = 'Duracion de la beca',
choices = OpcionesPs5,
widget = forms.RadioSelect(),
required=False)
OpcionesPs6 = [('Malo','Malo'),('Normal','Normal'),('Bueno','Bueno')]
ps6 = forms.ChoiceField(label = 'Ambiente laboral',
choices = OpcionesPs6,
widget = forms.RadioSelect(),
required=False)
ps7 = forms.CharField(label = 'Descripcion breve del contenido de la beca',
required=False)
class PreguntasAnecdoticasForm(forms.Form):
pa1 = forms.CharField(label = 'Conocimientos adquiridos que anadiras a tu CV',
required=False)
pa2 = forms.CharField(label = 'Comentarios y recomendaciones adicionales',
required=False)
|
# encoding: utf-8
"""
@author: liaoxingyu
@contact: xyliao1993@qq.com
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import itertools
import torch.nn.functional as F
from torch import nn
from .resnet import ResNet
class ResNetBuilder(nn.Module):
in_planes = 2048
def __init__(self, last_stride=1, model_path='/DATA/model_zoo/resnet50-19c8e357.pth'):
super().__init__()
self.base = ResNet(last_stride)
self.base.load_param(model_path)
def forward(self, x):
global_feat = self.base(x)
global_feat = F.avg_pool2d(global_feat, global_feat.shape[2:]) # (b, 2048, 1, 1)
global_feat = global_feat.view(global_feat.shape[0], -1)
return global_feat
def get_optim_policy(self):
base_param_group = self.base.parameters()
return [
{'params': base_param_group}
]
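# A minimal forward-pass sketch. __init__ loads pretrained weights from model_path,
# so a valid path to resnet50-19c8e357.pth is assumed here; the input is a random
# batch of two 256x128 RGB images.
if __name__ == '__main__':
    import torch
    model = ResNetBuilder(model_path='resnet50-19c8e357.pth')
    model.eval()
    with torch.no_grad():
        feats = model(torch.randn(2, 3, 256, 128))
    print(feats.shape)  # expected: torch.Size([2, 2048])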
|
from django.shortcuts import render
# Create your views here.
# def test_view(request):
# return HttpResponse("I am a very nice view!")
# def homepage(request):
# return HttpResponse("Welcome to the homepage")
def test_view(request):
return render(request, 'test.html')
|
#!/usr/local/bin/python2.7
# encoding: utf-8
'''
rundaemon -- starts GPIO server
rundaemon is a server exposing GPIOs via HTTP-REST and XMPP
@author: h0ru5
@copyright: 2013. All rights reserved.
@license: Apache 2.0
@contact: johannes.hund@gmail.com
@deffield updated: Updated
'''
import sys
import os
import logging
import thread
from argparse import ArgumentParser
from argparse import ArgumentDefaultsHelpFormatter
from rasp.xmpp.GpioClient import GpioClient
from rasp.http import GpioResource
__all__ = []
__version__ = 0.1
__date__ = '2013-09-15'
__updated__ = '2013-09-15'
DEBUG = 1
TESTRUN = 1
PROFILE = 0
class CLIError(Exception):
'''Generic exception to raise and log different fatal errors.'''
def __init__(self, msg):
super(CLIError).__init__(type(self))
self.msg = "E: %s" % msg
def __str__(self):
return self.msg
def __unicode__(self):
return self.msg
def main(argv=None): # IGNORE:C0111
'''Command line options.'''
if argv is None:
argv = sys.argv
else:
sys.argv.extend(argv)
program_name = os.path.basename(sys.argv[0])
program_version = "v%s" % __version__
program_build_date = str(__updated__)
program_version_message = '%%(prog)s %s (%s)' % (program_version, program_build_date)
program_shortdesc = __import__('__main__').__doc__.split("\n")[1]
program_license = '''%s
Created by user_name on %s.
Copyright 2013 organization_name. All rights reserved.
Licensed under the Apache License 2.0
http://www.apache.org/licenses/LICENSE-2.0
Distributed on an "AS IS" basis without warranties
or conditions of any kind, either express or implied.
USAGE
''' % (program_shortdesc, str(__date__))
try:
# Setup argument parser
parser = ArgumentParser(description=program_license, formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("-P", "--port", help="set port for rest access [default: ]", metavar="PORT_NUMBER",type=int)
parser.add_argument("-j", "--jid", dest="jid", help="set jid to connect xmpp", metavar="JID")
parser.add_argument("-p", "--pass", dest="passwd", help="set password to connect xmpp")
parser.add_argument("-s", "--service", dest="host", help="set host to connect xmpp")
parser.add_argument("--no-rest", dest="norest", action="store_true", help="disable rest binding")
parser.add_argument("--no-xmpp", dest="noxmpp", action="store_true", help="disable xmpp binding")
parser.add_argument("-v", "--verbose", dest="verbose", action="count",help="set verbosity level")
parser.add_argument('-V', '--version', action='version', version=program_version_message)
# set defaults
parser.set_defaults(verbose=0,port=8080)
# Process arguments
args = parser.parse_args()
#paths = args.paths
port = args.port
jid = args.jid
passwd=args.passwd
host = args.host
if(not host):
host = jid.split("@").pop()
if args.verbose > 0:
logging.basicConfig(level=logging.DEBUG)
logging.debug("Verbose mode on")
else:
logging.basicConfig(level=logging.INFO)
#create and init GPIOs
logging.info("initializing GPIOs")
if(not args.noxmpp):
#startup xmpp backend
logging.info("starting up XMPP frontend using jid %s to connect to server %s" % (jid, host))
xmpp = GpioClient(jid,passwd)
thread.start_new_thread(xmpp.start,())
if(not args.norest):
#startup web backend
logging.info("starting up HTTP-REST frontend using port %s" % port)
GpioResource.start(port)
return 0
except KeyboardInterrupt:
### handle keyboard interrupt ###
return 0
except Exception, e:
if DEBUG or TESTRUN:
raise(e)
indent = len(program_name) * " "
sys.stderr.write(program_name + ": " + repr(e) + "\n")
sys.stderr.write(indent + " for help use --help")
return 2
if __name__ == "__main__":
if DEBUG:
#sys.argv.append("-h")
sys.argv.append("-v")
if TESTRUN:
import doctest
doctest.testmod()
if PROFILE:
import cProfile
import pstats
profile_filename = 'rundaemon_profile.txt'
cProfile.run('main()', profile_filename)
statsfile = open("profile_stats.txt", "wb")
p = pstats.Stats(profile_filename, stream=statsfile)
stats = p.strip_dirs().sort_stats('cumulative')
stats.print_stats()
statsfile.close()
sys.exit(0)
sys.exit(main())
|
import pymysql
from faker import Faker
import random
fake = Faker()
conn = pymysql.connect(
host = "localhost",
database="practice",
user= "root",
password=""
)
cursor = conn.cursor()
# If you are getting the GROUP BY (ONLY_FULL_GROUP_BY) error, run: SET sql_mode = '';
query1 = """create table Customer (
custid int primary key auto_increment,
name varchar(50) not null,
gender varchar(1) not null,
phone varchar(10) not null,
address varchar(100) not null default "Pune"
);
"""
query2 = '''create table Orders(
orderid int primary key auto_increment,
ordername varchar(50) not null,
quantity int,
custid int,
foreign key(custid) references Customer(custid)
);
'''
try:
cursor.execute(query1)
cursor.execute(query2)
print("Query executed ! [Table Created]")
except pymysql.err.OperationalError :
print("Table already exists")
ls = [0,1,2,3,4,5,6,7,8,9]
def makenum():
s = ""
for _ in ls:
s += str(random.choice(ls))
return s
for i in range(200) :
name = fake.name()
gender = random.choice(['M','F'])
phone = makenum()
addres = fake.city()
query = f'insert into Customer(name, gender, phone, address) values ( "{name}", "{gender}", "{phone}", "{addres}");'
cursor.execute(query)
conn.commit()
orderlist = ["Bakery" , "Bread",
"Meat" , "Seafood",
"Pasta" , "Rice",
"Oils", "Sauces", "Salad", "Dressings", "Condiments",
"Cereals" , "Breakfast Foods",
"Soups" , "Canned Goods",
"Frozen Foods",
"Dairy", "Cheese", "Eggs" , "coffee/tea",
"juice", "soda", "sandwich loaves", "dinner rolls", "tortillas bagels",
"vegetables", "spaghetti sauce", "ketchup",
"cheeses", "eggs", "milk", "yogurt", "butter",
"cereals", "flour", "sugar", "pasta", "mixes",
"waffles", "vegetables", "individual meals", "ice cream",
"lunch meat", "poultry", "beef", "pork",
"paper towels", "toilet paper", "aluminum foil", "sandwich bags",
"baby items", "pet items", "batteries", "greeting cards"
]
for i in range(180) :
ordername = random.choice(orderlist)
quantity = random.randint(1,15)
custid = random.randint(1,160)
query = f'insert into Orders(Ordername, Quantity, custid) values ( "{ordername}", "{quantity}", "{custid}");'
cursor.execute(query)
conn.commit()
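# Note: the f-string queries above are fine for throwaway fake data, but the safer
# general pattern with pymysql is a parameterized query. A sketch of the same
# Orders insert using placeholders:
#
# query = "insert into Orders(ordername, quantity, custid) values (%s, %s, %s)"
# cursor.execute(query, (ordername, quantity, custid))
# conn.commit()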
|
from django.shortcuts import render
from django.views.generic import ListView, DetailView
from django.http import Http404
from django.db.models import Q
from .models import ProductType, Product, Category
class ProductSearchListView(ListView):
model = Product
template_name = 'products/product-list.html'
def get_context_data(self, *args, **kwargs):
context = super(ProductSearchListView, self).get_context_data(*args, **kwargs)
qs = self.request.GET.get('q', None)
prod = Product.objects.search(qs)
if qs:
context['products'] = prod
return context
class ProductDetailView(DetailView):
model = Product
template_name = 'products/detail.html'
def get_object(self, *args, **kwargs):
slug = self.kwargs.get('slug')
obj = Product.objects.get_product_by_slug(slug)
if obj:
return obj
else:
raise Http404
def get_context_data(self, *args, **kwargs):
context = super(ProductDetailView, self).get_context_data(*args, **kwargs)
prod = self.get_object()
context['object'] = prod
return context
class ProductListView(ListView):
model = Product
template_name = 'products/product-list.html'
def get_queryset(self, *args, **kwargs):
product_class = self.kwargs.get('prod_class', None)
sex = self.kwargs.get('sex', None)
print(self.kwargs)
if sex and product_class:
queryset = Product.objects.filter(
Q(product_class__name__iexact=product_class) &
Q(product_class__sex__iexact=sex)
)
elif not sex and product_class:
queryset = Product.objects.filter(
Q(product_class__name__iexact=product_class)
)
elif sex and not product_class:
queryset = Product.objects.filter(
Q(product_class__sex__iexact=sex)
)
else:
queryset = super(ProductListView, self).get_queryset(*args, **kwargs)
return queryset
def get_context_data(self, *args, **kwargs):
context = super(ProductListView, self).get_context_data(*args, **kwargs)
qs = self.get_queryset()
cat = self.kwargs.get('category')
if cat:
cat = Category.objects.get(name=cat)
qs = qs.filter(category=cat)
context['products'] = qs
return context
def product_by_timestamp(request):
qs = Product.objects.sort_product_by_timestamp()
context = {'products': qs}
return render(request, 'products/product-list.html', context)
|
#!/usr/bin/env python
import sys
import parmed as pmd
d = pmd.load_file(sys.argv[1])
for key in d:
res0 = d[key]
c0 = [a.charge for a in res0.atoms]
#print(res0, c0, sum(c0))
print(res0, sum(c0))
|
import xlrd
import xlwt
import re
from xlutils import copy
book = xlrd.open_workbook('改(补).xlsx')
wbook = copy.copy(book)
table = book.sheet_by_index(0)
wtable = wbook.get_sheet(0)
def run():
exclude_terms = ['未见', '未见异常', '未见明显异常']  # phrases meaning "nothing abnormal seen"
nrow = table.nrows
for i in range(1, nrow):
data = table.cell(i, 7).value
# print(data)
data = data.replace('超声检查报告超声所见:', '', 100)
data = data.split('。')
# print(data)
for o in exclude_terms:
data = [k for k in data if o not in k and k != '']
m = "".join(data)
# print(m)
wtable.write(i, 7, m)
for x in data:
a = re.compile(u'\d+mmx\d+mm').findall(x)
b = re.compile(u'\d+\.\d+mmx\d+\.\d+mm').findall(x)
s_list = []
if len(a) != 0 and len(b) == 0:
# print(str(x).split(':')[0])
# print(' '.join(a))
s1 = str(x).split(':')[0] + ":" + ' '.join(a) + '\n'
s_list.append(s1)
wtable.write(i, 9, ' '.join(s_list))
if len(b) != 0 and len(a) == 0:
# print(str(x).split(':')[0])
# print(''.join(b))
s2 = str(x).split(':')[0] + ":" + ' '.join(b) + '\n'
s_list.append(s2)
wtable.write(i, 9, ' '.join(s_list))
if len(a) != 0 and len(b) != 0:
s1 = str(x).split(':')[0] + ":" + ' '.join(a) + '\n'
s_list.append(s1)
s2 = str(x).split(':')[0] + ":" + ' '.join(b) + '\n'
s_list.append(s2)
wtable.write(i, 9, ' '.join(s_list))
run()
wbook.save('改改改.xlsx')
|
X = int(input())
N = int(input())
leftover = 0
for OUTER in range(N):
leftover += X
leftover -= int(input())
print(leftover+X)
|
from django.shortcuts import render, get_object_or_404, redirect
from .models import Profile
from .forms import SignupForm, ChangePasswordForm, EditProfileForm, LoginForm
from django.contrib.auth.models import User
from posts.models import Post, Follow, Stream
from django.contrib.auth import update_session_auth_hash
from django.contrib.auth.decorators import login_required
from django.urls import resolve, reverse
from django.http import HttpResponse, HttpResponseRedirect
from django.db import transaction
from django.core.paginator import Paginator
from django.contrib.auth import authenticate, login
from django.contrib import messages
def login_request(request):
if request.method == 'POST':
form = LoginForm(request=request, data=request.POST)
if form.is_valid():
username = form.cleaned_data.get('username')
password = form.cleaned_data.get('password')
user = authenticate(username=username, password=password)
if user is not None:
login(request, user)
messages.info(request, f"You are now logged in as {username}")
return redirect('/')
else:
messages.error(request,"Invalid username or password." )
else:
messages.error(request,"Invalid username or password.")
form = LoginForm()
context = {
'form':form
}
return render(request, 'authy/login.html', context)
def userprofile(request, username):
user = get_object_or_404(User, username=username)
profile = Profile.objects.get(user=user)
url_name = resolve(request.path).url_name
if url_name == 'profile':
posts = Post.objects.filter(user=user).order_by('-posted')
else:
posts = profile.favorites.all()
# posts_count = Post.objects.filter(user=user).count()
# following_count = Follow.objects.filter(following=user).count()
# followwers_count = Follow.objects.filter(following=user).count()
follow_status= Follow.objects.filter(following=user, follower=request.user).exists()
context = {
'profile':profile,
'posts':posts,
'follow_status':follow_status
}
return render(request, 'authy/profile.html', context)
def signup(request):
if request.method == 'POST':
form = SignupForm(request.POST)
if form.is_valid():
username = form.cleaned_data.get('username')
email = form.cleaned_data.get('email')
password = form.cleaned_data.get('password')
User.objects.create_user(username=username,email=email, password=password)
return redirect('login')
else:
form = SignupForm()
context = {
'form': form,
}
return render(request, 'authy/signup.html', context)
def passwordchange(request):
user = request.user
if request.method == 'POST':
form = ChangePasswordForm(request.POST)
if form.is_valid():
new_password = form.cleaned_data.get('new_password')
user.set_password(new_password)
user.save()
update_session_auth_hash(request, user)
return redirect('change_password_done')
else:
form = ChangePasswordForm(instance=user)
context = {
'form': form
}
return render(request, 'authy/change_password.html', context)
def passwordChangeDone(request):
return render(request, 'change_password_done.html')
@login_required
def editProfile(request):
user = request.user
profile = Profile.objects.get(user=user)
Base_Width = 400
if request.method == 'POST':
form = EditProfileForm(request.POST, request.FILES)
if form.is_valid():
profile.picture = form.cleaned_data.get('picture')
profile.first_name = form.cleaned_data.get('first_name')
profile.last_name = form.cleaned_data.get('last_name')
profile.location = form.cleaned_data.get('location')
profile.url = form.cleaned_data.get('url')
profile.profile_info = form.cleaned_data.get('profile_info')
profile.save()
return redirect('index')
else:
form = EditProfileForm(instance=profile)
context = {
'form':form
}
return render(request, 'authy/edit_profile.html', context)
@login_required
def follow(request, username, option):
following = get_object_or_404(User, username=username)
try:
f, created = Follow.objects.get_or_create(follower=request.user, following=following)
if int(option) == 0:
f.delete()
Stream.objects.filter(following=following, user=request.user).all().delete()
else:
posts = Post.objects.all().filter(user=following)[:25]
with transaction.atomic():
for post in posts:
stream = Stream(post=post, user=request.user, date=post.posted, following=following)
stream.save()
return HttpResponseRedirect(reverse('profile', args=[username]))
except User.DoesNotExist:
return HttpResponseRedirect(reverse('profile', args=[username]))
|
import os
def main():
infile = open('/Users/Python/Desktop/mypython/mypython-4/employees2.txt','r')
outfile = open('/Users/Python/Desktop/mypython/mypython-4/behidemidterm/file/temp.txt','w')
oldname = input('Enter old name : ')
newname = input('Enter new name : ')
for line in infile :
rec = (line.rsplit('|'))
if rec[0] == oldname :
outfile.write(newname+'|'+rec[1]+'|'+rec[2])
else:
outfile.write(line)
infile.close()
outfile.close()
main()
|
# coding: utf-8
# # Problem 4
#
# This problem consists of a single exercise worth ten (10) points.
#
# **Exercise 0** (10 points). Complete the function `flatten(L)`, which takes a "complex list," `L`, as input and returns a "flat" copy.
#
# By complex, we mean that the input list `L` consists of **arbitrarily nested lists of strings and integers**. For instance, here is a complex list:
#
# ```python
# L = [['a', ['cat'], 2],[[[3]], 'dog'], 4, 5]
# ```
#
# Observe that there are strings and integers, but that these may be embedded within lists inside lists inside lists...
#
# Given such a list, your computational task is to extract all the strings and integers, and return them in a single flattened list. For example:
#
# ```python
# assert flatten(L) == ['a', 'cat', 2, 3, 'dog', 4, 5]
# ```
#
# In your flattened version, the strings and integers must appear in the same "left-to-right" order as they do in the original list. For instance, if you strip out all the square brackets in the definition of `L` above, then observe that `'cat'` appears to the right of `'a'`, and `2` appears to the right of `'cat'`, and `3` appears to the right of `2`, etc.
#
# > Hint: One reasonable approach to this problem is to use _recursion_, that is, the idea of a function that calls itself to solve subproblems. See "Recursive programs" at the Wikipedia page on [Recursion as it is used in computer science](https://en.wikipedia.org/wiki/Recursion_%28computer_science%29#Recursive_programs).
# In[21]:
def flatten(L):
assert type(L) is list
flatL = []
for i in L:
if type(i) is not list:
flatL += [i]
else:
flatL += flatten(i)
return flatL
# In[15]:
# Test cell: test_flatten (10 points)
L = [['a',['cat'],2],[[[3]],'dog'],4,5]
FL = flatten(L)
True_L = ['a', 'cat', 2, 3, 'dog', 4, 5]
print("Your result: \n{}".format(FL))
print('True result: \n{}'.format(True_L))
assert type(FL) is list and FL == ['a', 'cat', 2, 3, 'dog', 4, 5]
print("\n")
L = [[1,[['b'],2],'t',[[3]],'snow'],'x',['hat',7]]
FL = flatten(L)
True_L = [1, 'b', 2, 't', 3, 'snow', 'x', 'hat', 7]
print("Your result: \n{}".format(FL))
print('True result: \n{}'.format(True_L))
assert type(FL) is list and FL == [1, 'b', 2, 't', 3, 'snow', 'x', 'hat', 7]
print("\n")
L = ['x',1,'z']
FL = flatten(L)
True_L = ['x',1,'z']
print("Your result: \n{}".format(FL))
print('True result: \n{}'.format(True_L))
assert type(FL) is list and FL == ['x',1,'z']
print("\n")
L = []
FL = flatten(L)
True_L = []
print("Your result: \n{}".format(FL))
print('True result: \n{}'.format(True_L))
assert type(FL) is list and FL == []
print("\n(Passed!)")
# **Fin!** You've reached the end of this problem. Don't forget to restart the kernel and run the entire notebook from top-to-bottom to make sure you did everything correctly. If that is working, try submitting this problem. (Recall that you *must* submit and pass the autograder to get credit for your work!)
|
import random
import pickle
import pymongo
##############################################################
def get_gabra_word_groups():
'''
Create a list of words obtained from a loaded Gabra MongoDB database and group them by lemma. Caches result into a pickle to avoid using the MongoDB database again.
If you already have gabra.pkl available then you do not need to load the MongoDB database.
Word groups list consists of the following tuples:
[
(
lemma e.g. "kiser",
root e.g. "k-s-r",
wordforms e.g. [ "ksirt", "kiser", "kisret", ... ]
),
...
]
'''
try:
with open("gabra.pkl", "rb") as f:
return pickle.load(f)
except:
pass
#To create a MongoDB instance with the Gabra dump:
#download tar file from http://mlrs.research.um.edu.mt/resources/gabra-api/download and extract it into a folder X
#in X create a folder called "data" next to "tmp"
#open a cmd, change directory to X and load a mongodb instance using mongod --dbpath data
#open another cmd, change directory to X\tmp and restore the dump to the database in "data" using mongorestore -d gabra --port 27017 gabra
db = pymongo.MongoClient()
invalid_vowel_pairs = { x+y for x in "aeiou" for y in "aeiou" } - { "ie", "oe", "ea", "ao", "oa", "eo" }
is_valid_word = lambda word:not any(word[i:i+2] in invalid_vowel_pairs for i in range(len(word)-1)) and word.islower() and word.isalpha()
is_valid_lexeme_doc = lambda lexeme:"lemma" in lexeme and not ("pending" in lexeme and lexeme["pending"]) and is_valid_word(lexeme["lemma"])
added_roots = set()
word_groups = []
for lexeme in db["gabra"]["lexemes"].find():
if not is_valid_lexeme_doc(lexeme):
continue
lexeme_id = lexeme["_id"]
lemma = lexeme["lemma"]
if "root" in lexeme and lexeme["root"] is not None and "radicals" in lexeme["root"]:
root = lexeme["root"]["radicals"]
if root in added_roots:
continue
else:
added_roots.add(root)
alternative_lemmas = { #all lemmas with same root
(alt_lexeme["_id"], alt_lexeme["lemma"])
for alt_lexeme in db["gabra"]["lexemes"].find({"root.radicals":root})
if is_valid_lexeme_doc(alt_lexeme)
}
(lexeme_id, lemma) = min(alternative_lemmas, key=lambda x:len(x[1])) #use shortest lemma of alternatives to represent all lemmas
wordforms = { #unify all word forms of all alternative lemmas
wordform["surface_form"]
for (alt_lexeme_id, alt_lemma) in alternative_lemmas
for wordform in db["gabra"]["wordforms"].find({"lexeme_id":alt_lexeme_id})
if is_valid_word(wordform["surface_form"])
}
else:
root = ""
wordforms = { #get all word forms of lemma
wordform["surface_form"]
for wordform in db["gabra"]["wordforms"].find({"lexeme_id":lexeme_id})
if is_valid_word(wordform["surface_form"])
}
if len(wordforms) < 3:
continue
word_groups.append((lemma, root, sorted(wordforms)))
word_groups.sort()
with open("gabra.pkl", "wb") as f:
pickle.dump(word_groups, f)
return word_groups
##############################################################
def create_raw_trainingset():
'''
Generate random (based on seed) sample of word groups and split them into two equal halves of 100 groups each for two separate text files called "trainingset1.txt" and "trainingset2.txt".
The idea is to have a text file of words which can be manually split into stems and affixes using a text editor.
The two files are used for training and validation.
'''
random.seed(seed)
pre_selected_word_groups = random.sample(word_groups, 200)
random.shuffle(pre_selected_word_groups)
for i in range(2):
selected_word_groups = pre_selected_word_groups[100*(i+0):100*(i+1)]
selected_word_groups.sort()
with open("trainingset%s.txt"%(i+1,), "w", encoding="utf-8") as f:
for (lemma, root, wordforms) in selected_word_groups:
for word in wordforms:
print(word, file=f)
print("", file=f)
##############################################################
def get_trainingset_roots():
'''
Take the training sets generated by the previous function and display their roots in order to help decide where the segmentation should be applied.
'''
random.seed(seed)
pre_selected_word_groups = random.sample(word_groups, 200)
random.shuffle(pre_selected_word_groups)
for i in range(2):
print("trainingset%s.txt"%(i+1,))
selected_word_groups = pre_selected_word_groups[100*(i+0):100*(i+1)]
selected_word_groups.sort()
for (lemma, root, wordforms) in selected_word_groups:
print(lemma, root)
print()
##############################################################
def validate_trainingset():
'''
Validate the manually segmented words in trainingset1.txt and trainingset2.txt.
The following validations are applied:
Check that, ignoring segmentation, the files still contain the same words generated in create_raw_trainingset().
Check that there are exactly 3 segments (separated by a "-") in each word.
Check that the stem (middle segment) does not end in a vowel as it was observed that no stem in Maltese ends in a vowel.
'''
random.seed(seed)
pre_selected_word_groups = random.sample(word_groups, 200)
random.shuffle(pre_selected_word_groups)
for i in range(2):
print("trainingset%s.txt"%(i+1,))
selected_word_groups = pre_selected_word_groups[100*(i+0):100*(i+1)]
selected_word_groups.sort()
originals = [ word for (lemma, root, wordforms) in selected_word_groups for word in wordforms+[ "" ] ]
with open("trainingset%s.txt"%(i+1,), "r", encoding="utf-8") as f:
for (line, original) in zip(f, originals):
line = line.strip("\r\n")
if line.replace("-", "") != original:
print("corrupted word", line, "should be", original)
break #break as a corrupted word might be caused by a missing or extra line which would shift all following words making them all appear corrupted.
elif line != "":
if line.count("-") != 2:
print("segments", line)
else:
(prefix, stem, suffix) = line.split("-")
if stem[-1] in { "a", "e", "i", "o", "u" }:
print("vowel", line)
print()
##############################################################
#obtain word groups from MongoDB database or cached gabra.pkl
word_groups = get_gabra_word_groups()
seed = 1
#uncomment the function call you want to execute
#create_raw_trainingset()
#get_trainingset_roots()
validate_trainingset()
|
"""
Enter a number and have the program generate PI up to that many decimal places.
Keep a limit to how far the program will go
"""
from math import pi

# Minimal completion of the exercise described above.
# math.pi is only accurate to about 15 decimal places, so cap the request there.
MAX_DECIMALS = 15
decimals = min(int(input('How many decimal places of pi? ')), MAX_DECIMALS)
print('value of pi: %.*f' % (decimals, pi))
|
# -*- coding: utf-8 -*-
# @Time : 2020/5/23 21:21
# @Author : J
# @File : 绘图功能.py
# @Software: PyCharm
import numpy as np
import cv2 as cv
img = np.zeros((512,512,3),np.uint8)
# np.zeros() takes two arguments: the shape of the image matrix and the data type.
# 512,512 are the pixel dimensions (the first 512 is the height, the second the width); 3 is the number of BGR color channels.
# uint8 represents every color value with 0-255.
cv.line(img,(0,0),(511,511),(0,255,0),5)
# line() takes 5 arguments here: the image, the start point, the end point, the color ((0,255,0) is green in BGR) and the line thickness 5.
cv.rectangle(img,(384,0),(510,128),(0,255,0),3)
cv.circle(img,(447,63),63,(0,0,255),-1)
# circle() takes 5 arguments here: the image, the center, the radius 63, the color ((0,0,255) is red in BGR) and the thickness; a thickness of -1 fills the shape.
cv.ellipse(img,(256,256),(100,50),0,0,180,255,-1)
# Ellipse: this call takes 8 arguments: the image, the center, the axes lengths (major, minor), the rotation angle,
# the start and end angle of the drawn arc (0,180 gives the lower half of the ellipse), the color (a scalar 255 expands to (255,0,0), blue in BGR) and the thickness.
pts = np.array([[10,5],[20,30],[70,20],[50,10]],np.int32) # polygon vertices
pts = pts.reshape((-1,1,2))
cv.polylines(img,[pts],True,(0,255,255)) # True closes the polygon; (0,255,255) is yellow in BGR
font = cv.FONT_HERSHEY_SIMPLEX
cv.putText(img,"OpenCV",(10,500),font,4,(255,255,255),2,cv.LINE_AA)
# putText() takes the image, the text string, the bottom-left coordinate, the font, the font scale 4, the color ((255,255,255) is white), the thickness 2 and the line type.
cv.imshow("image",img)
cv.waitKey(0)
cv.destroyAllWindows()
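# Optionally (added suggestion, not part of the original script), the drawing can be written to disk:
#   cv.imwrite('drawing.png', img)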
|
import os
import random
import time
def clear_screen():
    os.system('cls' if os.name == 'nt' else 'clear')  # clear the terminal on Windows or POSIX
def display_page(self):
clear_screen()
print(self.copy)
print('')
if self.end_program == True:
exit()
choice = ''
while choice not in self.answers:
question = self.question
        choice = input(question).lower()
next_page = self.answers.index(choice)
display_page(globals()[self.destinations[next_page]])
class page:
copy = ''
question = ''
answers = ''
destinations = ''
end_program = ''
intro = page()
intro.copy = '''
The sky above the grain elevator is the color of cable television tuned to a dead channel. It's
spring, and the open farming country has blossomed into low green fields spreading out to the
horizon. The hard blue of the sky is dazzling, and your eyes sting and water in the sun.
But the warm, wet wind rolling down the highway is welcome after a long winter of dust and cold.
The sky is empty and the fields are empty and for miles in both directions the road is an empty
ribbon of concrete. %ss have always been bad for the family business. And %s was always your least
favorite month.
Sometimes you don't know why you have gasoline and soda shipped this far out to nowhere.
"Hello world," you say to the rolling green fields. It's spring, and the open farming country has
gone from dull grey dead and brown to living and blooming under a warm sun.
The
bright, winter sky is a hard blue that hurts to look at. ''' % (time.strftime("%A"),time.strftime("%B"))
intro.question = "Do you want to REPEAT this page or END program? "
intro.answers = "repeat end".split()
intro.destinations = "intro exit_program".split()
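# The "end" choice routes to an exit_program page that the original snippet never defines;
# a minimal assumed definition is added here so display_page() can look it up in globals().
exit_program = page()
exit_program.copy = 'THE END.'
exit_program.end_program = True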
#Purple haze slides across a digital domain.
display_page(intro)
|
#!/usr/bin/env python3
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from Controllers.ProbeController import api_routes
app = Flask(__name__)
app.config.from_pyfile('config.py')
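# config.py is assumed (it is not shown in this snippet) to define at least the SQLAlchemy URI, e.g.
#   SQLALCHEMY_DATABASE_URI = 'sqlite:///probes.db'
#   SQLALCHEMY_TRACK_MODIFICATIONS = False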
db = SQLAlchemy(app)
app.register_blueprint(api_routes, url_prefix='/api')
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=True)
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 28 00:44:25 2021
@author: chakati
"""
import cv2
import os
import tensorflow as tf
import frameextractor as fe
import handshape_feature_extractor as hfe
import csv
import re as regex
class GestureDetail:
def __init__(self, gesture_key, gesture_name, output_label):
self.gesture_key = gesture_key
self.gesture_name = gesture_name
self.output_label = output_label
class GestureFeature:
def __init__(self, gesture_detail: GestureDetail, extracted_feature):
self.gesture_detail = gesture_detail
self.extracted_feature = extracted_feature
def extract_feature(location, input_file, mid_frame_counter):
middle_image = cv2.imread(fe.frameExtractor(location + input_file, location + "frames/", mid_frame_counter),
cv2.IMREAD_GRAYSCALE)
response = hfe.HandShapeFeatureExtractor.extract_feature(hfe.HandShapeFeatureExtractor.get_instance(),
middle_image)
return response
def decide_gesture_by_file_name(gesture_file_name):
for x in gesture_details:
if x.gesture_key == gesture_file_name.split('_')[0]:
return x
return None
def decide_gesture_by_name(lookup_gesture_name):
for x in gesture_details:
if x.gesture_name.replace(" ", "").lower() == lookup_gesture_name.lower():
return x
return None
def validate_mutate_recognition(gesture_file_name, extracted_feature_vector, calc_gesture_detail: GestureDetail):
actual_gesture = regex.search('-H-(.*?).mp4', gesture_file_name)
if actual_gesture is None:
actual_gesture = gesture_file_name.split('_')[0]
add_to_vector = False
else:
actual_gesture = actual_gesture.group(1)
add_to_vector = True
if calc_gesture_detail.gesture_name == actual_gesture or calc_gesture_detail.gesture_key == actual_gesture:
if add_to_vector:
featureVectorList.append(GestureFeature(calc_gesture_detail, extracted_feature_vector))
else:
print("mutating vector set for gesture: " + actual_gesture + " for gesture file: " + gesture_file_name)
actual_gesture_detail = decide_gesture_by_name(actual_gesture)
if actual_gesture_detail is not None:
featureVectorList.append(GestureFeature(actual_gesture_detail, extracted_feature_vector))
else:
print(
"Gesture detail not decoded for gesture: " + actual_gesture + " for gesture file: " + gesture_file_name)
return True
return False
# =============================================================================
# Recognize the gesture (use cosine similarity for comparing the vectors)
# =============================================================================
def determine_gesture(gesture_location, gesture_file_name, mid_frame_counter):
video_feature = extract_feature(gesture_location, gesture_file_name, mid_frame_counter)
re_run = True
max_mutations = 0
gesture_detail: GestureDetail = GestureDetail("", "", "")
while re_run and max_mutations < 5:
cos_sin = 1
position = 0
cursor = 0
for featureVector in featureVectorList:
calc_cos_sin = tf.keras.losses.cosine_similarity(
video_feature,
featureVector.extracted_feature,
axis=-1
)
if calc_cos_sin < cos_sin:
cos_sin = calc_cos_sin
position = cursor
cursor = cursor + 1
gesture_detail = featureVectorList[position].gesture_detail
print(gesture_file_name + " calculated gesture " + gesture_detail.gesture_name)
# re_run = validate_mutate_recognition(gesture_file_name, video_feature, gesture_detail)
re_run = False
if re_run:
max_mutations = max_mutations + 1
return gesture_detail
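# Note (added sketch, not part of the original script): tf.keras.losses.cosine_similarity
# returns values in [-1, 1] where -1 means "most similar" (it is the negated cosine of the
# angle between the L2-normalised vectors), which is why determine_gesture keeps the minimum.
# A plain NumPy equivalent of the per-pair comparison would be roughly:
#   import numpy as np
#   def cosine_similarity_loss(a, b):
#       a, b = np.ravel(a), np.ravel(b)
#       return -np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b) + 1e-12)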
gesture_details = [GestureDetail("Num0", "0", "0"), GestureDetail("Num1", "1", "1"),
GestureDetail("Num2", "2", "2"), GestureDetail("Num3", "3", "3"),
GestureDetail("Num4", "4", "4"), GestureDetail("Num5", "5", "5"),
GestureDetail("Num6", "6", "6"), GestureDetail("Num7", "7", "7"),
GestureDetail("Num8", "8", "8"), GestureDetail("Num9", "9", "9"),
GestureDetail("FanDown", "Decrease Fan Speed", "10"),
GestureDetail("FanOn", "FanOn", "11"), GestureDetail("FanOff", "FanOff", "12"),
GestureDetail("FanUp", "Increase Fan Speed", "13"),
GestureDetail("LightOff", "LightOff", "14"), GestureDetail("LightOn", "LightOn", "15"),
GestureDetail("SetThermo", "SetThermo", "16")
]
# =============================================================================
# Get the penultimate layer for training data
# =============================================================================
featureVectorList = []
path_to_train_data = "traindata/"
count = 0
for file in os.listdir(path_to_train_data):
if not file.startswith('.') and not file.startswith('frames') and not file.startswith('results'):
featureVectorList.append(GestureFeature(decide_gesture_by_file_name(file),
extract_feature(path_to_train_data, file, count)))
count = count + 1
# =============================================================================
# Get the penultimate layer for test data
# =============================================================================
video_locations = ["test/"]
test_count = 0
for video_location in video_locations:
with open('results.csv', 'w', newline='') as results_file:
fieldnames = [
'Gesture_Video_File_Name', 'Gesture_Name',
'Output_Label']
train_data_writer = csv.DictWriter(results_file, fieldnames=fieldnames)
train_data_writer.writeheader()
for test_file in os.listdir(video_location):
if not test_file.startswith('.') and not test_file.startswith('frames') \
and not test_file.startswith('results'):
recognized_gesture_detail = determine_gesture(video_location, test_file, test_count)
test_count = test_count + 1
train_data_writer.writerow({
'Gesture_Video_File_Name': test_file,
'Gesture_Name': recognized_gesture_detail.gesture_name,
'Output_Label': recognized_gesture_detail.output_label})
|
import re
import sys
import numpy as np
import matplotlib.pyplot as plt
import healpy as hp
# ----------------------------------------------------------------------
#def ctp_binning(cls):
# intype = type(cls)
# if intype.upper() == "STRING":
# ----------------------------------------------------------------------
def bp_binning( cls, bin_file, verbose=False):
code = ' > bp_binning: '
if verbose:
print code+'Assuming l=0, len(cl)-1; lmax=len(cl)-1'
print code+'Reading bin_file %s' %bin_file
nl = len(cls)
fl,ll = readcol(bin_file)
#nb = len(ll)
lmax = np.min( [nl-1, ll[-1]] )
#print nl-1, ll[-1]
if verbose:
print 'lmax = %s' %lmax
bok = np.sum( ( fl < lmax ) )
nb = bok
#print nb
if verbose:
print code+'Maximum bin = ', (fl[nb-1],ll[nb-1])
bin_cls = np.zeros( nb )
# for ib=0,bmax do bin_cls[ib] = total( cls[fl[ib]:ll[ib]] ) / (ll[ib]-fl[ib]+1)
for ib in np.arange( nb ):
#bin_cls[ib] = np.sum( cls[fl[ib]:ll[ib]] ) / (ll[ib]-fl[ib]+1)
if ll[ib] <= lmax:
bnl = ll[ib]-fl[ib]+1
else:
bnl = lmax-fl[ib]+1
#print fl[ib], ll[ib], cls[fl[ib]:ll[ib]+1], bnl
bin_cls[ib] = np.sum( cls[fl[ib]:ll[ib]+1] ) / bnl #(ll[ib]-fl[ib]+1)
if verbose:
print ib, fl[ib], ll[ib]+1, bin_cls[ib], bnl
#tnl = np.sum( ( bin_cls > 0.) )
return bin_cls
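# Usage sketch (illustrative; the spectrum array and bin file path are placeholders): band-power
# bin a D_l = l(l+1)C_l/2pi array with a two-column bin file listing the first and last multipole
# of each band, as done further below for the theoretical spectra:
#   dl = cl * l * (l + 1) / 2. / np.pi
#   binned_dl = bp_binning(dl, 'data/bins/ctp/CTP_bin_TT')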
# ----------------------------------------------------------------------
def plot_xfaster_newdata_output( file, pol=False, win=1, old=False, otit="XF T-P spectra", res=False, xpol=False, lmax=2000, chkbb=False, verbose=False, eelim=4, ttlim=250, telim=15 ):
code = ' > read_xfaster_newdata_output: '
xfdir = '/global/scratch2/sd/dpietrob/Software/XFaster/'
if verbose:
print code+'XFaster folder = '+xfdir
tcl = hp.fitsfunc.read_cl(xfdir+'data/planck_lcdm_cl_uK_xf1.e-3.fits')
tcl[4] = 0.
tcl[5] = 0.
nl = len( tcl[0] )
l = np.arange( len( tcl[0] ) )
ll = l * (l+1)/2./np.pi
### NB consistent with my definition of .newdat
if old:
nbtt,nbee,nbbb,nbtb,nbte,nbeb = readcol(file, format=np.arange(6)+1, skipline=2, numline=1, verbose=verbose)
else:
nbtt,nbee,nbbb,nbte,nbtb,nbeb = readcol(file, format=np.arange(6)+1, skipline=2, numline=1, verbose=verbose)
print ' TT # bin = ', nbtt
print ' EE # bin = ', nbee
print ' TE # bin = ', nbte
print ' BB # bin = ', nbbb
print ' TB # bin = ', nbtb
print ' EB # bin = ', nbeb
#if verbose:
#print code+'binning theoretical power spectrum...'
btcltt = bp_binning( tcl[0]*ll, xfdir+'data/bins/ctp/CTP_bin_TT' )
btclee = bp_binning( tcl[1]*ll, xfdir+'data/bins/ctp/CTP_bin_EE' )
btclte = bp_binning( tcl[3]*ll, xfdir+'data/bins/ctp/CTP_bin_TE' )
btclbb = bp_binning( tcl[2]*ll, xfdir+'data/bins/ctp/CTP_bin_EE' )
btcltb = bp_binning( tcl[4]*ll, xfdir+'data/bins/ctp/CTP_bin_TE' )
btcleb = bp_binning( tcl[5]*ll, xfdir+'data/bins/ctp/CTP_bin_EE' )
cltt,cltter,lmntt,lmxtt = readcol( file, format=[2,3,6,7], skipline=7, numline=nbtt, verbose=verbose )
# print cltt[0:10]
width = 10
if (not pol) and (not xpol):
il = np.arange(np.max(lmxtt),dtype=np.int16) + 2
#print code+'Here %s' %lmax
#fig = plt.figure(num=win, figsize=(width,450./720*width) )
#fig.figsize=(12,12*450/720)
#print code+'Here'
if not res:
fig = plt.figure(num=win, figsize=(width,450./720*width) )
fig.set_size_inches(width,450./720*width)
ax = fig.add_subplot(111)
else:
fig = plt.figure(num=win, figsize=(width,450./720*width*1.25) )
fig.set_size_inches(width,450./720*width*1.25)
ax = fig.add_subplot(211)
yl = tcl[0]*ll
ax.plot( l[il], yl[il], "r", label="Planck best fit")
ax.plot( (lmntt+lmxtt)/2, cltt, 'k.' )
ax.errorbar( (lmntt+lmxtt)/2, cltt, yerr=cltter, fmt='k.', label="TT spectrum")
tit=file.split('.newdat')
tit=tit[0].split('/')
ax.set_title( tit[-1] )
ax.set_xlabel('$\ell$')
ax.set_ylabel('$\ell(\ell+1) C_\ell /2\pi \; [\mu K^2]$')
ax.set_xlim([1,lmax*1.05])
ax.set_ylim([-500,6500])
# legend
leg = plt.legend(frameon=True)
# remove box around legend
leg.get_frame().set_edgecolor("white")
leg.get_frame().set_alpha(.8)
if res:
ax = fig.add_subplot(212)
ax.plot( l[il], yl[il]*0., "r", label="Planck best fit")
ax.plot( (lmntt+lmxtt)/2, cltt-btcltt[0:len(cltt)], "k." )
ax.errorbar( (lmntt+lmxtt)/2, cltt-btcltt[0:len(cltt)], yerr=cltter, fmt='k.', label="TT spectrum")
ax.set_xlabel('$\ell$')
            ax.set_ylabel('$\ell(\ell+1) (C_\ell-C_\ell^{th}) /2\pi \; [\mu K^2]$')
ax.set_xlim([1,lmax*1.05])
ax.set_ylim([-ttlim,ttlim])
if pol:
clee,cleeer,lmnee,lmxee = readcol( file, format=[2,3,6,7], skipline=7+2*nbtt+1, numline=nbee, verbose=verbose )
clbb,clbber,lmnbb,lmxbb = readcol( file, format=[2,3,6,7], skipline=7+2*nbtt+2*nbee+1+1, numline=nbbb, verbose=verbose )
clte,clteer,lmnte,lmxte = readcol( file, format=[2,3,6,7], skipline=7+2*nbtt+2*nbee+2*nbbb+1+1+1, numline=nbte, verbose=verbose )
#print clee[0:10], btclee[0:10]
#print clte[0:10], btclte[0:10]
#print clbb[0:10], btclbb[0:10]
il = np.arange(np.max(lmxtt),dtype=np.int16) + 2
#fig = plt.figure(win)
if not res:
fig = plt.figure(num=win, figsize=(width*1.8,450./720*width) )
fig.set_size_inches(width*1.8,450./720*width)
ax = fig.add_subplot(131)
else:
fig = plt.figure(num=win, figsize=(width*1.8,450./720*width*1.25) )
fig.set_size_inches(width*1.8,450./720*width*1.25)
ax = fig.add_subplot(231)
tit=file.split('.newdat')
tit=tit[0].split('/')
#plt.title( otit )
yl = tcl[0]*ll
ax.plot( l[il], yl[il], "r", label="Planck best fit")
ax.plot( (lmntt+lmxtt)/2, cltt, "k." )
ax.errorbar( (lmntt+lmxtt)/2, cltt, yerr=cltter, fmt='k.', label="TT spectrum")
ax.set_xlabel('$\ell$')
ax.set_ylabel('$\ell(\ell+1) C_\ell^{TT} /2\pi \; [\mu K^2]$')
ax.set_xlim([1,lmax*1.05])
ax.set_ylim([-500,6500])
leg = plt.legend(frameon=True)
leg.get_frame().set_edgecolor("white")
leg.get_frame().set_alpha(.8)
if res:
ax = fig.add_subplot(234)
ax.plot( l[il], yl[il]*0., "r", label="Planck best fit")
ax.plot( (lmntt+lmxtt)/2, cltt-btcltt[0:len(cltt)], "k." )
ax.errorbar( (lmntt+lmxtt)/2, cltt-btcltt[0:len(cltt)], yerr=cltter, fmt='k.', label="TT spectrum residuals")
ax.set_xlabel('$\ell$')
ax.set_ylabel('$\ell(\ell+1) (C_\ell^{TT}-C_\ell^{th}) /2\pi \; [\mu K^2]$')
ax.set_xlim([1,lmax*1.05])
ax.set_ylim([-ttlim,ttlim])
# EE
if not res:
axee = fig.add_subplot(132)
else:
axee = fig.add_subplot(232)
yl = tcl[1]*ll
axee.plot( l[il], yl[il], "r", label="Best fit")
axee.plot( (lmnee+lmxee)/2, clee, "k." )
axee.errorbar( (lmnee+lmxee)/2, clee, yerr=cleeer, fmt='k.', label="EE spectrum")
axee.set_xlabel('$\ell$')
axee.set_ylabel('$\ell(\ell+1) C_\ell^{EE} /2\pi \; [\mu K^2]$')
axee.set_xlim([1,lmax*1.05])
eerange = np.max(yl[0:lmax])
axee.set_ylim( [-0.1*eerange,eerange] )
leg = plt.legend(frameon=True)
leg.get_frame().set_edgecolor("white")
leg.get_frame().set_alpha(.8)
if res:
ax = fig.add_subplot(235)
ax.plot( l[il], yl[il]*0., "r", label="Planck best fit")
ax.plot( (lmnee+lmxee)/2, clee-btclee[0:len(clee)], "k." )
ax.errorbar( (lmnee+lmxee)/2, clee-btclee[0:len(clee)], yerr=cleeer, fmt='k.', label="EE spectrum residuals")
ax.set_xlabel('$\ell$')
ax.set_ylabel('$\ell(\ell+1) (C_\ell^{EE}-C_\ell^{th}) /2\pi \; [\mu K^2]$')
ax.set_xlim([1,lmax*1.05])
ax.set_ylim([-eelim,eelim])
# TE
if not res:
axte = fig.add_subplot(133)
else:
axte = fig.add_subplot(233)
yl = tcl[3]*ll
axte.plot( l[il], yl[il], "r", label="Planck best fit")
axte.plot( (lmnte+lmxte)/2, clte, "k." )
axte.errorbar( (lmnte+lmxte)/2, clte, yerr=clteer, fmt='k.', label="TE spectrum")
axte.set_xlabel('$\ell$')
axte.set_ylabel('$\ell(\ell+1) C_\ell^{TE} /2\pi \; [\mu K^2]$')
axte.set_xlim([1,lmax*1.05])
axte.set_ylim([-150,150])
leg = plt.legend(frameon=True)
leg.get_frame().set_edgecolor("white")
leg.get_frame().set_alpha(.8)
if res:
axte = fig.add_subplot(236)
axte.plot( l[il], yl[il]*0., "r", label="Planck best fit")
axte.plot( (lmnte+lmxte)/2, clte-btclte[0:len(clte)], "k." )
axte.errorbar( (lmnte+lmxte)/2, clte-btclte[0:len(clte)], yerr=clteer, fmt='k.', label="TE spectrum residuals")
axte.set_xlabel('$\ell$')
axte.set_ylabel('$\ell(\ell+1) (C_\ell^{TE}-C_\ell^{th}) /2\pi \; [\mu K^2]$')
axte.set_xlim([1,lmax*1.05])
axte.set_ylim([-telim,telim])
"""pro read_xfaster_newdata_output, file, init=init, pol=pol, win=win, old=old,otit=otit, res=res, xpol=xpol, lmax=lmax, chkbb=chkbb
endif else begin
il = findgen(max(lmxtt))+2
!p.multi = [0,3,1]
if (keyword_set(res)) then !p.multi=[0,3,2]
window, win, xsize=720*1.8, ysize=450*1.25,tit=otit
print, lmntt[0], lmxtt[0]
plot, l[il], tcl[il,0]*ll[il], chars=3, xtit='!6l', ytit='!6D!dl!uTT!n [!7l!6K!u2!n]', xr=[1,lmax]
oplot, (lmntt+lmxtt)/2,cltt, psym=4
errplot, (lmntt+lmxtt)/2,cltt-cltter, cltt+cltter
oplot, l[il], tcl[il,0]*ll[il], col=245
readcol,file,clee,cleeer,lmnee,lmxee,format='x,f,f,x,x,f,f', skipline=2*nbtt[0]+6+1, numline=nbee[0]
print, lmnee[0], lmxee[0]
il = findgen(max(lmxee))+2
ll=l*(l+1)/2./!pi
plot, l[il], tcl[il,1]*ll[il], chars=3, xtit='!6l', ytit='!6D!dl!uEE!n [!7l!6K!u2!n]', xr=[1,lmax]
oplot, (lmnee+lmxee)/2,clee, psym=4
errplot, (lmnee+lmxee)/2,clee-cleeer, clee+cleeer
oplot, l[il], tcl[il,1]*ll[il], col=245
readcol,file,clte,clteer,lmnte,lmxte,format='x,f,f,x,x,f,f', skipline=7+2*nbtt[0]+2*nbee[0]+2*nbbb[0]+1+1+1, numline=nbte[0]
print, lmnte[0], lmxte[0]
il = findgen(max(lmxte))+2
ll=l*(l+1)/2./!pi
plot, l[il], tcl[il,3]*ll[il], chars=3, xtit='!6l', ytit='!6D!dl!uTE!n [!7l!6K!u2!n]', xr=[1,lmax]
oplot, (lmnte+lmxte)/2,clte, psym=4
errplot, (lmnte+lmxte)/2,clte-clteer, clte+clteer
oplot, l[il], tcl[il,3]*ll[il], col=245
; ------ Residuals
if (keyword_set(res)) then begin
plot, (lmntt+lmxtt)/2,cltt-btcltt, psym=4, xtit='!6l', ytit='!6Residuals [!7l!6K!u2!n]', chars=3, yr=[-250,250], xr=[1,lmax]
errplot, (lmntt+lmxtt)/2, cltt-btcltt-cltter, cltt-btcltt+cltter
oplot, (lmntt+lmxtt)/2,cltt*0., thick=0.5, col=245
plot, (lmnee+lmxee)/2,clee-btclee, psym=4, xtit='!6l', ytit='!6Residuals [!7l!6K!u2!n]', chars=3, xr=[1,lmax], yr=[-5,5]
errplot, (lmnee+lmxee)/2, clee-btclee-cleeer, clee-btclee+cleeer
oplot, (lmnee+lmxee)/2,clee*0., thick=0.5, col=245
plot, (lmnte+lmxte)/2,clte-btclte, psym=4, xtit='!6l', ytit='!6Residuals [!7l!6K!u2!n]', chars=3, xr=[1,lmax], yr=[-15,15]
errplot, (lmnte+lmxte)/2, clte-btclte-clteer, clte-btclte+clteer
oplot, (lmnte+lmxte)/2,clte*0., thick=0.5, col=245
endif
endelse
if (keyword_set(xpol)) then begin
il = findgen(max(lmxtt))+2
!p.multi = [0,3,2]
window, win, xsize=720*1.8, ysize=450*1.25,tit=otit
; --- TT
print, lmntt[0],lmxtt[0]
plot, l[il], tcl[il,0]*ll[il], chars=3, xtit='!6l', ytit='!6D!dl!uTT!n [!7l!6K!u2!n]', xr=[1,lmax]
oplot, (lmntt+lmxtt)/2,cltt, psym=4
errplot, (lmntt+lmxtt)/2,cltt-cltter, cltt+cltter
oplot, l[il], tcl[il,0]*ll[il], col=245
; --- EE
readcol,file,clee,cleeer,lmnee,lmxee,format='x,f,f,x,x,f,f', skipline=2*nbtt[0]+6+1, numline=nbee[0]
print, lmnee[0],lmxee[0]
il = findgen(max(lmxee))+2
ll=l*(l+1)/2./!pi
plot, l[il], tcl[il,1]*ll[il], chars=3, xtit='!6l', ytit='!6D!dl!uEE!n [!7l!6K!u2!n]', xr=[1,lmax]
oplot, (lmnee+lmxee)/2,clee, psym=4
errplot, (lmnee+lmxee)/2,clee-cleeer, clee+cleeer
oplot, l[il], tcl[il,1]*ll[il], col=245
; --- BB
readcol,file,clbb,clbber,lmnbb,lmxbb,format='x,f,f,x,x,f,f', skipline=2*nbtt[0]+2*nbee[0]+6+1+1, numline=nbbb[0]
print, lmnbb[0],lmxbb[0]
if (keyword_set(chkbb)) then begin
oplot, (lmnee+lmxee)/2,clee-clbb, col=90, psym=6
legend, ['C!dl!uEE!n-C!dl!uBB!n'], psym=6, col=90, /top, /left, chars=1.8
endif
il = findgen(max(lmxbb))+2
ll=l*(l+1)/2./!pi
plot, l[il], tcl[il,2]*ll[il], chars=3, xtit='!6l', ytit='!6D!dl!uBB!n [!7l!6K!u2!n]', xr=[1,lmax], yr=[0,max(tcl[where(il lt lmax),1]*ll[where(il lt lmax)])]
oplot, (lmnbb+lmxbb)/2,clbb, psym=4
errplot, (lmnbb+lmxbb)/2,clbb-clbber, clbb+clbber
oplot, l[il], tcl[il,2]*ll[il], col=245
if (keyword_set(chkbb)) then begin
oplot, (lmnee+lmxee)/2,clee-btclee, col=90, psym=6
legend, ['C!dl!uEE!n-C!dl!uEE,Th!n'], psym=6, col=90, /top, /right, chars=1.8
endif
; --- TE
readcol,file,clte,clteer,lmnte,lmxte,format='x,f,f,x,x,f,f', skipline=7+2*nbtt[0]+2*nbee[0]+2*nbbb[0]+1+1+1, numline=nbte[0]
print, lmnte[0],lmxte[0]
il = findgen(max(lmxte))+2
ll=l*(l+1)/2./!pi
plot, l[il], tcl[il,3]*ll[il], chars=3, xtit='!6l', ytit='!6D!dl!uTE!n [!7l!6K!u2!n]', xr=[1,lmax]
oplot, (lmnte+lmxte)/2,clte, psym=4
errplot, (lmnte+lmxte)/2,clte-clteer, clte+clteer
oplot, l[il], tcl[il,3]*ll[il], col=245
; --- TB
readcol,file,cltb,cltber,lmntb,lmxtb,format='x,f,f,x,x,f,f', skipline=7+2*nbtt[0]+2*nbee[0]+2*nbbb[0]+2*nbte[0]+1+1+1+1, numline=nbtb[0]
print, lmntb[0],lmxtb[0]
il = findgen(max(lmxtb))+2
ll=l*(l+1)/2./!pi
plot, l[il], tcl[il,4]*ll[il], chars=3, xtit='!6l', ytit='!6D!dl!uTB!n [!7l!6K!u2!n]', xr=[1,lmax],yr=[-50,50]
oplot, (lmntb+lmxtb)/2,cltb, psym=4
errplot, (lmntb+lmxtb)/2,cltb-cltber, cltb+cltber
oplot, l[il], tcl[il,4]*ll[il], col=245
; --- EB
readcol,file,cleb,cleber,lmneb,lmxeb,format='x,f,f,x,x,f,f', skipline=7+2*nbtt[0]+2*nbee[0]+2*nbbb[0]+2*nbte[0]+2*nbtb[0]+1+1+1+1+1, numline=nbeb[0]
print, lmneb[0],lmxeb[0]
il = findgen(max(lmxeb))+2
ll=l*(l+1)/2./!pi
plot, l[il], tcl[il,5]*ll[il], chars=3, xtit='!6l', ytit='!6D!dl!uEB!n [!7l!6K!u2!n]', xr=[1,lmax], yr=[-1,1]
oplot, (lmneb+lmxeb)/2,cleb, psym=4
errplot, (lmneb+lmxeb)/2,cleb-cleber, cleb+cleber
oplot, l[il], tcl[il,5]*ll[il], col=245
endif
!p.multi = 0
stop
end"""
# ----------------------------------------------------------------------
def readcol(file,skipline=0l,numline=-1l,format=[1,2],verbose=False, twod=False):
"""Routine to emulate IDL readcol.pro"""
code = ' > readcol: '
if verbose:
print ' > Readcol: being talkative ', verbose
if verbose:
print ' > Readcol routine. D.Pietrobon Oct 2013'
if verbose:
print ' > Readcol: reading file "%s"' %file
nlines = int( get_lineNum(file,verbose=verbose) )
skipline = int( skipline )
numline = int( numline )
#print nlines, skipline, numline
if verbose:
print ' > Readcol: total number of lines: %s' %nlines
try:
file_content = open(file, 'r')
if skipline > 0:
if verbose:
print ' > Readcol: skipping %s lines...' %skipline
if numline < 0:
numline = nlines-skipline
else:
if numline > (nlines-skipline):
                print ' > Readcol: WARNING, not enough lines. Reading %s lines:' %(nlines-skipline)
numline = nlines-skipline
# Getting format
ifields = np.asarray( format, np.int16 )
nfields = len( format )
if verbose:
print ' > readcol: number of fields to be read = %s ' %nfields
if verbose:
print ' > readcol: fields read = %s' %ifields
ifields[:] -= 1
values = np.zeros( (numline, nfields) )
# for iline in np.arange(np.max( [nlines,numline] ))+skipline:
cnt = 0
for iline,line in enumerate( file_content ):
if (iline >= skipline) and (iline < skipline+numline):
if verbose:
print ' > line %s' %iline
print ' > %s' %line
line = line.strip('\n')
entries = np.asarray( line.split() )
#print ifields
#print entries[ ifields[0] ]
#print entries[ ifields ]
#if verbose:
# print entries[ [ifields] ]
#print entries[ifields]
entries = np.asarray( entries[ifields] )
#if verbose:
# print entries
#values.append(entries)
values[cnt,:] = entries
if verbose:
print values[cnt,:]
cnt += 1
file_content.close()
#print [ values[:,icol] for icol in np.arange(nfields) ]
if twod:
if verbose:
print code+'Exit-'
return values
else:
if verbose:
print code+'Exit-'
return [ values[:,icol] for icol in np.arange(nfields) ]
except IOError:
print "File not found: %s" %file
# ----------------------------------------------------------------------
def get_lineNum(file,verbose=False):
if verbose:
print ' > get_lineNum routine. D.Pietrobon Oct 2013'
nlines = -1
try:
f = open(file, 'r')
nlines=0
for iline in f:
nlines+=1
if verbose:
print " > get_lineNum: line = %s" %nlines
return nlines
except IOError:
print "File not found: %s" %file
# ----------------------------------------------------------------------
def extract_xfaster_newdata_output(file, ncl='EE', xfdir='/global/scratch2/sd/dpietrob/Software/XFaster/', tclfile='data/planck_lcdm_cl_uK_xf1.e-3.fits', old=False):
    xfdata = []
code = 'extract_xfaster_newdata_output: '
print code + 'WARNING - new xfaster output assumed.'
tcl = hp.fitsfunc.read_cl( tclfile )
tcl[4] = 0.
tcl[5] = 0.
l = np.arange( len( tcl[0]))
ll = l*(l+1)/2./np.pi
### NB consistent with my definition of .newdat
if (ncl == '1') or (ncl.upper() == 'TT'):
nbtt=readcol(file,format=[1],skipline=2,numline=1)
print ' TT # bin = ', nbtt
else:
if old:
nbtt,nbee,nbbb,nbtb,nbte,nbeb = readcol(file, format=np.arange(6)+1,skipline=2,numline=1)
else:
nbtt,nbee,nbbb,nbte,nbtb,nbeb = readcol(file, format=np.arange(6)+1,skipline=2,numline=1)
print ' EE # bin = ', nbee
print ' TE # bin = ', nbte
print ' BB # bin = ', nbbb
print ' TB # bin = ', nbtb
print ' EB # bin = ', nbeb
# ------- Binning
if (ncl == '1') or (ncl.upper() == 'TT'):
cl,cler,lmn,lmx = readcol(file,format=[2,3,6,7], skipline=7, numline=nbtt[0])
btcl = bp_binning(tcl[0]*ll, xfdir+'data/bins/ctp/CTP_bin_TT')
if (ncl == '2') or (ncl.upper() == 'EE'):
cl,cler,lmn,lmx = readcol(file,format=[2,3,6,7], skipline=2*nbtt[0]+7+1, numline=nbee[0])
btcl = bp_binning(tcl[1]*ll, xfdir+'data/bins/ctp/CTP_bin_EE')
if (ncl == '3') or (ncl.upper() == 'BB'):
cl,cler,lmn,lmx = readcol(file,format=[2,3,6,7], skipline=2*nbtt[0]+2*nbee[0]+7+1+1, numline=nbbb[0])
        btcl = bp_binning(tcl[2]*ll, xfdir+'data/bins/ctp/CTP_bin_EE')
if (ncl == '4') or (ncl.upper() == 'TE'):
cl,cler,lmn,lmx = readcol(file,format=[2,3,6,7], skipline=7+2*nbtt[0]+2*nbee[0]+2*nbbb[0]+1+1+1, numline=nbte[0])
        btcl = bp_binning(tcl[3]*ll, xfdir+'data/bins/ctp/CTP_bin_TE')
if (ncl == '5') or (ncl.upper() == 'TB'):
cl,cler,lmn,lmx = readcol(file,format=[2,3,6,7], skipline=7+2*nbtt[0]+2*nbee[0]+2*nbbb[0]+2*nbte[0]+1+1+1+1, numline=nbtb[0])
btcl = bp_binning(tcl[4]*ll, xfdir+'data/bins/ctp/CTP_bin_TE')
if (ncl == '6') or (ncl.upper() == 'EB'):
cl,cler,lmn,lmx = readcol(file,format=[2,3,6,7], skipline=7+2*nbtt[0]+2*nbee[0]+2*nbbb[0]+2*nbte[0]+2*nbtb[0]+1+1+1+1+1, numline=nbeb[0])
btcl = bp_binning(tcl[5]*ll, xfdir+'data/bins/ctp/CTP_bin_EE')
lcen = (lmn+lmx)/2.
res = cl-btcl
xfdata.append(lcen)
xfdata.append(cl)
xfdata.append(res)
xfdata.append(btcl)
return xfdata
# ----------------------------------------------------------------------
def make_sky_cut(gal_cut, ns, verbose=False):
import numpy as np
import healpy as hp
npix = 12 * ns**2
cut = np.ones( npix )
ipix = np.arange( npix, dtype=np.long )
theta,phi = hp.pix2ang( ns, ipix )
radeg = 180./np.pi
theta_deg = ( np.pi/2.-theta ) * radeg
phi_deg = phi * radeg
cut[ np.logical_and( theta_deg<gal_cut, theta_deg>-gal_cut )] = 0.
if verbose:
hp.mollview(cut)
return cut
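# Example (sketch, placeholder names): build a +/-20 degree cut at nside=128 and apply it to a map.
#   mask = make_sky_cut(20., 128)
#   masked_map = input_map * mask   # input_map is any HEALPix map in RING ordering at nside=128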
# Simple ILC code -- Translate from IDL
"""def ilc(mapfiles=maps, maskfile=maskfile, gal_cut=0, silent=False, check=False, show_mask=False, double=False, md_rem=False, verbose=False, show_map=False):
import healpy as hp
import numpy as np
if (len(maskfile) != 0) and ( gal_cut == 0) and verbose ):
print ' --> ILC performed on the whole sky.'
if (not keyword_set(maps)) then begin
nfreq = len( mapfiles )
input_type = type( mapfiles[0] )
if input_type == 'str':
if verbose:
print ' - determining map size...:'
map = hp.read_map( mapfiles[0] )
npix = len( map )
ns = np.sqrt( npix/12 )
if verbose:
print ' - determining map size...: %s' %nside
maps = np.zeros( npix, nfreq, dtype=np.float )
print 'reading maps...'
psmask = np.ones( npix )
for ifreq in range(nfreq):
print ' - map ', ifreq+1, ' of', nfreq, ' :'+mapfiles[ifreq]
temp = hp.read_map( mapfiles[ifreq] )
psmask[ int (np.logical_not( (temp == -1.6375e30) or (np.isfinite(temp)) ) ) ] = 0.
maps[*,ifreq] = temp
if check:
hp.mollview(temp, min=-300, max=300., fig=ifreq+1)
else:
npix = n_elements( maps[*,0] )
ns = np.sqrt( npix/12 )
psmask = np.ones( npix )
for ifreq in range(nfreq):
print ' - map ', ifreq+1, ' of', nfreq, ' :'+mapfiles[ifreq]
temp = ( mapfiles[ifreq] )
psmask[ int (np.logical_not( (temp == -1.6375e30) or (np.isfinite(temp)) ) ) ] = 0.
if check:
hp.mollview(temp, min=-300, max=300., fig=ifreq+1)
mask = np.ones( npix )
if len( maskfile ) gt 0:
print, ' - reading '+maskfile+'...'
gmask = hp.read_map( maskfile )
mask_ns = len(gmask)
if mask_ns != ns:
gmask = hp.ud_grade( gmask, order_in='RING', nside_out=ns)
thres = int( np.logical_not( gmask >= 0.5) )
gmask[ thres ] = 0
thres = int( np.logical_not( gmask < 0.5) )
gmask[ thres ] = 1
mask = mask * gmask
if gal_cut>0.:
dpfunc.make_sky_cut( gal_cut, ns )
mask = mask * sky_cut
if (keyword_set(show_mask) or keyword_set(check)) then mollview, mask, px=600, win=0, chars=1.5, tit='!6Mask used'
;for ifreq = 0, nfreq-1 do maps[*,ifreq] = maps[*,ifreq] * cmb2sz[ifreq]
if (keyword_set(md_rem)) then begin
print, 'removing mono/dipole...'
for ifreq=0,nfreq-1 do begin
temp = maps[*,ifreq]
remove_dipole, temp, mask, ordering='ring', nside=ns
endfor
endif
gpix = where( (mask[*,0] gt 0.) and (pmask gt 0.) )
ngpix = n_elements(gpix)
bpix = where(mask[*,0] eq 0.)
nbpix = n_elements(bpix)
gR = dblarr(nfreq, nfreq)
bR = dblarr(nfreq, nfreq)
print, ' - :DP - ILC: computing correlation matrix...'
;------ Pedestrian
if (False) then begin
ave = dblarr(nfreq,2)
for ifreq = 0, nfreq-1 do begin
gavei = mean(maps[gpix, ifreq])
ave[ifreq,0] = gavei
if (bpix[0] ne -1) then begin
bavei = mean(maps[bpix, ifreq])
ave[ifreq,1] = bavei
endif
for jfreq = ifreq, nfreq-1 do begin
avej = mean(maps[gpix,jfreq])
gR[ifreq, jfreq] = total( (maps[gpix,ifreq]-gavei) * (maps[gpix,jfreq]-avej) ) / ngpix
; gR[ifreq, jfreq] = total( (maps[gpix,ifreq]) * (maps[gpix,jfreq]) ) / ngpix
gR[jfreq, ifreq] = gR[ifreq, jfreq]
if (bpix[0] ne -1) then begin
avej = mean(maps[bpix,jfreq])
bR[ifreq, jfreq] = total( (maps[bpix,ifreq]-bavei) * (maps[bpix,jfreq]-avej) ) / nbpix
; bR[ifreq, jfreq] = total( (maps[bpix,ifreq]) * (maps[bpix,jfreq]) ) / nbpix
bR[jfreq, ifreq] = bR[ifreq, jfreq]
endif
endfor
endfor
endif
;------
for ifreq = 0, nfreq-1 do begin
if not ( keyword_set(silent) ) then print, ifreq
for jfreq = ifreq, nfreq-1 do begin
gR[ifreq, jfreq] = correlate( maps[gpix,ifreq], maps[gpix,jfreq], /covariance )
gR[jfreq, ifreq] = gR[ifreq, jfreq]
if (bpix[0] ne -1) then begin
bR[ifreq, jfreq] = correlate( maps[bpix,ifreq], maps[bpix,jfreq], /covariance )
bR[jfreq, ifreq] = bR[ifreq, jfreq]
endif
endfor
endfor
; ------
gRm1 = invert(gR, /double, status)
print, status
if (bpix[0] ne -1) then begin
bRm1 = invert(bR, /double, status)
print, status
endif
a = findgen(nfreq) * 0. + 1.
gw = dblarr(nfreq)
gw = total(gRm1,2) / total(gRm1)
if (bpix[0] ne -1) then bw = total(bRm1,2) / total(bRm1)
print, ' - gw: ', gw
if (bpix[0] ne -1) then print, ' - bw: ', bw
ilc = fltarr(npix,3)
if (keyword_set(double)) then ilc = dblarr(Npix, 3)
for ifreq = 0, nfreq-1 do begin
;## ilc[gpix] = ilc[gpix] + maps[gpix,ifreq] * gw[ifreq]
;## ilc[bpix] = ilc[bpix] + maps[bpix,ifreq] * bw[ifreq]
ilc[*,0] = ilc[*,0] + maps[*,ifreq] * gw[ifreq]
if (bpix[0] ne -1) then ilc[*,1] = ilc[*,1] + maps[*,ifreq] * bw[ifreq]
endfor
ilc[gpix,2] = ilc[gpix,0]
ilc[bpix,2] = ilc[bpix,1]
print, 'STDDEV ILC gp (fs,out,in) = ', stddev(ilc[*,0]), stddev(ilc[gpix,0]), stddev(ilc[bpix,0])
print, 'STDDEV ILC bp (fs,out,in) = ', stddev(ilc[*,1]), stddev(ilc[gpix,1]), stddev(ilc[bpix,1])
print, 'STDDEV ILC combined (fs,out,in) = ', stddev(ilc[*,2]), stddev(ilc[gpix,2]), stddev(ilc[bpix,2])
;write_fits_map, 'ffp4_01a_ILCout.fits', ilc[*,0], /ring, units='!7l!6K'
;write_fits_map, 'ffp4_01a_ILCin.fits', ilc[*,1], /ring, units='!7l!6K'
if (not keyword_set(silent)) then mollview, ilc[*,0], chars=1.5, tit='!6ILC: weights outside mask. N!dside!n='+string(ns,format='(i4.4)'), grat=[10,10], px=650;, no_monopole=true, gal_cut=40, min=-300, max=300
if (not keyword_set(silent) and (bpix[0] ne -1) ) then mollview, ilc[*,1], chars=1.5, tit='!6ILC: weights inside mask. N!dside!n='+string(ns,format='(i4.4)'), grat=[10,10], px=650;, no_monopole=true, gal_cut=40, min=-300, max=300
if (not keyword_set(silent) and (bpix[0] ne -1) ) then mollview, ilc[*,2], chars=1.5, tit='!6ILC: combined', grat=[10,10], px=650;, no_monopole=true, gal_cut=40, min=-300, max=300
if (not keyword_set(silent) and (bpix[0] ne -1) ) then mollview, ilc[*,0]-ilc[*,1], chars=1.5, tit='!6ILC Difference', grat=[10,10], px=650;, no_monopole=true, gal_cut=40, min=-15, max=15
if (do_png) then begin
restore, 'chains/pix_01a_v2.res.sav'
mollview, cmb, min=-300, max=300, chars=1.5, win=4, tit='!6Commander', no_monopole=true, gal_cut=40
mollview, cmb-ilc[*,0], min=-30, max=30, chars=1.5, win=5, tit='!6Commander-ILC!dout!n', no_monopole=true, gal_cut=40
mollview, cmb-ilc[*,1], min=-30, max=30, chars=1.5, win=6, tit='!6Commander-ILC!din!n', no_monopole=true, gal_cut=40
read_fits_map, 'ffp4_scalar_cmb_ns128_60arcmin_uK.fits', inp
mollview, cmb-inp, min=-30, max=30, chars=1.5, win=7, tit='!6Commander-Input', no_monopole=true, gal_cut=40
mollview, ilc[*,0]-inp, min=-30, max=30, chars=1.5, win=8, tit='!6ILC!dout!n-Input', no_monopole=true, gal_cut=40
mollview, ilc[*,1]-inp, min=-30, max=30, chars=1.5, win=9, tit='!6ILC!din!n-Input', no_monopole=true, gal_cut=40
mollview, ilc[*,0], min=-300, max=300, chars=1.5, win=-1, tit='!6ILC: weights outside mask', no_monopole=true, gal_cut=40, png='ffp4_01a_ILCout.png'
mollview, ilc[*,1], min=-300, max=300, chars=1.5, win=-2, tit='!6ILC: weights inside mask', no_monopole=true, gal_cut=40, png='ffp4_01a_ILCin.png'
mollview, ilc[*,0]-ilc[*,1], min=-30, max=30, chars=1.5, win=-3, tit='!6ILC Difference', no_monopole=true, gal_cut=40, png='ffp4_01a_ILCout-in.png'
mollview, cmb, min=-300, max=300, chars=1.5, win=-4, tit='!6Commander', no_monopole=true, gal_cut=40, png='ffp4_01a_CMD.png'
mollview, cmb-ilc[*,0], min=-30, max=30, chars=1.5, win=-5, tit='!6Commander-ILC!dout!n', no_monopole=true, gal_cut=40, png='ffp4_01a_CMD-ILCout.png'
mollview, cmb-ilc[*,1], min=-30, max=30, chars=1.5, win=-6, tit='!6Commander-ILC!din!n', no_monopole=true, gal_cut=40, png='ffp4_01a_CMD-ILCin.png'
mollview, cmb-inp, min=-30, max=30, chars=1.5, win=-7, tit='!6Commander-Input', no_monopole=true, gal_cut=40, png='ffp4_01a_CMD-INP.png'
mollview, ilc[*,0]-inp, min=-30, max=30, chars=1.5, win=-8, tit='!6ILC!dout!n-Input', no_monopole=true, gal_cut=40, png='ffp4_01a_ILCout-INP.png'
mollview, ilc[*,1]-inp, min=-30, max=30, chars=1.5, win=-9, tit='!6ILC!din!n-Input', no_monopole=true, gal_cut=40, png='ffp4_01a_ILCin-INP.png'
endif
print, ' --- End of Program ---'
return, ilc
;stop
; matrix multiplication failures
gw = grm1##a
n = reform(a##(grm1##a))
gw[*] = gw[*] / n[0]
print, gw
bw = brm1##a
n = reform(a##(brm1##a))
bw[*] = bw[*] / n[0]
stop
end"""
|
"""
Copyright 1999 Illinois Institute of Technology
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL ILLINOIS INSTITUTE OF TECHNOLOGY BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Except as contained in this notice, the name of Illinois Institute
of Technology shall not be used in advertising or otherwise to promote
the sale, use or other dealings in this Software without prior written
authorization from Illinois Institute of Technology.
"""
import os
import copy
import pickle
import numpy as np
import cv2
import fabio
from scipy.ndimage.filters import gaussian_filter, convolve1d
from scipy.interpolate import UnivariateSpline
from skimage.morphology import white_tophat, disk
import ccp13
from pyFAI.method_registry import IntegrationMethod
from pyFAI.azimuthalIntegrator import AzimuthalIntegrator
from musclex import __version__
try:
from . import QF_utilities as qfu
from ..utils.file_manager import fullPath, createFolder, getBlankImageAndMask, getMaskOnly, ifHdfReadConvertless
from ..utils.histogram_processor import *
from ..utils.image_processor import *
except: # for coverage
from modules import QF_utilities as qfu
from utils.file_manager import fullPath, createFolder, getBlankImageAndMask, getMaskOnly, ifHdfReadConvertless
from utils.histogram_processor import *
from utils.image_processor import *
# Make sure the cython part is compiled
# from subprocess import call
# call(["python setup2.py build_ext --inplace"], shell = True)
class QuadrantFolder:
"""
A class for Quadrant Folding processing - go to process() to see all processing steps
"""
def __init__(self, img_path, img_name, parent, file_list=None, extension=''):
"""
Initial value for QuadrantFolder object
:param img_path: directory path of input image
:param img_name: image file name
"""
if extension in ('.hdf5', '.h5'):
index = next((i for i, item in enumerate(file_list[0]) if item == img_name), 0)
self.orig_img = file_list[1][index]
else:
self.orig_img = fabio.open(fullPath(img_path, img_name)).data
self.orig_img = ifHdfReadConvertless(img_name, self.orig_img)
self.orig_img = self.orig_img.astype("float32")
self.orig_image_center = None
self.dl, self.db = 0, 0
self.empty = False
self.img_path = img_path
self.img_name = img_name
self.imgCache = {} # displayed images will be saved in this param
self.ignoreFolds = set()
self.version = __version__
cache = self.loadCache() # load from cache if it's available
self.initImg = None
self.centImgTransMat = None # Centerize image transformation matrix
self.center_before_rotation = None # we need the center before rotation is applied each time we rotate the image
self.rotMat = None # store the rotation matrix used so that any point specified in current co-ordinate system can be transformed to the base (original image) co-ordinate system
self.centerChanged = False
self.expandImg = 1
if parent is not None:
self.parent = parent
else:
self.parent = self
self.newImgDimension = None
self.masked = False
# info dictionary will save all results
if cache is not None:
self.info = cache
else:
self.info = {}
def cacheInfo(self):
"""
        Save info dict to cache. Cache file will be saved as filename.info in folder "qf_cache"
:return: -
"""
cache_file = fullPath(fullPath(self.img_path, "qf_cache"), self.img_name + ".info")
createFolder(fullPath(self.img_path, "qf_cache"))
self.info['program_version'] = self.version
with open(cache_file, "wb") as c:
pickle.dump(self.info, c)
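    # Illustration (added; the path is an example only): for an image /data/scan/im_001.tif the cache
    # ends up at /data/scan/qf_cache/im_001.tif.info, holding the pickled info dict plus the program version.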
def loadCache(self):
"""
Load info dict from cache. Cache file will be filename.info in folder "qf_cache"
:return: cached info (dict)
"""
cache_file = fullPath(fullPath(self.img_path, "qf_cache"), self.img_name+".info")
if os.path.isfile(cache_file):
with open(cache_file, "rb") as c:
info = pickle.load(c)
if info is not None:
if info['program_version'] == self.version:
return info
print("Cache version " + info['program_version'] + " did not match with Program version " + self.version)
print("Invalidating cache and reprocessing the image")
return None
def delCache(self):
"""
Delete cache
:return: -
"""
cache_path = fullPath(self.img_path, "qf_cache")
cache_file = fullPath(cache_path, self.img_name + '.info')
if os.path.exists(cache_path) and os.path.isfile(cache_file):
os.remove(cache_file)
def deleteFromDict(self, dicto, delStr):
"""
Delete a key and value from dictionary
        :param dicto: input dictionary
:param delStr: deleting key
:return: -
"""
if delStr in dicto:
del dicto[delStr]
def process(self, flags):
"""
All processing steps - all flags are provided by Quadrant Folding app as a dictionary
        flags must have ...
        ignore_folds - ignored quadrants = quadrants that will not be averaged
        bgsub - background subtraction method (-1 = no bg sub, 0 = Circular, 1 = 2D convex hull, 2 = white-top-hat)
        mask_thres - pixel value that won't be averaged (deprecated)
        sigmoid - merging gradient
        other background subtraction params - cirmin, cirmax, nbins, tophat1, tophat2
"""
print(str(self.img_name) + " is being processed...")
self.updateInfo(flags)
self.initParams()
self.applyBlankImageAndMask()
self.findCenter()
self.centerizeImage()
self.rotateImg()
self.calculateAvgFold()
self.getRminmax()
self.applyBackgroundSubtraction()
self.mergeImages()
self.generateResultImage()
if "no_cache" not in flags:
self.cacheInfo()
self.parent.statusPrint("")
def updateInfo(self, flags):
"""
Update info dict using flags
:param flags: flags
:return: -
"""
if flags['orientation_model'] is None:
if 'orientation_model' not in self.info:
flags['orientation_model'] = 0
else:
del flags['orientation_model']
self.info.update(flags)
if 'fixed_roi_rad' in self.info:
self.info['roi_rad'] = self.info['fixed_roi_rad']
def initParams(self):
"""
        Initialize some parameters in case the GUI didn't specify them
"""
if 'mask_thres' not in self.info:
self.info['mask_thres'] = getMaskThreshold(self.orig_img)
if 'ignore_folds' not in self.info:
self.info['ignore_folds'] = set()
if 'bgsub' not in self.info:
self.info['bgsub'] = 0
if 'sigmoid' not in self.info:
self.info['sigmoid'] = 0.05
def applyBlankImageAndMask(self):
"""
Apply the blank image and mask threshold on the orig_img
:return: -
"""
if 'blank_mask' in self.info and self.info['blank_mask'] and not self.masked:
img = np.array(self.orig_img, 'float32')
blank, mask = getBlankImageAndMask(self.img_path)
maskOnly = getMaskOnly(self.img_path)
if blank is not None:
img = img - blank
if mask is not None:
img[mask > 0] = self.info['mask_thres'] - 1.
if maskOnly is not None:
print("Applying mask only image")
img[maskOnly > 0] = self.info['mask_thres'] - 1
self.orig_img = img
self.masked = True
def findCenter(self):
"""
Find center of the diffraction. The center will be kept in self.info["center"].
Once the center is calculated, the rotation angle will be re-calculated, so self.info["rotationAngle"] is deleted
"""
self.parent.statusPrint("Finding Center...")
if 'mask_thres' not in self.info:
self.initParams()
if 'center' in self.info:
self.centerChanged = False
return
self.centerChanged = True
if 'calib_center' in self.info:
self.info['center'] = self.info['calib_center']
return
if 'manual_center' in self.info:
center = self.info['manual_center']
if self.rotMat is not None:
center = np.dot(cv2.invertAffineTransform(self.rotMat), [center[0] + self.dl, center[1] + self.db, 1])
self.info['manual_center'] = center
self.info['center'] = self.info['manual_center']
return
print("Center is being calculated ... ")
self.orig_image_center = getCenter(self.orig_img)
self.orig_img, self.info['center'] = processImageForIntCenter(self.orig_img, self.orig_image_center)
print("Done. Center = "+str(self.info['center']))
def rotateImg(self):
"""
Find rotation angle of the diffraction. Turn the diffraction equator to be horizontal. The angle will be kept in self.info["rotationAngle"]
Once the rotation angle is calculated, the average fold will be re-calculated, so self.info["avg_fold"] is deleted
"""
self.parent.statusPrint("Finding Rotation Angle...")
if 'manual_rotationAngle' in self.info:
self.info['rotationAngle'] = self.info['manual_rotationAngle']
del self.info['manual_rotationAngle']
self.deleteFromDict(self.info, 'avg_fold')
elif "mode_angle" in self.info:
print(f'Using mode orientation {self.info["mode_angle"]}')
self.info['rotationAngle'] = self.info["mode_angle"]
self.deleteFromDict(self.info, 'avg_fold')
elif not self.empty and 'rotationAngle' not in self.info.keys():
print("Rotation Angle is being calculated ... ")
            # Select the base image (as read from disk) and its corresponding center when determining the rotation, because the rotation angle is computed incorrectly on the larger image produced by centerizeImage()
_, center = self.parent.getExtentAndCenter()
img = copy.copy(self.initImg) if self.initImg is not None else copy.copy(self.orig_img)
if 'detector' in self.info:
self.info['rotationAngle'] = getRotationAngle(img, center, self.info['orientation_model'], man_det=self.info['detector'])
else:
self.info['rotationAngle'] = getRotationAngle(img, center, self.info['orientation_model'])
self.deleteFromDict(self.info, 'avg_fold')
print("Done. Rotation Angle is " + str(self.info['rotationAngle']) +" degree")
def getExtentAndCenter(self):
"""
Give the extent and the center of the image in self.
:return: extent, center
"""
if self is None:
return [0,0], (0,0)
if self.orig_image_center is None:
self.findCenter()
self.statusPrint("Done.")
if 'calib_center' in self.info:
center = self.info['calib_center']
elif 'manual_center' in self.info:
center = self.info['manual_center']
else:
center = self.orig_image_center
extent = [self.info['center'][0] - center[0], self.info['center'][1] - center[1]]
return extent, center
def centerizeImage(self):
"""
Create an enlarged image such that image center is at the center of new image
"""
self.parent.statusPrint("Centererizing image...")
if not self.centerChanged:
return
center = self.info['center']
if self.centImgTransMat is not None and 'calib_center' not in self.info:
# convert center in initial img coordinate system
M = self.centImgTransMat
M[0,2] = -1*M[0,2]
M[1,2] = -1*M[1,2]
center = [center[0], center[1], 1]
center = np.dot(M, center)
if 'manual_center' in self.info:
self.info['manual_center'] = (int(center[0]), int(center[1]))
if 'calib_center' in self.info:
self.info['calib_center'] = (int(center[0]), int(center[1]))
center = (int(center[0]), int(center[1]))
if self.initImg is None:
# While centerizing image use the first image after reading from file and processing for int center
self.initImg = self.orig_img
print("Dimension of initial image before centerize ", self.orig_img.shape)
img = self.initImg
print("Dimension of image before centerize ", img.shape)
b, l = img.shape
if self.parent.newImgDimension is None:
# This is the max dimension in the case beamline is in a corner and image rotated to 45 degrees
qf_w, qf_h = 2.8*(l-center[0]), 2.8*(b-center[1])
max_side = max(max(l,b), max(qf_w, qf_h))
dim = int(self.expandImg*max_side)
self.parent.newImgDimension = dim
else:
dim = self.parent.newImgDimension
new_img = np.zeros((dim,dim)).astype("float32")
new_img[0:b,0:l] = img
#Translate image to appropriate position
transx = int(((dim/2) - center[0]))
transy = int(((dim/2) - center[1]))
M = np.float32([[1,0,transx],[0,1,transy]])
self.centImgTransMat = M
rows, cols = new_img.shape
# mask_thres = self.info["mask_thres"]
# if self.img_type == "PILATUS":
# if mask_thres == -999:
# mask_thres = getMaskThreshold(img, self.img_type)
# mask = np.zeros((new_img.shape[0], new_img.shape[1]), dtype=np.uint8)
# mask[new_img <= mask_thres] = 255
# cv2.setNumThreads(1) # Added to prevent segmentation fault due to cv2.warpAffine
# translated_Img = cv2.warpAffine(new_img, M, (cols, rows))
# translated_mask = cv2.warpAffine(mask, M, (cols, rows))
# translated_mask[translated_mask > 0.] = 255
# translated_Img[translated_mask > 0] = mask_thres
# else:
cv2.setNumThreads(1) # Added to prevent segmentation fault due to cv2.warpAffine
translated_Img = cv2.warpAffine(new_img,M,(cols,rows))
self.orig_img = translated_Img
self.info['center'] = (int(dim / 2), int(dim / 2))
self.center_before_rotation = (int(dim / 2), int(dim / 2))
print("Dimension of image after centerize ", self.orig_img.shape)
def getRotatedImage(self):
"""
        Get the rotated image, where image = original input image and angle = self.info["rotationAngle"]
"""
img = np.array(self.orig_img, dtype="float32")
center = self.info["center"]
if self.center_before_rotation is not None:
center = self.center_before_rotation
else:
self.center_before_rotation = center
b, l = img.shape
rotImg, newCenter, self.rotMat = rotateImage(img, center, self.info["rotationAngle"])
# Cropping off the surrounding part since we had already expanded the image to maximum possible extent in centerize image
bnew, lnew = rotImg.shape
db, dl = (bnew - b)//2, (lnew-l)//2
final_rotImg = rotImg[db:bnew-db, dl:lnew-dl]
self.info["center"] = (newCenter[0]-dl, newCenter[1]-db)
self.dl, self.db = dl, db # storing the cropped off section to recalculate coordinates when manual center is given
return final_rotImg
def getFoldNumber(self, x, y):
"""
Get quadrant number by coordinates x, y (top left = 0, top right = 1, bottom left = 2, bottom right = 3)
:param x: x coordinate
:param y: y coordinate
        :return: quadrant number
"""
center = self.info['center']
center_x = center[0]
center_y = center[1]
if x < center_x and y < center_y:
return 0
if x >= center_x and y < center_y:
return 1
if x < center_x and y >= center_y:
return 2
if x >= center_x and y >= center_y:
return 3
return -1
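    # For example (illustration only): with self.info['center'] == (100, 100),
    # getFoldNumber(50, 50) returns 0 (top left) and getFoldNumber(150, 150) returns 3 (bottom right).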
def applyAngularBGSub(self):
"""
        Apply Angular Background Subtraction to the average fold, and save the result to self.info['bgimg1']
"""
copy_img = copy.copy(self.info['avg_fold'])
center = [copy_img.shape[1]-1, copy_img.shape[0]-1]
npt_rad = int(distance(center,(0,0)))
if 'detector' in self.info:
det = find_detector(copy_img, man_det=self.info['detector'])
else:
det = find_detector(copy_img)
ai = AzimuthalIntegrator(detector=det)
ai.setFit2D(100, center[0], center[1])
mask = np.zeros((copy_img.shape[0], copy_img.shape[1]))
start_p = self.info["cirmin"] # minimum value of circular background subtraction pixel range in percent
end_p = self.info["cirmax"] # maximum value of circular background subtraction pixel range in percent
rmin = self.info["rmin"] # minimum radius for background subtraction
rmax = self.info["rmax"] # maximum radius for background subtraction
theta_size = self.info["bin_theta"] # bin size in degree
        nBins = int(90 / theta_size)  # number of angular bins; cast to int so range() below accepts it
I2D = []
integration_method = IntegrationMethod.select_one_available("csr", dim=1, default="csr", degradable=True)
for deg in range(180, 271):
_, I = ai.integrate1d(copy_img, npt_rad, mask=mask, unit="r_mm", method=integration_method, azimuth_range=(deg, deg+1))
I2D.append(I)
I2D = np.array(I2D)
sub_tr = []
for i in range(nBins):
# loop in each theta range
subr = []
theta1 = i * theta_size
theta2 = (i+1) * theta_size
if i+1 == nBins:
theta2 += 1
for r in range(0, I2D.shape[1]):
# Get azimuth line on each radius (in theta range)
rad = I2D[theta1:theta2,r]
if start_p == end_p:
percentile = int(round(start_p * len(rad) / 100.))
rad = np.array(sorted(rad)[percentile: percentile+1])
else:
s = int(round(start_p * len(rad) / 100.))
e = int(round(end_p * len(rad) / 100.))
if s == e:
rad = sorted(rad)[s: s+1]
else:
rad = np.array(sorted(rad)[s: e])
# Get mean value of pixel range
subr.append(np.mean(rad))
subr_hist = subr[rmin:rmax + 1]
hist_x = list(range(0, len(subr_hist)))
# Get pchip line from subtraction histogram
hull_x, hull_y = getHull(hist_x, subr_hist)
y_pchip = np.array(pchip(hull_x, hull_y, hist_x))
subr_hist = np.concatenate((np.zeros(rmin), y_pchip))
subr_hist = np.concatenate((subr_hist, np.zeros(len(subr) - rmax)))
sub_tr.append(subr_hist)
# Create Angular background from subtraction lines (pchipline in each bin)
bg_img = qfu.createAngularBG(copy_img.shape[1], copy_img.shape[0], np.array(sub_tr, dtype=np.float32), nBins)
result = copy_img - bg_img
result -= result.min()
# Subtract original average fold by background
self.info['bgimg1'] = result
def applyCircularlySymBGSub2(self):
"""
Apply Circular Background Subtraction to average fold, and save the result to self.info['bgimg1']
"""
fold = copy.copy(self.info['avg_fold'])
# center = [fold.shape[1] + .5, fold.shape[0] + .5]
img = self.makeFullImage(fold)
img = img.astype("float32")
width = img.shape[1]
height = img.shape[0]
ad = np.ravel(img)
ad = np.array(ad, 'f')
b = np.array(ad, 'f')
rmin = float(self.info['rmin'])
rmax = float(self.info['rmax'])
bin_size = float(self.info["radial_bin"])
smoo = self.info['smooth']
tension = self.info['tension']
max_bin = int(np.ceil((rmax - rmin) / bin_size))*10
max_num = int(np.ceil(rmax * 2 * np.pi))*10
pc1 = self.info['cirmin']/100.
pc2 = self.info['cirmax']/100.
csyb = np.zeros(max_bin, 'f')
csyd = np.zeros(max_bin, 'f')
ys = np.zeros(max_bin, 'f')
ysp = np.zeros(max_bin, 'f')
wrk = np.zeros(max_bin * 9, 'f')
pixbin = np.zeros(max_num, 'f')
index_bn = np.zeros(max_num, 'f')
ccp13.bgcsym2(ad=ad, b=b,
smoo=smoo,
tens=tension,
pc1=pc1,
pc2=pc2,
npix=width,
nrast=height,
dmin=rmin,
dmax=rmax,
xc=width/2.-.5,
yc=height/2.-.5,
dinc=bin_size,
csyb=csyb,
csyd=csyd,
ys=ys,
ysp=ysp,
wrk=wrk,
pixbin=pixbin,
index_bn=index_bn,
iprint=0,
ilog=6,
maxbin=max_bin,
maxnum=max_num)
background = copy.copy(b)
background[np.isnan(background)] = 0.
background = np.array(background, 'float32')
background = background.reshape((height, width))
background = background[:fold.shape[0], :fold.shape[1]]
result = np.array(fold - background, dtype=np.float32)
result = qfu.replaceRmin(result, int(rmin), 0.)
self.info['bgimg1'] = result
def applySmoothedBGSub(self, typ='gauss'):
"""
        Apply smoothed background subtraction; the default type is Gaussian.
        :param typ: type of the subtraction ('gauss' for Gaussian, anything else for boxcar)
"""
fold = copy.copy(self.info['avg_fold'])
img = self.makeFullImage(fold)
img = img.astype("float32")
width = img.shape[1]
height = img.shape[0]
img = np.ravel(img)
buf = np.array(img, 'f')
maxfunc = len(buf)
cback = np.zeros(maxfunc, 'f')
b = np.zeros(maxfunc, 'f')
smbuf = np.zeros(maxfunc, 'f')
vals = np.zeros(20, 'f')
if typ == 'gauss':
vals[0] = self.info['fwhm']
vals[1] = self.info['cycles']
vals[2] = float(self.info['rmin'])
vals[3] = float(self.info['rmax'])
vals[4] = width / 2. - .5
vals[5] = height / 2. - .5
vals[6] = img.min() - 1
options = np.zeros((10, 10), 'S')
options[0] = ['G', 'A', 'U', 'S', 'S', '', '', '', '', '']
options = np.array(options, dtype='S')
else:
vals[0] = self.info['boxcar_x']
vals[1] = self.info['boxcar_y']
vals[2] = self.info['cycles']
vals[3] = float(self.info['rmin'])
vals[4] = float(self.info['rmax'])
vals[5] = width / 2. - .5
vals[6] = height / 2. - .5
options = np.zeros((10, 10), 'S')
options[0] = ['B', 'O', 'X', 'C', 'A', '', '', '', '', '']
options = np.array(options, dtype='S')
npix = width
nrast = height
xb = np.zeros(npix, 'f')
yb = np.zeros(npix, 'f')
ys = np.zeros(npix, 'f')
ysp = np.zeros(npix, 'f')
sig = np.zeros(npix, 'f')
wrk = np.zeros(9 * npix, 'f')
iflag = np.zeros(npix * nrast, 'f')
ilog = 6
ccp13.bcksmooth(buf=buf,
cback=cback,
b=b,
smbuf=smbuf,
vals=vals,
options=options,
xb=xb,
yb=yb,
ys=ys,
ysp=ysp,
sig=sig,
wrk=wrk,
iflag=iflag,
ilog=ilog,
nrast=nrast,
npix=npix)
background = copy.copy(b)
background[np.isnan(background)] = 0.
background = np.array(background, 'float32')
background = background.reshape((height, width))
background = background[:fold.shape[0], :fold.shape[1]]
result = np.array(fold - background, dtype=np.float32)
result = qfu.replaceRmin(result, int(self.info['rmin']), 0.)
self.info['bgimg1'] = result
def applyRovingWindowBGSub(self):
"""
Apply Roving Window background subtraction
:return:
"""
fold = copy.copy(self.info['avg_fold'])
# center = [fold.shape[1] + .5, fold.shape[0] + .5]
img = self.makeFullImage(fold)
width = img.shape[1]
height = img.shape[0]
img = np.ravel(img)
buf = np.array(img, 'f')
b = np.zeros(len(buf), 'f')
iwid = self.info['win_size_x']
jwid = self.info['win_size_y']
isep = self.info['win_sep_x']
jsep = self.info['win_sep_y']
smoo = self.info['smooth']
tension = self.info['tension']
pc1 = self.info['cirmin'] / 100.
pc2 = self.info['cirmax'] / 100.
maxdim = width * height
maxwin = (iwid * 2 + 1) * (jwid * 2 + 1)
ccp13.bgwsrt2(buf=buf,
b=b,
iwid=iwid,
jwid=jwid,
isep=isep,
jsep=jsep,
smoo=smoo,
tens=tension,
pc1=pc1,
pc2=pc2,
npix=width,
nrast=height,
maxdim=maxdim,
maxwin=maxwin,
xb=np.zeros(maxdim, 'f'),
yb=np.zeros(maxdim, 'f'),
ys=np.zeros(maxdim, 'f'),
ysp=np.zeros(maxdim, 'f'),
wrk=np.zeros(9 * maxdim, 'f'),
bw=np.zeros(maxwin, 'f'),
index_bn=np.zeros(maxwin, 'i'),
iprint=0,
ilog=6)
background = copy.copy(b)
background[np.isnan(background)] = 0.
background = np.array(background, 'float32')
background = background.reshape((height, width))
background = background[:fold.shape[0], :fold.shape[1]]
result = np.array(fold - background, dtype=np.float32)
result = qfu.replaceRmin(result, int(self.info['rmin']), 0.)
self.info['bgimg1'] = result
def applyCircularlySymBGSub(self):
"""
Apply Circular Background Subtraction to average fold, and save the result to self.info['bgimg1']
"""
copy_img = copy.copy(self.info['avg_fold'])
center = [copy_img.shape[1] - .5, copy_img.shape[0] - .5]
# npt_rad = int(distance(center, (0, 0)))
# ai = AzimuthalIntegrator(detector="agilent_titan")
# ai.setFit2D(100, center[0], center[1])
# mask = np.zeros((copy_img.shape[0], copy_img.shape[1]))
start_p = self.info["cirmin"] # minimum value of circular background subtraction pixel range in percent
end_p = self.info["cirmax"] # maximum value of circular background subtraction pixel range in percent
rmin = self.info["rmin"] # minimum radius for background subtraction
rmax = self.info["rmax"] # maximum radius for background subtraction
radial_bin = self.info["radial_bin"]
smoo = self.info['smooth']
# tension = self.info['tension']
max_pts = (2.*np.pi*rmax / 4. + 10) * radial_bin
nBin = int((rmax-rmin)/radial_bin)
xs, ys = qfu.getCircularDiscreteBackground(np.array(copy_img, np.float32), rmin, start_p, end_p, radial_bin, nBin, max_pts)
max_distance = int(round(distance(center, (0,0)))) + 10
sp = UnivariateSpline(xs, ys, s=smoo)
newx = np.arange(rmin, rmax)
interpolate = sp(newx)
newx = np.arange(0, max_distance)
newy = list(np.zeros(rmin))
newy.extend(list(interpolate))
newy.extend(np.zeros(max_distance-rmax))
self.info['bg_line'] = [xs, ys, newx, newy]
# Create background from spline line
background = qfu.createCircularlySymBG(copy_img.shape[1],copy_img.shape[0], np.array(newy, dtype=np.float32))
result = copy_img - background
# result -= result.min()
# Subtract original average fold by background
self.info['bgimg1'] = result
def getFirstPeak(self, hist):
"""
Find the first peak using the histogram.
Start from index 5 and go to the right until slope is less than -10
:param hist: histogram
"""
for i in range(5, int(len(hist)/2)):
if hist[i] - hist[i-1] < -10:
return i
return 20
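    # Example (editor's sketch): for hist = [40, 41, 40, 42, 41, 43, 80, 55, ...] the
    # first index i >= 5 with hist[i] - hist[i-1] < -10 is 7 (the drop from 80 to 55),
    # so 7 is returned; if no such drop exists in the first half, the fallback is 20.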
def getRminmax(self):
"""
        Get R-min and R-max for the background subtraction process. If these values change, the background-subtracted images need to be reproduced.
"""
self.parent.statusPrint("Finding Rmin and Rmax...")
print("R-min and R-max is being calculated...")
if 'fixed_rmin' in self.info and 'fixed_rmax' in self.info:
if 'rmin' in self.info and 'rmax' in self.info:
if self.info['rmin'] == self.info['fixed_rmin'] and self.info['rmax'] == self.info['fixed_rmax']:
return
self.info['rmin'] = self.info['fixed_rmin']
self.info['rmax'] = self.info['fixed_rmax']
elif 'rmin' in self.info and 'rmax' in self.info:
return
else:
copy_img = copy.copy(self.info['avg_fold'])
center = [copy_img.shape[1] - 1, copy_img.shape[0] - 1]
npt_rad = int(distance(center, (0, 0)))
# Get 1D azimuthal integration histogram
if 'detector' in self.info:
det = find_detector(copy_img, man_det=self.info['detector'])
else:
det = find_detector(copy_img)
ai = AzimuthalIntegrator(detector=det)
ai.setFit2D(100, center[0], center[1])
integration_method = IntegrationMethod.select_one_available("csr", dim=1, default="csr", degradable=True)
_, totalI = ai.integrate1d(copy_img, npt_rad, unit="r_mm", method=integration_method, azimuth_range=(180, 270))
self.info['rmin'] = int(round(self.getFirstPeak(totalI) * 1.5))
self.info['rmax'] = int(round((min(copy_img.shape[0], copy_img.shape[1]) - 1) * .8))
self.deleteFromDict(self.info, 'bgimg1') # remove "bgimg1" from info to make it reprocess
self.deleteFromDict(self.info, 'bgimg2') # remove "bgimg2" from info to make it reprocess
print("Done. R-min is "+str(self.info['rmin']) + " and R-max is " + str(self.info['rmax']))
def apply2DConvexhull(self): # Deprecated, removed from MuscleX
"""
Apply 2D Convex hull Background Subtraction to average fold, and save the result to self.info['bgimg1']
"""
copy_img = copy.copy(self.info['avg_fold'])
rmin = self.info['rmin']
rmax = self.info['rmax']
center = [copy_img.shape[1] - 1, copy_img.shape[0] - 1]
hist_x = list(np.arange(rmin, rmax + 1))
pchiplines = []
det = "agilent_titan"
npt_rad = int(distance(center, (0, 0)))
ai = AzimuthalIntegrator(detector=det)
ai.setFit2D(100, center[0], center[1])
integration_method = IntegrationMethod.select_one_available("csr", dim=1, default="csr", degradable=True)
for deg in np.arange(180, 271, 1):
if deg == 180 :
_, I = ai.integrate1d(copy_img, npt_rad, unit="r_mm", method=integration_method, azimuth_range=(180, 180.5))
elif deg == 270:
_, I = ai.integrate1d(copy_img, npt_rad, unit="r_mm", method=integration_method, azimuth_range=(269.5, 270))
else:
_, I = ai.integrate1d(copy_img, npt_rad, unit="r_mm", method=integration_method, azimuth_range=(deg-0.5, deg+0.5))
hist_y = I[int(rmin):int(rmax+1)]
hist_y = list(np.concatenate((hist_y, np.zeros(len(hist_x) - len(hist_y)))))
#hist_y = list(I[hist_x])
hull_x, hull_y = getHull(hist_x, hist_y)
y_pchip = pchip(hull_x, hull_y, hist_x)
pchiplines.append(y_pchip)
# Smooth each histogram by radius
pchiplines = np.array(pchiplines, dtype="float32")
pchiplines2 = convolve1d(pchiplines, [1,2,1], axis=0)/4.
# Produce Background from each pchip line
background = qfu.make2DConvexhullBG2(pchiplines2, copy_img.shape[1], copy_img.shape[0], center[0], center[1], rmin, rmax)
# Smooth background image by gaussian filter
s = 10
w = 4
t = (((w - 1.) / 2.) - 0.5) / s
background = gaussian_filter(background, sigma=s, truncate=t)
# Subtract original average fold by background
result = copy_img - background
self.info['bgimg1'] = result
def calculateAvgFold(self):
"""
        Calculate an average fold from quadrants 1-4. Quadrants are split by the center and rotation.
"""
self.parent.statusPrint("Calculating Avg Fold...")
if 'avg_fold' not in self.info.keys():
self.deleteFromDict(self.info, 'rmin')
self.deleteFromDict(self.info, 'rmax')
# self.imgResultForDisplay = None
rotate_img = copy.copy(self.getRotatedImage())
center = self.info['center']
center_x = int(center[0])
center_y = int(center[1])
print("Quadrant folding is being processed...")
img_width = rotate_img.shape[1]
img_height = rotate_img.shape[0]
fold_width = max(int(center[0]), img_width-int(center[0])) # max(max(int(center[0]), img_width-int(center[0])), max(int(center[1]), img_height-int(center[1])))
fold_height = max(int(center[1]), img_height-int(center[1])) # fold_width
# Get each fold, and flip them to the same direction
top_left = rotate_img[max(center_y-fold_height,0):center_y, max(center_x-fold_width,0):center_x]
top_right = rotate_img[max(center_y-fold_height,0):center_y, center_x:center_x+fold_width]
top_right = cv2.flip(top_right,1)
buttom_left = rotate_img[center_y:center_y+fold_height, max(center_x-fold_width,0):center_x]
buttom_left = cv2.flip(buttom_left,0)
buttom_right = rotate_img[center_y:center_y+fold_height, center_x:center_x+fold_width]
buttom_right = cv2.flip(buttom_right,1)
buttom_right = cv2.flip(buttom_right,0)
# Add all folds which are not ignored
quadrants = np.ones((4, fold_height, fold_width), rotate_img.dtype) * (self.info['mask_thres'] - 1.)
for i, quad in enumerate([top_left, top_right, buttom_left, buttom_right]):
quadrants[i][-quad.shape[0]:, -quad.shape[1]:] = quad
remained = np.ones(4, dtype=bool)
remained[list(self.info["ignore_folds"])] = False
quadrants = quadrants[remained]
# Get average fold from all folds
self.get_avg_fold(quadrants,fold_height,fold_width)
if 'resultImg' in self.imgCache:
del self.imgCache['resultImg']
print("Done.")
def get_avg_fold(self, quadrants, fold_height, fold_width):
"""
Get average fold from input
:param quadrants: 1-4 quadrants
:param fold_height: quadrant height
:param fold_width: quadrant width
:return:
"""
result = np.zeros((fold_height, fold_width))
if len(self.info["ignore_folds"]) < 4:
# if self.info['pixel_folding']:
# average fold by pixel to pixel by cython
result = qfu.get_avg_fold_float32(np.array(quadrants, dtype="float32"), len(quadrants), fold_height, fold_width,
self.info['mask_thres'])
# else:
# result = np.mean( np.array(quadrants), axis=0 )
self.info['avg_fold'] = result
def applyBackgroundSubtraction(self):
"""
Apply background subtraction by user's choice. There are 2 images produced in this process
- bgimg1 : image after applying background subtraction INSIDE merge radius
- bgimg2 : image after applying background subtraction OUTSIDE merge radius
"""
self.parent.statusPrint("Applying Background Subtraction...")
print("Background Subtraction is being processed...")
method = self.info["bgsub"]
# Produce bgimg1
if "bgimg1" not in self.info:
avg_fold = np.array(self.info['avg_fold'], dtype="float32")
if method == 'None':
self.info["bgimg1"] = avg_fold # if method is None, original average fold will be used
elif method == '2D Convexhull':
self.apply2DConvexhull()
elif method == 'Circularly-symmetric':
self.applyCircularlySymBGSub2()
# self.applyCircularlySymBGSub()
elif method == 'White-top-hats':
self.info["bgimg1"] = white_tophat(avg_fold, disk(self.info["tophat1"]))
elif method == 'Roving Window':
self.applyRovingWindowBGSub()
elif method == 'Smoothed-Gaussian':
self.applySmoothedBGSub('gauss')
elif method == 'Smoothed-BoxCar':
self.applySmoothedBGSub('boxcar')
else:
self.info["bgimg1"] = avg_fold
self.deleteFromDict(self.imgCache, "BgSubFold")
# Produce bgimg2
if "bgimg2" not in self.info:
avg_fold = np.array(self.info['avg_fold'], dtype="float32")
if method == 'None':
self.info["bgimg2"] = avg_fold # if method is 'None', original average fold will be used
else:
self.info["bgimg2"] = white_tophat(avg_fold, disk(self.info["tophat2"]))
self.deleteFromDict(self.imgCache, "BgSubFold")
print("Done.")
def mergeImages(self):
"""
Merge bgimg1 and bgimg2 at merge radius, with sigmoid as a merge gradient param.
        The result of merging will be kept in self.imgCache["BgSubFold"]
:return:
"""
self.parent.statusPrint("Merging Images...")
print("Merging images...")
if "BgSubFold" not in self.imgCache:
img1 = np.array(self.info["bgimg1"], dtype="float32")
img2 = np.array(self.info["bgimg2"], dtype="float32")
sigmoid = self.info["sigmoid"]
center = [img1.shape[1]-1, img1.shape[0]-1]
rad = self.info["rmax"] - 10
# Merge 2 images at merge radius using sigmoid as merge gradient
self.imgCache['BgSubFold'] = qfu.combine_bgsub_float32(img1, img2, center[0], center[1], sigmoid, rad)
self.deleteFromDict(self.imgCache, "resultImg")
print("Done.")
def generateResultImage(self):
"""
        Put 4 copies of self.imgCache["BgSubFold"] together as a result image
:return:
"""
self.parent.statusPrint("Generating Resultant Image...")
print("Generating result image from average fold...")
result = self.makeFullImage(copy.copy(self.imgCache['BgSubFold']))
if 'rotate' in self.info and self.info['rotate']:
result = np.rot90(result)
result[np.isnan(result)] = 0.
if 'roi_rad' in self.info:
center = result.shape[0]/2, result.shape[1]/2
rad = self.info['roi_rad']
result = result[max(int(center[1]-rad), 0):min(int(center[1]+rad), result.shape[1]), max(int(center[0]-rad), 0):min(int(center[0]+rad), result.shape[0])]
self.imgCache['resultImg'] = result
print("Done.")
def makeFullImage(self, fold):
"""
        Flip the fold into 4 orientations and combine them into 1 full image
:param fold:
:return: result image
"""
fold_height = fold.shape[0]
fold_width = fold.shape[1]
top_left = fold
top_right = cv2.flip(fold, 1)
buttom_left = cv2.flip(fold, 0)
buttom_right = cv2.flip(buttom_left, 1)
resultImg = np.zeros((fold_height * 2, fold_width * 2))
resultImg[0:fold_height, 0:fold_width] = top_left
resultImg[0:fold_height, fold_width:fold_width * 2] = top_right
resultImg[fold_height:fold_height * 2, 0:fold_width] = buttom_left
resultImg[fold_height:fold_height * 2, fold_width:fold_width * 2] = buttom_right
return resultImg
def statusPrint(self, text):
"""
        Print the text in the window or in the terminal depending on whether we are using the GUI or running headless.
:param text: text to print
:return: -
"""
print(text)
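# Editor's sketch (not part of the original module): the processing order implied by the
# methods above, assuming `qf` is an instance of this class:
#
#   qf.calculateAvgFold()           # average the four quadrants into info['avg_fold']
#   qf.getRminmax()                 # pick R-min / R-max for background subtraction
#   qf.applyBackgroundSubtraction() # produce info['bgimg1'] (inside) and info['bgimg2'] (outside)
#   qf.mergeImages()                # blend bgimg1/bgimg2 at rmax - 10 with the sigmoid gradient
#   qf.generateResultImage()        # tile the merged fold into the full result image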
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class ResearchReportConfig(AppConfig):
name = 'research_report'
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 16 15:07:06 2017
@author: toelch
"""
import pandas as pd
data = pd.read_csv('https://dataverse.harvard.edu/api/access/datafile/3005330')
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Indictrans and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from mycfo.kpi.doctype.skill_mapping.skill_mapping import get_sample_data
class CustomerSkillMapping(Document):
def update_skill_mapping_details(self, args):
self.set('skill_mapping_details', [])
for data in args.get('data'):
			if data.get('industry') is not None:
smd = self.append('skill_mapping_details',{})
smd.skill = data.get('master_industry')
smd.sub_skill = data.get('industry')
smd.beginner = data.get('beginner')
smd.imtermediatory = data.get('imtermediatory')
smd.expert = data.get('expert')
smd.none_field = data.get('none_field')
self.save()
def before_insert(self):
if not len(self.skill_mapping_details):
skill_data = get_sample_data()
for data in skill_data.get("get_sample_data"):
smd = self.append('skill_mapping_details',{})
smd.skill = data[0]
smd.sub_skill = data[1]
@frappe.whitelist()
def get_sample_data_from_table(doc_name):
return {
"get_sample_data": frappe.db.sql("""select skill,sub_skill,none_field,beginner,imtermediatory,expert from `tabSkill Mapping Details` where parent='%s' order by skill asc, sub_skill asc"""%doc_name, as_list=1)
}
@frappe.whitelist()
def get_customer_skill_mapping(customer, group, segment):
if frappe.db.get_value("Customer Skill Mapping", customer, "name"):
csm = frappe.get_doc("Customer Skill Mapping", customer)
else:
csm = frappe.new_doc("Customer Skill Mapping")
csm.customer = customer
csm.customer_group = group
csm.customer_segment = segment
return csm.as_dict()
|
from backend import *
from backendConfig import *
from decomp import *
|
from PIL import Image
def show_board():
img=Image.open("sal.png")
img.show()
show_board()
|
import esphome.codegen as cg
import esphome.config_validation as cv
from esphome.components import binary_sensor
from esphome.const import CONF_ID
from . import EmptySensorHub, CONF_HUB_ID
DEPENDENCIES = ['empty_sensor_hub']
binary_sensor_ns = cg.esphome_ns.namespace('binary_sensor')
BinarySensor = binary_sensor_ns.class_('BinarySensor', binary_sensor.BinarySensor, cg.Nameable)
CONFIG_SCHEMA = binary_sensor.BINARY_SENSOR_SCHEMA.extend({
cv.GenerateID(): cv.declare_id(BinarySensor),
cv.GenerateID(CONF_HUB_ID): cv.use_id(EmptySensorHub)
}).extend(cv.COMPONENT_SCHEMA)
def to_code(config):
paren = yield cg.get_variable(config[CONF_HUB_ID])
var = cg.new_Pvariable(config[CONF_ID])
yield binary_sensor.register_binary_sensor(var, config)
cg.add(paren.register_binary_sensor(var))
|
import os
#import numpy
import time
from time import clock
os.environ['JAVA_HOME'] = '/usr/lib/jvm/java-7-oracle/'
#os.environ['JAVA_HOME'] = '/usr/lib/jvm/java-7-openjdk-amd64/'
from neo4j import GraphDatabase, INCOMING, Evaluation, OUTGOING, ANY
from array import array
# open the graph database
db = GraphDatabase('mathematics')
# get the index keys
with db.transaction:
pesquisador_idx = db.node.indexes.get('pesquisadores')
# function to look up a vertex given its id number
def get_pesquisador(ide):
return pesquisador_idx['id'][ide].single
# function to count the number of distance-1 neighbours in each edge direction
def Fecundidade(A):
fecundidade = 0
with db.transaction:
traverser = db.traversal()\
.relationships('SENT_TO', OUTGOING)\
.breadthFirst()\
.traverse(A)
for path in traverser:
if len(path) == 1:
fecundidade = fecundidade + 1
pass
A[('fecundidade+')] = fecundidade
fecundidade = 0
with db.transaction:
traverser = db.traversal()\
.relationships('SENT_TO', INCOMING)\
.breadthFirst()\
.traverse(A)
for path in traverser:
if len(path) == 1:
fecundidade = fecundidade + 1
pass
A[('fecundidade-')] = fecundidade
return
def Fertilidade(A):
fertilidade = 0
with db.transaction:
traverser = db.traversal()\
.relationships('SENT_TO', OUTGOING)\
.breadthFirst()\
.traverse(A)
for path in traverser:
if len(path) == 1 and path.end[('fecundidade+')] > 0:
fertilidade = fertilidade + 1
pass
A[('fertilidade+')] = fertilidade
fertilidade = 0
with db.transaction:
traverser = db.traversal()\
.relationships('SENT_TO', INCOMING)\
.breadthFirst()\
.traverse(A)
for path in traverser:
if len(path) == 1 and path.end[('fecundidade-')] > 0:
fertilidade = fertilidade + 1
pass
A[('fertilidade-')] = fertilidade
return
def Descendencia(A):
descendencia = 0
with db.transaction:
traverser = db.traversal()\
.relationships('SENT_TO', OUTGOING)\
.breadthFirst()\
.traverse(A)
for node in traverser.nodes:
descendencia = descendencia + 1
pass
A[('descendencia+')] = descendencia - 1
descendencia = 0
with db.transaction:
traverser = db.traversal()\
.relationships('SENT_TO', INCOMING)\
.breadthFirst()\
.traverse(A)
for node in traverser.nodes:
descendencia = descendencia + 1
pass
A[('descendencia-')] = descendencia - 1
return
def Descendencia2(A):
vetor = []
with db.transaction:
traverser = db.traversal()\
.relationships('SENT_TO', OUTGOING)\
.breadthFirst()\
.traverse(A)
for path in traverser:
if len(path)==2:
vetor.append(path.end['id'])
pass
if len(vetor) != 0:
A['decendencia2+'] = vetor
else:
A['decendencia2+'] = 0
vetor = []
with db.transaction:
traverser = db.traversal()\
.relationships('SENT_TO', INCOMING)\
.breadthFirst()\
.traverse(A)
for path in traverser:
if len(path)==2:
vetor.append(path.end['id'])
pass
if len(vetor) != 0:
A['decendencia2-'] = vetor
else:
A['decendencia2-'] = 0
return
def Primo(A):
conjunto=set([])
with db.transaction:
traverser = db.traversal()\
.relationships('SENT_TO', INCOMING)\
.breadthFirst()\
.traverse(A)
for path in traverser:
if len(path)==2:
for r in path.end['decendencia2+']:
conjunto.add(r)
pass
if len(conjunto) == 0:
A['primo+'] = 0
else:
A['primo+'] = len(conjunto)-1
conjunto=set([])
with db.transaction:
traverser = db.traversal()\
.relationships('SENT_TO', OUTGOING)\
.breadthFirst()\
.traverse(A)
for path in traverser:
if len(path)==2:
for r in path.end['decendencia2-']:
conjunto.add(r)
pass
if len(conjunto) == 0:
A['primo-'] = 0
else:
A['primo-'] = len(conjunto)-1
return
def Geracoes(A):
D = 0
with db.transaction:
traverser = db.traversal()\
.relationships('SENT_TO', OUTGOING)\
.depthFirst()\
.traverse(A)
for path in traverser:
if D < len(path):
D = len(path)
pass
A['geracoes+'] = D
D = 0
with db.transaction:
traverser = db.traversal()\
.relationships('SENT_TO', INCOMING)\
.depthFirst()\
.traverse(A)
for path in traverser:
if D < len(path):
D = len(path)
pass
A['geracoes-'] = D
return
def Orientacoes(A):
orientacoes = 0
with db.transaction:
traverser = db.traversal()\
.relationships('SENT_TO', OUTGOING)\
.breadthFirst()\
.traverse(A)
for node in traverser.nodes:
orientacoes = orientacoes + node['fecundidade+']
pass
A[('orientacoes+')] = orientacoes
orientacoes = 0
with db.transaction:
traverser = db.traversal()\
.relationships('SENT_TO', INCOMING)\
.breadthFirst()\
.traverse(A)
for node in traverser.nodes:
orientacoes = orientacoes + node['fecundidade-']
pass
A[('orientacoes-')] = orientacoes
return
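# Editor's summary of the metrics computed by the functions above (derived from the
# traversals; each metric is computed in both edge directions, '+' and '-'):
#   fecundidade  : number of distance-1 neighbours
#   fertilidade  : distance-1 neighbours whose own fecundidade (same direction) is non-zero
#   descendencia : total number of vertices reachable from the vertex, excluding itself
#   decendencia2 : ids of the vertices exactly two hops away
#   primo        : roughly, the other vertices two hops away from a shared two-hop relative
#   geracoes     : length of the longest path starting at the vertex (generations)
#   orientacoes  : sum of fecundidade over the vertex and everything reachable from it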
#******************** Main program ******************************************
# Read the graph data file
arq = open('mathematics.txt','r')
matematicos = []
print("Lendo registros...")
for matematico in arq.readlines():
matematicos.append(int(matematico.split(';')[0]))
arq.close()
# Count the number of vertices found
floresta = len(matematicos)
print("Total vertices = %d" % floresta)
cont = 0
for p in matematicos:
cont +=1
vertice=get_pesquisador(int(p))
Fecundidade(vertice)
print('Fecundidade - %d - %s - %.3f%%' % (vertice['id'],vertice['nome'],float(cont)/float(floresta)*100.00))
cont = 0
for p in matematicos:
cont +=1
vertice=get_pesquisador(int(p))
Fertilidade(vertice)
print('Fertilidade - %d - %s - %.3f%%' % (vertice['id'],vertice['nome'],float(cont)/float(floresta)*100.00))
cont = 0
for p in matematicos:
cont +=1
vertice=get_pesquisador(int(p))
Descendencia(vertice)
print('Descendencia - %d - %s - %.3f%%' % (vertice['id'],vertice['nome'],float(cont)/float(floresta)*100.00))
cont = 0
for p in matematicos:
cont +=1
vertice=get_pesquisador(int(p))
Descendencia2(vertice)
print('Descendencia2 - %d - %s - %.3f%%' % (vertice['id'],vertice['nome'],float(cont)/float(floresta)*100.00))
cont = 0
for p in matematicos:
cont +=1
vertice=get_pesquisador(int(p))
Primo(vertice)
print('Primo - %d - %s - %.3f%%' % (vertice['id'],vertice['nome'],float(cont)/float(floresta)*100.00))
cont = 0
for p in matematicos:
cont +=1
vertice=get_pesquisador(int(p))
Geracoes(vertice)
print('Geracoes - %d - %s - %.3f%%' % (vertice['id'],vertice['nome'],float(cont)/float(floresta)*100.00))
cont = 0
for p in matematicos:
cont +=1
vertice=get_pesquisador(int(p))
Orientacoes(vertice)
print('Orientacoes - %d - %s - %.3f%%' % (vertice['id'],vertice['nome'],float(cont)/float(floresta)*100.00))
print("Gravando arquivos...")
ascendente = open('metricas.txt','w')
for p in matematicos:
verticeInteresse = get_pesquisador(int(p))
ascendente.write('%d;' % p)
ascendente.write('%d;' % verticeInteresse[('fecundidade+')])
ascendente.write('%d;' % verticeInteresse[('fertilidade+')])
ascendente.write('%d;' % verticeInteresse[('descendencia+')])
ascendente.write('%d;' % verticeInteresse[('primo+')])
ascendente.write('%d;' % verticeInteresse[('geracoes+')])
ascendente.write('%d;' % verticeInteresse[('orientacoes+')])
ascendente.write('%d;' % verticeInteresse[('fecundidade-')])
ascendente.write('%d;' % verticeInteresse[('fertilidade-')])
ascendente.write('%d;' % verticeInteresse[('descendencia-')])
ascendente.write('%d;' % verticeInteresse[('primo-')])
ascendente.write('%d;' % verticeInteresse[('geracoes-')])
ascendente.write('%d;' % verticeInteresse[('orientacoes-')])
ascendente.write('\n')
ascendente.close()
db.shutdown()
|
import Plugin
import connection
class EchoPlugin(Plugin.EasyPlugin):
'''This Plugin responds to the sender with received package'''
def command_echo(self, package):
''' echo the package '''
package.connection.sendResponse(package)
def command_frontendEcho(self, package):
        ''' echo this package to the frontends '''
for connection in self.backend.getNodeConnections(":frontend"):
connection.sendResponse(package)
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
T = [15, 20, 25, 30, 35, 40, 45, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140,
150, 160, 170, 180, 190, 200, 210, 220, 230, 240, 250, 260, 270, 280, 290, 298, 300]
c = np.array([0.022, 0.054, 0.112, 0.203, 0.332, 0.500, 0.698, 0.912, 1.375, 1.846, 2.298, 2.714, 3.094, 3.422,
3.704, 3.943, 4.165, 4.361, 4.536, 4.690, 4.823, 4.938, 5.039, 5.122, 5.198, 5.268,
5.329, 5.383, 5.436, 5.483, 5.523, 5.562, 5.592, 5.599])*4.184
plt.scatter(T, c, color="0")
R = 8.314
def cvm(temp, theta):
return 3*R * (theta/temp)**2 * np.exp(theta/temp) * (np.exp(theta/temp)-1)**(-2)
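# The function above is the Einstein model for the molar heat capacity,
#   C_vm(T) = 3R * (theta/T)^2 * exp(theta/T) / (exp(theta/T) - 1)^2,
# fitted below with the Einstein temperature theta as the only free parameter.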
popt, pcov = curve_fit(cvm, T, c, p0=300)
print(popt)
print(pcov)
T_teoretisk = np.linspace(1, 330, 600)
plt.plot(T_teoretisk, cvm(T_teoretisk, popt[0]), color="0")
#plt.title(r"Kurvetilpasning, Giauque og Meads, $\Theta_E = 283$K")
plt.xlabel(r"$T$ / K")
plt.ylabel(r"$C_{vm}$ / J K$^{-1}$ mol $^{-1}$")
plt.ylim([0, 25])
plt.xlim([0, 320])
plt.tight_layout()
plt.show()
# CHOOSING T = 283.2 K AS THE EINSTEIN TEMPERATURE FOR ALUMINIUM
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import logging
from dataclasses import dataclass
from typing import Any
from pants.backend.scala.subsystems.scalatest import Scalatest
from pants.backend.scala.target_types import (
ScalatestTestExtraEnvVarsField,
ScalatestTestSourceField,
ScalatestTestTimeoutField,
)
from pants.core.goals.generate_lockfiles import GenerateToolLockfileSentinel
from pants.core.goals.test import (
TestDebugRequest,
TestExtraEnv,
TestFieldSet,
TestRequest,
TestResult,
TestSubsystem,
)
from pants.core.target_types import FileSourceField
from pants.core.util_rules.source_files import SourceFiles, SourceFilesRequest
from pants.engine.addresses import Addresses
from pants.engine.env_vars import EnvironmentVars, EnvironmentVarsRequest
from pants.engine.fs import Digest, DigestSubset, MergeDigests, PathGlobs, RemovePrefix, Snapshot
from pants.engine.process import (
FallibleProcessResult,
InteractiveProcess,
Process,
ProcessCacheScope,
)
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.target import SourcesField, TransitiveTargets, TransitiveTargetsRequest
from pants.engine.unions import UnionRule
from pants.jvm.classpath import Classpath
from pants.jvm.goals import lockfile
from pants.jvm.jdk_rules import JdkEnvironment, JdkRequest, JvmProcess
from pants.jvm.resolve.coursier_fetch import ToolClasspath, ToolClasspathRequest
from pants.jvm.resolve.jvm_tool import GenerateJvmLockfileFromTool, GenerateJvmToolLockfileSentinel
from pants.jvm.subsystems import JvmSubsystem
from pants.jvm.target_types import JvmDependenciesField, JvmJdkField
from pants.util.logging import LogLevel
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class ScalatestTestFieldSet(TestFieldSet):
required_fields = (
ScalatestTestSourceField,
JvmJdkField,
)
sources: ScalatestTestSourceField
timeout: ScalatestTestTimeoutField
jdk_version: JvmJdkField
dependencies: JvmDependenciesField
extra_env_vars: ScalatestTestExtraEnvVarsField
class ScalatestTestRequest(TestRequest):
tool_subsystem = Scalatest
field_set_type = ScalatestTestFieldSet
supports_debug = True
class ScalatestToolLockfileSentinel(GenerateJvmToolLockfileSentinel):
resolve_name = Scalatest.options_scope
@dataclass(frozen=True)
class TestSetupRequest:
field_set: ScalatestTestFieldSet
is_debug: bool
@dataclass(frozen=True)
class TestSetup:
process: JvmProcess
reports_dir_prefix: str
@rule(level=LogLevel.DEBUG)
async def setup_scalatest_for_target(
request: TestSetupRequest,
jvm: JvmSubsystem,
scalatest: Scalatest,
test_subsystem: TestSubsystem,
test_extra_env: TestExtraEnv,
) -> TestSetup:
jdk, transitive_tgts = await MultiGet(
Get(JdkEnvironment, JdkRequest, JdkRequest.from_field(request.field_set.jdk_version)),
Get(TransitiveTargets, TransitiveTargetsRequest([request.field_set.address])),
)
lockfile_request = await Get(GenerateJvmLockfileFromTool, ScalatestToolLockfileSentinel())
classpath, scalatest_classpath, files = await MultiGet(
Get(Classpath, Addresses([request.field_set.address])),
Get(ToolClasspath, ToolClasspathRequest(lockfile=lockfile_request)),
Get(
SourceFiles,
SourceFilesRequest(
(dep.get(SourcesField) for dep in transitive_tgts.dependencies),
for_sources_types=(FileSourceField,),
enable_codegen=True,
),
),
)
input_digest = await Get(Digest, MergeDigests((*classpath.digests(), files.snapshot.digest)))
toolcp_relpath = "__toolcp"
extra_immutable_input_digests = {
toolcp_relpath: scalatest_classpath.digest,
}
reports_dir_prefix = "__reports_dir"
reports_dir = f"{reports_dir_prefix}/{request.field_set.address.path_safe_spec}"
# Classfiles produced by the root `scalatest_test` targets are the only ones which should run.
user_classpath_arg = ":".join(classpath.root_args())
# Cache test runs only if they are successful, or not at all if `--test-force`.
cache_scope = (
ProcessCacheScope.PER_SESSION if test_subsystem.force else ProcessCacheScope.SUCCESSFUL
)
extra_jvm_args: list[str] = []
if request.is_debug:
extra_jvm_args.extend(jvm.debug_args)
field_set_extra_env = await Get(
EnvironmentVars, EnvironmentVarsRequest(request.field_set.extra_env_vars.value or ())
)
process = JvmProcess(
jdk=jdk,
classpath_entries=[
*classpath.args(),
*scalatest_classpath.classpath_entries(toolcp_relpath),
],
argv=[
*extra_jvm_args,
"org.scalatest.tools.Runner",
# TODO: We currently give the entire user classpath to the JVM for startup (which
# mixes it with the user classpath), and then only specify the roots to run here.
# see https://github.com/pantsbuild/pants/issues/13871
*(("-R", user_classpath_arg) if user_classpath_arg else ()),
"-o",
"-u",
reports_dir,
*scalatest.args,
],
input_digest=input_digest,
extra_env={**test_extra_env.env, **field_set_extra_env},
extra_jvm_options=scalatest.jvm_options,
extra_immutable_input_digests=extra_immutable_input_digests,
output_directories=(reports_dir,),
description=f"Run Scalatest runner for {request.field_set.address}",
timeout_seconds=request.field_set.timeout.calculate_from_global_options(test_subsystem),
level=LogLevel.DEBUG,
cache_scope=cache_scope,
use_nailgun=False,
)
return TestSetup(process=process, reports_dir_prefix=reports_dir_prefix)
@rule(desc="Run Scalatest", level=LogLevel.DEBUG)
async def run_scalatest_test(
test_subsystem: TestSubsystem,
batch: ScalatestTestRequest.Batch[ScalatestTestFieldSet, Any],
) -> TestResult:
field_set = batch.single_element
test_setup = await Get(TestSetup, TestSetupRequest(field_set, is_debug=False))
process_result = await Get(FallibleProcessResult, JvmProcess, test_setup.process)
reports_dir_prefix = test_setup.reports_dir_prefix
xml_result_subset = await Get(
Digest, DigestSubset(process_result.output_digest, PathGlobs([f"{reports_dir_prefix}/**"]))
)
xml_results = await Get(Snapshot, RemovePrefix(xml_result_subset, reports_dir_prefix))
return TestResult.from_fallible_process_result(
process_result,
address=field_set.address,
output_setting=test_subsystem.output,
xml_results=xml_results,
)
@rule(level=LogLevel.DEBUG)
async def setup_scalatest_debug_request(
batch: ScalatestTestRequest.Batch[ScalatestTestFieldSet, Any]
) -> TestDebugRequest:
setup = await Get(TestSetup, TestSetupRequest(batch.single_element, is_debug=True))
process = await Get(Process, JvmProcess, setup.process)
return TestDebugRequest(
InteractiveProcess.from_process(process, forward_signals_to_process=False, restartable=True)
)
@rule
def generate_scalatest_lockfile_request(
_: ScalatestToolLockfileSentinel, scalatest: Scalatest
) -> GenerateJvmLockfileFromTool:
return GenerateJvmLockfileFromTool.create(scalatest)
def rules():
return [
*collect_rules(),
*lockfile.rules(),
UnionRule(GenerateToolLockfileSentinel, ScalatestToolLockfileSentinel),
*ScalatestTestRequest.rules(),
]
|
try:
from charm.core.math.integer import integer,randomBits,random,randomPrime,isPrime,encode,decode,hashInt,bitsize,legendre,gcd,lcm,serialize,deserialize,int2Bytes,toInt
#from charm.core.math.integer import InitBenchmark,StartBenchmark,EndBenchmark,GetBenchmark,GetGeneralBenchmarks,ClearBenchmark
except Exception as err:
print(err)
exit(-1)
class IntegerGroup:
def __init__(self, start=0):
pass
def setparam(self, p, q):
if p == (2 * q) + 1 and isPrime(p) and isPrime(q):
self.p = integer(p)
self.q = integer(q)
return True
else:
print("p and q are not safe primes!")
return False
def paramgen(self, bits, r=2):
# determine which group
while True:
self.p = randomPrime(bits, 1)
self.q = (self.p - 1) / 2
if (isPrime(self.p) and isPrime(self.q)):
break
self.r = r
return None
def randomGen(self):
while True:
h = random(self.p)
g = (h ** self.r) % self.p
if not g == 1:
break
return g
def groupSetting(self):
return 'integer'
def groupType(self):
return 'SchnorrGroup mod p'
def groupOrder(self):
return bitsize(self.q)
def bitsize(self):
return bitsize(self.q) / 8
def isMember(self, x):
return x.isCongruent()
def random(self, max=0):
if max == 0:
return random(self.p)
else:
return random(max)
def encode(self, M):
return encode(M, self.p, self.q)
def decode(self, element):
return decode(element, self.p, self.q)
def serialize(self, object):
assert type(object) == integer, "cannot serialize non-integer types"
return serialize(object)
def deserialize(self, bytes_object):
assert type(bytes_object) == bytes, "cannot deserialize object"
return deserialize(bytes_object)
def hash(self, *args):
if isinstance(args, tuple):
#print "Hashing => '%s'" % args
return hashInt(args, self.p, self.q, False)
return None
class IntegerGroupQ:
def __init__(self, start=0):
pass
def paramgen(self, bits, r=2):
# determine which group
while True:
self.p = randomPrime(bits, 1)
self.q = (self.p - 1) / 2
if (isPrime(self.p) and isPrime(self.q)):
break
self.r = r
return None
def randomG(self):
return self.randomGen()
def randomGen(self):
while True:
h = random(self.p)
g = (h ** self.r) % self.p
if not g == 1:
#print "g => %s" % g
break
return g
def groupSetting(self):
return 'integer'
def groupType(self):
return 'SchnorrGroup mod q'
def groupOrder(self):
return bitsize(self.q)
def messageSize(self):
return bitsize(self.q) / 8
def isMember(self, x):
return x.isCongruent()
def random(self, max=0):
if max == 0:
return random(self.q)
else:
return random(max)
def encode(self, M):
return encode(M, self.p, self.q)
def decode(self, element):
return decode(element, self.p, self.q)
def hash(self, *args):
if isinstance(args, tuple):
return hashInt(args, self.p, self.q, True)
List = []
for i in args:
List.append(i)
return hashInt(tuple(List), self.p, self.q, True)
def serialize(self, object):
assert type(object) == integer, "cannot serialize non-integer types"
return serialize(object)
def deserialize(self, bytes_object):
assert type(bytes_object) == bytes, "cannot deserialize object"
return deserialize(bytes_object)
class RSAGroup:
def __init__(self):
self.p = self.q = self.n = 0
def paramgen(self, secparam):
while True:
p, q = randomPrime(secparam), randomPrime(secparam)
if isPrime(p) and isPrime(q) and gcd(p * q, (p - 1) * (q - 1)) == 1:
break
self.p = p
self.q = q
return (p, q, p * q)
def setparam(self, p, q):
if isPrime(p) and isPrime(q) and p != q:
self.p = integer(p)
self.q = integer(q)
self.n = self.p * self.q
return True
else:
print("p and q are not primes!")
return False
def serialize(self, object):
assert type(object) == integer, "cannot serialize non-integer types"
return serialize(object)
def deserialize(self, bytes_object):
assert type(bytes_object) == bytes, "cannot deserialize object"
return deserialize(bytes_object)
def random(self, max=0):
if max == 0:
return random(self.n)
else:
return random(max)
def groupSetting(self):
return 'integer'
def groupType(self):
return 'RSAGroup mod p'
def groupOrder(self):
return bitsize(self.n)
def encode(self, value):
pass
def decode(self, value):
pass
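# Editor's sketch of minimal usage (not part of the original module; assumes the charm
# toolkit is installed and the 256-bit parameter size is purely illustrative):
if __name__ == "__main__":
    group = IntegerGroup()
    group.paramgen(256)           # generate a safe prime p = 2q + 1 and subgroup order q
    g = group.randomGen()         # random generator of the order-q subgroup mod p
    print(group.groupType(), "- order of about", group.groupOrder(), "bits")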
|
import cv2
from scipy.misc import imread, imsave
import time
import numpy as np
import os
from skimage.feature import register_translation
from scipy.ndimage import filters
import errno
import matplotlib.pyplot as plt
import tifffile as tiff
from joblib import Parallel, delayed
from stuckpy.microscopy import inout
from stuckpy.microscopy import visualize
from stuckpy.microscopy import operations
INPUT_FOLDER = r'E:\E_Documents\Research\NPG\Mechanical Testing\20171214 TEM tensile success\Movie 1\brittle clip'
OUTPUT_FOLDER = r'E:\E_Documents\Research\NPG\Mechanical Testing\20171214 TEM tensile success\Movie 1\brittle clip/aligned'
TO_REF = True # true = align to reference image, false = align to next image.
REF = None #Set to None for middle image
secondRef = None
refswitch = 1700
SIGMA = 3 #blur amount
disable_y = False
def align(im, refim):
shift, _, _ = register_translation(refim, im, 1) #calc offset relative to last image
return shift
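# Editor's note: register_translation returns (shift, error, diffphase); only the
# (row, col) pixel shift between `refim` and `im` is kept here.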
# Load Images
def run():
# Load Images
t = time.time()
print('Loading images...', end='')
files = inout.get_file_names(INPUT_FOLDER, 'tif')
files = files[:2308]
im0 = imread(files[0], 'L')
rows, cols = im0.shape
num = len(files)
im = np.zeros((rows,cols,num), dtype='uint8')
for i, file in enumerate(files):
im[:,:,i] = imread(files[i], 'L')
im = im[:991, :,:] # crop out scale bar
#im = im[30:, :-10, :]
elapsed = time.time() - t
print('Done. Took %.2f seconds.' %elapsed)
# Ensure directory exists
try:
os.makedirs(OUTPUT_FOLDER)
except OSError as e:
if e.errno != errno.EEXIST:
raise
to_ref = TO_REF #align to middle image (True) or adjacent image (False)
#Preprocess images
frames = im.copy()
t = time.time()
print('Applying Gaussian filter...', end='')
im = operations.gaussian(im, SIGMA)
elapsed = time.time() - t
print('Done. Took %.2f seconds.' %elapsed)
# Calculate shift matrix
rows,cols,num = im.shape
ref = int(num/2)
ref += 100
print(ref)
if REF is not None:
ref = REF
#plt.imshow(im[:,:,ref], cmap=plt.cm.gray)
#plt.show()
shift = np.zeros((num, 2)) #initialize shift matrix - [y, x] for each image
if to_ref:
if secondRef is None:
get_shift = Parallel(n_jobs=4, verbose=50)(delayed(align)(im[:,:,i],im[:,:,ref]) for i in range(num))
for i,s in enumerate(get_shift):
shift[i,:] = s
else:
get_shift = Parallel(n_jobs=4, verbose=50)(delayed(align)(im[:,:,i],im[:,:,ref]) for i in range(refswitch))
get_shift2 = Parallel(n_jobs=4, verbose=50)(delayed(align)(im[:,:,i],im[:,:,secondRef]) for i in range(refswitch,num))
for i,s in enumerate(get_shift):
shift[i,:] = s
for i,s in enumerate(get_shift2):
shift[refswitch+i,:] = s
#print(shift)
        # adjust the shift array such that 0,0 is the minimum
minrow, mincol = np.amin(shift, axis=0)
maxrow, maxcol = np.amax(shift, axis=0)
for i in range(num):
shift[i,0] -= minrow
shift[i,1] -= mincol
else:
t = time.time()
print('Calculating offset for %d images...' %num, end='')
for i in range(ref,num-1):
if i % 10 == 0:
visualize.update_progress((i-ref)/num)
shift[i+1,:], _, _ = register_translation(im[:,:,i], im[:,:,i+1], 10) #calc offset relative to last image
shift[i+1,:] = shift[i+1,:] + shift[i,:] #relative to reference image
#now align front half
for i in range(ref, 0, -1):
if i % 10 == 0:
visualize.update_progress((num-i)/num)
shift[i-1,:], _, _ = register_translation(im[:,:,i], im[:,:,i-1], 10) #calc offset relative to last image
shift[i-1,:] = shift[i-1,:] + shift[i,:] #relative to reference image
elapsed = time.time() - t
print('Done. Took %.2f seconds.' %elapsed)
if disable_y:
shift[:,1] = 0
        # adjust the shift array such that 0,0 is the minimum
minrow, mincol = np.amin(shift, axis=0)
maxrow, maxcol = np.amax(shift, axis=0)
for i in range(num):
shift[i,0] -= minrow
shift[i,1] -= mincol
    # Save the shift matrix for reference (note: SHIFT_FILE is not defined in this script,
    # so the bare try/except below silently skips the save unless it is added above)
try:
np.save(SHIFT_FILE, shift)
except:
pass
# get shape of final movie
minrow, mincol = np.amin(shift, axis=0)
maxrow, maxcol = np.amax(shift, axis=0)
rows = rows + int(round(maxrow))
cols = cols + int(round(maxcol))
# Generate shifted images
t = time.time()
print('Shifting %d images...' %num, end='')
#shiftim = []
shiftim = np.zeros((rows,cols,num), dtype='uint8')
for i in range(num):
M = np.float32([[1,0,shift[i,1]],[0,1,shift[i,0]]]) #transformation matrix
shiftim[:,:,i] = cv2.warpAffine(frames[:,:,i],M,(cols,rows))
elapsed = time.time() - t
print('Done. Took %.2f seconds.' %elapsed)
#print(shift)
# Save images
if True:
t = time.time()
print('Saving %d images...' %num, end='')
for i in range(num):
tiff.imsave(files[i].replace(INPUT_FOLDER, OUTPUT_FOLDER), shiftim[:,:,i])
elapsed = time.time() - t
print('Done. Took %.2f seconds.' %elapsed)
if __name__ == '__main__':
run()
|
import random
suits = ["Hearts","Spades","Clubs","Diamonds"]
ranks = ["Two","Three","Four","Five","Six","Seven","Eight","Nine","Ten","Jack","Queen","King","Ace"]
value = {"Two":2,"Three":3,"Four":4,"Five":5,"Six":6,"Seven":7,"Eight":8,"Nine":9,"Ten":10,"Jack":10,"Queen":10,"King":10,"Ace":11}
playing = True
class Card:
def __init__(self,suit,rank):
self.sum = 0
self.suits = suit
self.rank = rank
def __str__(self):
return "{} of {}".format(self.rank,self.suits)
class Deck:
def __init__(self):
self.deck = []
for suit in suits:
for rank in ranks:
self.deck.append(Card(suit,rank))
def shuffle(self):
random.shuffle(self.deck)
def deal(self):
single_card = self.deck.pop()
return single_card
def __str__(self):
deck_comp = ""
for card in self.deck:
deck_comp += "\n" + card.__str__()
return "The deck contains: " + deck_comp
class Hand:
def __init__(self):
self.cards = []
self.value = 0
self.aces = 0
def add_card(self,card):
self.cards.append(card)
self.value = self.value + value[card.rank]
if card.rank == "Ace":
self.aces += 1
    def adjust_for_ace(self):
        # If the hand would bust, count an ace as 1 instead of 11.
        while self.value > 21 and self.aces:
            self.value -= 10
            self.aces -= 1
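    # Example: Ace + King is valued 21 (11 + 10); drawing a 5 pushes the total to 26,
    # and adjust_for_ace() then counts the ace as 1, bringing the hand back to 16.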
class Chips:
def __init__(self,total=100):
self.total = total
self.bet = 0
def win_bet(self):
self.total += self.bet
def lose_bet(self):
self.total -= self.bet
def take_bet(chips):
while True:
try:
chips.bet = int(input("Enter How Much You'd Like to Bet"))
except:
print ("Sorry that was not an integer")
else:
if chips.bet > chips.total:
print("Sorry You cant bet more than you own")
else:
break
def hit(deck,hand):
hand.add_card(deck.deal())
hand.adjust_for_ace()
def hit_or_stand(deck,hand):
global playing
while True:
x = input("Would you like to Hit or Stand? Enter 'h' or 's' ")
if x[0].lower() == 'h':
hit(deck,hand) # hit() function defined above
elif x[0].lower() == 's':
print("Player stands. Dealer is playing.")
playing = False
else:
print("Sorry, please try again.")
continue
break
def show_some(player,dealer):
print("\nDealer's Hand:")
print(" <card hidden>")
print('',dealer.cards[1])
print("\nPlayer's Hand:", *player.cards, sep='\n ')
def show_all(player,dealer):
print("\nDealer's Hand:", *dealer.cards, sep='\n ')
print("Dealer's Hand =",dealer.value)
print("\nPlayer's Hand:", *player.cards, sep='\n ')
print("Player's Hand =",player.value)
def player_busts(player,dealer,chips):
print("Player busts!")
chips.lose_bet()
def player_wins(player,dealer,chips):
print("Player wins!")
chips.win_bet()
def dealer_busts(player,dealer,chips):
print("Dealer busts!")
chips.win_bet()
def dealer_wins(player,dealer,chips):
print("Dealer wins!")
chips.lose_bet()
def push(player,dealer):
print("Dealer and Player tie! It's a push.")
"""
while True:
print("Welcome to BlackJack, You will be competing against the computer, the deck will now shuffle")
deck = Deck()
deck.shuffle()
player1 = Hand()
player1.add_card(deck.deal())
player1.add_card(deck.deal())
dealer1 = Hand()
dealer1.add_card(deck.deal())
dealer1.add_card(deck.deal())
player_chips = Chips()
take_bet(player_chips)
show_some(player1,dealer1)
while playing:
hit_or_stand(deck,player1)
show_some(player1,dealer1)
if player1.value > 21:
player_busts(player1,dealer1,player_chips)
break
if player1.value <= 21:
while dealer1.value < 17:
hit(deck,dealer1)
show_all(player1,dealer1)
if player1.value > 21:
player_busts(player1,dealer1,player_chips)
        elif dealer1.value > player1.value:
dealer_wins(player1,dealer1,player_chips)
elif dealer1.value < player1.value:
player_wins(player1,dealer1,player_chips)
else:
push(player1,dealer1)
print("You're current total is " + str(player1.value))
"""
|
__author__ = "Narwhale"
# exception handling structure
try:
    # main code block
    pass
except KeyError as e:
    # executed when an exception is raised
    pass
else:
    # executed after the main block completes without an exception
    pass
finally:
    # always executed, whether an exception occurred or not
    pass
while True:
num1 = input('num1:')
num2 = input('num2:')
try:
num1 = int(num1)
num2 = int(num2)
result = num1 + num2
except Exception as e:
        print('An exception occurred, details:', e)
|
import numpy as np
from numba import njit
import game.consts as consts
@njit('u8(u8[:, :])')
def pack(arr):
bits = np.uint64(0)
for x in range(8):
for y in range(8):
bits += arr[x, y] << (x * 8 + y)
return bits
@njit('u8[:, :](u8)')
def unpack(bits):
arr = np.empty((8, 8), dtype=np.uint64)
for x in range(8):
for y in range(8):
arr[x, y] = bits & 1
bits >>= 1
return arr
@njit('u8[:, :, :](u8, u8, u8)')
def bits_to_array(my, opp, obs):
arr = np.empty((3, 8, 8), dtype=np.uint64)
arr[0] = unpack(my)
arr[1] = unpack(opp)
arr[2] = unpack(obs)
return arr
@njit('Tuple((u8, u8, u8))(u8[:, :, :])')
def array_to_bits(arr):
my = pack(arr[0])
opp = pack(arr[1])
obs = pack(arr[2])
return my, opp, obs
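# Editor's note: popcount below is the standard branch-free SWAR population count; it
# sums the set bits of a 64-bit word in parallel without looping.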
@njit('u8(u8)')
def popcount(x):
x -= ((x >> 1) & 0x5555555555555555)
x = (x & 0x3333333333333333) + (x >> 2 & 0x3333333333333333)
return (((x + (x >> 4)) & 0xf0f0f0f0f0f0f0f) * 0x101010101010101 >> 56) & 0xff
def to_string(my, opp, obs):
pos = 1
res = ''
for idx in range(8 * 8):
if my & pos:
res += consts.BOARD_STR_MY
elif opp & pos:
res += consts.BOARD_STR_OPP
elif obs & pos:
res += consts.BOARD_STR_OBS
else:
res += consts.BOARD_STR_EMPTY
if idx % 8 == 7:
res += '\n'
pos <<= 1
return res
def from_string(s):
    s = ''.join(s.split())
assert len(s) == 8 * 8
pos = 1
my, opp, obs = 0, 0, 0
for c in s:
if c == consts.BOARD_STR_MY:
my |= pos
elif c == consts.BOARD_STR_OPP:
opp |= pos
elif c == consts.BOARD_STR_OBS:
obs |= pos
else:
assert c == consts.BOARD_STR_EMPTY
        pos <<= 1
    return my, opp, obs
# Bitboard operations.
MASKS = np.array([
0x7F7F7F7F7F7F7F7F, # Right.
0x007F7F7F7F7F7F7F, # Down-right.
0xFFFFFFFFFFFFFFFF, # Down.
0x00FEFEFEFEFEFEFE, # Down-left.
0xFEFEFEFEFEFEFEFE, # Left.
0xFEFEFEFEFEFEFE00, # Up-left.
0xFFFFFFFFFFFFFFFF, # Up.
0x7F7F7F7F7F7F7F00, # Up-right.
], dtype=np.uint64)
LSHIFTS = np.array([
0, # Right.
0, # Down-right.
0, # Down.
0, # Down-left.
1, # Left.
9, # Up-left.
8, # Up.
7, # Up-right.
])
RSHIFTS = np.array([
1, # Right.
9, # Down-right.
8, # Down.
7, # Down-left.
0, # Left.
0, # Up-left.
0, # Up.
0, # Up-right.
])
assert len(MASKS) == len(LSHIFTS) == len(RSHIFTS)
NUM_DIRS = len(MASKS)
@njit('u8(u8, i4)')
def shift(disks, dir):
assert 0 <= dir < NUM_DIRS
if dir < NUM_DIRS // 2:
assert LSHIFTS[dir] == 0, "Shifting right."
return (disks >> RSHIFTS[dir]) & MASKS[dir]
else:
assert RSHIFTS[dir] == 0, "Shifting left."
return (disks << LSHIFTS[dir]) & MASKS[dir]
@njit('u8(u8, u8, u8)')
def generate_moves(my_disks, opp_disks, obstacles):
assert (my_disks & opp_disks) == 0, "Disk sets should be disjoint."
empty_cells = ~(my_disks | opp_disks | obstacles)
legal_moves = np.uint64(0)
for dir in range(NUM_DIRS):
# Get opponent disks adjacent to my disks in direction dir.
x = shift(my_disks, dir) & opp_disks
# Add opponent disks adjacent to those, and so on.
x |= shift(x, dir) & opp_disks
x |= shift(x, dir) & opp_disks
x |= shift(x, dir) & opp_disks
x |= shift(x, dir) & opp_disks
x |= shift(x, dir) & opp_disks
# Empty cells adjacent to those are valid moves.
legal_moves |= shift(x, dir) & empty_cells
return legal_moves
@njit('i4(u8, u8, u8)')
def is_terminated(my_disks, opp_disks, obstacles):
i_can_move = generate_moves(my_disks, opp_disks, obstacles) > 0
opp_can_move = generate_moves(opp_disks, my_disks, obstacles) > 0
return not (i_can_move or opp_can_move)
@njit('i4(u8, u8, u8)')
def evaluate(my_disks, opp_disks, obstacles):
my_score = popcount(my_disks)
opp_score = popcount(opp_disks)
return my_score - opp_score
@njit('Tuple((u8, u8))(u8, u8, i4)')
def resolve_move(my_disks, opp_disks, board_idx):
new_disk = np.uint64(1 << board_idx)
captured_disks = np.uint64(0)
assert 0 <= board_idx < 64, "Move must be within the board."
assert my_disks & opp_disks == 0, "Disk sets must be disjoint."
assert (my_disks | opp_disks) & new_disk == 0, "Target not empty!"
my_disks |= new_disk
for dir in range(NUM_DIRS):
# Find opponent disk adjacent to the new disk.
x = shift(new_disk, dir) & opp_disks
# Add any adjacent opponent disk to that one, and so on.
x |= shift(x, dir) & opp_disks
x |= shift(x, dir) & opp_disks
x |= shift(x, dir) & opp_disks
x |= shift(x, dir) & opp_disks
x |= shift(x, dir) & opp_disks
# Determine whether the disks were captured.
bounding_disk = shift(x, dir) & my_disks
captured_disks |= x if bounding_disk else np.uint64(0)
assert captured_disks, "A valid move must capture disks."
my_disks ^= captured_disks
opp_disks ^= captured_disks
assert my_disks & opp_disks == 0, "The sets must still be disjoint."
return my_disks, opp_disks
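# Editor's sketch of minimal usage (not part of the original module; assumes the
# game.consts module from this project is importable). The standard Othello opening
# position is encoded assuming bit i maps to arr[i // 8, i % 8] as in pack()/unpack()
# above; with no obstacles the side to move should have four legal replies.
if __name__ == "__main__":
    my = (1 << 28) | (1 << 35)     # centre disks of the side to move
    opp = (1 << 27) | (1 << 36)    # centre disks of the opponent
    obs = 0                        # no obstacles
    print(to_string(my, opp, obs))
    moves = generate_moves(my, opp, obs)
    print("legal moves:", popcount(moves))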
|
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from textwrap import dedent
from typing import Dict, List, Union
import pytest
from pants.option.custom_types import (
DictValueComponent,
ListValueComponent,
UnsetBool,
_flatten_shlexed_list,
memory_size,
)
from pants.option.errors import ParseError
ValidPrimitives = Union[int, str]
ParsedList = List[ValidPrimitives]
ParsedDict = Dict[str, Union[ValidPrimitives, ParsedList]]
def test_memory_size() -> None:
assert memory_size("1GiB") == 1_073_741_824
assert memory_size(" 1 GiB ") == 1_073_741_824
assert memory_size("1.22GiB") == 1_309_965_025
assert memory_size("1MiB") == 1_048_576
assert memory_size(" 1 MiB ") == 1_048_576
assert memory_size("1.4MiB") == 1_468_006
assert memory_size("1KiB") == 1024
assert memory_size(" 1 KiB ") == 1024
assert memory_size("1.4KiB") == 1433
assert memory_size("10B") == 10
assert memory_size(" 10 B ") == 10
assert memory_size("10.4B") == 10
assert memory_size("10") == 10
assert memory_size(" 10 ") == 10
assert memory_size("10.4") == 10
# Must be a Bytes unit.
with pytest.raises(ParseError):
memory_size("1ft")
with pytest.raises(ParseError):
memory_size("1m")
# Invalid input.
with pytest.raises(ParseError):
memory_size("")
with pytest.raises(ParseError):
memory_size("foo")
def test_flatten_shlexed_list() -> None:
assert _flatten_shlexed_list(["arg1", "arg2"]) == ["arg1", "arg2"]
assert _flatten_shlexed_list(["arg1 arg2"]) == ["arg1", "arg2"]
assert _flatten_shlexed_list(["arg1 arg2=foo", "--arg3"]) == ["arg1", "arg2=foo", "--arg3"]
assert _flatten_shlexed_list(["arg1='foo bar'", "arg2='baz'"]) == [
"arg1=foo bar",
"arg2=baz",
]
class TestCustomTypes:
@staticmethod
def assert_list_parsed(s: str, *, expected: ParsedList) -> None:
assert expected == ListValueComponent.create(s).val
@staticmethod
def assert_split_list(s: str, *, expected: List[str]) -> None:
assert expected == ListValueComponent._split_modifier_expr(s)
def test_unset_bool(self):
# UnsetBool should only be use-able as a singleton value via its type.
with pytest.raises(NotImplementedError):
UnsetBool()
def test_dict(self) -> None:
def assert_dict_parsed(s: str, *, expected: ParsedDict) -> None:
assert expected == DictValueComponent.create(s).val
assert_dict_parsed("{}", expected={})
assert_dict_parsed('{ "a": "b" }', expected={"a": "b"})
assert_dict_parsed("{ 'a': 'b' }", expected={"a": "b"})
assert_dict_parsed('{ "a": [1, 2, 3] }', expected={"a": [1, 2, 3]})
assert_dict_parsed('{ "a": [1, 2] + [3, 4] }', expected={"a": [1, 2, 3, 4]})
def assert_dict_error(s: str) -> None:
with pytest.raises(ParseError):
assert_dict_parsed(s, expected={})
assert_dict_error("[]")
assert_dict_error("[1, 2, 3]")
assert_dict_error("1")
assert_dict_error('"a"')
def test_list(self) -> None:
self.assert_list_parsed("[]", expected=[])
self.assert_list_parsed("[1, 2, 3]", expected=[1, 2, 3])
self.assert_list_parsed("(1, 2, 3)", expected=[1, 2, 3])
self.assert_list_parsed('["a", "b", "c"]', expected=["a", "b", "c"])
self.assert_list_parsed("['a', 'b', 'c']", expected=["a", "b", "c"])
self.assert_list_parsed("[1, 2] + [3, 4]", expected=[1, 2, 3, 4])
self.assert_list_parsed("(1, 2) + (3, 4)", expected=[1, 2, 3, 4])
self.assert_list_parsed('a"', expected=['a"'])
self.assert_list_parsed("a'", expected=["a'"])
self.assert_list_parsed("\"a'", expected=["\"a'"])
self.assert_list_parsed("'a\"", expected=["'a\""])
self.assert_list_parsed('a"""a', expected=['a"""a'])
self.assert_list_parsed("1,2", expected=["1,2"])
self.assert_list_parsed("+[1,2]", expected=[1, 2])
self.assert_list_parsed("\\", expected=["\\"])
def test_split_list_modifier_expressions(self) -> None:
self.assert_split_list("1", expected=["1"])
self.assert_split_list("foo", expected=["foo"])
self.assert_split_list("1,2", expected=["1,2"])
self.assert_split_list("[1,2]", expected=["[1,2]"])
self.assert_split_list("[1,2],[3,4]", expected=["[1,2],[3,4]"])
self.assert_split_list("+[1,2],[3,4]", expected=["+[1,2],[3,4]"])
self.assert_split_list("[1,2],-[3,4]", expected=["[1,2],-[3,4]"])
self.assert_split_list("+[1,2],foo", expected=["+[1,2],foo"])
self.assert_split_list("+[1,2],-[3,4]", expected=["+[1,2]", "-[3,4]"])
self.assert_split_list("-[1,2],+[3,4]", expected=["-[1,2]", "+[3,4]"])
self.assert_split_list(
"-[1,2],+[3,4],-[5,6],+[7,8]", expected=["-[1,2]", "+[3,4]", "-[5,6]", "+[7,8]"]
)
self.assert_split_list("+[-1,-2],-[-3,-4]", expected=["+[-1,-2]", "-[-3,-4]"])
self.assert_split_list('+["-"],-["+"]', expected=['+["-"]', '-["+"]'])
self.assert_split_list('+["+[3,4]"],-["-[4,5]"]', expected=['+["+[3,4]"]', '-["-[4,5]"]'])
# Spot-check that this works with literal tuples as well as lists.
self.assert_split_list("+(1,2),-(3,4)", expected=["+(1,2)", "-(3,4)"])
self.assert_split_list(
"-[1,2],+[3,4],-(5,6),+[7,8]", expected=["-[1,2]", "+[3,4]", "-(5,6)", "+[7,8]"]
)
self.assert_split_list("+(-1,-2),-[-3,-4]", expected=["+(-1,-2)", "-[-3,-4]"])
self.assert_split_list('+("+(3,4)"),-("-(4,5)")', expected=['+("+(3,4)")', '-("-(4,5)")'])
# Check that whitespace around the comma is OK.
self.assert_split_list("+[1,2] , -[3,4]", expected=["+[1,2]", "-[3,4]"])
self.assert_split_list("+[1,2] ,-[3,4]", expected=["+[1,2]", "-[3,4]"])
self.assert_split_list("+[1,2] , -[3,4]", expected=["+[1,2]", "-[3,4]"])
# We will split some invalid expressions, but that's OK, we'll error out later on the
# broken components.
self.assert_split_list("+1,2],-[3,4", expected=["+1,2]", "-[3,4"])
self.assert_split_list("+(1,2],-[3,4)", expected=["+(1,2]", "-[3,4)"])
@pytest.mark.xfail(
reason="The heuristic list modifier expression splitter cannot handle certain very unlikely cases."
)
def test_split_unlikely_list_modifier_expression(self) -> None:
# Example of the kind of (unlikely) values that will defeat our heuristic, regex-based
# splitter of list modifier expressions.
funky_string = "],+["
self.assert_split_list(
f'+["{funky_string}"],-["foo"]', expected=[f'+["{funky_string}"]', '-["foo"]']
)
def test_unicode_comments(self) -> None:
"""We had a bug where unicode characters in comments would cause the option parser to fail.
Without the fix to the option parser, this test case reproduces the error:
UnicodeDecodeError: 'ascii' codec can't decode byte 0xe2 in position 44:
ordinal not in range(128)
"""
self.assert_list_parsed(
dedent(
"""
[
'Hi there!',
# This is a comment with ‘sneaky‘ unicode characters.
'This is an element in a list of strings.',
# This is a comment with an obvious unicode character ☺.
]
"""
).strip(),
expected=["Hi there!", "This is an element in a list of strings."],
)
|
from allennlp.common.util import JsonDict
from allennlp.data import Instance
from allennlp.predictors import Predictor
@Predictor.register("covid_predictor")
class CovidPredictor(Predictor):
def _json_to_instance(self, json_dict: JsonDict) -> Instance:
instance = self._dataset_reader.text_to_instance(
**json_dict
)
return instance
|
# created by Ryan Spies
# 4/13/2015
# Python 2.7
# Description: generate an input file for the MAP preprocessor
# MAP input format: http://www.nws.noaa.gov/oh/hrl/nwsrfs/users_manual/part3/_pdf/37map.pdf
import os
import dateutil
os.chdir("../..")
maindir = os.getcwd()
################### user input #########################
RFC = 'APRFC_FY2017'
fxgroup = 'NWAK'
year1 = '1965'; year2 = '2010'
card_type = 'MAP'
weight_type = 'SEAS' # choices: 'PRE' or 'SEAS'
consis_check = 'off' # choices: 'on' or 'off'
networks = ['nhds_hourly','nhds_daily','raws_hourly'] # choices: 'asos_hourly','nhds_daily','raws_hourly','scan_hourly'
workingdir = maindir + os.sep + 'Calibration_NWS'+ os.sep + RFC[:5] + os.sep + RFC + os.sep + 'MAP_MAT_development' + os.sep + 'station_data'
daily_obs_file = workingdir + os.sep + 'nhds_daily' + os.sep + 'nhds_site_obs_time_' + fxgroup+ '.csv' # file with the obs time changes for some nhds daily stations
map_weights = workingdir + os.sep + 'MAP_input' + os.sep + 'pre_weights' + os.sep # file with the pxpp output -> template for MAP input
basin_coords = workingdir + os.sep + 'MAP_input' + os.sep + 'vertices_summary.txt'
## define basins below ###
if fxgroup == 'ANAK':
map_basins = ['KNKA2LWR','KNKA2UPR','KNKA2GL'] #
if fxgroup == 'NWAK':
map_basins = ['ABLA2LWR','ABLA2UPR','KIAA2LWR','KIAA2UPR','WULA2LWR','WULA2UPR']
map_basins_desc = {'ABLA2LWR':'ABLA2 LOWER','ABLA2UPR':'ABLA2 UPPER','KIAA2LWR':'KIAA2 LOWER','KIAA2UPR':'KIAA2 UPPER','WULA2LWR':'WULA2 LOWER'
,'WULA2UPR':'WULA2 UPPER','KNKA2LWR':'KNKA2 LOWER','KNKA2UPR':'KNKA2 UPPER','KNKA2GL':'KNKA2 GLACIER'}
map_basins_area = {'ABLA2LWR':4710,'ABLA2UPR':1701,'KIAA2LWR':2699,'KIAA2UPR':372,'WULA2LWR':440
,'WULA2UPR':265,'KNKA2LWR':318,'KNKA2UPR':413,'KNKA2GL':488}
if consis_check == 'on':
map_basins = [fxgroup + '_consis_check']
consis_input = 0
out_ext = '_consis_check.map'
else:
consis_input = len(map_basins)
out_ext = '.map'
########################################################
out_file = open(workingdir + os.sep + 'MAP_input' + os.sep + year1 + '-' + year2 + os.sep + 'MAP_input_'+fxgroup+'_' + year1 + '_' + year2 + '_' + weight_type + out_ext,'wb')
pxpp_output = workingdir + os.sep + 'MAP_input' + os.sep + year1 + '-' + year2 + os.sep + 'pxpp_punch' + os.sep + 'ptpx_' + fxgroup + '_breaks_pun.txt' # file with the pxpp output -> template for MAP input
print 'Creating file -> ' + str('MAP_input_'+fxgroup+'_' + year1 + '_' + year2 + '.map')
################ A card block ####################
# data pulled from pxpp output punch file
open_pxpp = open(pxpp_output,'r')
for entry in open_pxpp:
if entry[:2] == '@A':
out_file.write(entry)
break
open_pxpp.close()
################ B card block ####################
MAP_compute = consis_input; station_weight = weight_type; null = '0'; cont = 'CONT'; interval = '6'; adj = 'ADJ'; cons = 'SESN'
winter = 10; summer = 4; ctim = 'CTIM' # note: need n+1 commas for defaults (n is number of default variables)
out_file.write('{:2s} {:2d} {:4s} {:1s} {:4s} {:1s} {:3s} {:4s} {:2d} {:1d} {:4s}'.format('@B',MAP_compute,station_weight,null,cont,interval,adj,cons,winter,summer,ctim))
out_file.write('\n')
################ C card block ####################
mean_precip = 'NORM'; map_output = ''; prec_compare = ''
out_file.write('{:2s} {:4s} {:4s} {:4s}'.format('@C',mean_precip,map_output,prec_compare))
out_file.write('\n')
################ D/F/G card block ####################
# data pulled from pxpp output punch file
stations_input = []; stations_desc = [] # create a dictionary of available stations (name and id)
open_pxpp = open(pxpp_output,'r')
for line in open_pxpp:
if line[:2] == '@D': # or line[:2] == '@F' or line[:2] == '@G':
out_file.write(line)
if line[:2] == '@F':
site_desc = str((line.split("'")[1]))
stations_desc.append(site_desc)
f_line = line
if line[:2] == '@G':
g_line = line
site_name = str((line.split()[-1]))
stations_input.append(site_name)
# find the initial observation time
obs_time_file = open(daily_obs_file,'r')
        time_obs = '7.' # default obs time if none is found in the obs-time file below
if site_name[:2].isupper() == True: # ignores hourly NHDS sites ('ak') -> set to 0. obs time
for inst in obs_time_file:
sep = inst.split(',')
if sep[0] != 'COOP ID' and sep[0] != '':
site_id = str(sep[0])[-4:]
if site_id == site_name[-4:]:
if sep[9] != '': # ignore missing obs time instances
time_obs = str(int(float(sep[9])/100))
if len(time_obs) == 1:
time_obs = time_obs + '.'
break # only find the first instance of obs time -> M card will correct obs time changes
f_line = f_line.rstrip('\n') + ' ' + time_obs + '\n'
out_file.write(f_line)
out_file.write(g_line[:3])
## added below block to add spacing for monthly station climo values > 9.99 (prevent data overlapping?)
parse_line = g_line[3:63]
mclimos = [parse_line[i:i+5] for i in range(0, len(parse_line), 5)]
extra_space = 4 # use this to determine if previous line was 5 or 4 chars
for i, value in enumerate(mclimos):
if extra_space == 5 and len(value.replace(' ','')) == 5:
out_file.write(' ')
elif i > 0 and extra_space == 4 and len(value.replace(' ','')) == 5:
out_file.write(' ')
out_file.write(value)
extra_space = len(value.replace(' ',''))
out_file.write(g_line[64:])
obs_time_file.close()
open_pxpp.close()
################ I card block ####################
# area information and predetermined weights
for map_basin in map_basins:
if MAP_compute > 0: # only add @I card for MAP generation run - not consistency check run
area_id = map_basin; area_desc = map_basins_desc[map_basin].replace(' ','_'); area = map_basins_area[map_basin]; area_units = 'MI2'; basin_name = 'FY17_CALB'; file_name = area_id
out_file.write('{:2s} {:12s} {:20s} {:5d} {:3s} {:12s} {:12s}'.format('@I',area_id,area_desc,area,area_units,basin_name,file_name))
out_file.write('\n')
################ J card block ####################
# omit when using predetermined weights
if station_weight == 'THIE' or station_weight == 'GRID':
#base_units = 'ENGL';
#out_file.write('{:2s} {:4s} {:8s} {:20s} {:3s} {:12s} {:12s}'.format('@J',base_units,map_basin,area_desc))
out_file.write('{:2s} '.format('@J'))
out_file.write('(')
find_coords = open(basin_coords,'r')
for line in find_coords:
sep = line.split('\t'); coord_count = 1
if sep[0] == map_basin:
for pair in sep[2:-1]:
out_file.write('{:11s}'.format((pair.rstrip('\n')).replace(' ','0')))
if coord_count == len(sep[2:-1]):
out_file.write(')\n')
elif coord_count % 5 == 0 and coord_count > 1:
out_file.write('\n ')
else:
out_file.write(' ')
coord_count += 1
find_coords.close()
################ L card block ####################
# only needed for predetermined weights
#### Annual Weights ######
if station_weight == 'PRE' and consis_check != 'on':
count = 1; wpairs = {}
out_file.write('@L\n')
open_weights = open(map_weights + map_basin + '.csv','r')
for each in open_weights:
if each.split(',')[0].strip() != 'Station': # skip header line
station = each.split(',')[0].strip()
weight = each.split(',')[7].strip()
wpairs[station] = weight
if len(wpairs) != len(stations_input):
            print '!!! Number of stations specified in @F/@G not equal to num of stations in weights csv!!!'
for point in stations_input: # iterate @F/@G stations
if point in wpairs: # check station is list in pre weights csv
idweight = wpairs[point]
if idweight == '' or idweight == '\n':
idweight = 0.0
out_file.write('{:4.3f}'.format(float(idweight)))
out_file.write(' ')
if count % 10 == 0:
out_file.write('\n')
count += 1
else:
print '!!!! Station/weight not found --> ' + point
out_file.write('\n')
print 'Added pre-determined weights for: ' + map_basin
#### Season weights used instead of annual weights ##########
if station_weight == 'SEAS' and consis_check != 'on':
count = 1; wpairs = {}; spairs = {}
out_file.write('@L\n')
wopen_weights = open(map_weights + 'winter' + os.sep + map_basin + '.csv','r')
for each in wopen_weights: # loop winter weights csv
if each.split(',')[0].strip() != 'Station': # skip header line
station = each.split(',')[0].strip()
weight = each.split(',')[7].strip()
wpairs[station] = weight
if len(wpairs) != len(stations_input):
print '!!! Number of stations specified in @F/@G not equal to num of stations in weights csv!!!'
sopen_weights = open(map_weights + 'summer' + os.sep + map_basin + '.csv','r')
for each in sopen_weights: # loop summer weights csv
if each.split(',')[0].strip() != 'Station': # skip header line
station = each.split(',')[0].strip()
weight = each.split(',')[7].strip()
spairs[station] = weight
if len(spairs) != len(stations_input):
print '!!! Number of stations specified in @F/@G not equal to num of stations in weights csv!!!'
### add station weights to card file ####
for point in stations_input: # iterate @F/@G stations
if point in wpairs: # check station is list in pre weights csv
idweight = wpairs[point]
if idweight == '' or idweight == '\n':
idweight = 0.0
out_file.write('{:4.3f}'.format(float(idweight)))
out_file.write(' ')
if count % 10 == 0:
out_file.write('\n')
count += 1
else:
print '!!!! Winter Station/weight not found --> ' + point
for point in stations_input: # iterate @F/@G stations
if point in spairs: # check station is list in pre weights csv
idweight = spairs[point]
if idweight == '' or idweight == '\n':
idweight = 0.0
out_file.write('{:4.3f}'.format(float(idweight)))
out_file.write(' ')
if count % 10 == 0:
out_file.write('\n')
count += 1
else:
print '!!!! Summer Station/weight not found --> ' + point
out_file.write('\n')
print 'Added pre-determined weights for: ' + map_basin
################ M card block ####################
if ctim == 'CTIM': # omit unless observation time corrections specified in B card
if 'nhds_daily' in networks:
print 'Adding M block - obs time history...'
for station_num, station_input in enumerate(stations_input):
obs_time_file = open(daily_obs_file,'r')
prev_station = ''; prev_obs = ''
for line in obs_time_file:
sep = line.split(',')
if sep[0] != 'COOP ID' and sep[0] != '':
site_id = str(sep[0])[-4:]
if site_id == station_input[-4:]:
if station_input[:2].isupper() == True: # ignores hourly NHDS sites ('ak')
#print 'Obs time change found: ' + station_input
begin_date = dateutil.parser.parse(sep[1])
if sep[9] != '': # ignore missing obs time instances
time_obs = int(float(sep[9])/100)
if site_id == prev_station: # check for repeat obs_time instances for same site
if time_obs != prev_obs:
out_file.write('@M ' + str(station_num + 1) + ' ' + str(begin_date.month) + ' ' + str(begin_date.year) + ' ' + str(time_obs) + '\n')
else:
out_file.write('@M ' + str(station_num + 1) + ' ' + str(begin_date.month) + ' ' + str(begin_date.year) + ' ' + str(time_obs) + '\n')
prev_station = site_id; prev_obs = time_obs
obs_time_file.close()
out_file.write('@M 999\n')
################ O/Q/R/S card block ####################
# data pulled from pxpp output punch file
# O card only used when ADJ specified in B card
if adj == 'ADJ':
open_pxpp = open(pxpp_output,'r')
for entry in open_pxpp:
if entry[:2] == '@O':
if entry[:5] == '@O 99':
out_file.write('@O 999\n')
else:
out_file.write(entry)
open_pxpp.close()
open_pxpp = open(pxpp_output,'r')
for entry in open_pxpp:
if entry[:2] == '@Q' or entry[:4] == 'NHDS' or entry[:4] == 'USGS' or entry[:1] == '/' or entry[:2] == '@R' or entry[:2] == '@S' or entry[:2] == ' ':
out_file.write(entry)
open_pxpp.close()
out_file.write('\n')
out_file.close()
print 'Completed!!'
|
#!/usr/bin/env python3
import hashlib
import platform
import re
import subprocess
# did you do "pip install requests"?
import requests
def register():
nodename = getMyNodeName()
ips = getMyIpAddresses()
ips = ",".join(ips)
token = generateToken(["set", nodename, ips])
# https://host/myip/set/foobar/?ip=123.45.67.1&token=4ae98c43c976a794
url = "https://host/myip/set/{0}/?ip={1}&token={2}".format(nodename, ips, token)
r = requests.get(url)
if r.status_code == 200:
print("successfully registered as {0} at {1}".format(nodename, ips))
else:
print("Failed {0}: {1}".format(r.status_code, r.text))
print("URL was {0}".format(url))
def getMyNodeName():
return platform.node()
reLocalIP = re.compile(r' (192\.168\.\d+\.\d+|10\.\d+\.\d+\.\d+)/')
def getMyIpAddresses():
#"ip addr"
p = subprocess.run(["ip", "addr"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True)
if p.stderr:
print("ip addr printed to stderr: >>{0}<<".format(p.stderr.decode("latin-1")))
raise ValueError
lines = p.stdout.decode("latin-1").split("\n")
def extractIP(line):
line = line.strip()
match = reLocalIP.search(line)
return match.group(1) if match else None
return list(ip for ip in (extractIP(line) for line in lines) if ip)
def generateToken(parts):
secret = b'secret-token'
m = hashlib.md5()
m.update(secret)
for part in parts:
m.update(part.encode("utf-8"))
longDigest = m.hexdigest()
shortDigest = longDigest[15:31]
return shortDigest
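# A hypothetical sketch of how the receiving endpoint could check the token,
# assuming it shares the same secret and truncation scheme as generateToken above:
def verifyToken(parts, token):
    return generateToken(parts) == token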
# ****************************************
if __name__ == "__main__":
register()
|
import random
from dolfin import *
import numpy as np
eps = 0.005
class InitialConditions(UserExpression):
def __init__(self, **kwargs):
random.seed(2 + MPI.rank(MPI.comm_world))
super().__init__(**kwargs)
def eval(self, values, x):
values[0] = 0.5 + 0.1*(0.5 - random.random())
#values[0] = 0.5*(1. - np.tanh((sqrt((x[0]-0.5)**2+(x[1]-0.5)**2)-0.25)/(.1)))
values[1] = 0.0
def value_shape(self):
return (2,)
class CahnHilliardEquation(NonlinearProblem):
def __init__(self, a, L):
NonlinearProblem.__init__(self)
self.L = L
self.a = a
def F(self, b, x):
assemble(self.L, tensor=b)
def J(self, A, x):
assemble(self.a, tensor=A)
lmbda = 1.0e-02 # surface parameter
dt = 5.0e-06 # time step
theta = 1.0 # time stepping family, e.g. theta=1 -> backward Euler, theta=0.5 -> Crank-Nicolson
parameters["form_compiler"]["optimize"] = True
parameters["form_compiler"]["cpp_optimize"] = True
mesh = UnitSquareMesh.create(96, 96, CellType.Type.quadrilateral)
P1 = FiniteElement("Lagrange", mesh.ufl_cell(), 1)
ME = FunctionSpace(mesh, P1*P1)
# Trial and test functions of the space ``ME`` are now defined::
# Define trial and test functions
du = TrialFunction(ME)
q, v = TestFunctions(ME)
u = Function(ME) # current solution
u0 = Function(ME) # solution from previous converged step
# Split mixed functions
dc, dmu = split(du)
c, mu = split(u)
c0, mu0 = split(u0)
u_init = InitialConditions(degree=1)
u.interpolate(u_init)
u0.interpolate(u_init)
e1 = Constant((1.,0))
e2 = Constant((0,1.))
m = [e1, -e1, e2, -e2]
c_grad = grad(c)
abs_grad = abs(c_grad[0]) + abs(c_grad[1])
#abs_grad = abs(grad(c))
nv = grad(c) / abs_grad
def heaviside(x):
'''if x.eval() < -DOLFIN_EPS:
return Constant(0)
elif x.eval()>DOLFIN_EPS:
return Constant(1.)
else:
return Constant(0.5)'''
return 0.5*(x+abs(x)) / abs(x) if conditional(gt(DOLFIN_EPS, abs(x)), True, False) else 0.5
ai = 0.5
wi = 4.
gamma = 1 - sum(ai**wi * heaviside(dot(nv, mi)) for mi in m)
b_energy = Constant(0.25)*c**2*(Constant(1) - c)**2
multiplier = sqrt(b_energy)
L0 = c*q*dx - c0*q*dx + multiplier * dt*dot(grad(mu), grad(q))*dx
#L1 = mu*v*dx - dfdc*v*dx - lmbda*dot(grad(c), grad(v))*dx
w = gamma * (eps*0.5*dot(grad(c), grad(c)) + 1./eps * b_energy) * dx
print(w)
#1./eps * f
F = derivative(w, c, v)
L1 = mu*v*dx - F
L = L0 + L1
a = derivative(L, u, du)
problem = CahnHilliardEquation(a, L)
solver = NewtonSolver()
solver.parameters["linear_solver"] = "lu"
solver.parameters["convergence_criterion"] = "incremental"
solver.parameters["relative_tolerance"] = 1e-6
#solver.parameters['relaxation_parameter'] = .99
solver.parameters['maximum_iterations'] = 100
file = File("result/ps-1/output.pvd", "compressed")
# Step in time
t = 0.0
T = 100*dt
while (t < T):
t += dt
u0.vector()[:] = u.vector()
solver.solve(problem, u.vector())
file << (u.split()[0], t)
|
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 30 12:51:18 2018
@author: Harsha Vardhan Manoj
"""
import pandas as pd
import re
import time
import numpy as np
udata = ['user_id', 'gender', 'age', 'occupation','zip']
users = pd.read_table('ml-latest-small/users.dat',sep='::',
header=None, names = udata, engine = 'python')
mdata=['movie_id', 'title', 'genre']
movies = pd.read_table('ml-latest-small/movies.dat',
sep='::', header=None, names=mdata,engine = 'python')
rdata=['user_id','movie_id','rating','time']
ratings = pd.read_table('ml-latest-small/ratings.dat',
sep='::', header=None, names=rdata,engine = 'python')
rating_years=[]
for rate in ratings['time']:
rating_years.append(str(time.ctime(rate))[-4:]);
ratings['rating_year']=rating_years
data = pd.merge(pd.merge(ratings,users),movies)
#mean_ratings = data.pivot_table('rating',index=['title','rating_year'], aggfunc=[len, np.mean])
mean_ratings = data.pivot_table('rating',index=['title'], aggfunc=[len, np.mean])
pd.set_option('display.max_columns',50)
mean_ratings.reset_index(inplace=True)
#mean_ratings.columns = ['title', 'rating_year','no.of_Ratings','Mean']
mean_ratings.columns = ['title','no.of_Ratings','Mean']
years=[]
for movie in mean_ratings['title']:
years.append(re.search('\(\d{4}\)',movie)[0]);
mean_ratings['year']=years
print("Movies with Year in their data")
print(mean_ratings)
print("===============================================================")
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
mean_ratings=mean_ratings.loc[mean_ratings['no.of_Ratings'] >10]
print("Top movies:")
for x in range(1971, 2001):
temp=pd.DataFrame(mean_ratings.loc[mean_ratings['year']=='('+str(x)+')'])
print(temp.loc[temp['Mean']==temp['Mean'].max()])
print("===============================================================")
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
year_diff=[]
i=0;
prev=0;
top_movies=pd.DataFrame(mean_ratings.nlargest(20, 'Mean').sort_values(by=['year']))
for movie in top_movies['year']:
if(i==0):
prev=int(movie[1:5])
year_diff.append(0)
i=i+1
else:
year_diff.append(int(movie[1:5])-prev)
prev=int(movie[1:5])
top_movies['Year_diff']= year_diff
print("===============================================================")
print("Top twenty movies")
print(top_movies)
print("===============================================================")
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("average gap for release of top movies is")
print(str(top_movies['Year_diff'].mean())+' Years')
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
|
# Task 1
import time
def decorator1(fun):
count = 0
    def wrapper(*args):
        nonlocal count
        count += 1
        start_time = time.time()
        result = fun(*args)  # keep the wrapped function's return value instead of discarding it
        exec_time = time.time() - start_time
        print(fun.__name__, 'call:', count, 'executed in', format(exec_time, '.8f'), 'sec')
        return result
    return wrapper
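# A minimal usage sketch (the decorated function below is made up for illustration):
@decorator1
def slow_add(a, b):
    time.sleep(0.1)
    return a + b
print(slow_add(2, 3))  # wrapper prints the timing line, then this prints 5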
|
"""
Module for tests
"""
from bitstring import BitArray
import binascii
from itertools import permutations
import datetime
from Polynomial_Extractor import PolynomialExtractor
from Polynomial_Generator import PolynomialGenerator
import Vault_Verifier
from Minutia import *
from Minutiae_Extractor import MinutiaeExtractor
from Geometric_Hashing_Transformer import GHTransformer
import Constants
from Minutia_Converter import MinutiaConverter
now = datetime.datetime.now()
GALLERY_IMAGE = '2_6'
PROBE_IMAGE = '4_2'
XYT_GALLERY_PATH = 'input_images/' + 'FVC2006_DB_2B/' + GALLERY_IMAGE + '.xyt'
XYT_PROBE_PATH = 'input_images/' + 'FVC2006_DB_2B/' + PROBE_IMAGE + '.xyt'
POLY_DEGREE = 12
SECRET = "SECRET..."
GF_2_M = 32
CRC_LENGTH = 32
SECRET_LENGTH = len(SECRET.encode()) * 8
MINUTIAE_OUT_FILE = 'out/minutiae_out_basis_{}_vs_{}_{}_{}.csv'.format(
PROBE_IMAGE, GALLERY_IMAGE, now.strftime("%Y%m%d"), now.strftime("%H%M")
)
def test_ideal_secret_length(len_poly, len_crc):
for x in range(1000):
if (x + len_crc) % (len_poly + 1) == 0 and x % 8 == 0:
print(x)
def test_secret_length():
secret_original_bytes = SECRET.encode()
secret_original_bit = BitArray(bytes=secret_original_bytes, length=len(secret_original_bytes) * 8)
print(len(secret_original_bit))
def test_bitstring_extraction():
secret_bytes = SECRET.encode()
secret_bit = BitArray(bytes=secret_bytes, length=len(secret_bytes) * 8)
print(secret_bit.bin)
print("Length of bits original secret: %d" % len(secret_bit))
checksum_bit = BitArray(uint=binascii.crc32(secret_bit.bytes), length=CRC_LENGTH)
print(checksum_bit.bin)
print(len(checksum_bit))
# join secret bitstring with CRC
total_bit = secret_bit.copy()
total_bit.append(checksum_bit)
print('Length total bit: {}'.format(len(total_bit)))
coefficients = []
assert len(total_bit) % 13 == 0
step = int(len(total_bit) / 13)
for i in range(0, len(total_bit), step):
coefficients.append(total_bit[i:i + step].uint)
print(coefficients)
PolynomialExtractor.check_crc_in_poly(coefficients, POLY_DEGREE, CRC_LENGTH)
poly = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 83, 69, 67, 82, 69, 84, 46, 46, 46, 201, 112, 67, 73]
print(PolynomialExtractor.check_crc_in_poly(poly, POLY_DEGREE, CRC_LENGTH))
def test_combinations():
l = list(range(10))
combinations(l, 4)
def combinations(iterable, r):
pool = tuple(iterable)
n = len(pool)
for indices in permutations(range(n), r):
if sorted(indices) == list(indices):
print(tuple(pool[i] for i in indices))
def generate_basis_transformed_tuples(xyt_path):
"""
:param xyt_path: fingerprint probe image
:returns: list of minutiae tuple (basis count [int], basis [Minutia], minutia [Minutia])
"""
nbis_minutiae_extractor = MinutiaeExtractor()
minutiae_list = nbis_minutiae_extractor.extract_minutiae_from_xyt(xyt_path)[0:Constants.MINUTIAE_POINTS_AMOUNT]
minutiae_list_gh = []
for m in minutiae_list:
minutiae_list_gh.append(MinutiaNBIS_GH.convert_from_MinutiaNBIS(m))
all_minutiae_list = []
for cnt_basis, basis in enumerate(minutiae_list_gh, 1):
for m in GHTransformer.transform_minutiae_to_basis(basis, minutiae_list_gh):
all_minutiae_list.append((cnt_basis, basis, m))
return all_minutiae_list
def write_probe_vs_gallery(probe_xyt_path, gallery_xyt_path, out_file):
write_basis_minutiae_header(out_file)
gallery_tuples = generate_basis_transformed_tuples(gallery_xyt_path)
probe_tuples = generate_basis_transformed_tuples(probe_xyt_path)
for i, probe_tuple in enumerate(probe_tuples, 1):
print('Probe tuple: {}'.format(i))
probe_minutia = probe_tuple[2]
probe_string = generate_print_string_tuple(probe_tuple)
if probe_minutia.is_zero():
continue
match = False
print_string = probe_string
for gallery_tuple in gallery_tuples:
gallery_minutia = gallery_tuple[2]
if gallery_minutia.is_zero():
continue
if Vault_Verifier.fuzzy_compare(gallery_minutia, probe_minutia):
match = True
gallery_string = generate_print_string_tuple(gallery_tuple)
print_string = print_string + gallery_string
if match:
with open(out_file, 'a') as log:
log.write(print_string + '\n')
else:
with open(out_file, 'a') as log:
log.write(probe_string + '\n')
def write_geometric_hashing(xyt_path, out_file):
tuples_list = generate_basis_transformed_tuples(xyt_path)
for m_tuple in tuples_list:
write_basis_minutiae_to_file(m_tuple[0], m_tuple[1], m_tuple[2], out_file)
def write_tuple_to_file(m_tuples, out_file):
"""
:param m_tuples: list of tuples
:param out_file: output file
:return:
"""
for m_tuple in m_tuples:
write_basis_minutiae_to_file(m_tuple[0], m_tuple[1], m_tuple[2], out_file)
def generate_print_string_tuple(m_tuple):
cnt_basis = m_tuple[0]
basis = m_tuple[1]
minutia = m_tuple[2]
return '{};{};{};{};{};{};{};'.format(
cnt_basis, basis.x, basis.y, basis.theta, minutia.x, minutia.y, minutia.theta
)
def write_basis_minutiae_to_file(cnt_basis, basis, minutia, out_file, line_break=True):
with open(out_file, 'a') as log:
if line_break:
log.write('{};{};{};{};{};{};{}\n'.format(
cnt_basis, basis.x, basis.y, basis.theta, minutia.x, minutia.y, minutia.theta
))
else:
log.write('{};{};{};{};{};{};{};'.format(
cnt_basis, basis.x, basis.y, basis.theta, minutia.x, minutia.y, minutia.theta
))
def write_basis_minutiae_header(out_file):
# clear log file and add log header
open(out_file, 'w+').close()
with open(out_file, 'a') as log:
log.write('basis#;basis_x probe;basis_y probe;basis_theta probe;x probe;y probe;theta probe;'
'basis#;basis_x gallery;basis_y gallery;basis_theta gallery;x gallery;y gallery;theta gallery\n')
def generate_all_geom_from_path(xyt_path):
nbis_minutiae_extractor = MinutiaeExtractor()
minutiae_list = nbis_minutiae_extractor.extract_minutiae_from_xyt(xyt_path)[0:]
probe_minutiae_GH = GHTransformer.convert_list_to_MinutiaNBIS_GH(minutiae_list)
for cnt_basis, basis in enumerate(probe_minutiae_GH):
# take random basis and try matching
element = GHTransformer.generate_verification_table_element(basis, probe_minutiae_GH.copy())
print(element)
def convert_x_to_minutia(x):
m_conv = MinutiaConverter()
m = m_conv.get_minutia_from_uint(x)
m_gh = MinutiaNBIS_GH.convert_from_MinutiaNBIS(m)
return m_gh
def test_poly_in_gf(x):
secret_bytes = b'\xa4uw\x89|\xaao\xce\x8b\xdf\xcdIsB\xd3k\x95G'
poly_gen = PolynomialGenerator(secret_bytes, Constants.POLY_DEGREE, Constants.CRC_LENGTH, 32)
print(poly_gen.evaluate_polynomial_gf_2(x, 32))
if __name__ == '__main__':
# test_geometric_hashing()
# generate_all_geom_from_path(XYT_GALLERY_PATH)
#write_basis_minutiae_header(MINUTIAE_OUT_FILE)
#write_probe_vs_gallery(XYT_PROBE_PATH, XYT_GALLERY_PATH, MINUTIAE_OUT_FILE)
test_poly_in_gf(297850099)
|
from django.contrib.auth.models import User
from django.contrib.auth.backends import ModelBackend
class EmailAuthenticationBackend(ModelBackend):
def authenticate(self, request, email = None, password = None):
if email and password:
try:
user = User.objects.get(email = email)
if not user.check_password(password):
user = None
except User.DoesNotExist:
user = None
return user
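# For Django to actually consult this backend, it must be listed in settings.py, e.g.
# (assuming this module is importable as account.authentication):
# AUTHENTICATION_BACKENDS = [
#     'django.contrib.auth.backends.ModelBackend',
#     'account.authentication.EmailAuthenticationBackend',
# ]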
|
#!/usr/bin/python
arr = [list(line.rstrip('\n')) for line in open('problem_79.in')]
chars = ['0', '1', '2', '3', '6', '7', '8', '9']
ans = ""
while len(arr) > 0:
char = arr[0][0]
for i in range(len(arr)):
if arr[i][1] == char:
char = arr[i][0]
ans += char
chars.remove(char)
for i in range(len(arr)):
if char in arr[i]:
arr[i].remove(char)
if len(arr[i]) == 0 or len(arr[i]) == 1:
arr[i] = []
while [] in arr:
arr.remove([])
ans += str(chars[0])
print(ans)
|
# coding: utf-8
#
# In[9]:
from datetime import datetime
import requests
import sys
# see http://docs.python-requests.org/en/master/user/quickstart/ for package documentation
geoportalBaseURL = 'http://datadiscoverystudio.org/geoportal/'
catalogISOmetadataBase = geoportalBaseURL + 'rest/metadata/item/'
print catalogISOmetadataBase
XML_HEADER = '<?xml version="1.0" encoding="UTF-8"?>'
fileLocationBase = 'c:\\tmp\\'
print fileLocationBase
sitemaptohtml = 'https://raw.githubusercontent.com/CINERGI/xmlsitemap/master/xml-sitemap.xsl'
#suggest copying the xslt file into the same directory with the sitemaps, in which case, use this
# value for sitemaptohtml:
# sitemaptohtml = 'xml-sitemap.xsl'
# first some utility functions for file generation and writing
def writeLinks( response, mfile ):
# writes entries in sitemap file, with URL for metadata record as html; the record
# is expected to include a schema.org JSON-LD script for use by the search indexers
for hit in response["hits"]["hits"]:
# hittitle = hit["_source"]["title"]
try:
hitid = hit["_id"]
hitmodified = hit["_source"]["sys_modified_dt"]
# print "title: ", hittitle, " id: ", hitid, " date: ", hitmodified
mfile.write('<url>')
mfile.write("\n")
# original CINERGI catalog location
#mfile.write('<loc>http://cinergi.sdsc.edu/geoportal/rest/metadata/item/'
# + hitid + '/html</loc>')
mfile.write('<loc>' + catalogISOmetadataBase + hitid + '/html</loc>')
mfile.write("\n")
mfile.write('<lastmod>' + hitmodified + '</lastmod>')
mfile.write("\n")
mfile.write('<changefreq>monthly</changefreq>')
mfile.write("\n")
# mfile.write('<priority>0.8</priority>')
# mfile.write("\n")
mfile.write('</url>')
mfile.write("\n")
except:
print("ERROR writing sitemap url for _id= " + hitid)
print(sys.exc_info()[1])
return
def indexFile():
# set up the sitemap index. This file has a link to each sitemap file.
# sitemaps are limited to 10000 entries, so if there is a bigger catalog, have
# to generate multiple sitemaps and point to them from the index.
try:
file_object = open(fileLocationBase + "DDSSiteIndex.xml", "w")
except:
print("ERROR: Can't open the index file, bailing out")
print(sys.exc_info()[1])
sys.exit(0)
# put in the header stuff
file_object.write(XML_HEADER)
file_object.write("\n")
file_object.write('<?xml-stylesheet type="text/xsl" href="' + sitemaptohtml + '"?>')
file_object.write('\n')
file_object.write('<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">')
file_object.write("\n")
return(file_object)
def siteMapFile(name):
# opens a new empty sitemap file and returns the file_object for writing to it.
try:
file_object = open(fileLocationBase + name, "w")
except:
print("ERROR: Can't open the new sitemap file: " + name + ", bailing out")
print(sys.exc_info()[1])
sys.exit(0)
#put in the header stuff
file_object.write(XML_HEADER)
file_object.write('\n')
file_object.write('<?xml-stylesheet type="text/xsl" href="' + sitemaptohtml + '"?>')
file_object.write('\n')
file_object.write('<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">')
file_object.write('\n')
return(file_object)
# construct Elasticsearch URL with search request
# espath="http://cinergi.sdsc.edu/geoportal/elastic/"
espath= geoportalBaseURL + "elastic/"
esindex="metadata"
esresource="/item/_search"
baseURL = espath+esindex+esresource
# need to use scrolling because there are >10000 records
# this is the time to live for the scroll index; renewed on each search call
p_scroll="1m"
#number of records to return in each batch.
# This will be the number of links in each sitemap file
p_size="10000"
#use this for testing
#p_size="10"
# the only field we need for the sitemap is the modified date
# comma delimited list of index fields to return from the _source section of the hits object
#p_source="sys_modified_dt,title"
p_source="sys_modified_dt"
# first get the scroll index to start scrolling loop, and the total number of records
counter = 0
filecount = 0
#print counter
#first request to get scrolling set up
p = {'scroll':p_scroll,
'size' : p_size,
'_source' : p_source}
r = requests.get(baseURL, params=p)
print "request1: ", r.url
if r.status_code == requests.codes.ok:
response = r.json()
totalRecords = response["hits"]["total"]
scrollID = response["_scroll_id"]
# set up the index file
indexhandle = indexFile()
print "total records: ", totalRecords
sitemapfilename = "ddssitemap" + str(filecount)+ ".xml"
sitemaphandle = siteMapFile(sitemapfilename)
writeLinks(response, sitemaphandle)
sitemaphandle.write('</urlset>')
sitemaphandle.close()
#new index entry
indexhandle.write('<sitemap>')
indexhandle.write('\n')
# indexhandle.write('<loc>http://cinergi.sdsc.edu/geoportal/' + sitemapfilename + '</loc>')
# providing a full URL to put links in the sitemap index:
# indexhandle.write('<loc>' + geoportalBaseURL + sitemapfilename + '</loc>')
# using local file paths also works, and is likely easier to maintain in the long run:
indexhandle.write('<loc>' + sitemapfilename + '</loc>')
indexhandle.write('\n')
indexhandle.write('<lastmod>' + str(datetime.now())+ '</lastmod>')
indexhandle.write('\n')
indexhandle.write('</sitemap>')
indexhandle.write('\n')
filecount = filecount + 1
counter = counter + int(p_size)
else:
r.raise_for_status()
sys.exit(0)
while counter < totalRecords:
# use this for testing:
#while counter < 50:
#have to hit the scroll resource for Elasticsearch
esresource="_search/scroll"
#Geoportal Elasticsearch pass through requires publisher role to run the scroll resource
espath="http://admin:admin@datadiscoverystudio.org/geoportal/elastic/"
baseURL = espath+esresource
p = { 'scroll':p_scroll,
'scroll_id' : scrollID}
r = requests.get(baseURL, params=p)
# print "request: ", r.url, r.status_code
# print "raw response2: ", r, " status: ", r.status_code
# print r.headers['content-type']
if r.status_code == requests.codes.ok:
response = r.json()
scrollID = response["_scroll_id"]
sitemapfilename = "ddssitemap" + str(filecount)+ ".xml"
sitemaphandle = siteMapFile(sitemapfilename)
writeLinks(response, sitemaphandle)
sitemaphandle.write('</urlset>')
sitemaphandle.close()
#new index entry
indexhandle.write('<sitemap>')
indexhandle.write('\n')
indexhandle.write('<loc>' + geoportalBaseURL + sitemapfilename + '</loc>')
indexhandle.write('\n')
indexhandle.write('<lastmod>' + str(datetime.now())+ '</lastmod>')
indexhandle.write('\n')
indexhandle.write('</sitemap>')
indexhandle.write('\n')
filecount = filecount + 1
counter = counter + int(p_size)
print "count: ", counter
else:
r.raise_for_status()
break
indexhandle.write('</sitemapindex>')
indexhandle.close()
print "done, counter = ",counter
|
from mytoy import toy
def test_toy_default():
assert toy() == 1
def test_toy_0():
assert toy(0) == 1
def test_toy_1():
assert toy(1) == 2
|
import requests as req
from bs4 import BeautifulSoup as soup
import json
def crawl():
    url = "https://www.kabum.com.br/cgi-local/site/listagem/listagem.cgi?string=teclado&btnG= "
    rs = req.get(url)
    content = rs.content
    results = []  # renamed from `json` so the imported json module is not shadowed
    page_soup = soup(content, 'html.parser')
    containers = page_soup.findAll('div', {"class": "listagem-box"})
    for container in containers:
        nome = container.a.img['alt']
        preco = container.findAll('div', {"class": "listagem-precoavista"})
        preco = preco[0].text
        obj = {"nome": nome, "preco": preco}
        results.append(obj)
    return results
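# A minimal usage sketch: dump the scraped keyboard listings as JSON
# (assumes the kabum.com.br listing markup still matches the selectors above).
if __name__ == '__main__':
    print(json.dumps(crawl(), ensure_ascii=False, indent=2))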
|
import define
def signal(symbol ,data ,databig , position , signal, sleep , file ,initialsignal ,datasmall):
histogram = data[define.ema24] - data[define.ema52] - data[define.signal18]
file.write(str(symbol))
file.write (' histogram = ')
file.write(str(histogram))
file.write('\n')
#if histogram > 0 and position[define.position] != "LONG":
if data[define.hostogramhistory][0] < data[define.hostogramhistory][1] and data[define.hostogramhistory][1] < data[define.hostogramhistory][2] and data[define.hostogramhistory][2] < data[define.hostogramhistory][3] and data[define.hostogramhistory][3] - data[define.hostogramhistory][2] > data[define.hostogramhistory][2] - data[define.hostogramhistory][1] and data[define.hostogramhistory][2] - data[define.hostogramhistory][1] > data[define.hostogramhistory][1] - data[define.hostogramhistory][0]:
signal = 'BUY'
file.write ('signal buy on ')
file.write(str(symbol))
file.write('\n')
sleep = 0
#if abs(data[define.lastramp]-data[define.ramp])/data[define.price] /3000 * 1000000 < 0.11 :
# sleep =1
# file.write ('sleep for ramp macd')
# file.write('\n')
if databig[define.biglasthistogram] > databig[define.bighistogram] or databig[define.bigtwolasthistogram] > databig[define.bighistogram] :
sleep =1
file.write ('sleep for 30min histogram')
file.write('\n')
if data[define.rsi] > 80 :
sleep =1
file.write ('sleep for rsi')
file.write('\n')
#if initialsignal == 1 :
# sleep =1
# file.write ('sleep for initial')
# file.write('\n')
#if data[define.smallma] < data[define.bigma] :
# sleep =1
# file.write ('sleep for 5min ma')
# file.write('\n')
#if databig [define.bigmacdramp] - databig [define.bigsignalramp] < 0:
# sleep =1
# file.write ('sleep for 30min macd-signal ramp')
# file.write('\n')
#if datasmall[define.smallbigmaramp] > datasmall[define.smallsmallmaramp]:
# sleep =1
# file.write ('sleep for 1min ma ramp')
# file.write('\n')
#elif histogram < 0 and position[define.position] != "SHORT":
elif data[define.hostogramhistory][0] > data[define.hostogramhistory][1] and data[define.hostogramhistory][1] > data[define.hostogramhistory][2] and data[define.hostogramhistory][2] > data[define.hostogramhistory][3] and data[define.hostogramhistory][3] - data[define.hostogramhistory][2] > data[define.hostogramhistory][2] - data[define.hostogramhistory][1] and data[define.hostogramhistory][2] - data[define.hostogramhistory][1] > data[define.hostogramhistory][1] - data[define.hostogramhistory][0]:
signal = 'SELL'
file.write ('signal sell on ')
file.write(str(symbol))
file.write('\n')
sleep = 0
#if abs(data[define.lastramp]-data[define.ramp])/data[define.price] /3000 * 1000000 < 0.11 :
# sleep =1
# file.write ('sleep for ramp macd')
# file.write('\n')
if databig[define.biglasthistogram] < databig[define.bighistogram] or databig[define.bigtwolasthistogram] < databig[define.bighistogram] :
sleep =1
file.write ('sleep for 30min histogram')
file.write('\n')
if data[define.rsi] < 20 :
sleep =1
file.write ('sleep for rsi')
file.write('\n')
#if initialsignal == 1 :
# sleep =1
# file.write ('sleep for initial')
# file.write('\n')
#if data[define.smallma] > data[define.bigma] :
# sleep =1
# file.write ('sleep for 5min ma')
# file.write('\n')
#if databig [define.bigmacdramp] - databig [define.bigsignalramp] > 0:
# sleep =1
# file.write ('sleep for 30min macd-signal ramp')
# file.write('\n')
#if datasmall[define.smallbigmaramp] < datasmall[define.smallsmallmaramp]:
# sleep =1
# file.write ('sleep for 1min ma ramp')
# file.write('\n')
return [ signal , sleep ]
|
import pygame
import source.setup
from source.constants import SCR_X ,SCR_Y,EN1_01_IMGPATH,OPEN_DOOR,OPEN_BULL,MU_ST_1
# stage = 0 # game stage
class Game:
def __init__(self):
self.screen = pygame.display.get_surface()
self.clock = pygame.time.Clock()
self.stage = 0
self.enemy1_img = pygame.image.load(EN1_01_IMGPATH)
self.tim = 0
self.isEng = 0
self.vPath = None
self.temp = 0
def run(self, state_0, state_1, player_1, en1s,en2s, en_boos, en_boom ,ends):
        pygame.key.set_repeat(60) # respond to keys that are held down
keep_going = True
pygame.mixer.init()
mu = pygame.mixer.music
pygame.mixer.music.load(MU_ST_1)
mu.play(-1)
while keep_going:
print("========玩家坐标",player_1.rect.topleft)
pos = pygame.mouse.get_pos()
mouse_x = pos[0]
mouse_y = pos[1]
KEY_ESCAPE = pygame.key.get_pressed()[pygame.K_ESCAPE]
KEY_A = pygame.key.get_pressed()[pygame.K_a]
KEY_D = pygame.key.get_pressed()[pygame.K_d]
KEY_W = pygame.key.get_pressed()[pygame.K_w]
KEY_S = pygame.key.get_pressed()[pygame.K_s]
KEY_SPACE = pygame.key.get_pressed()[pygame.K_SPACE]
if KEY_ESCAPE:
pass
            if self.stage == 1: # stage 1
if KEY_A:
player_1.change_p1()
player_1.pl_uodate_l()
if not state_1.get_lv_x() <=0:
en_boos.move_l()
for en_bu in en_boos.en_bullets:
en_bu.en_bu_l()
for en in en1s:
en.move_l()
for en_bu in en.en_bullets:
en_bu.en_bu_l()
for en in en2s:
en.move_l()
for en_bu in en.en_bullets:
en_bu.en_bu_l()
if en_boom.visible:
en_boom.en_boom_l()
state_1.map_update_r()
if KEY_D:
if self.isEng != 0:
player_1.change_p1()
player_1.pl_uodate_r()
if not state_1.get_lv_x() >= 7680 and self.isEng == 0:
en_boos.move_r()
player_1.change_p1()
# player_1.pl_uodate_r()
for en_bu in en_boos.en_bullets:
en_bu.en_bu_r()
for en in en1s:
en.move_r()
for en_bu in en.en_bullets:
en_bu.en_bu_r()
for en in en2s:
en.move_r()
for en_bu in en.en_bullets:
en_bu.en_bu_r()
if en_boom.visible:
en_boom.en_boom_r()
state_1.map_update_l()
if KEY_W:
player_1.change_p1()
player_1.pl_uodate_u()
if KEY_S:
player_1.change_p1()
player_1.pl_uodate_d()
if KEY_SPACE:
                    player_1.shoot() # fire a bullet
            for event in pygame.event.get(): # get the event list
                if event.type == pygame.QUIT: # quit event
keep_going = False
                if event.type == pygame.MOUSEBUTTONDOWN: # mouse click
                    if self.stage == 0: # stage 0
if (SCR_X // 2 - 554 // 2 <= mouse_x <= SCR_X // 2 + 554 // 2) and \
                            (SCR_Y // 2 <= mouse_y <= SCR_Y // 2 + 94): # check whether the mouse is over the start button
play_video(OPEN_DOOR,(SCR_X,SCR_Y))
pygame.mixer.music.load(MU_ST_1)
mu.play(-1)
self.stage = 1
            if self.stage == 0: # stage 0: main menu
state_0.update(self.screen,pos)
            if self.stage == 1: # stage 1: first level
if player_1.score1 >= 50:
player_1.bu_st = 1
print("地图==:",state_1.lv_x_speed)
if player_1.health <= 0 or (self.isEng != 0):
self.stage = 99
                state_1.map_loade(self.screen) # load the map
                state_1.map_check() # check the map
                player_1.player_load(self.screen) # load the player
                player_1.pl_check()
                player_1.draw_p1_health(self.screen) # draw the player health bar
                player_1.draw_score(self.screen)
                player_1.bullets.update() # update player bullets
                player_1.bullets.draw(self.screen) # draw the player bullet sprite group
if self.isEng == 0:
self.screen.blit(en_boos.image, en_boos.rect) # boos
en_boos.change_enboos()
en_boos.en_shoot()
en_boos.en_bullets.draw(self.screen)
en_boos.en_bullets.update()
                for en in en1s: # draw enemy type 1
                    self.screen.blit(en.image, en.rect)
                    en.change_en1()
                    en.en_bullets.update() # update bullets
                    en.en_shoot()
                    en.en_bullets.draw(self.screen) # draw the enemy bullet sprite group
                for en in en2s: # draw enemy type 2
                    self.screen.blit(en.image, en.rect)
                    en.change_en2()
                    en.en_bullets.update() # update bullets
                    en.en_shoot()
                    en.en_bullets.draw(self.screen) # draw the enemy bullet sprite group
en_boom.draw(self.screen)
en_boom.action()
en1s.update()
en2s.update()
player_1.update()
print("血量 ===",player_1.health)
# 玩家和boos,boos子弹碰撞
if pygame.sprite.collide_rect(player_1,en_boos):
player_1.health -= 50
for boos_bu in en_boos.en_bullets :
if pygame.sprite.collide_rect(boos_bu,player_1):
player_1.health -= 20
boos_bu.kill()
                # collisions between player bullets and enemy type 1
self.tim += 1
for en in en1s:
collide_list = pygame.sprite.spritecollide(player_1, en.en_bullets,True)
player_1.health -= (len(collide_list)*5)
                    if pygame.sprite.collide_rect(player_1,en): # player collides with an enemy
en.kill()
en_boom.set_pos(en.get_en_pos())
en_boom.visible = True
player_1.health -= 10
for bu in player_1.bullets:
if pygame.sprite.collide_mask(en, bu):
                            en.health -= 10 # each bullet deals 10 damage
bu.kill()
if en.health <= 0:
en_boom.set_pos(en.get_en_pos())
en_boom.visible = True
en.kill()
player_1.score1 += 10
                for bu in player_1.bullets: # player bullets vs the boss
if pygame.sprite.collide_rect(en_boos, bu) and self.isEng == 0:
en_boos.health -= 10
bu.kill()
if en_boos.health <= 0:
en_boom.set_pos(en_boos.get_en_pos())
en_boom.visible = True
player_1.score1 += 1000
self.isEng = -1
                # collisions between player bullets and enemy type 2
for en in en2s:
collide_list = pygame.sprite.spritecollide(player_1, en.en_bullets,True)
player_1.health -= (len(collide_list)*5)
                    if pygame.sprite.collide_rect(player_1,en): # player collides with an enemy
en.kill()
en_boom.set_pos(en.get_en_pos())
en_boom.visible = True
player_1.health -= 10
for bu in player_1.bullets:
if pygame.sprite.collide_mask(en, bu):
                            en.health -= 10 # each bullet deals 10 damage
bu.kill()
if en.health <= 0:
en_boom.set_pos(en.get_en_pos())
en_boom.visible = True
en.kill()
player_1.score1 += 10
if self.isEng == 0:
                    for en_bu in en_boos.en_bullets: # boss bullets colliding with the player
if pygame.sprite.collide_mask(en_bu,player_1):
player_1.health -= 10
en_bu.kill()
            if self.stage == 99: # stage 99: game over
mu.stop()
ends.update(self.screen)
player_1.draw_score(self.screen)
pygame.display.update()
            self.clock.tick(60) # 60 FPS
pygame.quit()
def get_image(img, x, y, width, height, cloorkey, scale):
    '''
    :param img: source image (sprite sheet)
    :param x: top-left x of the crop box
    :param y: top-left y of the crop box
    :param width: width of the crop box
    :param height: height of the crop box
    :param cloorkey: colorkey used for quick transparency masking
    :param scale: scaling factor
    :return:
    '''
    images = pygame.Surface((width, height)) # create an empty surface of the same size
    images.blit(img, (0, 0), (x, y, width, height)) # copy the crop box onto the new surface, like a screenshot
    if cloorkey != 'NULL':
        images.set_colorkey(cloorkey) # quick transparency masking
    images = pygame.transform.scale(images, (int(width * scale), int(height * scale))) # scale the image
return images
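# A hypothetical usage sketch (the sprite sheet path and frame coordinates are made up):
# sheet = pygame.image.load('images/player_sheet.png')
# frame = get_image(sheet, 0, 0, 32, 32, (0, 0, 0), 2)  # cut a 32x32 frame and double its size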
from moviepy.editor import *
def play_video(path,size,pos = lambda t: (0, 0)):
'''
    :param path: video file path
    :param size: (w, h) size
:return:
'''
clip = VideoFileClip(path)
clip.pos = pos
clip.size=size
clip.fps =60
clip.mask = False
clip.preview()
def close_video(clip):
clip.close()
|
# Create Multiple Regression for the “Restaurant Revenue Prediction” dataset.
# Evaluate the model using RMSE and R2 score.
# importing libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.style.use(style='ggplot')
plt.rcParams['figure.figsize'] = (10, 6)
# fetching the data from data.csv file
train = pd.read_csv('data.csv')
# working with Numeric Features
numeric_features = train.select_dtypes(include=[np.number])
# find out the number of null values for the features
nulls = pd.DataFrame(train.isnull().sum().sort_values(ascending=False))
nulls.columns = ['Null Count']
nulls.index.name = 'Feature'
# handling missing or null value
data = train.select_dtypes(include=[np.number]).interpolate().dropna()
# print(sum(data.isnull().sum() != 0))
# building a multiple linear model
y = np.log(data.revenue) # extracting label (revenue field)
X = data.drop(['revenue'], axis=1) # extracting features by excluding label i.e., revenue
from sklearn.model_selection import train_test_split
# splitting the data into test and train data
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1, test_size=0.2)
from sklearn import linear_model
lr = linear_model.LinearRegression()
# training the model by using fit method
model = lr.fit(X_train, y_train)
# evaluating the performance of the model
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
# predicting on the test data
y_pred = model.predict(X_test)
print("\nR^2 is obtained as : ", r2_score(y_test, y_pred))  # R2 score
print("RMSE is obtained as : ", np.sqrt(mean_squared_error(y_test, y_pred)))  # RMSE = sqrt(MSE)
|
def LCS(seq1, seq2, equal):
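    # Dynamic-programming longest common subsequence: seq1/seq2 are indexable sequences
    # and equal(a, b) decides element equality. The score table is filled first; prev
    # records each choice (0 = diagonal match, 1 = skip a seq1 element, -1 = skip a
    # seq2 element) so one LCS can be reconstructed by walking back from the corner.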
s1 = range(0, len(seq1)+1)
s2 = range(0, len(seq2)+1)
score = [[0 for s in s2] for s in s1]
prev = [[0 for s in s2] for s in s1]
for i in s1[:-1]:
for j in s2[:-1]:
if equal(seq1[i], seq2[j]):
score[i+1][j+1] = score[i][j] + 1
else:
if score[i][j+1] > score[i+1][j]:
score[i+1][j+1] = score[i][j+1]
prev[i+1][j+1] = 1
else:
score[i+1][j+1] = score[i+1][j]
prev[i+1][j+1] = -1
seq = [None for s in range(0, score[-1][-1])]
i = len(seq1)
j = len(seq2)
while i != 0 and j != 0:
if prev[i][j] == 0:
seq[score[i][j]-1] = seq1[i-1]
i -= 1
j -= 1
elif prev[i][j] == 1:
i -= 1
else:
j -= 1
return seq
|
# -*- coding: utf-8 -*-
# !/usr/bin/env python
"""
-------------------------------------------------
File Name: pmid_crawler.py
   Description:  Spider class; the last-resort approach: scrape the PubMed website directly
Author: Dexter Chen
Date:2018-10-10
-------------------------------------------------
"""
from __future__ import division
import time
import sys
import re
import math
import requests
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
import agents
import mongodb_handler as mh
import journal as jn
import utilities as ut
import message as msg
import stats
import config
existed_pmid_set = []
def save_png(browser):
browser.save_screenshot(ut.cur_file_dir() + "/browser/" + ut.time_str("time") + ".png" )
def parse_url(project, sstr): # turn the keyword into URL form; temporary for now, more to be added later
sstr_type = mh.read_sstr_type(project, sstr)
if sstr_type == "key_word":
if "," in sstr:
sstr = sstr.replace(", ",",") # 防止有空格
sstr = sstr.replace(",","%2C") # 换成链接形式
else:
pass
elif sstr_type == "expression":
pass
url = "https://www.ncbi.nlm.nih.gov/pubmed/?term=" + sstr # 最初的查询网址
return url
def adjust_record_number(project, sstr, record_number):
url = parse_url(project, sstr)
    tries = config.request_sp_tries # try 3 times
while(tries > 0):
try:
opener = requests.Session()
            content = opener.get(url, timeout=config.request_time_out, headers=agents.get_header()).text # the header can still be randomized
            max_record_number_start = content.find("<h3 class=\"result_count left\">Items:") + 37 # find where the result-count text starts
max_record_number_end = content.find('</h3>', max_record_number_start)
record_number_str = content[max_record_number_start:max_record_number_end]
max_record_number = int(record_number_str.split(" ")[-1])
if max_record_number >= record_number:
pass
else:
record_number = max_record_number
return record_number
break
except Exception, e:
print e
tries -= 1
time.sleep(config.request_refresh_wait)
else:
print "error"
def extract_new_pmid(content): # generic way to extract PMIDs from text
pmid_set = []
pmid_raw = re.findall("<dd>\d{8}</dd>", content)
for pmid in pmid_raw:
        pmid = pmid[4:-5] # strip the surrounding <dd></dd> tags
if pmid not in existed_pmid_set:
pmid_set.append(pmid)
else:
            pass # TODO: log this as a skip in the future
return pmid_set
def crawl_direct(project, sstr): # crawl the summary page directly; only gets the first page, but fast because it avoids phantomjs
    url = parse_url(project, sstr)
    tries = config.request_sp_tries # try 3 times
    while(tries > 0):
        try:
            opener = requests.Session()
            content = opener.get(url, timeout=config.request_time_out, headers=agents.get_header()).text # the header can still be randomized
            pmid_list = extract_new_pmid(content) # extract PMIDs, then drop the ones already stored
mh.add_new_pmid_all(project, sstr, ut.time_str("full"), "pm", pmid_list)
break
except Exception, e:
print e
tries -= 1
time.sleep(config.request_refresh_wait)
else:
print "error"
def crawl_phantom(project, sstr, record_number): # crawl summary pages with phantomjs; can page through everything, but slow
    url = parse_url(project, sstr)
    sum_page_number = int(math.ceil(record_number / 20)) # how many pages are needed to cover all records
    rest_page_number = sum_page_number # pages remaining; equals the total at the start
    tries_1st_sp = config.phantom_1st_sp_tries
    phantomjs_headers = agents.get_header() # pick one header at random for the browser
    dcap = dict(DesiredCapabilities.PHANTOMJS) # set the userAgent
    dcap["phantomjs.page.settings.userAgent"] = (phantomjs_headers) # the header is random each time phantomjs starts, but stays fixed until the browser closes
    dcap["phantomjs.page.settings.loadImages"] = False # skip loading images to speed things up
    # browser = webdriver.PhantomJS(executable_path='C:\Python27\Scripts\phantomjs.exe', desired_capabilities=dcap) # load the browser (use on Windows)
    path = ut.cur_file_dir() + "/browser/phantomjs" # browser executable path
    browser = webdriver.PhantomJS(executable_path=path, desired_capabilities=dcap) # load the browser
    browser.set_page_load_timeout(config.phantom_time_out) # set the page-load timeout; give up if it takes longer
while (tries_1st_sp > 0):
try:
            browser.get(url) # open the URL
            WebDriverWait(browser, config.phantom_time_out).until(EC.presence_of_element_located((By.ID, "footer"))) # most reliable way to wait for the page to finish loading
            browser.find_elements_by_name("Display")[0].click() # click to show abstracts on every page
            browser.implicitly_wait(0.5) # wait 0.5 s for the dropdown to open
            browser.find_element_by_xpath("//*[@id=\"abstract\"]").click() # pick the "abstract" value in the dropdown and click it
            time.sleep(20)
            WebDriverWait(browser, config.phantom_time_out).until(EC.presence_of_element_located((By.ID, "footer"))) # the page refreshes itself; wait for the refresh to finish
            browser.find_elements_by_name("Display")[2].click() # click to show 200 items per page
            browser.implicitly_wait(0.5) # wait 0.5 s for the dropdown to open
            browser.find_element_by_xpath("//*[@id=\"ps200\"]").click() # pick the "200" value in the dropdown and click it
            time.sleep(20)
            WebDriverWait(browser, config.phantom_time_out).until(EC.presence_of_element_located((By.ID, "footer"))) # the page refreshes itself; wait for the refresh to finish
save_png(browser)
# pmid_list = extract_new_pmid(browser.page_source)
            # mh.add_new_pmid_all(project, sstr, ut.time_str("full"), "pm", pmid_list) # store the PMIDs
save_png(browser)
rest_page_number -= 1
break
except Exception as e:
tries_1st_sp -= 1
browser.refresh()
browser.implicitly_wait(config.phantom_refresh_wait)
print e
else:
print "error"
    # while(rest_page_number > 1 and tries_1st_sp > 0): # confirm more pages are needed: skip if the summary has only one page or the first page failed; loop from here until every page has been crawled
    #     tries_other_sp = config.phantom_other_sp_tries
    #     while(tries_other_sp > 0): # number of attempts, 3 by default; give up after that
    #         try:
    #             browser.find_element_by_link_text("Next >").click() # simply click "Next >", starting from the second page
    #             WebDriverWait(browser, config.phantom_time_out).until(EC.presence_of_element_located((By.ID, "footer")))
    #             pmid_list = extract_new_pmid(browser.page_source)
    #             mh.add_new_pmid_all(project, sstr, ut.time_str("full"), "pm", pmid_list)
    #             rest_page_number -= 1
    #             break
    #         except Exception as e:
    #             tries_other_sp -= 1
    #             browser.refresh()
    #             browser.implicitly_wait(config.phantom_refresh_wait)
    #             print e
    #     else:
    #         print "error"
    #         break
if sum_page_number > 1:
        browser.quit() # close the browser; if something goes wrong, remember to kill PhantomJS from the task manager
def crawl_run(project, sstr, record_number):
global existed_pmid_set
    existed_pmid_set = mh.read_pmid_all(project) # read only once
    record_number = adjust_record_number(project, sstr, record_number) # check whether that many records actually exist
if record_number <= 20:
        crawl_direct(project, sstr) # crawl directly when the target count is 20 or fewer
else:
        crawl_phantom(project, sstr, record_number) # use phantomjs when the target count exceeds 20
if __name__ == '__main__':
crawl_phantom("cancer", "lung,cancer", 40)
|
import json
from component_detect import *
from best_practice_check import *
class Page:
def __init__(self, page_name, page_type):
self.url = "Unknown"
self.page_name = page_name
self.page_type = page_type
self.states = {}
self.best_practices = []
self.best_practices_followed = "Unknown"
self.score = "Unknown"
class Page_State:
def __init__(self, page_name, file_name, page_state_name):
self.file_name = file_name
self.raw_components = []
self.unsorted_components = []
self.sorted_components = {}
self.page_name = page_name
self.page_state_name = page_state_name
self.page_type = "Unknown"
with open(file_name) as f:
self.file = json.load(f)
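# Registry of analyzed pages, keyed by page name. This assumes the registry belongs in
# this module; if component_detect already provides `pages` via its wildcard import,
# this line is redundant.
pages = {}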
def load_page(page_name, page_type):
    pages[page_name] = Page(page_name, page_type)
def load_page_state(page_state):
    # parameter renamed from Page_State so it no longer shadows the class of the same name
    page = pages[page_state.page_name]
    page.states[page_state.page_state_name] = page_state
def page_analysis(page_state):
gather_components(page_state.file, page_state.raw_components)
page_state.sorted_components = sort_components(page_state.raw_components, page_state.unsorted_components)
best_practice_checker(page_state)
def print_page_analysis(page):
print ("\n")
print ("SIGN-IN ANALYSIS: " + page.page_name.upper())
print_best_practices(page)
print ("STATE ANALYSIS")
print ("Number of states detected: " + str(len(page.states) ) )
print ("--------------\n")
for state in page.states:
print ("State Analysis: " + page.states[state].page_state_name.upper() + '\n"' + page.states[state].file_name )
print ("--------------\n")
print_sorted_components(page.states[state].sorted_components)
print_components(page.states[state].unsorted_components)
def compare_pages(*test_pages):
print ("COMPARE PAGES: ")
print ("--------------\n")
for page in test_pages:
print (page.page_name + " score: " + page.score)
print ("\n")
|
#!/usr/bin/python
limit = 12000
a = 1
b = 3
c = 4000
d = 11999
result = 0
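# Walk the Farey sequence of order `limit` from the fraction just above 1/3
# (4000/11999) toward 1/2 using the standard next-term recurrence, counting the
# fractions strictly in between (this appears to be Project Euler problem 73).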
while not (c == 1 and d == 2):
result += 1
    k = (limit + b) // d  # integer (floor) division, so the result stays correct under Python 3
e = k * c - a
f = k * d - b
a = c
b = d
c = e
d = f
print(result)
|
import os
import sys
import pandas.io.sql as psql
crypto_arbing_dir = os.getcwd().split('/crypto_db')[0]
sys.path.append(crypto_arbing_dir)
class ArbCheck(object):
"""
"""
def __init__(self):
"""
"""
self.port = 3306
self.host = "127.0.0.1"
self.database_name = 'Crypto_Test'
self.user = 'root'
self.password = 'Crypto'
self.database = DatabaseConnect(self.host, self.database_name, self.user, self.password, self.port)
self.database.database_connect()
# self.get_coinone_exchange_id()
# self.crypto_currency_dict()
def get_latest_exchange_rates(self):
"""
"""
sql_str = """SELECT fer.crypto_currency_id AS exchange_currency_id,fer.crypto_currency_id2 AS base_currency_id,fer.exchange_rate,fer.server_time,fer.ut
FROM Crypto_Test.fiat_exchange_rates fer
JOIN (SELECT crypto_currency_id,crypto_currency_id2,source_id,MAX(server_time) AS server_time
FROM Crypto_Test.fiat_exchange_rates
GROUP BY crypto_currency_id,crypto_currency_id2,source_id) AS fer1
ON fer1.crypto_currency_id = fer.crypto_currency_id AND
fer1.crypto_currency_id2 = fer.crypto_currency_id2 AND
fer.source_id = fer1.source_id AND
fer.server_time = fer1.server_time"""
results = psql.read_sql(sql_str, con=self.database.mydb)
latest_exchange_rates = results
self.latest_exchange_rates = latest_exchange_rates.drop_duplicates(['exchange_rate', 'server_time', 'exchange_currency_id'])
def get_latest_order_book(self):
"""
"""
#get min sell price
sql_str = """SELECT MIN(price) AS min_sell_price,asset_pairs_lookup_id,server_time
FROM crypto_test.order_book ob
INNER JOIN crypto_test.asset_pairs_lookup apl ON apl.id = ob.asset_pairs_lookup_id
WHERE order_type_id = 2 AND tradeable = 1
GROUP BY asset_pairs_lookup_id,server_time
"""
ask_results = psql.read_sql(sql_str,con = self.database.mydb)
#get most recent order_time
#get index of rows with max server time, i.e most recent data
idx = ask_results.groupby(['asset_pairs_lookup_id'])['server_time'].transform(max) == ask_results['server_time']
#get df of all max server time
grouped_ask = ask_results[idx]
#get max buy price
sql_str = """SELECT MAX(price) AS max_buy_price,asset_pairs_lookup_id,server_time
FROM crypto_test.order_book ob
INNER JOIN crypto_test.asset_pairs_lookup apl ON apl.id = ob.asset_pairs_lookup_id
WHERE order_type_id = 1 AND tradeable = 1
GROUP BY asset_pairs_lookup_id,server_time"""
bid_results = psql.read_sql(sql_str,con = self.database.mydb)
idx = ask_results.groupby(['asset_pairs_lookup_id'])['server_time'].transform(max) == ask_results['server_time']
grouped_bid = bid_results[idx]
#merge together to get one df with sell and buy price
merged_bid_ask = grouped_ask.merge(grouped_bid,on = ['asset_pairs_lookup_id','server_time'],how = 'inner')
self.recent_data = merged_bid_ask
def select_asset_pairs_lookup(self):
"""code to determine all the possible tradeable asset_pairs, and test whether there is a price difference
i.e usdbtc -> btceth -> ethusd
"""
sql_str = """SELECT apl.id AS asset_pairs_lookup_id, ap.name AS pair,asset_pairs_id,e.name AS exchange,crypto_currency_id,crypto_currency_id2,cc.fiat AS ccid2_fiat
FROM crypto_test.asset_pairs_lookup apl
INNER JOIN crypto_test.asset_pairs ap On ap.id = apl.asset_pairs_id
INNER JOIN crypto_test.crypto_currency cc ON cc.id = ap.crypto_currency_id2
INNER JOIN crypto_test.exchange e ON e.id = apl.exchange_id
WHERE tradeable = 1 """
asset_pairs_info = psql.read_sql(sql_str,con = self.database.mydb)
crypto_links = {}
for ind,row in asset_pairs_info.T.iteritems():
asset_pairs_lookup_id, crypto_currency_id, crypto_currency_id2 = row['asset_pairs_lookup_id'],row['crypto_currency_id'],row['crypto_currency_id2']
if not crypto_currency_id in crypto_links:
crypto_links[crypto_currency_id] = {crypto_currency_id2:asset_pairs_lookup_id}
else:
crypto_links[crypto_currency_id][crypto_currency_id2] = asset_pairs_lookup_id
if not crypto_currency_id2 in crypto_links:
crypto_links[crypto_currency_id2] = {crypto_currency_id:asset_pairs_lookup_id}
else:
crypto_links[crypto_currency_id2][crypto_currency_id] = asset_pairs_lookup_id
self.crypto_links = crypto_links
#merge data buy/sell price, with asset_pairs_info
order_book_merged = self.recent_data.merge(asset_pairs_info[['asset_pairs_lookup_id','pair','exchange','crypto_currency_id','crypto_currency_id2','ccid2_fiat']],on = 'asset_pairs_lookup_id',how ='inner' )
self.order_book_merged = order_book_merged
def arb_check(self):
"""
"""
#1) start with asset_lookup_pairs xrpusd kraken = 14
#base_lapc_id = 14
#base_crypto1_id = 19
#base_crpyto2_id = 25
#2)Find asset pairs with, crypto1 (first crypto) = ripple_id = 19
#lookup_asset_pairs_crypto1 = [46,48,62]
#3) lapc1 = lookup_asset_pairs_crypto1
#3.1) is crypto2 of these asset_pairs FIAT i.e USD,KRW etc. xrpusd -> xrpkrw
#3.2) crypto non fiat ex. xrpusd -> xrpbtc -> btckrw
#3.3) xrpusd -> xrpbtc -> btceth -> ethusd
self.get_latest_exchange_rates()
# merge order book with exchange rates
merged_orderbook_exchange_rates = self.order_book_merged.merge(self.latest_exchange_rates[['exchange_currency_id', 'exchange_rate']],
left_on=['crypto_currency_id2'], right_on=['exchange_currency_id'], how='left')
merged_orderbook_exchange_rates['min_sell_price_usd'] = merged_orderbook_exchange_rates['min_sell_price']/merged_orderbook_exchange_rates['exchange_rate']
merged_orderbook_exchange_rates['max_buy_price_usd'] = merged_orderbook_exchange_rates['max_buy_price']/merged_orderbook_exchange_rates['exchange_rate']
#create a dict to store all the min and mac arbs i.e x = {'MAX':{'bchusd':2.2},'MIN':{'ddddd':3.3}}
# arb_perc_diff_dict={'max_diff':{},'min_diff':{}}
arb_perc_diff_dict = {}
asset_pairs_dict = dict(zip(self.order_book_merged.asset_pairs_lookup_id, self.order_book_merged.pair))
for asset_pairs_lookup_id in asset_pairs_dict:
pair_name = asset_pairs_dict[asset_pairs_lookup_id]
# print '------------------------------------------------------------------------------------------'
base_lapc_id = asset_pairs_lookup_id
moer = merged_orderbook_exchange_rates.copy()
base_crypto1_id = moer['crypto_currency_id'][moer['asset_pairs_lookup_id'] == base_lapc_id].iloc[0]
base_crypto2_id = moer['crypto_currency_id2'][moer['asset_pairs_lookup_id'] == base_lapc_id].iloc[0]
#If the pair is non fiat i.e ETHBTC then we don't want to convert the price to usd
if moer['ccid2_fiat'][moer['asset_pairs_lookup_id'] == base_lapc_id].iloc[0] == 1:
base_lapc_buy_price = moer['max_buy_price_usd'][moer['asset_pairs_lookup_id'] == base_lapc_id].iloc[0]
base_lapc_buy_price_list = [[base_lapc_buy_price,'']]
else:
lapc_buy_price = moer['max_buy_price'][moer['asset_pairs_lookup_id'] == base_lapc_id].iloc[0]
                # i.e. for ETHBTC base_lapc_buy_price = 0.04; then look at the sell price for BTC and use that to convert it for comparison
base_crypto2_fiat_sell_price = moer[['pair','min_sell_price_usd']][(moer['crypto_currency_id']==base_crypto2_id)&(moer['ccid2_fiat']==1)]
                #need to choose both the max and min values, because there could be a big difference. i.e. for xrpeth the eth price could be anywhere between 280-299, which changes the diff_per greatly
max_value_df = base_crypto2_fiat_sell_price.loc[base_crypto2_fiat_sell_price['min_sell_price_usd'].idxmax()]
max_value,max_sell_pair = max_value_df['min_sell_price_usd'],max_value_df['pair']
min_value_df = base_crypto2_fiat_sell_price.loc[base_crypto2_fiat_sell_price['min_sell_price_usd'].idxmin()]
min_value, min_sell_pair = min_value_df['min_sell_price_usd'], min_value_df['pair']
base_lapc_buy_price_max = lapc_buy_price*max_value
base_lapc_buy_price_min = lapc_buy_price*min_value
base_lapc_buy_price_list = [[base_lapc_buy_price_max,max_sell_pair],[base_lapc_buy_price_min,min_sell_pair]]
# print asset_pairs_lookup_id, pair_name, base_lapc_buy_price
for base_lapc_buy_price_info in base_lapc_buy_price_list:
base_lapc_buy_price = base_lapc_buy_price_info[0]
base_fiat_for_alt_pairs = base_lapc_buy_price_info[1]
moer[pair_name] = base_lapc_buy_price
# find other asset_pairs with base_crypto_id1 = crypto_id1, not including our base_lapc_id
lapc_linked = moer[['asset_pairs_lookup_id','ccid2_fiat']][((moer['crypto_currency_id']==base_crypto1_id)|(moer['crypto_currency_id2']==base_crypto1_id))
&(moer['asset_pairs_lookup_id'] != base_lapc_id)]
                # 3.1) compare the lapc_fiat sell price (converted to USD) against the original price. i.e. xrpusd and xrpkrw are fiat pairs
lapc_fiat = lapc_linked['asset_pairs_lookup_id'][lapc_linked['ccid2_fiat']==1]
if not lapc_fiat.empty:
lapc_fiat_list = lapc_fiat.tolist()
fiat_compare = moer[moer['asset_pairs_lookup_id'].isin(lapc_fiat_list)]
fiat_compare.loc[:,'diff'] = fiat_compare.loc[:,'max_buy_price_usd'] - base_lapc_buy_price
fiat_compare.loc[:, 'diff_per'] = fiat_compare.loc[:, 'diff']/base_lapc_buy_price
for ind,row in fiat_compare.T.iteritems():
arb_perc_diff_dict[(base_lapc_id, row['asset_pairs_lookup_id'])] = [((
pair_name, base_crypto1_id,base_crypto2_id), (row['pair'],row['crypto_currency_id'],row['crypto_currency_id2'])),row['diff_per']]
lapc_non_fiat = lapc_linked['asset_pairs_lookup_id'][lapc_linked['ccid2_fiat'] != 1]
if not lapc_non_fiat.empty:
lapc_non_fiat_list = lapc_non_fiat.tolist()
#look at lapc_non_fiat i.e xrpbtc 46
order_book_copy = self.order_book_merged.copy()
for lapc_non_fiat_id in lapc_non_fiat_list:
#asset_pairs_info = api
asset_pair_info = order_book_copy[order_book_copy['asset_pairs_lookup_id']==lapc_non_fiat_id]
new_asset_pair_name = asset_pair_info['pair'].iloc[0]
# print pair_name, new_asset_pair_name
lapc_crypto_id,lapc_crypto2_id = asset_pair_info['crypto_currency_id'].iloc[0],asset_pair_info['crypto_currency_id2'].iloc[0]
lapc_id_2_sell_price = asset_pair_info['min_sell_price'].iloc[0]
                    #now search for lookup_asset_pairs with crypto_currency_id = crypto_currency_id2 (of the asset pairs in non_fiat_compare)
if base_crypto1_id == lapc_crypto_id:
asset_pairs_btc_fiat = moer[(moer['crypto_currency_id'] == lapc_crypto2_id) & (moer['ccid2_fiat'] == 1)]
else:
asset_pairs_btc_fiat = moer[(moer['crypto_currency_id'] == lapc_crypto_id) & (moer['ccid2_fiat'] == 1)]
if asset_pairs_btc_fiat.empty:
#need more work here
continue
asset_pairs_btc_fiat.loc[:,new_asset_pair_name] = lapc_id_2_sell_price
# two examples 1) EOSETH EOSBTC BTCUSD 2)
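                    # if the base crypto is the base currency of the linking pair, its USD value is (linking sell price) * (fiat sell price);
                    # if it is the quote currency, it is (fiat sell price) / (linking sell price)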
if base_crypto1_id == lapc_crypto_id:
asset_pairs_btc_fiat.loc[:,'sold_fiat'] = asset_pairs_btc_fiat.loc[:,'min_sell_price_usd']*asset_pairs_btc_fiat.loc[:,new_asset_pair_name]
else:
asset_pairs_btc_fiat.loc[:,'sold_fiat'] = asset_pairs_btc_fiat.loc[:,'min_sell_price_usd']/asset_pairs_btc_fiat.loc[:,new_asset_pair_name]
asset_pairs_btc_fiat.loc[:,'diff'] = asset_pairs_btc_fiat.loc[:,'sold_fiat'] - base_lapc_buy_price
asset_pairs_btc_fiat.loc[:, 'diff_per'] = asset_pairs_btc_fiat.loc[:, 'diff'] / base_lapc_buy_price
#If length of rows is greater than 1
# if asset_pairs_btc_fiat.shape[0] > 1:
# max_diff_per1 = asset_pairs_btc_fiat.loc[asset_pairs_btc_fiat['diff_per'].idxmax()]
# min_diff_per1 = asset_pairs_btc_fiat.loc[asset_pairs_btc_fiat['diff_per'].idxmin()]
for ind, row in asset_pairs_btc_fiat.T.iteritems():
arb_perc_diff_dict[(base_lapc_id,lapc_non_fiat_id,row['asset_pairs_lookup_id'])] = [((pair_name, base_crypto1_id, base_crypto2_id, base_fiat_for_alt_pairs),(new_asset_pair_name,lapc_crypto_id,lapc_crypto2_id),
(row['pair'], row['crypto_currency_id'], row['crypto_currency_id2'])),row['diff_per']]
# arb_perc_diff_dict['max_diff'][(pair_name,new_asset_pair_name,max_diff_per1['pair'])] = max_diff_per1['diff_per']
# arb_perc_diff_dict['min_diff'][(pair_name,new_asset_pair_name,min_diff_per1['pair'])] = min_diff_per1['diff_per']
print '************************************************************************************'
print 'diff sorted'
full_circle_arb = {}
for key, value in sorted(arb_perc_diff_dict.iteritems(), key=lambda (k, v): (v, k)):
#can we get from end crypto to start crypto i.e (u'BTCJPY', u'ETHBTC', u'ETHCAD') can we go from CAD -> JPY
end_crypto, start_crypto = value[0][0][2],value[0][-1][2]
possible_routes = []
link_list = self.crypto_links[start_crypto]
if end_crypto in link_list:
                #needs more work in here as well, have to compare this to the original comparison above
possible_routes.append([link_list[end_crypto]])
for i in link_list:
asset_pair_id = link_list[i]
link_list2 = self.crypto_links[i]
for j in link_list2:
if j == end_crypto:
#success we've found a route from start_crypto to end_crypto
if (asset_pair_id,link_list2[j]) in arb_perc_diff_dict:
                            return_diff_per = arb_perc_diff_dict[(asset_pair_id, link_list2[j])][-1]
# get total percentage of whole trade, start to finish
total_arb_perc = return_diff_per+value[-1]
possible_routes.append([(asset_pair_id,link_list2[j]),return_diff_per,total_arb_perc])
full_circle_tuple = key + (asset_pair_id,link_list2[j])
full_circle_arb[full_circle_tuple] = total_arb_perc
else:
possible_routes.append([(asset_pair_id, link_list2[j]),'Havent calculated $'])
#print "%s: %s" % (key, value), possible_routes
eth_xrp = [14, 46, 48, 62, 4, 6, 8, 9, 16, 18, 21, 24, 29, 37, 44, 49, 59]
for key, value in sorted(full_circle_arb.iteritems(), key=lambda (k, v): (v, k)):
# if len(set(key)&set(eth_xrp))==len(key):
print """%s: %s""" % (key, value),"\n"
def xrp_arbs(self):
"""Just shows xrp arbs
        1) Simple one-way arbitrage between exchanges
            - What is the liquidity? i.e. it reads a balance of 10,000 euro and analyses the order book to see what price you could get in at
            - Is the price free-falling?
        2) Can we get back somehow? Links into the code above
        3) Writes into a signal group, through the api
"""
def main():
"""
"""
AC = ArbCheck()
AC.get_latest_order_book()
AC.select_asset_pairs_lookup()
AC.arb_check()
if __name__=="__main__":
main()
|
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
from scipy.interpolate import interp1d
import peakdet
from statistics import normalize, xy_mean
def draw_curve(x_args, legends, interpolate=False):
fig = plt.figure()
ax = fig.add_subplot(111)
fig.canvas.set_window_title('График')
for i in range(len(x_args)):
x = x_args[i]
leg = legends[i]
numbers = np.linspace(0, len(x), num=len(x), endpoint=True)
if interpolate:
f = interp1d(numbers, x, kind='cubic')
xnew = np.linspace(0, len(x), num=len(x) * 3, endpoint=True)
ax.plot(xnew, f(xnew), label=leg)
ax.legend(loc='upper right', shadow=True)
else:
ax.plot(numbers, x, marker='o', markersize=3, label=leg)
ax.legend(loc='upper right', shadow=True)
plt.show()
def compare_curve(x, y):
fig = plt.figure()
ax = fig.add_subplot(111)
fig.canvas.set_window_title('График')
ax.plot(x, y)
ax.legend(loc='upper center', shadow=True)
plt.show()
def curve_combined(x_args, legends):
fig, ax = plt.subplots()
for i in range(len(x_args)):
x = x_args[i]
leg = legends[i]
numbers = range(0, len(x))
maxtab, mintab = peakdet.peakdet(x, .1)
plt.scatter(np.array(maxtab)[:, 0], np.array(maxtab)[:, 1], color='red')
plt.scatter(np.array(mintab)[:, 0], np.array(mintab)[:, 1], color='black')
ax.plot(numbers, x, marker='o', markersize=3, label=leg)
ax.legend(loc='upper right', shadow=True)
plt.show()
def draw_xyaw(person, show=[1, 1, 1]):
m = xy_mean(person.figures)
curves = []
legends = []
if show[0]:
m = normalize(m)
curves.append(m)
legends.append(u'M')
if show[1]:
a = person.alphas
a = normalize(a)
curves.append(a)
legends.append(u'Alpha')
if show[2]:
w = person.widths
w = normalize(w)
curves.append(w)
legends.append(u'Width')
if curves and legends:
draw_curve(curves, legends)
def histogram(x_args, legends, bins):
figure = plt.figure()
ax = figure.add_subplot(111)
figure.canvas.set_window_title('Гистограмма')
bins = np.linspace(0, 1, bins)
for i in range(len(x_args)):
x = x_args[i]
leg = legends[i]
ax.hist(x, bins, alpha=0.6, label=leg)
ax.legend(loc='upper right')
figure.show()
def histogram_xyaw(person, bins, show=[1, 1, 1, 1]):
x, y = xy_mean(person.figures)
hists = []
legends = []
if show[0]:
x = normalize(x)
hists.append(x)
legends.append('Xmean')
if show[1]:
y = normalize(y)
hists.append(y)
legends.append('Ymean')
if show[2]:
a = person.alphas
a = normalize(a)
hists.append(a)
legends.append('Alpha')
if show[3]:
w = person.widths
w = normalize(w)
hists.append(w)
legends.append('Widths')
if hists and legends:
histogram(hists, legends, bins)
|
def ReversePrint(head):
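    # walk to the tail of the list first, then print each node on the way back,
    # so the values come out in reverse order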
if head:
ReversePrint(head.next)
print(head.data)
|
# Generated by Django 2.1.1 on 2018-10-01 01:58
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Auth',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('aname', models.CharField(max_length=20)),
],
options={
'db_table': 'auth',
},
),
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=50)),
('content', models.CharField(max_length=100)),
('time', models.CharField(max_length=50)),
('isRead', models.CharField(default='0', max_length=20)),
],
options={
'db_table': 'message',
},
),
migrations.CreateModel(
name='Notification',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('content', models.CharField(max_length=500)),
('time', models.CharField(max_length=50)),
],
options={
'db_table': 'notification',
},
),
migrations.CreateModel(
name='Phonelist',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('number', models.CharField(max_length=50)),
('name', models.CharField(max_length=50)),
('address', models.CharField(max_length=200)),
('star', models.CharField(default='0', max_length=20)),
('createTime', models.CharField(max_length=50)),
],
options={
'db_table': 'phonelist',
},
),
migrations.CreateModel(
name='State',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.CharField(max_length=50)),
('callTime', models.CharField(max_length=50)),
('callLength', models.CharField(max_length=50)),
('digits', models.CharField(max_length=100)),
('phone', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='myapp.Phonelist')),
],
options={
'db_table': 'state',
},
),
migrations.CreateModel(
name='Template_store',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('vid', models.CharField(max_length=100)),
('pos', models.CharField(max_length=100)),
('digit', models.CharField(max_length=20)),
('cid', models.CharField(max_length=100)),
('flag', models.CharField(max_length=20)),
],
options={
'db_table': 'template_store',
},
),
migrations.CreateModel(
name='Templates',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('tname', models.CharField(max_length=50)),
],
options={
'db_table': 'templates',
},
),
migrations.CreateModel(
name='User',
fields=[
('uid', models.CharField(max_length=20, primary_key=True, serialize=False)),
('pwd', models.CharField(max_length=20)),
('name', models.CharField(max_length=20)),
('phone', models.CharField(max_length=20)),
('regTime', models.CharField(max_length=20)),
('auth', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='myapp.Auth')),
],
options={
'db_table': 'user',
},
),
migrations.AddField(
model_name='templates',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='myapp.User'),
),
migrations.AddField(
model_name='template_store',
name='template',
field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='myapp.Templates'),
),
migrations.AddField(
model_name='phonelist',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='myapp.User'),
),
migrations.AddField(
model_name='message',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='myapp.User'),
),
]
|
from sklearn import datasets
from sklearn.feature_extraction.text import TfidfVectorizer
import numpy as np
from sklearn.model_selection import KFold
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV
import pandas as pn
newsgroups = datasets.fetch_20newsgroups(
subset='all',
categories=['alt.atheism', 'sci.space']
)
X = newsgroups.data
y = newsgroups.target
vectorizer = TfidfVectorizer()
X = vectorizer.fit_transform(X)
grid = {'C': np.power(10.0, np.arange(-5, 6))}
cv = KFold(n_splits=5, shuffle=True, random_state=241)
clf = SVC(kernel='linear', random_state=241)
gs = GridSearchCV(clf, grid, scoring='accuracy', cv=cv)
gs.fit(X, y)
score = 0
C = 0
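# note: grid_scores_ only exists in older scikit-learn releases; on current versions
# the equivalent information lives in gs.cv_results_ and gs.best_params_ / gs.best_score_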
for a in gs.grid_scores_:
if a.mean_validation_score > score:
score = a.mean_validation_score
C = a.parameters['C']
clf = SVC(kernel='linear', random_state=241, C=C)
clf.fit(X, y)
words = vectorizer.get_feature_names()
coef = pn.DataFrame(clf.coef_.data, clf.coef_.indices)
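# clf.coef_ is a sparse matrix here (TF-IDF features), so the DataFrame is built from its
# non-zero values indexed by the corresponding feature indices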
# print('words:', words)
# print('coef', coef)
words = coef[0].map(lambda w: abs(w)).sort_values(ascending=False).head(10).index.map(lambda i: words[i])
print(words.sort_values())
# print('cross-validation quality score: ', a.mean_validation_score)
# print('parameter values: ', a.parameters)
# print(gs)
# print(grid)
# print(newsgroups.data)
|
import os
from scipy.io import loadmat
from prepare_caltech_dataset import convert_sequence, convert_annotations
import cv2
import glob
import json
def process_seqs():
"""Convert the sequence files and save to similar dir structure"""
for dir_name in glob.glob('data/set*'):
parent_dir = os.path.split(dir_name)[-1]
if not os.path.exists('target/{}'.format(parent_dir)):
os.mkdir('target/{}'.format(parent_dir))
for seq_path in glob.glob('{}/*.seq'.format(dir_name)):
vid = cv2.VideoCapture(seq_path)
current_dir = os.path.splitext(os.path.basename(seq_path))[0]
if not os.path.exists('target/{}/{}'.format(parent_dir, current_dir)):
os.mkdir('target/{}/{}'.format(parent_dir, current_dir))
#save it here!
print('Converting {}/{}.seq'.format(parent_dir, current_dir))
convert_sequence(vid, 'target/{}/{}'.format(parent_dir, current_dir))
def process_annotations():
"""Convert annotations to json file format"""
for dir_name in glob.glob('data/annotations/set*'):
parent_dir = os.path.split(dir_name)[-1]
if not os.path.exists('target/{}'.format(parent_dir)):
os.mkdir('target/{}'.format(parent_dir))
for vbb_file in glob.glob('{}/*.vbb'.format(dir_name)):
current_dir = os.path.splitext(os.path.basename(vbb_file))[0]
vbb = loadmat(vbb_file)
print('Converted annotations from {}'.format(vbb_file))
annotation = convert_annotations(vbb)
if not os.path.exists('target/{}/{}'.format(parent_dir, current_dir)):
os.mkdir('target/{}/{}'.format(parent_dir, current_dir))
with open('target/{}/{}/annotations.json'.format(parent_dir, current_dir), 'w') as f:
json.dump(annotation, f)
def main():
if not os.path.exists('target'):
os.mkdir('target')
process_seqs()
process_annotations()
if __name__ == '__main__':
main()
|
import copy
import math
import random
import time
import numpy as np
import Config
from control_algorithms.base import dubins_path_planner as plan
from control_algorithms.base.Node import Node
class PRM_star:
# PRM* algorithm using average variance per unit path length as cost function and Dubins path planner for local planning
def __init__(self, start, PRM_params, gmrf_params, var_x, max_dist, plot):
"""
:param start: initial location of agent
:param PRM_params: specified in config file
:param gmrf_params: specified in config file
:param var_x: variance of field as a 1D vector of variance of each node in GMRF
:param max_dist: maximum distance that the algorithm solution will return
:param plot: only used for plotting in the middle of running algorithm good for debugging
"""
self.start = Node(start)
self.node_list = [self.start]
(self.space, self.max_time, self.max_curvature, self.min_dist, self.obstacles) = PRM_params
self.gmrf_params = gmrf_params
self.max_dist = max(max_dist, 10) # can't just take the max_dist in case at the end of the simulation this will allow no possible paths
self.var_x = var_x
# used to track runtimes
self.local_planner_time = 0.0
self.method_time = 0.0
self.plot = plot
def control_algorithm(self):
start_time = time.time()
while True:
current_time = time.time() - start_time
if current_time > self.max_time:
break
# start PRM*
sample_node = self.get_sample()
if self.check_collision(sample_node):
near_nodes = self.get_near_nodes(sample_node)
new_node = self.set_parent(sample_node, near_nodes)
if new_node is None: # no possible path from any of the near nodes
continue
self.node_list.append(new_node)
self.rewire(new_node, near_nodes)
# end PRM*
# generate path
last_node = self.get_best_last_node()
if last_node is None:
return None
path, u_optimal, tau_optimal = self.get_path(last_node)
return path, u_optimal, tau_optimal
def get_sample(self):
sample = Node([random.uniform(self.space[0], self.space[1]),
random.uniform(self.space[2], self.space[3]),
random.uniform(-math.pi, math.pi)])
return sample
def local_path(self, source_node, destination_node):
# take source_node and find path to destination_node
time1 = time.time()
px, py, pangle, mode, plength, u = plan.dubins_path_planning(source_node.pose[0], source_node.pose[1], source_node.pose[2], destination_node.pose[0], destination_node.pose[1], destination_node.pose[2], self.max_curvature)
self.local_planner_time += time.time() - time1
new_node = copy.deepcopy(source_node)
new_node.pose = destination_node.pose
new_node.path_x = px
new_node.path_y = py
new_node.path_angle = pangle
new_node.u = u
new_node.path_dist = plength
new_node.dist += plength
new_node.path_var = self.path_var(px, py, pangle)
new_node.total_var += new_node.path_var
new_node.cost = new_node.total_var / new_node.dist
new_node.parent = source_node
return new_node
def set_parent(self, sample_node, near_nodes):
# connects new_node along a minimum cost path
if not near_nodes:
near_nodes.append(self.nearest_node(sample_node))
cost_list = []
for near_node in near_nodes:
temp_node = self.local_path(near_node, sample_node)
if self.check_collision(temp_node) and self.max_dist >= temp_node.dist:
cost_list.append(temp_node.cost)
else:
cost_list.append(float("inf"))
min_cost = min(cost_list)
min_node = near_nodes[cost_list.index(min_cost)]
new_node = self.local_path(min_node, sample_node)
if new_node.cost == float("inf"):
print("min cost is inf")
return None
return new_node
def get_best_last_node(self):
cost_list = []
for node in self.node_list:
if node.dist >= self.min_dist:
cost_list.append(node.cost)
else:
cost_list.append(float("inf"))
best_node = self.node_list[cost_list.index(min(cost_list))]
return best_node
def get_path(self, last_node):
path = [last_node]
u_optimal = []
tau_optimal = np.vstack((last_node.path_x, last_node.path_y, last_node.path_angle))
while True:
path.append(last_node.parent)
u_optimal = u_optimal + last_node.u
last_node = last_node.parent
if last_node is None:
break
tau_add = np.vstack((last_node.path_x, last_node.path_y, last_node.path_angle))
tau_optimal = np.concatenate((tau_add, tau_optimal), axis=1)
return path, u_optimal, tau_optimal
def get_near_nodes(self, new_node):
# gamma_star = 2(1+1/d) ** (1/d) volume(free)/volume(total) ** 1/d and we need gamma > gamma_star
        # for asymptotic optimality see Karaman 2011. gamma = 1 satisfies this
d = 2 # dimension of the self.space
nnode = len(self.node_list)
r = min(20.0 * ((math.log(nnode) / nnode)) ** (1 / d), 5.0)
dlist = [dist(new_node, node) for node in self.node_list]
near_nodes = [self.node_list[dlist.index(d)] for d in dlist if d <= r]
return near_nodes
def rewire(self, new_node, near_nodes):
for near_node in near_nodes:
temp_node = self.local_path(new_node, near_node)
if near_node.dist != 0:
if near_node.cost > temp_node.cost and self.check_collision(temp_node) \
and self.max_dist >= temp_node.dist and self.check_loop(near_node, new_node):
near_node.__dict__.update(vars(temp_node))
self.propagate_update_to_children(near_node)
def propagate_update_to_children(self, parent_node):
for node in self.node_list:
if node.parent is not None:
if node.parent == parent_node:
node.total_var = parent_node.total_var + node.path_var
node.dist = parent_node.dist + node.path_dist
self.propagate_update_to_children(node)
def check_loop(self, near_node, new_node):
# checks to make sure that changing parents to temp_node does not create a loop
temp = new_node.parent
while temp is not None:
if temp == near_node:
return False # creates a loop
temp = temp.parent
return True # does not create a loop
def check_collision(self, node):
if self.obstacles is not None:
for (x, y, side) in self.obstacles:
for (nx, ny) in zip(node.path_x, node.path_y):
if ((nx > x - .8 * side / 2) & (nx < x + .8 * side / 2) & (ny > y - side / 2) & (
ny < y + side / 2)):
return False # collision
return True # safe
def nearest_node(self, sample):
dlist = [dist(node, sample) for node in self.node_list]
min_node = self.node_list[dlist.index(min(dlist))]
return min_node
def path_var(self, px, py, pangle): # returns negative total variance along the path
control_cost = 0 # NOT USED!!!!!!
path_var = 0
(lxf, lyf, dvx, dvy, lx, ly, n, p, de, l_TH, p_THETA, xg_min, xg_max, yg_min, yg_max) = self.gmrf_params
p1 = time.time()
A = np.zeros(shape=(n + p, 1)).astype(float)
# iterate over path and calculate cost
for kk in range(len(px)): # Iterate over length of trajectory
if not (self.space[0] <= px[kk] <= self.space[1]) or not (self.space[2] <= py[kk] <= self.space[3]):
path_var += Config.border_variance_penalty # value of 5
control_cost += 0
else:
A += interpolation_matrix(np.array([px[kk], py[kk], pangle[kk]]), n, p, lx, xg_min, yg_min, de)
control_cost += 0
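        # A has accumulated the interpolation weights of every point on the path, so one dot product
        # with var_x gives the total (negated) variance collected along the path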
path_var -= np.dot(A.T, self.var_x)[0][0]
self.method_time += (time.time() - p1)
return path_var # negative path var
def draw_graph(self, plot=None):
if plot is not None: # use plot of calling
for node in self.node_list:
plot.quiver(node.pose[0], node.pose[1], math.cos(node.pose[2]), math.sin(node.pose[2]), color='b', angles='xy', scale_units='xy', scale=.8, width=.015)
if node.parent is not None:
plot.plot(node.path_x, node.path_y, color='green')
if self.obstacles is not None:
for (x, y, side) in self.obstacles:
plot.plot(x, y, "sk", ms=8 * side)
plot.quiver(self.start.pose[0], self.start.pose[1], math.cos(self.start.pose[2]), math.sin(self.start.pose[2]), color="b")
plot.axis(self.space)
plot.grid(True)
plot.title("PRM* (avg variance per unit path length as cost function)")
plot.pause(.1) # need for animation
def dist(node1, node2):
# returns distance between two nodes
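    # the heading difference is wrapped to the shortest rotation (+/- 2*pi candidates)
    # and weighted by a factor of 3 so orientation mismatches cost more than position offsets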
return math.sqrt((node2.pose[0] - node1.pose[0]) ** 2 +
(node2.pose[1] - node1.pose[1]) ** 2 +
3 * min((node1.pose[2] - node2.pose[2]) ** 2, (node1.pose[2] - node2.pose[2] + 2*math.pi) ** 2, (node1.pose[2] - node2.pose[2] - 2*math.pi) ** 2))
def interpolation_matrix(x_local2, n, p, lx, xg_min, yg_min, de):
# Calculate new observation vector through shape function interpolation
"""INTERPOLATION MATRIX:
Define shape function matrix that maps grid vertices to
continuous measurement locations"""
u1 = np.zeros(shape=(n + p, 1)).astype(float)
nx = int((x_local2[0] - xg_min) / de[0]) # Calculates the vertice column x-number at which the shape element starts.
ny = int((x_local2[1] - yg_min) / de[1]) # Calculates the vertice row y-number at which the shape element starts.
# Calculate position value in element coord-sys in meters
x_el = float(0.1 * (x_local2[0] / 0.1 - int(x_local2[0] / 0.1))) - de[0] / 2
y_el = float(0.1 * (x_local2[1] / 0.1 - int(x_local2[1] / 0.1))) - de[1] / 2
# Define shape functions, "a" is element width in x-direction
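    # these are the standard bilinear shape functions of the surrounding grid cell;
    # the four weights below sum to 1 for any point inside the element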
u1[(ny * lx) + nx] = (1 / (de[0] * de[1])) * ((x_el - de[0] / 2) * (y_el - de[1] / 2)) # u for lower left corner
u1[(ny * lx) + nx + 1] = (-1 / (de[0] * de[1])) * ((x_el + de[0] / 2) * (y_el - de[1] / 2)) # u for lower right corner
u1[((ny + 1) * lx) + nx] = (-1 / (de[0] * de[1])) * ((x_el - de[0] / 2) * (y_el + de[1] / 2)) # u for upper left corner
u1[((ny + 1) * lx) + nx + 1] = (1 / (de[0] * de[1])) * ((x_el + de[0] / 2) * (y_el + de[1] / 2)) # u for upper right corner
return u1
|
#coding:utf-8
from flask.ext.wtf import Form
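# note: the flask.ext.* import path is deprecated; on current Flask-WTF this would be
# "from flask_wtf import FlaskForm"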
from wtforms import StringField, SubmitField, BooleanField, FileField, TextAreaField
from wtforms.validators import DataRequired, Email,EqualTo
class FileUploadForm(Form):
file = FileField(u'照片路径:',validators=[DataRequired(message=u'照片路径不能为空')])
submit = SubmitField(u'上传照片')
class AboutMeForm(Form):
about_me = TextAreaField(u'个人简介:')
submit = SubmitField(u'保存')
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
class BasicBlock(nn.Module):
def __init__(self,in_channels,out_channels,stride):
super().__init__()
self.residual_function = nn.Sequential(
#nn.MaxPool2d(kernel_size=3,stride=2),
nn.Conv2d(in_channels,out_channels,kernel_size=3,padding = 1,stride = stride,bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(True),
nn.Conv2d(out_channels,out_channels,kernel_size=3,padding = 1,stride = 1,bias=False),
nn.BatchNorm2d(out_channels)
)
self.shortcut = nn.Sequential()
        #make sure the shortcut and the processed output have the same dimensions
if stride != 1 or in_channels != out_channels:
self.shortcut = nn.Sequential(
#nn.MaxPool2d(kernel_size=3,stride=2),
nn.Conv2d(in_channels,out_channels,kernel_size=3,padding = 1,stride = stride,bias=False),
nn.BatchNorm2d(out_channels)
)
def forward(self, x):
return nn.ReLU(True)(self.residual_function(x) + self.shortcut(x))
def getweight0(self):
# for name,parameters in self.named_parameters():
# print(name,':',parameters.size())
return self.state_dict()['residual_function.0.weight'].data.clone()
def getweight3(self):
return self.state_dict()['residual_function.3.weight'].data.clone()
class Resnet(nn.Module):
def __init__(self,block,num_classes=10):
super().__init__()
        #this calls the parent nn.Module initializer
#nn.Conv2d(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True))
#BatchNorm2d(channel-number)
        #ReLU(inplace=True) reduces memory usage but modifies the input tensor in place, see https://blog.csdn.net/tmk_01/article/details/80679991
self.conv1 = nn.Sequential(
nn.Conv2d(3,64,kernel_size=7,stride=2,padding = (3,3),bias=False),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True)
)
self.conv2_x = self._make_layer(block,64,64)
self.conv3_x = self._make_layer(block,64,128)
self.conv4_x = self._make_layer(block,128,256)
self.conv5_x = self._make_layer(block,256,512)
        self.avg_pool = nn.AdaptiveAvgPool2d((1,1)) #pools each feature map down to a single average value
self.fc = nn.Linear(512,num_classes)
def _make_layer(self,block,in_channels,out_channels):
layers = []
        layers.append(block(in_channels,out_channels,stride = 2))#the first block expands the channel dimension and halves the resolution
        layers.append(block(out_channels,out_channels, stride = 1))#ResNet-18 has two blocks per stage, so this is written out by hand instead of taking a list of block counts
return nn.Sequential(*layers)
def get_featureconv1(self,image):
self.conv1.cpu()
output = self.conv1(image)
self.conv1.cuda()
return output
def get_reftconv1(self,image,num):
m = self.conv1.cpu()
m.eval()
output = m(image)
        #take the 64 feature maps (e.g. 16x16 each), keep only channel num and zero out all the other channels
c_output = output.clone()
c_output[:,0:num,:,:]=0
c_output[:,num+1:,:,:] = 0
q = nn.ReLU()
c_output = q(c_output)
weight = self.conv1[0].weight.data.clone()
c_output = deconv(weight,c_output,2,64,3,7,3)
        #this is the new (reconstructed) output
self.conv1.cuda()
return c_output
def get_reftconv5(self,image,num):
m = self.conv1.cpu()
m.eval()
output = m(image)
m = self.conv2_x.cpu()
m.eval()
output = m(output)
m = self.conv3_x.cpu()
m.eval()
output = m(output)
m = self.conv4_x.cpu()
m.eval()
output = m(output)
m = self.conv5_x.cpu()
m.eval()
output = m(output)
c_output = output.clone()
c_output[:,0:num,:,:]=0
c_output[:,num+1:,:,:] = 0
        #stage 5 -> 4
q = nn.ReLU()
c_output = q(c_output)
weight = self.conv5_x[1].getweight3()
c_output = deconv(weight,c_output,1,512,512,3,1)
q = nn.ReLU()
c_output = q(c_output)
weight = self.conv5_x[1].getweight0()
c_output = deconv(weight,c_output,1,512,512,3,1)
print(c_output.size())
q = nn.ReLU()
c_output = q(c_output)
weight = self.conv5_x[0].getweight3()
c_output = deconv(weight,c_output,1,512,512,3,1)
q = nn.ReLU()
c_output = q(c_output)
weight = self.conv5_x[0].getweight0()
c_output = deconv(weight,c_output,2,512,256,3,1)
print(c_output.size())
#4-3
q = nn.ReLU()
c_output = q(c_output)
weight = self.conv4_x[1].getweight3()
c_output = deconv(weight,c_output,1,256,256,3,1)
q = nn.ReLU()
c_output = q(c_output)
weight = self.conv4_x[1].getweight0()
c_output = deconv(weight,c_output,1,256,256,3,1)
q = nn.ReLU()
c_output = q(c_output)
weight = self.conv4_x[0].getweight3()
c_output = deconv(weight,c_output,1,256,256,3,1)
q = nn.ReLU()
c_output = q(c_output)
weight = self.conv4_x[0].getweight0()
c_output = deconv(weight,c_output,2,256,128,3,1)
#3-2
q = nn.ReLU()
c_output = q(c_output)
weight = self.conv3_x[1].getweight3()
c_output = deconv(weight,c_output,1,128,128,3,1)
q = nn.ReLU()
c_output = q(c_output)
weight = self.conv3_x[1].getweight0()
        c_output = deconv(weight,c_output,1,128,128,3,1)
q = nn.ReLU()
c_output = q(c_output)
weight = self.conv3_x[0].getweight3()
c_output = deconv(weight,c_output,1,128,128,3,1)
q = nn.ReLU()
c_output = q(c_output)
weight = self.conv3_x[0].getweight0()
c_output = deconv(weight,c_output,2,128,64,3,1)
#2-1
q = nn.ReLU()
c_output = q(c_output)
weight = self.conv2_x[1].getweight3()
c_output = deconv(weight,c_output,1,64,64,3,1)
q = nn.ReLU()
c_output = q(c_output)
weight = self.conv2_x[1].getweight0()
c_output = deconv(weight,c_output,1,64,64,3,1)
q = nn.ReLU()
c_output = q(c_output)
weight = self.conv2_x[0].getweight3()
c_output = deconv(weight,c_output,1,64,64,3,1)
q = nn.ReLU()
c_output = q(c_output)
weight = self.conv2_x[0].getweight0()
c_output = deconv(weight,c_output,2,64,64,3,1)
#1-output
q = nn.ReLU()
c_output = q(c_output)
weight = self.conv1[0].weight.data.clone()
c_output = deconv(weight,c_output,2,64,3,7,3)
        #modules moved to the cpu need to be moved back to the gpu
self.conv1.cuda()
self.conv2_x.cuda()
self.conv3_x.cuda()
self.conv4_x.cuda()
self.conv5_x.cuda()
return c_output
def forward(self,x):
output = self.conv1(x)
output = self.conv2_x(output)
output = self.conv3_x(output)
output = self.conv4_x(output)
output = self.conv5_x(output)
output = self.avg_pool(output)
        output = output.view(output.size(0),-1) #flatten the output
output = self.fc(output)
return output
def deconv(weight,image,stride,in_channel,out_channel,kernelsize,pad):
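    # the forward Conv2d weight (shape: out_channels x in_channels x k x k) is reused directly as the
    # ConvTranspose2d weight, whose layout is (in_channels, out_channels, k, k) - in the spirit of the
    # deconvnet visualisation approach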
m = nn.ConvTranspose2d(in_channel,out_channel, kernelsize,stride=stride,padding=pad,output_padding=stride-1,bias=False)
m.weight.data = weight
m.eval()
output = m(image)
return output
def resnet():
return Resnet(BasicBlock)
|
# Copyright 2010 Alon Zakai ('kripken'). All rights reserved.
# This file is part of Syntensity/the Intensity Engine, an open source project. See COPYING.txt for licensing.
import os, json
INTENSITY_HOME_DIR_LABEL = 'INTENSITY_HOME_DIR'
config = None
home_dir = os.path.dirname(__file__) # Default value, is the one used in testing
def get(category, key, default=''):
try:
return config[category][key]
except:
return default
def set_home_dir(_home_dir=None):
# Use environment setting, if there is one
if os.environ.get(INTENSITY_HOME_DIR_LABEL, '') != '':
_home_dir = os.environ[INTENSITY_HOME_DIR_LABEL]
# Use default location, if testing
elif _home_dir is None:
_home_dir = os.path.join(
os.path.split(
os.path.dirname(os.path.abspath(__file__))
)[:-1]
)[0] # The parent directory of this one. TODO: Always be in sync with DJANGO_SETTINGS_MODULE directory
global config, home_dir
home_dir = _home_dir
if config is None:
config_file = open(os.path.join(home_dir, 'settings.json'))
config = json.loads(config_file.read())
config_file.close()
import intensity.logging_system as intensity_logging
intensity_logging.init(home_dir, get('Logging', 'level', 'INFO'))
if INTENSITY_HOME_DIR_LABEL in os.environ:
set_home_dir() # Load from environment
def get_home_dir():
global home_dir
return home_dir
def set(category, key, value):
if not category in config:
config[category] = {}
try:
config[category][key].append(value)
except:
config[category][key] = value
return config[category][key]
|
#!/usr/bin/env python
import versioneer
from setuptools import setup
from os.path import exists
setup(name='aiopeewee',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
packages=['aiopeewee'],
description='Async Peewee',
url='http://github.com/kszucs/aiopeewee',
maintainer='Krisztian Szucs',
maintainer_email='szucs.krisztian@gmail.com',
license='MIT',
keywords='async asyncio peewee orm',
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
],
install_requires=['peewee<3.0', 'aiomysql'],
tests_require=['pytest-asyncio==0.10.0', 'pytest'],
setup_requires=['pytest-runner'],
long_description=(open('README.rst').read() if exists('README.rst')
else ''),
zip_safe=False)
|
from unittest import TestCase
from src.entry_point import Entry
class TestEntry(TestCase):
def setUp(self):
# This is run before EVERY test
self.test = Entry()
def tearDown(self):
pass
def test_adder(self):
self.assertEqual(self.test.adder(1, 2), 3)
def test_subtractor(self):
self.assertEqual(self.test.subtractor(1, 2), -1)
self.assertNotEqual(self.test.subtractor(1, 3), -3)
|
cnt =0
for i in range(12):
if 'r' in input():
cnt+=1
print(cnt)
|
from itertools import zip_longest
import hashlib
from dataclasses import dataclass
def get_hash(s: str):
"""Hash function."""
h = hashlib.sha1()
h.update(s.encode('utf-8'))
return h.hexdigest()
@dataclass(frozen=True, eq=True)
class ExampleId:
id: int
unlabeled: bool = False
def __repr__(self):
return f"ExampleId(id={self.id}, unlabeled={self.unlabeled})"
def grouper(n, iterable, fillvalue=None):
"""Group iterable into chunks of size n.
"grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
from itertools cookbook.
"""
args = [iter(iterable)] * n
return zip_longest(fillvalue=fillvalue, *args)
def flatten(lsts):
return [x for sublist in lsts for x in sublist]
|
from django.db.models import Count, Avg, Min, Max
from collections import defaultdict
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from django.views.generic import TemplateView, View, DetailView, ListView
from common.models import ReleaseNotes
from common.phylogenetic_tree import PhylogeneticTreeGenerator
from common.selection import Selection, SelectionItem
from ligand.models import *
from protein.models import Protein, Species, ProteinFamily
from django.views.decorators.csrf import csrf_exempt
from copy import deepcopy
import itertools
import json
class LigandBrowser(TemplateView):
"""
Per target summary of ligands.
"""
template_name = 'ligand_browser.html'
def get_context_data (self, **kwargs):
context = super(LigandBrowser, self).get_context_data(**kwargs)
ligands = AssayExperiment.objects.values(
'protein__entry_name',
'protein__species__common_name',
'protein__family__name',
'protein__family__parent__name',
'protein__family__parent__parent__name',
'protein__family__parent__parent__parent__name',
'protein__species__common_name'
).annotate(num_ligands=Count('ligand', distinct=True))
context['ligands'] = ligands
return context
def LigandDetails(request, ligand_id):
"""
The details of a ligand record. Lists all the assay experiments for a given ligand.
"""
ligand_records = AssayExperiment.objects.filter(
ligand__properities__web_links__index=ligand_id
).order_by('protein__entry_name')
record_count = ligand_records.values(
'protein',
).annotate(num_records = Count('protein__entry_name')
).order_by('protein__entry_name')
ligand_data = []
for record in record_count:
per_target_data = ligand_records.filter(protein=record['protein'])
protein_details = Protein.objects.get(pk=record['protein'])
"""
A dictionary of dictionaries with a list of values.
Assay_type
|
-> Standard_type [list of values]
"""
tmp = defaultdict(lambda: defaultdict(list))
tmp_count = 0
for data_line in per_target_data:
tmp[data_line.assay_type][data_line.standard_type].append(data_line.standard_value)
tmp_count += 1
#Flattened list of lists of dict values
values = list(itertools.chain(*[itertools.chain(*tmp[x].values()) for x in tmp.keys()]))
# TEMPORARY workaround for handling string values
values = [float(item) for item in values if float(item) ]
if len(values) > 0:
ligand_data.append({
'protein_name': protein_details.entry_name,
'receptor_family': protein_details.family.parent.name,
'ligand_type': protein_details.get_protein_family(),
'class': protein_details.get_protein_class(),
'record_count': tmp_count,
'assay_type': ', '.join(tmp.keys()),
#Flattened list of lists of dict keys:
'value_types': ', '.join(itertools.chain(*(list(tmp[x]) for x in tmp.keys()))),
'low_value': min(values),
'average_value': sum(values)/len(values),
'standard_units': ', '.join(list(set([x.standard_units for x in per_target_data])))
})
context = {'ligand_data': ligand_data, 'ligand':ligand_id}
return render(request, 'ligand_details.html', context)
def TargetDetailsCompact(request, **kwargs):
if 'slug' in kwargs:
slug = kwargs['slug']
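        # the underscore count of the slug picks the level of the protein family tree to filter on;
        # a single-underscore slug that is not 7 characters long is treated as a protein entry name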
if slug.count('_') == 0 :
ps = AssayExperiment.objects.filter(protein__family__parent__parent__parent__slug=slug, ligand__properities__web_links__web_resource__slug = 'chembl_ligand')
elif slug.count('_') == 1 and len(slug) == 7:
ps = AssayExperiment.objects.filter(protein__family__parent__parent__slug=slug, ligand__properities__web_links__web_resource__slug = 'chembl_ligand')
elif slug.count('_') == 2:
ps = AssayExperiment.objects.filter(protein__family__parent__slug=slug, ligand__properities__web_links__web_resource__slug = 'chembl_ligand')
elif slug.count('_') == 3:
ps = AssayExperiment.objects.filter(protein__family__slug=slug, ligand__properities__web_links__web_resource__slug = 'chembl_ligand')
elif slug.count('_') == 1 and len(slug) != 7:
ps = AssayExperiment.objects.filter(protein__entry_name = slug, ligand__properities__web_links__web_resource__slug = 'chembl_ligand')
if slug.count('_') == 1 and len(slug) == 7:
f = ProteinFamily.objects.get(slug=slug)
else:
f = slug
context = {
'target':f
}
else:
simple_selection = request.session.get('selection', False)
selection = Selection()
if simple_selection:
selection.importer(simple_selection)
if selection.targets != []:
prot_ids = [x.item.id for x in selection.targets]
ps = AssayExperiment.objects.filter(protein__in=prot_ids, ligand__properities__web_links__web_resource__slug = 'chembl_ligand')
context = {
'target': ', '.join([x.item.entry_name for x in selection.targets])
}
ps = ps.prefetch_related('protein','ligand__properities__web_links__web_resource','ligand__properities__vendors__vendor')
d = {}
for p in ps:
if p.ligand not in d:
d[p.ligand] = {}
if p.protein not in d[p.ligand]:
d[p.ligand][p.protein] = []
d[p.ligand][p.protein].append(p)
ligand_data = []
for lig, records in d.items():
links = lig.properities.web_links.all()
chembl_id = [x for x in links if x.web_resource.slug=='chembl_ligand'][0].index
vendors = lig.properities.vendors.all()
purchasability = 'No'
for v in vendors:
if v.vendor.name not in ['ZINC', 'ChEMBL', 'BindingDB', 'SureChEMBL', 'eMolecules', 'MolPort', 'PubChem']:
purchasability = 'Yes'
for record, vals in records.items():
per_target_data = vals
protein_details = record
"""
A dictionary of dictionaries with a list of values.
Assay_type
|
-> Standard_type [list of values]
"""
tmp = defaultdict(list)
tmp_count = 0
for data_line in per_target_data:
tmp["Bind" if data_line.assay_type == 'b' else "Funct"].append(data_line.pchembl_value)
tmp_count += 1
# TEMPORARY workaround for handling string values
values = [float(item) for item in itertools.chain(*tmp.values()) if float(item) ]
if len(values)>0:
ligand_data.append({
'ligand_id': chembl_id,
'protein_name': protein_details.entry_name,
'species': protein_details.species.common_name,
'record_count': tmp_count,
'assay_type': ', '.join(tmp.keys()),
'purchasability': purchasability,
#Flattened list of lists of dict keys:
'low_value': min(values),
'average_value': sum(values)/len(values),
'high_value': max(values),
'standard_units': ', '.join(list(set([x.standard_units for x in per_target_data]))),
'smiles': lig.properities.smiles,
'mw': lig.properities.mw,
'rotatable_bonds': lig.properities.rotatable_bonds,
'hdon': lig.properities.hdon,
'hacc': lig.properities.hacc,
'logp': lig.properities.logp,
})
context['ligand_data'] = ligand_data
return render(request, 'target_details_compact.html', context)
def TargetDetails(request, **kwargs):
if 'slug' in kwargs:
slug = kwargs['slug']
if slug.count('_') == 0 :
ps = AssayExperiment.objects.filter(protein__family__parent__parent__parent__slug=slug, ligand__properities__web_links__web_resource__slug = 'chembl_ligand')
elif slug.count('_') == 1 and len(slug) == 7:
ps = AssayExperiment.objects.filter(protein__family__parent__parent__slug=slug, ligand__properities__web_links__web_resource__slug = 'chembl_ligand')
elif slug.count('_') == 2:
ps = AssayExperiment.objects.filter(protein__family__parent__slug=slug, ligand__properities__web_links__web_resource__slug = 'chembl_ligand')
elif slug.count('_') == 3:
ps = AssayExperiment.objects.filter(protein__family__slug=slug, ligand__properities__web_links__web_resource__slug = 'chembl_ligand')
elif slug.count('_') == 1 and len(slug) != 7:
ps = AssayExperiment.objects.filter(protein__entry_name = slug, ligand__properities__web_links__web_resource__slug = 'chembl_ligand')
if slug.count('_') == 1 and len(slug) == 7:
f = ProteinFamily.objects.get(slug=slug)
else:
f = slug
context = {
'target':f
}
else:
simple_selection = request.session.get('selection', False)
selection = Selection()
if simple_selection:
selection.importer(simple_selection)
if selection.targets != []:
prot_ids = [x.item.id for x in selection.targets]
ps = AssayExperiment.objects.filter(protein__in=prot_ids, ligand__properities__web_links__web_resource__slug = 'chembl_ligand')
context = {
'target': ', '.join([x.item.entry_name for x in selection.targets])
}
ps = ps.values('standard_type',
'standard_relation',
'standard_value',
'assay_description',
'assay_type',
#'standard_units',
'pchembl_value',
'ligand__id',
'ligand__properities_id',
'ligand__properities__web_links__index',
#'ligand__properities__vendors__vendor__name',
'protein__species__common_name',
'protein__entry_name',
'ligand__properities__mw',
'ligand__properities__logp',
'ligand__properities__rotatable_bonds',
'ligand__properities__smiles',
'ligand__properities__hdon',
'ligand__properities__hacc','protein'
).annotate(num_targets = Count('protein__id', distinct=True))
for record in ps:
record['purchasability'] = 'Yes' if len(LigandVendorLink.objects.filter(lp=record['ligand__properities_id']).exclude(vendor__name__in=['ZINC', 'ChEMBL', 'BindingDB', 'SureChEMBL', 'eMolecules', 'MolPort', 'PubChem'])) > 0 else 'No'
context['proteins'] = ps
return render(request, 'target_details.html', context)
def TargetPurchasabilityDetails(request, **kwargs):
simple_selection = request.session.get('selection', False)
selection = Selection()
if simple_selection:
selection.importer(simple_selection)
if selection.targets != []:
prot_ids = [x.item.id for x in selection.targets]
ps = AssayExperiment.objects.filter(protein__in=prot_ids, ligand__properities__web_links__web_resource__slug = 'chembl_ligand')
context = {
'target': ', '.join([x.item.entry_name for x in selection.targets])
}
ps = ps.values('standard_type',
'standard_relation',
'standard_value',
'assay_description',
'assay_type',
'standard_units',
'pchembl_value',
'ligand__id',
'ligand__properities_id',
'ligand__properities__web_links__index',
'ligand__properities__vendors__vendor__id',
'ligand__properities__vendors__vendor__name',
'protein__species__common_name',
'protein__entry_name',
'ligand__properities__mw',
'ligand__properities__logp',
'ligand__properities__rotatable_bonds',
'ligand__properities__smiles',
'ligand__properities__hdon',
'ligand__properities__hacc','protein'
).annotate(num_targets = Count('protein__id', distinct=True))
purchasable = []
for record in ps:
try:
if record['ligand__properities__vendors__vendor__name'] in ['ZINC', 'ChEMBL', 'BindingDB', 'SureChEMBL', 'eMolecules', 'MolPort', 'PubChem', 'IUPHAR/BPS Guide to PHARMACOLOGY']:
continue
tmp = LigandVendorLink.objects.filter(vendor=record['ligand__properities__vendors__vendor__id'], lp=record['ligand__properities_id'])[0]
record['vendor_id'] = tmp.vendor_external_id
record['vendor_link'] = tmp.url
purchasable.append(record)
except:
continue
context['proteins'] = purchasable
return render(request, 'target_purchasability_details.html', context)
class LigandStatistics(TemplateView):
"""
Per class statistics of known ligands.
"""
template_name = 'ligand_statistics.html'
def get_context_data (self, **kwargs):
context = super().get_context_data(**kwargs)
assays = AssayExperiment.objects.all().prefetch_related('protein__family__parent__parent__parent', 'protein__family')
lig_count_dict = {}
assays_lig = list(AssayExperiment.objects.all().values('protein__family__parent__parent__parent__name').annotate(c=Count('ligand',distinct=True)))
for a in assays_lig:
lig_count_dict[a['protein__family__parent__parent__parent__name']] = a['c']
target_count_dict = {}
assays_target = list(AssayExperiment.objects.all().values('protein__family__parent__parent__parent__name').annotate(c=Count('protein__family',distinct=True)))
for a in assays_target:
target_count_dict[a['protein__family__parent__parent__parent__name']] = a['c']
prot_count_dict = {}
proteins_count = list(Protein.objects.all().values('family__parent__parent__parent__name').annotate(c=Count('family',distinct=True)))
for pf in proteins_count:
prot_count_dict[pf['family__parent__parent__parent__name']] = pf['c']
classes = ProteinFamily.objects.filter(slug__in=['001', '002', '003', '004', '005', '006', '007']) #ugly but fast
proteins = Protein.objects.all().prefetch_related('family__parent__parent__parent')
ligands = []
for fam in classes:
if fam.name in lig_count_dict:
lig_count = lig_count_dict[fam.name]
target_count = target_count_dict[fam.name]
else:
lig_count = 0
target_count = 0
prot_count = prot_count_dict[fam.name]
ligands.append({
'name': fam.name,
'num_ligands': lig_count,
'avg_num_ligands': lig_count/prot_count,
'target_percentage': target_count/prot_count*100,
'target_count': target_count
})
lig_count_total = sum([x['num_ligands'] for x in ligands])
prot_count_total = Protein.objects.filter(family__slug__startswith='00').all().distinct('family').count()
target_count_total = sum([x['target_count'] for x in ligands])
lig_total = {
'num_ligands': lig_count_total,
'avg_num_ligands': lig_count_total/prot_count_total,
'target_percentage': target_count_total/prot_count_total*100,
'target_count': target_count_total
}
        #Elegant solution but kinda slow (6s queries):
"""
ligands = AssayExperiment.objects.values(
'protein__family__parent__parent__parent__name',
'protein__family__parent__parent__parent',
).annotate(num_ligands=Count('ligand', distinct=True))
for prot_class in ligands:
class_subset = AssayExperiment.objects.filter(
id=prot_class['protein__family__parent__parent__parent']).values(
'protein').annotate(
avg_num_ligands=Avg('ligand', distinct=True),
p_count=Count('protein')
)
prot_class['avg_num_ligands']=class_subset[0]['avg_num_ligands']
prot_class['p_count']=class_subset[0]['p_count']
"""
context['ligands_total'] = lig_total
context['ligands_by_class'] = ligands
context['release_notes'] = ReleaseNotes.objects.all()[0]
tree = PhylogeneticTreeGenerator()
class_a_data = tree.get_tree_data(ProteinFamily.objects.get(name='Class A (Rhodopsin)'))
context['class_a_options'] = deepcopy(tree.d3_options)
context['class_a_options']['anchor'] = 'class_a'
context['class_a_options']['leaf_offset'] = 50
context['class_a_options']['label_free'] = []
context['class_a'] = json.dumps(class_a_data.get_nodes_dict('ligands'))
class_b1_data = tree.get_tree_data(ProteinFamily.objects.get(name__startswith='Class B1 (Secretin)'))
context['class_b1_options'] = deepcopy(tree.d3_options)
context['class_b1_options']['anchor'] = 'class_b1'
context['class_b1_options']['branch_trunc'] = 60
context['class_b1_options']['label_free'] = [1,]
context['class_b1'] = json.dumps(class_b1_data.get_nodes_dict('ligands'))
class_b2_data = tree.get_tree_data(ProteinFamily.objects.get(name__startswith='Class B2 (Adhesion)'))
context['class_b2_options'] = deepcopy(tree.d3_options)
context['class_b2_options']['anchor'] = 'class_b2'
context['class_b2_options']['label_free'] = [1,]
context['class_b2'] = json.dumps(class_b2_data.get_nodes_dict('ligands'))
class_c_data = tree.get_tree_data(ProteinFamily.objects.get(name__startswith='Class C (Glutamate)'))
context['class_c_options'] = deepcopy(tree.d3_options)
context['class_c_options']['anchor'] = 'class_c'
context['class_c_options']['branch_trunc'] = 50
context['class_c_options']['label_free'] = [1,]
context['class_c'] = json.dumps(class_c_data.get_nodes_dict('ligands'))
class_f_data = tree.get_tree_data(ProteinFamily.objects.get(name__startswith='Class F (Frizzled)'))
context['class_f_options'] = deepcopy(tree.d3_options)
context['class_f_options']['anchor'] = 'class_f'
context['class_f_options']['label_free'] = [1,]
context['class_f'] = json.dumps(class_f_data.get_nodes_dict('ligands'))
class_t2_data = tree.get_tree_data(ProteinFamily.objects.get(name__startswith='Class T (Taste 2)'))
context['class_t2_options'] = deepcopy(tree.d3_options)
context['class_t2_options']['anchor'] = 'class_t2'
context['class_t2_options']['label_free'] = [1,]
context['class_t2'] = json.dumps(class_t2_data.get_nodes_dict('ligands'))
return context
#Biased Ligands part
class ExperimentEntryView(DetailView):
context_object_name = 'experiment'
model = AnalyzedExperiment
template_name = 'biased_experiment_data.html'
#Biased pathways part
class PathwayExperimentEntryView(DetailView):
context_object_name = 'experiment'
model = BiasedPathways
template_name = 'biased_pathways_data.html'
@csrf_exempt
def test_link(request):
request.session['ids'] = ''
# try:
request.session['ids']
if request.POST.get('action') == 'post':
request.session.modified = True
data = request.POST.get('ids')
data = filter(lambda char: char not in " \"?.!/;:[]", data)
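        # strip whitespace and punctuation from the posted id string before storing it in the session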
datum = "".join(data)
request.session['ids'] = datum
request.session.set_expiry(15)
# print('datum',datum )
return HttpResponse(request)
# except OSError as exc:
# raise
class BiasVendorBrowser(TemplateView):
template_name = 'biased_ligand_vendor.html'
#@cache_page(50000)
def get_context_data(self, **kwargs):
# try:
context = dict()
datum = self.request.session.get('ids')
self.request.session.modified = True
rd = list()
for i in datum.split(','):
ligand = Ligand.objects.filter(id=int(i))
ligand = ligand.get()
links = LigandVendorLink.objects.filter(lp=ligand.properities_id).prefetch_related('lp','vendor')
for x in links:
if x.vendor.name not in ['ZINC', 'ChEMBL', 'BindingDB', 'SureChEMBL', 'eMolecules', 'MolPort', 'PubChem']:
temp = dict()
vendor = LigandVendors.objects.filter(id=x.vendor_id)
vendor = vendor.get()
temp['ligand'] = ligand
temp['url'] = x.url
temp['vendor_id'] = x.vendor_external_id
temp['vendor'] = vendor
rd.append(temp)
context['data'] = rd
del self.request.session['ids']
return context
# except:
# raise
'''
Bias browser between families
access data from db, fill empty fields with empty parse_children
'''
class BiasBrowser(TemplateView):
template_name = 'bias_browser.html'
#@cache_page(50000)
def get_context_data(self, *args, **kwargs ):
content = AnalyzedExperiment.objects.filter(source='different_family').prefetch_related(
'analyzed_data', 'ligand','ligand__reference_ligand','reference_ligand',
'endogenous_ligand' ,'ligand__properities','receptor','receptor','receptor__family',
'receptor__family__parent','receptor__family__parent__parent__parent',
'receptor__family__parent__parent','receptor__family', 'receptor__species',
'publication', 'publication__web_link', 'publication__web_link__web_resource',
'publication__journal', 'ligand__ref_ligand_bias_analyzed',
'analyzed_data__emax_ligand_reference')
context = dict()
prepare_data = self.process_data(content)
keys = [k for k, v in prepare_data.items() if len(v['biasdata']) < 2]
for x in keys:
del prepare_data[x]
self.multply_assay(prepare_data)
context.update({'data': prepare_data})
return context
def process_data(self, content):
'''
Merge BiasedExperiment with its children
        and pass it back to loop through duplicates
'''
rd = dict()
increment = 0
for instance in content:
fin_obj = {}
fin_obj['main'] = instance
temp = dict()
doubles = []
temp['experiment_id'] = instance.id
temp['publication'] = instance.publication
temp['ligand'] = instance.ligand
temp['source'] = instance.source
temp['chembl'] = instance.chembl
temp['endogenous_ligand'] = instance.endogenous_ligand
temp['vendor_quantity'] = instance.vendor_quantity
temp['publication_quantity'] = instance.article_quantity
temp['lab_quantity'] = instance.labs_quantity
temp['reference_ligand'] = instance.reference_ligand
temp['primary'] = instance.primary.replace(' family,','')
temp['secondary'] = instance.secondary.replace(' family,','')
if instance.receptor:
temp['class'] = instance.receptor.family.parent.parent.parent.name.replace('Class','').strip()
temp['receptor'] = instance.receptor
temp['uniprot'] = instance.receptor.entry_short
temp['IUPHAR'] = instance.receptor.name.split(' ', 1)[0].split('-adrenoceptor', 1)[0].strip()
else:
temp['receptor'] = 'Error appeared'
temp['biasdata'] = list()
increment_assay = 0
for entry in instance.analyzed_data.all():
if entry.order_no < 5:
temp_dict = dict()
temp_dict['emax_reference_ligand'] = entry.emax_ligand_reference
temp_dict['family'] = entry.family
temp_dict['show_family'] = entry.signalling_protein
temp_dict['signalling_protein'] = entry.signalling_protein
temp_dict['cell_line'] = entry.cell_line
temp_dict['assay_type'] = entry.assay_type
temp_dict['assay_measure'] = entry.assay_measure
temp_dict['assay_time_resolved'] = entry.assay_time_resolved
temp_dict['ligand_function'] = entry.ligand_function
temp_dict['quantitive_measure_type'] = entry.quantitive_measure_type
temp_dict['quantitive_activity'] = entry.quantitive_activity
temp_dict['quantitive_activity_initial'] = entry.quantitive_activity_initial
temp_dict['quantitive_unit'] = entry.quantitive_unit
temp_dict['qualitative_activity'] = entry.qualitative_activity
temp_dict['quantitive_efficacy'] = entry.quantitive_efficacy
temp_dict['efficacy_measure_type'] = entry.efficacy_measure_type
temp_dict['efficacy_unit'] = entry.efficacy_unit
temp_dict['order_no'] = int(entry.order_no)
temp_dict['t_coefficient'] = entry.t_coefficient
if entry.t_value != None and entry.t_value !='None':
temp_dict['t_value'] = entry.t_value
else:
temp_dict['t_value'] = ''
if entry.t_factor != None and entry.t_factor !='None':
temp_dict['t_factor'] = entry.t_factor
else:
temp_dict['t_factor'] = ''
if entry.potency != None and entry.potency !='None':
temp_dict['potency'] = entry.potency
else:
temp_dict['potency'] = ''
if entry.log_bias_factor != None and entry.log_bias_factor !='None':
temp_dict['log_bias_factor'] = entry.log_bias_factor
else:
temp_dict['log_bias_factor'] = ''
temp_dict['emax_ligand_reference'] = entry.emax_ligand_reference
temp['biasdata'].append(temp_dict)
doubles.append(temp_dict)
increment_assay+=1
else:
continue
rd[increment] = temp
increment+=1
return rd
def multply_assay(self, data):
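        # pad each experiment's biasdata out to 5 pathway rows with empty placeholders,
        # then sort the rows by order_no (descending) for display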
for i in data.items():
lenght = len(i[1]['biasdata'])
for key in range(lenght,5):
temp_dict = dict()
temp_dict['pathway'] = ''
temp_dict['bias'] = ''
temp_dict['cell_line'] = ''
temp_dict['assay_type'] = ''
temp_dict['log_bias_factor'] = ''
temp_dict['t_factor'] = ''
temp_dict['ligand_function'] = ''
temp_dict['order_no'] = lenght
i[1]['biasdata'].append(temp_dict)
lenght+=1
test = sorted(i[1]['biasdata'], key=lambda x: x['order_no'],
reverse=True)
i[1]['biasdata'] = test
'''
End of Bias Browser
'''
class BiasBrowserGSubbtype(TemplateView):
template_name = 'bias_browser_g.html'
#@cache_page(50000)
def get_context_data(self, *args, **kwargs ):
content = AnalyzedExperiment.objects.filter(source='same_family').prefetch_related(
'analyzed_data', 'ligand','ligand__reference_ligand','reference_ligand',
'endogenous_ligand' ,'ligand__properities','receptor','receptor__family__parent','receptor__family__parent__parent__parent',
'receptor__family__parent__parent','receptor__species',
'publication', 'publication__web_link', 'publication__web_link__web_resource',
'publication__journal', 'ligand__ref_ligand_bias_analyzed',
'analyzed_data__emax_ligand_reference')
context = dict()
prepare_data = self.process_data(content)
keys = [k for k, v in prepare_data.items() if len(v['biasdata']) < 2]
for x in keys:
del prepare_data[x]
self.multply_assay(prepare_data)
context.update({'data': prepare_data})
return context
def process_data(self, content):
'''
Merge BiasedExperiment with its children
and pass it back to loop through duplicates
'''
rd = dict()
increment = 0
for instance in content:
fin_obj = {}
fin_obj['main'] = instance
temp = dict()
doubles = []
# TODO: mutation residue
temp['experiment_id'] = instance.id
temp['publication'] = instance.publication
temp['ligand'] = instance.ligand
temp['source'] = instance.source
temp['chembl'] = instance.chembl
temp['endogenous_ligand'] = instance.endogenous_ligand
temp['vendor_quantity'] = instance.vendor_quantity
temp['publication_quantity'] = instance.article_quantity
temp['lab_quantity'] = instance.labs_quantity
temp['reference_ligand'] = instance.reference_ligand
temp['primary'] = instance.primary
temp['secondary'] = instance.secondary
if instance.receptor:
temp['class'] = instance.receptor.family.parent.parent.parent.name.replace('Class','').strip()
temp['receptor'] = instance.receptor
temp['uniprot'] = instance.receptor.entry_short
temp['IUPHAR'] = instance.receptor.name.split(' ', 1)[0].strip()
else:
temp['receptor'] = 'Error appeared'
temp['biasdata'] = list()
increment_assay = 0
for entry in instance.analyzed_data.all():
if entry.order_no < 5:
temp_dict = dict()
temp_dict['emax_reference_ligand'] = entry.emax_ligand_reference
temp_dict['family'] = entry.family
temp_dict['show_family'] = entry.signalling_protein
temp_dict['signalling_protein'] = entry.signalling_protein
temp_dict['cell_line'] = entry.cell_line
temp_dict['assay_type'] = entry.assay_type
temp_dict['assay_measure'] = entry.assay_measure
temp_dict['assay_time_resolved'] = entry.assay_time_resolved
temp_dict['ligand_function'] = entry.ligand_function
temp_dict['quantitive_measure_type'] = entry.quantitive_measure_type
temp_dict['quantitive_activity'] = entry.quantitive_activity
temp_dict['quantitive_activity_initial'] = entry.quantitive_activity_initial
temp_dict['quantitive_unit'] = entry.quantitive_unit
temp_dict['qualitative_activity'] = entry.qualitative_activity
temp_dict['quantitive_efficacy'] = entry.quantitive_efficacy
temp_dict['efficacy_measure_type'] = entry.efficacy_measure_type
temp_dict['efficacy_unit'] = entry.efficacy_unit
temp_dict['order_no'] = int(entry.order_no)
temp_dict['t_coefficient'] = entry.t_coefficient
if entry.t_value != None and entry.t_value !='None':
temp_dict['t_value'] = entry.t_value
else:
temp_dict['t_value'] = ''
if entry.t_factor != None and entry.t_factor !='None':
temp_dict['t_factor'] = entry.t_factor
else:
temp_dict['t_factor'] = ''
if entry.potency != None and entry.potency !='None':
temp_dict['potency'] = entry.potency
else:
temp_dict['potency'] = ''
if entry.log_bias_factor != None and entry.log_bias_factor !='None':
temp_dict['log_bias_factor'] = entry.log_bias_factor
else:
temp_dict['log_bias_factor'] = ''
temp_dict['emax_ligand_reference'] = entry.emax_ligand_reference
temp['biasdata'].append(temp_dict)
doubles.append(temp_dict)
increment_assay+=1
else:
continue
rd[increment] = temp
increment+=1
return rd
def multply_assay(self, data):
for i in data.items():
lenght = len(i[1]['biasdata'])
for key in range(lenght,5):
temp_dict = dict()
temp_dict['pathway'] = ''
temp_dict['bias'] = ''
temp_dict['cell_line'] = ''
temp_dict['assay_type'] = ''
temp_dict['log_bias_factor'] = ''
temp_dict['t_factor'] = ''
temp_dict['ligand_function'] = ''
temp_dict['order_no'] = lenght
i[1]['biasdata'].append(temp_dict)
lenght+=1
test = sorted(i[1]['biasdata'], key=lambda x: x['order_no'],
reverse=True)
i[1]['biasdata'] = test
'''
Bias browser between families.
Accesses data from the db and fills empty fields via parse_children.
'''
class BiasBrowserChembl(TemplateView):
template_name = 'bias_browser_chembl.html'
#@cache_page(50000)
def get_context_data(self, *args, **kwargs ):
content = AnalyzedExperiment.objects.filter(source='chembl_data').prefetch_related(
'analyzed_data', 'ligand','ligand__reference_ligand','reference_ligand',
'endogenous_ligand' ,'ligand__properities','receptor','receptor__family',
'receptor__family__parent','receptor__family__parent__parent__parent',
'receptor__family__parent__parent','receptor__species',
'publication', 'publication__web_link', 'publication__web_link__web_resource',
'publication__journal', 'ligand__ref_ligand_bias_analyzed',
'analyzed_data__emax_ligand_reference')
context = dict()
prepare_data = self.process_data(content)
keys = [k for k, v in prepare_data.items() if len(v['biasdata']) < 2]
for x in keys:
del prepare_data[x]
self.multply_assay(prepare_data)
context.update({'data': prepare_data})
return context
def process_data(self, content):
'''
Merge BiasedExperiment with its children
and pass it back to loop through duplicates
'''
rd = dict()
increment = 0
for instance in content:
fin_obj = {}
fin_obj['main'] = instance
temp = dict()
doubles = []
# TODO: mutation residue
temp['experiment_id'] = instance.id
temp['ligand'] = instance.ligand
temp['source'] = instance.source
temp['chembl'] = instance.chembl
temp['endogenous_ligand'] = instance.endogenous_ligand
temp['vendor_quantity'] = instance.vendor_quantity
temp['primary'] = instance.primary
temp['secondary'] = instance.secondary
if instance.receptor:
temp['receptor'] = instance.receptor
else:
temp['receptor'] = 'Error appeared'
temp['biasdata'] = list()
increment_assay = 0
for entry in instance.analyzed_data.all():
if entry.order_no < 5:
temp_dict = dict()
temp_dict['family'] = entry.family
temp_dict['assay'] = entry.assay_type
temp_dict['assay_description'] = entry.assay_description
temp_dict['show_family'] = entry.signalling_protein
temp_dict['signalling_protein'] = entry.signalling_protein
temp_dict['quantitive_measure_type'] = entry.quantitive_measure_type
temp_dict['quantitive_activity'] = entry.quantitive_activity
temp_dict['quantitive_activity_initial'] = entry.quantitive_activity_initial
temp_dict['quantitive_unit'] = entry.quantitive_unit
temp_dict['qualitative_activity'] = entry.qualitative_activity
temp_dict['order_no'] = int(entry.order_no)
if entry.potency != None and entry.potency !='None':
temp_dict['potency'] = entry.potency
else:
temp_dict['potency'] = ''
temp['biasdata'].append(temp_dict)
doubles.append(temp_dict)
increment_assay+=1
else:
continue
rd[increment] = temp
increment+=1
return rd
def multply_assay(self, data):
for i in data.items():
lenght = len(i[1]['biasdata'])
for key in range(lenght,5):
temp_dict = dict()
temp_dict['pathway'] = ''
temp_dict['order_no'] = lenght
i[1]['biasdata'].append(temp_dict)
lenght+=1
test = sorted(i[1]['biasdata'], key=lambda x: x['order_no'],
reverse=True)
i[1]['biasdata'] = test
'''
End of Bias Browser
'''
class BiasPathways(TemplateView):
template_name = 'bias_browser_pathways.html'
#@cache_page(50000)
def get_context_data(self, *args, **kwargs ):
content = BiasedPathways.objects.all().prefetch_related(
'biased_pathway', 'ligand','receptor','receptor','receptor__family',
'receptor__family__parent','receptor__family__parent__parent__parent',
'receptor__family__parent__parent','receptor__species',
'publication', 'publication__web_link', 'publication__web_link__web_resource',
'publication__journal')
context = dict()
prepare_data = self.process_data(content)
context.update({'data': prepare_data})
return context
def process_data(self, content):
'''
Merge BiasedExperiment with its children
and pass it back to loop through duplicates
'''
rd = dict()
increment = 0
for instance in content:
fin_obj = {}
fin_obj['main'] = instance
temp = dict()
doubles = []
# TODO: mutation residue
temp['experiment_id'] = instance.id
temp['publication'] = instance.publication
temp['ligand'] = instance.ligand
temp['rece'] = instance.chembl
temp['chembl'] = instance.chembl
temp['relevance'] = instance.relevance
temp['signalling_protein'] = instance.signalling_protein
if instance.receptor:
temp['receptor'] = instance.receptor
temp['uniprot'] = instance.receptor.entry_short
temp['IUPHAR'] = instance.receptor.name.split(' ', 1)[0].strip()
else:
temp['receptor'] = 'Error appeared'
# at the moment there is only one pathway per biased_pathway entry
# change this if more pathways are added (the logic would change)
for entry in instance.biased_pathway.all():
temp['pathway_outcome_high'] = entry.pathway_outcome_high
temp['pathway_outcome_summary'] = entry.pathway_outcome_summary
temp['pathway_outcome_detail'] = entry.pathway_outcome_detail
temp['experiment_pathway_distinction'] = entry.experiment_pathway_distinction
temp['experiment_system'] = entry.experiment_system
temp['experiment_outcome_method'] = entry.experiment_outcome_method
rd[increment] = temp
increment+=1
return rd
'''
End of Bias Browser
'''
|
## Mainly performs a number of data transformations
import numpy as np
import pandas as pd
from datetime import datetime, date, timedelta
from scipy.stats import skew
from scipy.special import boxcox1p
from scipy.stats import boxcox_normmax
import os
import re
import seaborn as sns
import matplotlib.pyplot as plt
import time
from itertools import product
import datetime as dt
import calendar
import gc
from datetime import date, timedelta
from sklearn.linear_model import ElasticNetCV, LassoCV, RidgeCV,Ridge,Lasso,ElasticNet
from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor,RandomForestClassifier
from sklearn.feature_selection import mutual_info_regression
from sklearn.svm import SVR, LinearSVC
from sklearn.pipeline import make_pipeline,Pipeline
from sklearn.preprocessing import RobustScaler, LabelEncoder, StandardScaler,MinMaxScaler
from sklearn.model_selection import KFold, cross_val_score, train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
PATH = './'
train = pd.read_csv(PATH + 'train.csv')
test = pd.read_csv(PATH + 'submission.csv').set_index('customer_id')
train[['order_detail_id','order_id','order_amount','order_pay_time','is_customer_rate','order_detail_goods_num','order_detail_amount','order_detail_discount','customer_id','goods_id']].tail(10)
len(train['customer_id'][train.order_pay_time>'2013-07-31'].unique())
len(train[train.order_pay_time<'2013-07-31'])
# %matplotlib inline  # Jupyter-only magic; has no effect in a plain .py script
y = range(1,2)
plt.bar(['not buy','buy'], [1585986-174770,174770], alpha=0.5, width=0.3, color='lightblue', edgecolor='grey', lw=3)
plt.title('Sales in August 2013', fontsize=10)
for a, b in zip(['not buy','buy'], [1585986-174770,174770]):
plt.text(a, b + 0.05, '%.0f' % b, ha='center', va='bottom', fontsize=10)
plt.show()
train['order_detail_id'] = train['order_detail_id'].astype(np.uint32)
train['order_id'] = train['order_id'].astype(np.uint32)
train['customer_id'] = train['customer_id'].astype(np.uint32)
train['goods_id'] = train['goods_id'].astype(np.uint32)
train['goods_class_id'] = train['goods_class_id'].astype(np.uint32)
train['member_id'] = train['member_id'].astype(np.uint32)
# Handle the status fields and fill their missing values with 0
train['order_status'] = train['order_status'].astype(np.uint8)
train['goods_has_discount'] = train['goods_has_discount'].astype(np.uint8)
train["is_member_actived"].fillna(0, inplace=True)
train["is_member_actived"]=train["is_member_actived"].astype(np.int8)
train["member_status"].fillna(0, inplace=True)
train["member_status"]=train["member_status"].astype(np.int8)
train["customer_gender"].fillna(0, inplace=True)
train["customer_gender"]=train["customer_gender"].astype(np.int8)
train['is_customer_rate'] = train['is_customer_rate'].astype(np.uint8)
train['order_detail_status'] = train['order_detail_status'].astype(np.uint8)
# Parse the date/time columns
train['goods_list_time']=pd.to_datetime(train['goods_list_time'],format="%Y-%m-%d")
train['order_pay_time']=pd.to_datetime(train['order_pay_time'],format="%Y-%m-%d")
train['goods_delist_time']=pd.to_datetime(train['goods_delist_time'],format="%Y-%m-%d")
# Aggregate each customer's order payments by day
# df = train[train.order_status<101][train.order_pay_time>'2013-02-01']
df = train[train.order_pay_time>'2013-02-01'].copy()
df['date'] = pd.DatetimeIndex(df['order_pay_time']).date
df_payment = df[['customer_id','date','order_total_payment']]
df_payment = df_payment.groupby(['date','customer_id']).agg({'order_total_payment': ['sum']})
df_payment.columns = ['day_total_payment']
df_payment.reset_index(inplace=True)
df_payment = df_payment.set_index(
["customer_id", "date"])[["day_total_payment"]].unstack(level=-1).fillna(0)
df_payment.columns = df_payment.columns.get_level_values(1)
# Number of goods purchased per day
df_goods = df[['customer_id','date','order_total_num']]
df_goods = df_goods.groupby(['date','customer_id']).agg({'order_total_num': ['sum']})
df_goods.columns = ['day_total_num']
df_goods.reset_index(inplace=True)
df_goods = df_goods.set_index(
["customer_id", "date"])[["day_total_num"]].unstack(level=-1).fillna(0)
df_goods.columns = df_goods.columns.get_level_values(1)
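# Illustrative sketch (added for clarity, not part of the original pipeline): the groupby + unstack
# pattern above turns long (date, customer_id, value) records into a wide customer x date matrix,
# which is what the sliding-window helper below slices. Toy data with made-up ids and dates:
_toy = pd.DataFrame({'customer_id': [1, 1, 2],
                     'date': pd.to_datetime(['2013-02-01', '2013-02-03', '2013-02-01']),
                     'day_total_payment': [10.0, 5.0, 7.0]})
_toy_wide = _toy.set_index(['customer_id', 'date'])[['day_total_payment']].unstack(level=-1).fillna(0)
# _toy_wide has one row per customer and one column per date, with 0 where no purchase was made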
# Sliding time-window helper: returns the slice of df covering `periods` days starting `minus` days before `dt`, for further feature computation
def get_timespan(df, dt, minus, periods, freq='D'):
return df[pd.date_range(dt - timedelta(days=minus), periods=periods, freq=freq)]
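# Usage sketch for get_timespan (illustrative; the frame and dates below are made up): given a wide
# frame whose columns are consecutive daily Timestamps, select the 7 days ending the day before the
# anchor date and sum them per row.
_days = pd.date_range('2013-06-01', periods=10)
_demo = pd.DataFrame(np.ones((2, 10)), columns=_days)
_window = get_timespan(_demo, date(2013, 6, 11), 7, 7)  # columns 2013-06-04 .. 2013-06-10
_window_sum = _window.sum(axis=1)                       # 7.0 for each of the two rows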
def prepare_dataset(df_payment, df_goods, t2018, is_train=True):
X = {}
# Collect the customer ids
tmp = df_payment.reset_index()
X['customer_id'] = tmp['customer_id']
# Payment features
print('Preparing payment feature...')
for i in [14,30,60,91]:
tmp = get_timespan(df_payment, t2018, i, i)
X['mean_%s_decay' % i] = (tmp * np.power(0.9, np.arange(i)[::-1])).sum(axis=1).values
X['max_%s' % i] = tmp.max(axis=1).values
X['sum_%s' % i] = tmp.sum(axis=1).values
for i in [14,30,60,91]:
tmp = get_timespan(df_payment, t2018 + timedelta(days=-7), i, i)
X['mean_%s_decay_2' % i] = (tmp * np.power(0.9, np.arange(i)[::-1])).sum(axis=1).values
X['max_%s_2' % i] = tmp.max(axis=1).values
for i in [14,30,60,91]:
tmp = get_timespan(df_payment, t2018, i, i)
X['has_sales_days_in_last_%s' % i] = (tmp != 0).sum(axis=1).values
X['last_has_sales_day_in_last_%s' % i] = i - ((tmp != 0) * np.arange(i)).max(axis=1).values
X['first_has_sales_day_in_last_%s' % i] = ((tmp != 0) * np.arange(i, 0, -1)).max(axis=1).values
# Fine-tuned here to emphasise recent behaviour
for i in range(1, 4):
X['day_%s_2018' % i] = get_timespan(df_payment, t2018, i*30, 30).sum(axis=1).values
# Goods-count features; the window lengths are deliberately offset from the payment windows to widen the sliding-window coverage
print('Preparing num feature...')
for i in [21,49,84]:
tmp = get_timespan(df_goods, t2018, i, i)
X['goods_mean_%s' % i] = tmp.mean(axis=1).values
X['goods_max_%s' % i] = tmp.max(axis=1).values
X['goods_sum_%s' % i] = tmp.sum(axis=1).values
for i in [21,49,84]:
tmp = get_timespan(df_goods, t2018 + timedelta(weeks=-1), i, i)
X['goods_mean_%s_2' % i] = tmp.mean(axis=1).values
X['goods_max_%s_2' % i] = tmp.max(axis=1).values
X['goods_sum_%s_2' % i] = tmp.sum(axis=1).values
for i in [21,49,84]:
tmp = get_timespan(df_goods, t2018, i, i)
X['goods_has_sales_days_in_last_%s' % i] = (tmp > 0).sum(axis=1).values
X['goods_last_has_sales_day_in_last_%s' % i] = i - ((tmp > 0) * np.arange(i)).max(axis=1).values
X['goods_first_has_sales_day_in_last_%s' % i] = ((tmp > 0) * np.arange(i, 0, -1)).max(axis=1).values
# Fine-tuned here as well, again emphasising recent behaviour
for i in range(1, 4):
X['goods_day_%s_2018' % i] = get_timespan(df_goods, t2018, i*28, 28).sum(axis=1).values
X = pd.DataFrame(X)
if is_train:
# After this transformation the label can be built with a plain slice over the target month,
# assuming the total payment amounts are never negative
X['label'] = df_goods[pd.date_range(t2018, periods=30)].max(axis=1).values
X.loc[X['label'] > 0, 'label'] = 1
return X
return X
num_days = 4
t2017 = date(2013, 7, 1)
X_l, y_l = [], []
for i in range(num_days):
    delta = timedelta(days=7 * i)
    # prepare_dataset returns a single DataFrame that includes the 'label' column when is_train=True,
    # so split the label off here instead of unpacking two return values
    X_tmp = prepare_dataset(df_payment, df_goods, t2017 + delta)
    y_tmp = X_tmp.pop('label').values
    X_l.append(X_tmp)
    y_l.append(y_tmp)
X_train = pd.concat(X_l, axis=0)
y_train = np.concatenate(y_l, axis=0)
X_test = prepare_dataset(df_payment, df_goods, date(2013, 9, 1), is_train=False)
X_test = pd.concat([X_test], axis=1)
import lightgbm as lgb
clf = lgb.LGBMClassifier(num_leaves=2**5-1, reg_alpha=0.25, reg_lambda=0.25, objective='binary', max_depth=1, learning_rate=0.005, min_child_samples=3, random_state=2021, n_estimators=2000, subsample=1, colsample_bytree=1)
# X_test covers the submission period and has no labels, so hold out part of the
# training windows to get a validation accuracy
X_tr, X_val, y_tr, y_val = train_test_split(X_train, y_train, test_size=0.2, random_state=2021)
clf.fit(X_tr, y_tr)
from sklearn.metrics import accuracy_score
y_val_pred = clf.predict(X_val)
print(accuracy_score(y_val, y_val_pred))
# predictions for the submission customers
y_pred = clf.predict(X_test)
|
# Generated by Django 2.1.5 on 2019-01-26 20:59
from django.conf import settings
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0009_alter_user_last_name_max_length'),
]
operations = [
migrations.CreateModel(
name='Curriculum',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.CreateModel(
name='Group',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('course', models.IntegerField(choices=[(1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '1 маг.'), (6, '2 маг.')], default=1, verbose_name='Курс')),
('number', models.PositiveIntegerField(default=1, verbose_name='Номер группы')),
],
),
migrations.CreateModel(
name='GroupCurriculum',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('semester', models.PositiveIntegerField(verbose_name='Семестр')),
('type', models.IntegerField(choices=[(1, 'Семинар'), (2, 'Лекция'), (3, 'Практика')], default=2, verbose_name='Тип занятия')),
('curriculum', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='Models.Curriculum', verbose_name='Учебный курс')),
('group', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='Models.Group', verbose_name='Группа')),
],
),
migrations.CreateModel(
name='Student',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('surname', models.CharField(max_length=20, verbose_name='Фамилия')),
('name', models.CharField(max_length=20, verbose_name='Имя')),
('second_name', models.CharField(blank=True, max_length=20, verbose_name='Отчество')),
('group', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='Models.Group', verbose_name='Группа')),
],
),
migrations.CreateModel(
name='Subject',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, verbose_name='Название предмета')),
],
),
migrations.CreateModel(
name='Visit',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField(verbose_name='Дата')),
('visit', models.BooleanField(verbose_name='Посетил')),
('group_curriculum', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='Models.GroupCurriculum', verbose_name='Учебный курс')),
('student', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='Models.Student', verbose_name='Студент')),
],
),
migrations.CreateModel(
name='Teacher',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('surname', models.CharField(max_length=20, verbose_name='Фамилия')),
('name', models.CharField(max_length=20, verbose_name='Имя')),
('second_name', models.CharField(blank=True, max_length=20, verbose_name='Отчество')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.AddField(
model_name='curriculum',
name='subject',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='Models.Subject', verbose_name='Предмет'),
),
migrations.AddField(
model_name='curriculum',
name='teacher',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='Преподаватель'),
),
]
|
from nab import show_elem, match, episode
class Season(show_elem.ShowParentElem, show_elem.ShowElem):
def __init__(self, show, num, title=None, titles=None):
show_elem.ShowParentElem.__init__(self)
show_elem.ShowElem.__init__(self, show, title, titles)
self.num = num
@property
def season(self):
return self
@property
def id(self):
return self.show.id + (self.num,)
def merge(self, season):
show_elem.ShowParentElem.merge(self, season)
show_elem.ShowElem.merge(self, season)
def to_yaml(self):
return {
"title": self.title,
"titles": list(self.titles),
"episodes": show_elem.ShowParentElem.to_yaml(self)
}
@staticmethod
def from_yaml(yml, num, show):
season = Season(show, num, yml["title"], yml["titles"])
season.update(
show_elem.ShowParentElem.from_yaml(
yml["episodes"], episode.Episode, season))
return season
def names(self, full=False):
names = []
if self.num == 0 and not full:
return []
# add show name without season number for season 1 if only season
if self.num == 1 and 2 not in self.show:
names.append({"titles": self.show.search_terms()})
names.append({"titles": self.show.search_terms(), "senum": self.num})
if self.titles:
# season has a title
titles = set(map(match.format_title, self.titles))
names[-1]["titles"].update(titles)
names.append({"titles": titles})
return names
def search_terms(self):
terms = []
for n in self.names():
for t in n["titles"]:
if t == "":
continue
if "senum" in n:
terms.append("%s S%02d" % (t, n["senum"]))
terms.append("%s %d" % (t, n["senum"]))
terms.append("%s Season %d" % (t, n["senum"]))
else:
terms.append(t)
# if absolute numbered, add episode range as search term
if self.show.absolute:
for t in self.show.titles:
terms.append("%s %d-%d" % (t,
self.episodes[0].absolute,
self.episodes[-1].absolute))
return terms
def match(self, f, total=True):
# if this is a total match, there must be no episode number
if total and f.episode is not None:
# if using absolute numbering, see if this file matches
# this season's absolute episode numbers
# must match against SHOW not season in this case
try:
start = self.episodes[0].absolute
except IndexError:
pass # there are no episodes in this season
else:
if (self.show.absolute and self.show.match(f, False) and
f.episode == start and f.eprange == start + len(self) - 1):
return True
# ...or episode range must match episodes in season
if f.episode != 1 or f.eprange != len(self):
return False
titles = map(match.format_title, self.titles)
return ((f.title in titles and f.season is None) or
(self.show.match(f, False) and
f.season == self.num and f.serange == self.num))
def __eq__(self, other):
return show_elem.ShowElem.__eq__(self, other)
def __str__(self):
if self.title:
return self.title.encode('utf-8')
return ("%s - S%02d" % (self.show, self.num))
def __repr__(self):
return "<Season (%s)>" % str(self)
|
# About the for operator: it walks through a sequence up to its end, producing one item per step
prices = [10, 20, 30, 40, 2, 4, 3]  # suppose this is a list of some prices
total = 0  # the starting amount
for price in prices:
    total = total + price  # add the current price to the running total
    print(f"Total: {total}")
|
# coding: utf-8
__author__ = 'Chris Lee'
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
import os
cwp = os.path.dirname(os.path.abspath(__file__))
import pymongo
import json
import re
import datetime
def prepare_mongo_data():
in_host = 'localhost'
in_port = 27017
in_db = 'resume'
in_table = 'resume'
out_host = 'localhost'
out_port = 27017
out_db = 'resume'
out_table = 'graduate_v002'
in_client = pymongo.MongoClient(in_host, in_port)
out_client = pymongo.MongoClient(out_host, out_port)
where = """
function a(){
try{
return this.educationList[0] != undefined && this.workExperienceList[0] != undefined;
}catch(err){
return false;
}
}
"""
year_pattern = re.compile(ur"\d{4}")
count = 0
for resume in in_client[in_db][in_table].find({"$where": where}, timeout=False):
if count % 10000 == 0:
print count
count += 1
if resume["educationList"][0]['profession_name'] and resume["educationList"][0]['college_name']:
edu = {
"college": resume["educationList"][0]['college_name'],
"major": resume["educationList"][0]['profession_name'],
}
else:
continue
if resume['expect_industry']:
industry = resume['expect_industry']
else:
continue
works = []
for work in resume['workExperienceList']:
start = work['start_date']
if start:
start = year_pattern.findall(start)
if start:
start = start[0]
else:
continue
else:
continue
start = int(start)
end = work['end_date']
if end:
end = year_pattern.findall(end)
if end:
end = end[0]
else:
end = datetime.datetime.now().year
else:
end = datetime.datetime.now().year
end = int(end)
if start > end:
continue
else:
if work["position_name"]:
works.append({
"position": work["position_name"],
"year": end - start
})
if edu and works:
out_client[out_db][out_table].insert({
"education": edu,
"work": works,
"industry": industry,
})
def prepare_json_data():
in_host = 'localhost'
in_port = 27017
in_db = 'resume'
in_table = 'graduate_v002'
out_file = open(os.path.join(cwp, "data.json"), "w")
in_client = pymongo.MongoClient(in_host, in_port)
count = 0
for foo in in_client[in_db][in_table].find(timeout=False):
count += 1
if count % 10000 == 0:
print count/10000, 'W'
out = {
"first_work": foo["work"][-1]["position"],
"college": foo["education"]["college"],
"major": foo["education"]["major"],
"industry": foo["industry"],
"work": "@".join(["%s#%s" % (w["position"], w["year"]) for w in foo["work"][::-1]]),
"work_count": len(foo["work"])
}
out_file.write(json.dumps(out))
out_file.write("\n")
if __name__ == "__main__":
# prepare_mongo_data()
prepare_json_data()
|
#!/proj/sot/ska3/flight/bin/python
import sys
print(sys.path)
|
from mutagen.id3 import ID3
from songs_handler import data_handler
class metadata_getter():
    @staticmethod
    def get_metadata(path):
        audio = ID3(path)
        song = audio['TIT2'].text[0]    # title frame
        artist = audio['TPE1'].text[0]  # lead artist frame
        album = audio['TALB'].text[0]   # album frame (TALB; TBPM is the BPM frame)
        genre = audio['TCON'].text[0]
        bpm = audio['TBPM'].text[0]
        year = audio['TDRC'].text[0]
        # push_into_songsDB is expected to be defined elsewhere (presumably in data_handler)
        push_into_songsDB(song, artist, album, genre, bpm, year)
|
# -*- coding: utf-8 -*-
#
# This file is part of Flask-AppExts
# Copyright (C) 2015 CERN.
#
# Flask-AppExts is free software; you can redistribute it and/or
# modify it under the terms of the Revised BSD License; see LICENSE
# file for more details.
"""Flask-Breadcrumbs extension."""
from __future__ import absolute_import, unicode_literals, print_function
from flask.ext import breadcrumbs
def setup_app(app):
"""Initialize Breadcrumbs."""
breadcrumbs.Breadcrumbs(app=app)
|
import numpy as np
import torch
def as_numpy(x: torch.Tensor) -> np.ndarray:
    """Detach a tensor and return it as a numpy array; numpy arrays pass through unchanged."""
    if isinstance(x, np.ndarray):
        return x
    x = x.detach()
    if x.device.type != 'cpu':
        x = x.cpu()
    x = x.numpy()
    return x
def accuracy(y: torch.Tensor, t: torch.Tensor) -> float:
    """Top-1 accuracy of class scores y (N x C) against integer targets t (N,)."""
    y = as_numpy(y)
    t = as_numpy(t)
    i = np.argmax(y, axis=1)
    c = i == t
    a = float(np.sum(c)) / np.prod(t.shape)
    return a
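# Minimal usage sketch (illustrative, not part of the original module): a batch of 3 samples over
# 2 classes; the argmax of the first two rows matches the targets, the third does not, so the
# expected accuracy is 2/3.
if __name__ == '__main__':
    logits = torch.tensor([[2.0, 0.1], [0.2, 1.5], [0.9, 0.3]])
    targets = torch.tensor([0, 1, 1])
    print(accuracy(logits, targets))  # ~0.6667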
|
import os
import subprocess
def install_source_package(src_package, config):
print(f"We install with apt-source >{src_package}< into >{config['ubuntu_src_pkgs']}{src_package}<")
try:
os.mkdir(config['ubuntu_src_pkgs'] + src_package)
except OSError:
print (f"Creation of the directory {config['ubuntu_src_pkgs']}{src_package} failed")
else:
print (f"Successfully created the directory {config['ubuntu_src_pkgs']}{src_package}")
os.chdir(config['ubuntu_src_pkgs'] + src_package)
###install the package
#child = pexpect.spawn('apt source -y {0}'.format(src_package), timeout=None)
#if not gcloud:
# child.expect('ubu:', timeout=None)
# enter the password
# child.sendline('ubu\n')
#print(child.read())
#tmp = child.read()
out = subprocess.run(["apt",
"source",
src_package],
capture_output=True,
universal_newlines=True)
gdb_out = out.stdout
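# Hypothetical usage sketch: config only needs the 'ubuntu_src_pkgs' key used above, and the
# package name and directory below are illustrative, not taken from the original project.
if __name__ == '__main__':
    demo_config = {'ubuntu_src_pkgs': '/tmp/src_pkgs/'}
    os.makedirs(demo_config['ubuntu_src_pkgs'], exist_ok=True)
    install_source_package('coreutils', demo_config)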
|
#Read in .csv file with all salamander scores
import pandas as pd
from shutil import move
import os
os.chdir('/Users/maggie/Dropbox/P.cinereus_ML/Consensus_scores_NAs_updated/')
df = pd.read_csv('All_salamander_scores.csv')
#df = pd.read_csv('/Users/maggie/Dropbox/P.cinereus_ML/Consensus_scores_NAs_updated/All_salamander_scores.csv')
print(df.head())
# Need to sort images into separate folders based on image labels (e.g. R, L, U, O)
all_images = os.listdir('Sets_1_to_10')
co = 0
for image in all_images:  # alternatively: for co, image in enumerate(all_images): -- then co = 0 above and co += 1 below are not needed
print(image)
color_majority = df[df['file'] == image]['color_majority']
color_majority = str(list(color_majority)[0]) # Do I need this? Try running without next time
if not os.path.exists(os.path.join('categories', color_majority)):
os.mkdir(os.path.join('categories', color_majority))
path_from = os.path.join('Sets_1_to_10', image)
path_to = os.path.join('categories', color_majority, image)
move(path_from, path_to)
print('Moved {} to {}'.format(image, path_to))
co += 1  # could also write co = co + 1, given co = 0 at the beginning
print('Moved {} images.'.format(co))
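# The comment on the loop above notes that enumerate() can replace the manual counter; a tiny
# standalone illustration (it does not touch the image files):
for idx, name in enumerate(['a.jpg', 'b.jpg', 'c.jpg']):
    print('{}: {}'.format(idx, name))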
|
import yaml
import os
import os.path
from optparse import OptionParser
from shutil import copyfile
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
import appdirs
from nab.scheduler import scheduler, tasks
from nab import log
_log = log.log.getChild("config")
config_dir = appdirs.user_config_dir('nab')
config_file = os.path.join(config_dir, 'config.yaml')
accounts_file = os.path.join(config_dir, 'accounts.yaml')
def _load_config():
if not os.path.exists(config_file):
_log.info("Creating default config file")
copyfile("config_default.yaml", config_file)
_log.info("Loading config and accounts files")
c = yaml.load(file(config_file, "r"))
a = yaml.load(file(accounts_file, "a+"))
# find and create directories in settings
s = c["settings"]
def case_insensitive(path):
# look up path in a case insensitive way
basepath, basedir = os.path.split(path)
if basepath == path:
# base case, return path as-is
return path
# recursive call to lower elements of path
basepath = case_insensitive(basepath)
dirs = os.listdir(basepath)
# if this directory exists in the given casing, return it
if basedir not in dirs:
# lookup directory in lower case only
basedir = basedir.lower()
dir_map = dict((d.lower(), d) for d in dirs)
# convert case to case of existing file, if it exists
if basedir in dir_map:
basedir = dir_map[basedir]
return os.path.join(basepath, basedir)
def format_path(path):
# format user directory in path
path = path.format(user=os.getenv('USERPROFILE') or os.getenv('HOME'))
return case_insensitive(path)
s["downloads"] = format_path(s["downloads"])
s["videos"] = map(format_path, s["videos"])
dirs = [s["downloads"]] + s["videos"]
for d in dirs:
if not os.path.exists(d):
_log.info("Creating directory %s" % d)
os.makedirs(d)
return c, a
config, accounts = _load_config()
def reload_config():
_log.info('Reloading config and accounts files')
global config, accounts
config, accounts = _load_config()
tasks["load_config"] = reload_config
def change_config(new_config):
_log.info('Changing config file')
yaml.safe_dump(new_config, file(config_file, 'w'))
_observer = None
def init():
handler = ConfigWatcher()
global _observer
_observer = Observer()
_observer.schedule(handler, config_dir)
_observer.start()
def stop():
try:
_observer.stop()
except:
pass
class ConfigWatcher(FileSystemEventHandler):
def on_any_event(self, event):
try:
dest = event.dest_path
except AttributeError:
dest = None
if event.src_path == config_file or dest == config_file:
_log.info('Change detected in config.yaml, scheduling reload')
scheduler.add_asap('load_config')
if event.src_path == accounts_file or dest == accounts_file:
_log.info('Change detected in accounts.yaml, scheduling reload')
scheduler.add_asap('load_config')
def _load_options():
parser = OptionParser()
parser.add_option("-t", "--test", action="store_true", default=False)
parser.add_option("-p", "--plugin", action="store_true", default=False)
parser.add_option("-c", "--clean", action="store_true", default=False)
return parser.parse_args()
options, args = _load_options()
|