text stringlengths 8 6.05M |
|---|
"""
Django settings for source project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import random
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key (and the OAuth2 client secret below) is committed to
# source control - rotate them and load from the environment before deploying.
SECRET_KEY = 'nve3@n28%e6d1v4+i&vt07fcq%a(@(le!(vw$^5dc-nodz)#71'
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # third party: social auth (Google OAuth2 login, configured below)
    'social_auth',
    # project apps
    'apps.core',
    'apps.api',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# Google OAuth2 is tried first, then Django's username/password model backend.
AUTHENTICATION_BACKENDS = (
    'social_auth.backends.google.GoogleOAuth2Backend',
    'django.contrib.auth.backends.ModelBackend',
)
GOOGLE_OAUTH2_CLIENT_ID = '395053503063-pc9b9ka7va3cph7pt8fr3tt32n4dgi2p.apps.googleusercontent.com'
GOOGLE_OAUTH2_CLIENT_SECRET = 'vCs_Yq1reQidXvech4yoNuHG'
# Every auth flow (login, errors, post-social-auth) lands on the site root.
LOGIN_URL = '/'
LOGIN_REDIRECT_URL = '/'
LOGIN_ERROR_URL = '/'
SOCIAL_AUTH_LOGIN_REDIRECT_URL = '/'
SOCIAL_AUTH_NEW_USER_REDIRECT_URL = '/'
SOCIAL_AUTH_COMPLETE_URL_NAME = 'socialauth_complete'
SOCIAL_AUTH_ASSOCIATE_URL_NAME = 'socialauth_associate_complete'
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
    'django.core.context_processors.request',
    'social_auth.context_processors.social_auth_by_name_backends',
)
# NOTE(review): PickleSerializer deserializes session data with pickle, which
# can execute arbitrary code if SECRET_KEY ever leaks; Django's default
# JSONSerializer is safer unless pickled session values are strictly required.
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
ROOT_URLCONF = 'source.urls'
WSGI_APPLICATION = 'source.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
# Use local_settings.py for machine-specific configuration when present;
# otherwise fall back to a DEBUG sqlite setup suitable only for development.
try:
    from local_settings import *
except ImportError:
    # Narrowed from a bare 'except:': the bare form also swallowed genuine
    # errors *inside* an existing local_settings.py (syntax errors, bad
    # imports), silently running on the sqlite fallback instead of failing.
    # SECURITY WARNING: don't run with debug turned on in production!
    DEBUG = True
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': os.path.join(BASE_DIR, 'sqlite3.db'),
        }
    }
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
# Additional (non-app) locations searched by the staticfiles finders.
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, "static"),
)
TEMPLATE_DIRS = (
    os.path.join(BASE_DIR, "templates"),
)
# Searched by 'manage.py loaddata' and the test runner for fixture files.
FIXTURE_DIRS = (
    os.path.join(BASE_DIR, "fixtures"),
)
|
# Identifier under which this contractor module is registered.
MODULE_NAME = 'manual'
MODULE_FUNCTIONS = { }
# This mapping is intentionally empty: it only needs to exist so the module
# can be enabled; the contractor module doesn't actually send anything to the
# subcontractor.
|
# setuptools supersedes distutils, which is deprecated (PEP 632) and removed
# from the standard library in Python 3.12; the setup() signature is the same.
from setuptools import setup
from Cython.Build import cythonize

setup(
    name='benchmarks',
    # Compile every .py module in this directory to a C extension;
    # annotate=True additionally emits an HTML report per module showing
    # where Python-object overhead remains.
    ext_modules=cythonize("*.py", language_level=3, annotate=True),
)
|
'''
Demo of subprocess.run: capturing stdout/stderr of shell commands that
succeed, fail, and have side effects.
author: juzicode
address: www.juzicode.com
official account (公众号): 桔子code/juzicode
date: 2020.7.15
'''
print('\n')
print('-----欢迎来到www.juzicode.com')
print('-----公众号: 桔子code/juzicode\n')
import os,time,sys
import subprocess


def _report(completed):
    # Print the fields of a CompletedProcess in the demo's fixed format.
    print('args:', completed.args)
    print('returncode:', completed.returncode)
    print('stdout:', completed.stdout)
    print('stderr:', completed.stderr)


# A successful command; without text=True the captured streams are bytes.
print('执行 dir')
_report(subprocess.run('dir', shell=True, capture_output=True))
# A failing command: non-zero returncode, the shell's complaint in stderr.
print('执行 dir --notfound')
_report(subprocess.run('dir --notfound', shell=True, capture_output=True, text=True))
# A command with a side effect; fails if the directory already exists.
print('执行 mkdir abc')
_report(subprocess.run(' mkdir abc', shell=True, capture_output=True, text=True))
"""
GUI module for Tic Tac Toe game
"""
import pygame
import math
from typing import Callable, Tuple, Optional
from dataclasses import dataclass, field
from pygame.font import Font
from .ttt_board import *
# Init pygame
pygame.init()
# GUI constants
GUI_WIDTH = 400   # width of the board drawing area, in pixels
GUI_HEIGHT = 500  # window height: board plus a 100px header strip on top
BAR_WIDTH = 5     # stroke thickness for grid lines and player symbols
# Colour constants (RGB tuples)
white = (255, 255, 255)
black = (0, 0, 0)
green = (0, 200, 0)
bright_green = (0, 255, 0)
@dataclass
class TTTGUI:
    """GUI for Tic Tac Toe game.

    Draws the board below a 100px header strip (status message + "New Game"
    button) and alternates moves between the human player and an AI callback.
    """
    # variables with passed in values
    size: int            # board dimension (size x size)
    ai_player: int       # player constant controlled by the AI
    human_player: int    # the other player constant
    ai_function: Callable[[TTTBoard, int], Tuple[int, int]]  # board, player -> (row, col)
    screen: pygame.Surface
    # default game variables
    bar_spacing: int = field(init=False)  # pixel size of one board cell
    # start new game
    board: TTTBoard = field(init=False)
    in_progress: bool = True
    wait: bool = False   # skip one AI frame right after a human move
    turn: int = PLAYERX
    message: str = "X Turn!"

    def __post_init__(self) -> None:
        """Initialize variables that depend on other fields being set."""
        self.bar_spacing = GUI_WIDTH // self.size
        self.board = TTTBoard(self.size)

    def new_game(self) -> None:
        """Clear the screen and start a new game.

        NOTE(review): this re-enters game_loop recursively, so every "New
        Game" click grows the call stack; tolerable for short sessions.
        """
        self.screen.fill(black)
        game_loop(self.size, self.ai_player, self.ai_function)

    def _turn_message(self) -> str:
        """Status text announcing whose turn it currently is."""
        return "X Turn!" if self.turn == PLAYERX else "O Turn!"

    def click(self) -> None:
        """Make a human move at the current mouse position, if legal.

        Fixes over the original: the status message only flips once a move
        is actually made (it used to flip on any click), and clicks outside
        the board are rejected (a click in the header strip produced a
        negative row that silently wrapped via negative indexing).
        """
        if not (self.in_progress and self.turn == self.human_player):
            return
        mouse_pos = pygame.mouse.get_pos()
        # the board area starts 100px below the top of the window
        row, col = self.get_grid_from_coords(
            (mouse_pos[0], mouse_pos[1] - 100))
        if not (0 <= row < self.size and 0 <= col < self.size):
            return
        # only move if the square is empty
        if self.board.get_square(row, col) == EMPTY:
            self.board.move(row, col, self.human_player)
            self.turn = self.ai_player
            self.message = self._turn_message()
            # check winner
            winner = self.board.check_win()
            if winner is not None:
                self.game_over(winner)
            # let the next frame draw before the AI answers
            self.wait = True

    def aimove(self) -> None:
        """Ask the AI for a move and apply it, if legal."""
        if not (self.in_progress and self.turn == self.ai_player):
            return
        row, col = self.ai_function(self.board, self.ai_player)
        # only move if the square is empty
        if self.board.get_square(row, col) == EMPTY:
            self.board.move(row, col, self.ai_player)
            self.turn = self.human_player
            self.message = self._turn_message()
            # check winner
            winner = self.board.check_win()
            if winner is not None:
                self.game_over(winner)

    def game_over(self, winner: Optional[int]) -> None:
        """Stop the game and display the outcome."""
        # Display winner
        if winner == DRAW:
            self.message = "It's a tie!"
        elif winner == PLAYERX:
            self.message = "X Wins!"
        elif winner == PLAYERO:
            self.message = "O Wins!"
        # Game is no longer in progress
        self.in_progress = False

    def get_message(self) -> str:
        """Return the in-game message."""
        return self.message

    def get_coords_from_grid(self, row: int, col: int) -> Tuple[float, float]:
        """Given a grid position in the form (row, col), returns
        the coordinates on the canvas of the center of the grid.
        """
        return (self.bar_spacing * (col + 1.0 / 2.0),  # x
                self.bar_spacing * (row + 1.0 / 2.0))  # y

    def get_grid_from_coords(self, position: Tuple[int, int]
                             ) -> Tuple[int, int]:
        """Given coordinates on a canvas, gets the indices of the grid.

        May return out-of-range indices for positions outside the board;
        callers are expected to validate (see click()).
        """
        pos_x, pos_y = position
        return (pos_y // self.bar_spacing,  # row
                pos_x // self.bar_spacing)  # col

    def drawx(self, pos: Tuple[float, float]) -> None:
        """Draw an X centered at the given canvas position."""
        half_size = .4 * self.bar_spacing
        pygame.draw.line(self.screen, black,
                         (pos[0] - half_size, pos[1] - half_size),
                         (pos[0] + half_size, pos[1] + half_size), BAR_WIDTH)
        pygame.draw.line(self.screen, black,
                         (pos[0] + half_size, pos[1] - half_size),
                         (pos[0] - half_size, pos[1] + half_size), BAR_WIDTH)

    def drawo(self, pos: Tuple[float, float]) -> None:
        """Draw an O centered at the given canvas position."""
        half_size = .4 * self.bar_spacing
        pygame.draw.circle(self.screen, black,
                           (math.ceil(pos[0]), math.ceil(pos[1])),
                           math.ceil(half_size), BAR_WIDTH)

    def draw(self) -> None:
        """Updates the tic-tac-toe GUI: button, grid, moves and AI turn."""
        # Draw in new game button
        self.button_object("New Game", 250, 20, 120, 50, green, bright_green,
                           self.new_game)
        # Draw in bar lines
        for bar_start in range(0, GUI_WIDTH - 1, self.bar_spacing):
            pygame.draw.line(self.screen, black, (bar_start, 100),
                             (bar_start, GUI_HEIGHT), BAR_WIDTH)
            pygame.draw.line(self.screen, black, (0, bar_start + 100),
                             (GUI_WIDTH, bar_start + 100), BAR_WIDTH)
        # Draw the current players' moves (offset 100px below the header)
        for row in range(self.size):
            for col in range(self.size):
                symbol = self.board.get_square(row, col)
                coords = self.get_coords_from_grid(row, col)
                if symbol == PLAYERX:
                    self.drawx((coords[0], coords[1] + 100))
                elif symbol == PLAYERO:
                    self.drawo((coords[0], coords[1] + 100))
        # Run AI, if necessary; 'wait' skips exactly one frame after a click
        if not self.wait:
            self.aimove()
        else:
            self.wait = False

    def message_display(self) -> None:
        """Displays the status message onto the screen and flips the display."""
        font = pygame.font.Font('freesansbold.ttf', 30)
        text_surf, text_rect = text_objects(self.message, font)
        text_rect.center = (100, 50)
        self.screen.blit(text_surf, text_rect)
        # update display
        pygame.display.update()

    def button_object(self,
                      text: str,
                      x: float,
                      y: float,
                      width: float,
                      height: float,
                      init_color: Tuple[int, int, int],
                      active_color: Tuple[int, int, int],
                      action: Optional[Callable] = None) -> None:
        """Draw a hoverable button and invoke *action* while it is clicked."""
        # get mouse state
        mouse = pygame.mouse.get_pos()
        click = pygame.mouse.get_pressed()
        # change button color when hovered
        if x + width > mouse[0] > x and y + height > mouse[1] > y:
            pygame.draw.rect(self.screen, active_color, (x, y, width, height))
            # check whether the button is clicked or not
            if click[0] == 1 and action is not None:
                action()
        # switch button color to original
        else:
            pygame.draw.rect(self.screen, init_color, (x, y, width, height))
        # label centered on the button rect (was hard-coded to (310, 45),
        # which is exactly this value for the one call site above)
        font = pygame.font.Font('freesansbold.ttf', 18)
        text_surf, text_rect = text_objects(text, font)
        text_rect.center = (int(x + width / 2), int(y + height / 2))
        self.screen.blit(text_surf, text_rect)
def text_objects(text: str, font: Font) -> Tuple:
    """Render *text* with *font* in black; return (surface, bounding rect)."""
    rendered = font.render(text, True, black)
    return rendered, rendered.get_rect()
def game_loop(size: int, ai_player: int,
              ai_function: Callable[[TTTBoard, int], Tuple[int, int]]) -> None:
    """Create the game window and run the frame cycle at 60 FPS.

    The human plays the opposite side of *ai_player*; each frame redraws the
    board, forwards mouse presses to the GUI and updates the display.
    """
    screen = pygame.display.set_mode((GUI_WIDTH, GUI_HEIGHT))
    clock = pygame.time.Clock()
    pygame.display.set_caption("Tic Tac Toe")
    gui = TTTGUI(size, ai_player, switch_player(ai_player), ai_function,
                 screen)
    while True:
        # window close: shut pygame down and exit the process
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()
        screen.fill(white)
        gui.draw()
        # left mouse button pressed -> attempt a human move
        if pygame.mouse.get_pressed()[0] == 1:
            gui.click()
        gui.message_display()
        pygame.display.update()
        clock.tick(60)
|
import gzip
import json
import mysql.connector
import re
import time
def current_milli_time():
    """Current Unix time, rounded to the nearest millisecond."""
    now_seconds = time.time()
    return round(now_seconds * 1000)
with open('../config-server.mjs', 'r') as f:
    j = f.read()
# The config file is an ES module: strip the export statement and the
# // comments so that the remainder parses as plain JSON.
j = j.replace('export default ', '')
j = re.sub(r"(// [^\n]+)", '', j)
config = json.loads(j)['mysql']
db = mysql.connector.connect(user=config['user'], password=config['password'],
                             host=config['host'], database=config['database'])
# Voxel-type renumbering applied to every chunk (old id -> new id).
# Keys are strings because each byte is compared via str() below.
remap = {
    "16": 5,
    "20": 5,
    "14": 1,
    "17": 1,
    "12": 11
}
c = db.cursor(dictionary=True)
c.execute('SELECT x,y,z,voxels2 FROM chunk')
for row in c.fetchall():
    expanded = bytearray(gzip.decompress(row['voxels2']))
    modified = False
    # 32 * 32 * 32 voxels per chunk
    for i in range(32768):
        voxel = str(expanded[i])
        if voxel in remap:
            expanded[i] = remap[voxel]
            modified = True
    if modified:
        d = db.cursor()
        compressed = gzip.compress(expanded)
        # NOTE(review): this reads column voxels2 but writes column voxels -
        # confirm the two-column migration scheme is intentional.
        data = (
            compressed,
            current_milli_time(),
            row['x'],
            row['y'],
            row['z']
        )
        d.execute('UPDATE chunk SET voxels=%s,updated_ms=%s WHERE x=%s AND y=%s AND z=%s', data)
        db.commit()
        d.close()
        print('Updated %d,%d,%d' % (row['x'], row['y'], row['z']))
c.close()
db.close()
|
# Emit the two demo messages in order.
for line in ("This is the goodbye file", "change for committing both files"):
    print(line)
|
# Table definition fragment for the bill <-> finance-event mapping table:
# two non-null int columns forming a composite primary key.
# NOTE(review): PDBConst is not defined in this chunk - it is presumably in
# scope wherever this literal is embedded.
{
    PDBConst.Name: "mapbillfinanceevent",
    PDBConst.Columns: [
        {
            PDBConst.Name: "Bill",
            PDBConst.Attributes: ["int", "not null"]
        },
        {
            PDBConst.Name: "Event",
            PDBConst.Attributes: ["int", "not null"]
        }],
    PDBConst.PrimaryKey: ["Bill", "Event"]
}
|
###########################
# 6.0002 Problem Set 1a: Space Cows
# Name:
# Collaborators:
# Time:
from ps1_partition import get_partitions
import time
import copy
#================================
# Part A: Transporting Space Cows
#================================
# Problem 1
def load_cows(filename):
    """
    Read the contents of the given file. Assumes the file contents contain
    data in the form of comma-separated cow name, weight pairs, and return a
    dictionary containing cow names as keys and corresponding weights as values.
    Parameters:
    filename - the name of the data file as a string
    Returns:
    a dictionary of cow name (string), weight (int) pairs
    """
    cow_weights = {}
    # 'with' guarantees the file is closed even if a line fails to parse
    # (the original leaked the handle on any exception)
    with open(filename, 'r') as f:
        for line in f:
            line = line.strip()
            if not line:
                # tolerate blank lines, e.g. a trailing newline at EOF,
                # which crashed the original unpacking
                continue
            name, weight = line.split(',')
            cow_weights[name] = int(weight)
    return cow_weights
# Problem 2
def greedy_cow_transport(cows, limit=10):
    """
    Uses a greedy heuristic to determine an allocation of cows that attempts to
    minimize the number of spaceship trips needed to transport all the cows. The
    returned allocation of cows may or may not be optimal.
    The greedy heuristic:
    1. As long as the current trip can fit another cow, add the largest cow
       that will fit to the trip
    2. Once the trip is full, begin a new trip for the remaining cows
    Does not mutate the given dictionary of cows. (The original temporarily
    deleted from and then restored the caller's dict, which corrupted it if
    an exception interrupted the loop; this version works on a copy.)
    Cows heavier than the limit are left untransported instead of spinning
    forever (the original never terminated in that case).
    Parameters:
    cows - a dictionary of name (string), weight (int) pairs
    limit - weight limit of the spaceship (an int)
    Returns:
    A list of lists, with each inner list containing the names of cows
    transported on a particular trip and the overall list containing all the
    trips
    """
    remaining = dict(cows)  # work on a copy; the caller's dict is untouched
    trips = []
    while remaining:
        # heaviest-first greedy order for this trip
        order = sorted(remaining, key=remaining.get, reverse=True)
        avail_wt = limit
        trip = []
        for name in order:
            if remaining[name] <= avail_wt:
                avail_wt -= remaining[name]
                trip.append(name)
                del remaining[name]
        if not trip:
            # every remaining cow exceeds the limit: stop instead of looping
            break
        trips.append(trip)
    return trips
# Problem 3
def brute_force_cow_transport(cows, limit=10):
    """
    Finds the allocation of cows that minimizes the number of spaceship trips
    via brute force:
    1. Enumerate all possible ways the cows can be divided into separate trips
       (using get_partitions from ps1_partition.py)
    2. Select the allocation that minimizes the number of trips without making
       any trip that violates the weight limit
    Does not mutate the given dictionary of cows.
    Parameters:
    cows - a dictionary of name (string), weight (int) pairs
    limit - weight limit of the spaceship (an int)
    Returns:
    A list of lists, with each inner list containing the names of cows
    transported on a particular trip and the overall list containing all the
    trips; None if no feasible allocation exists (e.g. a cow over the limit)
    """
    best = None
    for partition in get_partitions(cows.keys()):
        # a partition is feasible when every trip respects the weight limit
        feasible = all(
            sum(cows[name] for name in trip) <= limit
            for trip in partition
        )
        # keep the feasible partition with the fewest trips; the original
        # returned the *first* feasible partition, which is only minimal if
        # get_partitions happens to enumerate partitions smallest-first
        if feasible and (best is None or len(partition) < len(best)):
            best = partition
    return best
# Problem 4
def compare_cow_transport_algorithms():
    """
    Using the data from ps1_cow_data.txt and the specified weight limit, run
    greedy_cow_transport and brute_force_cow_transport with the default
    weight limit of 10, printing the number of trips each returns and how
    long each takes to run in seconds.
    Returns:
    Does not return anything.
    """
    cows = load_cows("ps1_cow_data_2.txt")
    started = time.time()
    greedy_trips = greedy_cow_transport(cows, 10)
    finished = time.time()
    print("Number of trips by greedy is " + str(len(greedy_trips)) + " time taken by greedy" + str(finished - started))
    started = time.time()
    brute_trips = brute_force_cow_transport(cows, 10)
    finished = time.time()
    print("Number of trips by brute force is " + str(len(brute_trips)) + " time taken by brute" + str(finished - started))
if __name__ == '__main__':
    # run the greedy vs. brute-force timing comparison as a script
    compare_cow_transport_algorithms()
    #print(brute_force_cow_transport(cows,10))
    #print(greedy_cow_transport(cows,10))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 18 00:09:41 2020
@author: damengjin
"""
# Load Model Using Pickle
import pandas as pd
import pickle
# load the dataset of user/item/rating rows:
df_new = pd.read_csv('Documents/MSBA/CS5224/PSP Project/df_new.csv', index_col=None)
# load the fitted models from disk
# SECURITY NOTE(review): pickle.load executes arbitrary code embedded in the
# file - only load model files from a trusted source.
SVDpp_val = pickle.load(open('Documents/MSBA/CS5224/PSP Project/SVDpp_model.sav', 'rb'))
user_knn = pickle.load(open('Documents/MSBA/CS5224/PSP Project/Knn_model.sav', 'rb'))
def get_similar_users(top_k, user_id):
    """
    Args:
        top_k(int): no of similar users to return
        user_id(str): target user id
    Returns:
        generator of the raw ids of the top_k users nearest to user_id
    """
    inner_uid = user_knn.trainset.to_inner_uid(user_id)
    neighbours = user_knn.get_neighbors(inner_uid, k=top_k)
    return (user_knn.trainset.to_raw_uid(nb) for nb in neighbours)
def get_top_N_recommended_items(user, top_sim_user=3, N=5):
    """Recommend up to N items for *user*.

    Predicts a rating for every (similar user, item) pair with the SVD++
    model, averages the predictions per item, and returns the N best-scored
    items that *user* has not rated yet.

    Args:
        user: raw id of the target user
        top_sim_user(int): number of nearest-neighbour users to pool
        N(int): maximum number of items to return
    Returns:
        list of item ids, best first
    """
    similar_ids = list(get_similar_users(top_sim_user, user))
    unique_items = df_new.item.unique()
    # predict a rating for every (similar user, item) pair;
    # predict(...)[3] is the estimated rating
    predictions = [
        [uid, iid, SVDpp_val.predict(iid=iid, uid=uid)[3]]
        for uid in similar_ids
        for iid in unique_items
    ]
    target_pred = pd.DataFrame(predictions, columns=['uid', 'iid', 'est'])
    predct_base = (target_pred[['iid', 'est']]
                   .groupby(['iid'], as_index=False)
                   .mean()
                   .sort_values('est', ascending=False))
    rated_item_by_user = df_new[['item', 'rating']][df_new.user == user]
    result = pd.merge(predct_base, rated_item_by_user, how='left',
                      left_on=['iid'], right_on=['item'])
    # keep items the user has not rated: after the left merge their 'rating'
    # is NaN (the original used the obscure x != x NaN trick; .isna() is the
    # explicit pandas equivalent)
    non_rated_result = result[result['rating'].isna()]
    return list(non_rated_result.iid[:N])
|
from PagSeguroLib.singleton import Singleton
from PagSeguroLib.domain.PagSeguroAccountCredentials import PagSeguroAccountCredentials
class PagSeguroConfig(Singleton):
    """Process-wide configuration holder for the PagSeguro library.

    Configuration is a class-level dict of two-level keys
    (section -> {option: value}) accessed through the classmethods below.
    """
    config = None  # lazily-created singleton instance
    data = {}      # section -> {option: value}

    @classmethod
    def init(cls, data):
        """Create the singleton on first call and install the config dict."""
        if cls.config is None:  # 'is None' instead of '== None' throughout
            cls.config = PagSeguroConfig()
        cls.data = data
        return cls.config

    @classmethod
    def getData(cls, key1, key2=None):
        """Return data[key1][key2], or data[key1] when key2 is omitted.

        Raises:
            NameError: if the requested key(s) are not present.
        """
        if key2 is not None:
            if key1 in cls.data and key2 in cls.data[key1]:
                return cls.data[key1][key2]
            raise NameError("Config keys %s, %s not found." % (key1, key2))
        if key1 in cls.data:
            return cls.data[key1]
        raise NameError("Config key %s not found." % key1)

    @classmethod
    def getAccountCredentials(cls):
        """Build account credentials from the 'credentials' section."""
        creds = cls.data.get('credentials', {})
        if 'email' in creds and 'token' in creds:
            return PagSeguroAccountCredentials(creds['email'], creds['token'])
        raise NameError("Credentials not set.")

    @classmethod
    def setData(cls, key1, key2, value):
        """Overwrite an existing option; refuses to create new keys."""
        if key1 in cls.data and key2 in cls.data[key1]:
            cls.data[key1][key2] = value
        else:
            raise NameError("Config keys %s, %s not found." % (key1, key2))

    @classmethod
    def getEnvironment(cls):
        """Return the configured environment name.

        Made consistent with the other getters: a missing 'environment'
        section now raises NameError instead of leaking a KeyError.
        """
        if cls.data.get('environment') and cls.data['environment'].get('environment'):
            return cls.data['environment']['environment']
        raise NameError("Environment not set")

    @classmethod
    def getApplicationCharset(cls):
        """Return the configured application charset (consistent NameError)."""
        if cls.data.get('application') and cls.data['application'].get('charset'):
            return cls.data['application']['charset']
        raise NameError("Application charset not set")

    @classmethod
    def setApplicationCharset(cls, charset):
        """Shorthand for setData('application', 'charset', ...)."""
        cls.setData('application', 'charset', charset)

    @classmethod
    def logIsActive(cls):
        """True only when data['log']['active'] is exactly True."""
        return ('log' in cls.data and 'active' in cls.data['log']
                and cls.data['log']['active'] == True)

    @classmethod
    def activeLog(cls, fileName=None):
        """Enable logging; an empty fileLocation means the library default."""
        cls.setData('log', 'active', True)
        cls.setData('log', 'fileLocation', fileName if fileName else '')
        # TODO
        # LOG RELOAD

    @classmethod
    def getLogFileLocation(cls):
        """Return the configured log file location."""
        if 'log' in cls.data and 'fileLocation' in cls.data['log']:
            return cls.data['log']['fileLocation']
        raise NameError("Log file location not set.")
|
from orun.test import TestCase
from orun.apps import apps
from orun.db import connection
class FixturesTest(TestCase):
    """Smoke-test that the listed fixtures load cleanly."""
    # Fixture specification consumed by orun's TestCase: a sequence of
    # (schema_name, (fixture file, ...)) pairs.
    # NOTE(review): 'admin_fixtures' lists no files - presumably loading the
    # schema itself is the point; confirm against orun's fixture loader.
    fixtures = (
        (
            'admin_fixtures', (
            )
        ),
        (
            'admin', (
                'templates.xml',
            ),
        ),
    )
|
#!/usr/bin/python
import time, sys, os
import numpy as np
import scipy.io as sio
import NeuralNetwork
from batchCD1 import batchCD1
## We want a Restricted Boltzmann Machine (RBM) which is a type of
## neural network. Specifically, we want the 4-layer model described
## in (Hinton, Osindero, Teh, 2006).
nn = NeuralNetwork.LogisticHinton2006()
nn.initRBM()
# Load the MNIST training data:
trainData = sio.loadmat('../datasets/MNIST/trainImagesAndTargets.mat', struct_as_record=True)
trainImages = trainData['images']
trainTargets = trainData['targets']
# sanity check: one target row per training image
assert trainImages.shape[0] == trainTargets.shape[0]
## We use a "batched" version of 1-step Constrastive Divergence (CD) to
## pre-train the first 3 layers (0,1,2) of our neural network.
# Each batchCD1 call trains one layer and returns that layer's output, which
# becomes the input to the next layer's training below.
layer0out = batchCD1(nn, 0, trainImages, maxepoch=5)
### To save here:
#nn.save('...filename...')
### To restart here:
#nn.load('...filename...')
#layer0out = nn.up0(trainImages)
layer1out = batchCD1(nn, 1, layer0out, maxepoch=5)
### To save here:
#nn.save('...filename...')
### To restart here:
#nn.load('...filename...')
#layer0out = nn.up0(trainImages)
#layer1out = nn.up1(layer0out)
layer2out = batchCD1(nn, 2, layer1out, maxepoch=5)
### Give layer3 some random biases:
# visible size = layer-2 hidden unit count, hidden size = target dimension;
# small (0.1 * N(0,1)) random initialization
l3numVis = nn.W[2].shape[1]
l3numHid = trainTargets.shape[1]
nn.vB[3] = 0.1*np.random.randn(1, l3numVis)
nn.hB[3] = 0.1*np.random.randn(1, l3numHid)
# Save the pre-trained neural network:
nn.save('nnData/NN_afterPreTrain.mat')
|
from flask import Flask
import os

app = Flask(__name__)


@app.route("/")
def hello():
    """Root endpoint: log the hit to stdout and greet."""
    print ("==== root ====")
    return "Hello World!"


if __name__ == "__main__":
    HOST = os.environ.get('SERVER_HOST', 'localhost')
    try:
        PORT = int(os.environ.get('SERVER_PORT', '5555'))
    except ValueError:
        # a non-numeric SERVER_PORT falls back to the default
        PORT = 5555
    # 'adhoc' generates a throwaway self-signed TLS certificate at startup
    app.run(HOST, PORT, ssl_context='adhoc')
    # Removed the stray trailing app.run(): app.run() blocks, so the second
    # call was unreachable in normal use and, at module level, would have
    # started an unconfigured server on import.
from onegov.core.security import Private
from onegov.org import OrgApp, _
from onegov.org.forms import ResourceRecipientForm
from onegov.org.layout import ResourceRecipientsLayout
from onegov.org.layout import ResourceRecipientsFormLayout
from onegov.org.models import ResourceRecipient, ResourceRecipientCollection
from onegov.reservation import Resource, ResourceCollection
from sqlalchemy.orm import undefer
from onegov.org.elements import DeleteLink, Link
@OrgApp.html(
    model=ResourceRecipientCollection,
    template='resource_recipients.pt',
    permission=Private)
def view_resource_recipients(self, request, layout=None):
    """List all resource recipients with per-recipient edit/delete links."""
    layout = layout or ResourceRecipientsLayout(self, request)

    def recipient_links(recipient):
        # links rendered next to each recipient row in the template
        yield Link(
            text=_("Edit"),
            url=request.link(recipient, 'edit')
        )
        yield DeleteLink(
            text=_("Delete"),
            url=layout.csrf_protected_url(request.link(recipient)),
            confirm=_('Do you really want to delete "${name}"?', mapping={
                'name': recipient.name
            }),
            target='#{}'.format(recipient.id.hex),
            yes_button_text=_("Delete Recipient")
        )

    q = ResourceCollection(request.app.libres_context).query()
    q = q.order_by(Resource.group, Resource.name)
    q = q.with_entities(Resource.group, Resource.title, Resource.id)

    default_group = request.translate(_("General"))

    # resource id -> "group - title" label for display (dict comprehension
    # instead of the dict((k, v) for ...) construction)
    resources = {
        r.id.hex: "{group} - {title}".format(
            group=r.group or default_group,
            title=r.title
        )
        for r in q
    }

    return {
        'layout': layout,
        'title': _("Recipients"),
        'resources': resources,
        'recipients': self.query().options(undefer(ResourceRecipient.content)),
        'recipient_links': recipient_links
    }
@OrgApp.form(
    model=ResourceRecipientCollection,
    name='new-recipient',
    template='form.pt',
    permission=Private,
    form=ResourceRecipientForm)
def handle_new_resource_recipient(self, request, form, layout=None):
    """Show the new-recipient form and create an e-mail recipient on submit."""
    if form.submitted(request):
        # collect the submitted fields, then create the recipient
        fields = dict(
            name=form.name.data,
            medium='email',
            address=form.address.data,
            daily_reservations=form.daily_reservations.data,
            new_reservations=form.new_reservations.data,
            internal_notes=form.internal_notes.data,
            send_on=form.send_on.data,
            resources=form.resources.data,
        )
        self.add(**fields)
        request.success(_("Added a new recipient"))
        return request.redirect(request.link(self))

    title = _("New Recipient")
    if layout:
        layout.title = title

    return {
        'title': title,
        'layout': layout or ResourceRecipientsFormLayout(self, request, title),
        'form': form
    }
@OrgApp.form(
    model=ResourceRecipient,
    name='edit',
    template='form.pt',
    permission=Private,
    form=ResourceRecipientForm)
def handle_edit_resource_recipient(self, request, form, layout=None):
    """Show the edit form for a recipient and persist changes on submit."""
    if form.submitted(request):
        form.populate_obj(self)
        request.success(_("Your changes were saved"))
        return request.redirect(
            request.class_link(ResourceRecipientCollection)
        )
    elif not request.POST:
        # initial GET: pre-fill the form from the existing recipient
        form.process(obj=self)

    title = _("Edit Recipient")
    # keep a caller-provided layout's title in sync, matching the behaviour
    # of handle_new_resource_recipient (this was missing here)
    if layout:
        layout.title = title

    return {
        'title': title,
        'layout': layout or ResourceRecipientsFormLayout(self, request, title),
        'form': form
    }
@OrgApp.view(
    model=ResourceRecipient,
    permission=Private,
    request_method='DELETE')
def delete_notification(self, request):
    """Delete a recipient; the delete link is CSRF-protected (see listing)."""
    request.assert_valid_csrf_token()
    ResourceRecipientCollection(request.session).delete(self)

    @request.after
    def remove_target(response):
        # X-IC-Remove: response header telling the client to remove the
        # element targeted by the request (Intercooler.js convention -
        # matches the '#<id>' target set on the delete link)
        response.headers.add('X-IC-Remove', 'true')
|
import re
import os
def _parse_kv(lines):
    """Collect 'key = value' pairs from *lines* into a dict (both stripped).

    Uses split('=', 1) so values that themselves contain '=' survive; the
    original split('=') raised ValueError on such lines.
    """
    parsed = {}
    for line in lines:
        if '=' in line:
            key, value = line.split('=', 1)
            parsed[key.strip()] = value.strip()
    return parsed


def main():
    """Parse the racadm log named by the global *file_name* and print a
    summary of device, iDRAC and BIOS settings.

    The log is split into blocks on blank lines; the first line of each block
    holds the racadm command whose output follows and is used to route the
    block to the right section of the report. Missing expected keys raise
    KeyError, as in the original.
    """
    # report skeleton: section -> setting -> value (None until found)
    data = {
        'Device Settings': {
            'RAID': None,
            'Volume Size': None,
            'NIC Virtualization Mode': {}
        },
        'iDRAC Settings': {
            'Enable IPv4': None,
            'Enable DHCP': None,
            'Static IP Address': None,
            'Static Gateway': None,
            'Static Subnet Mask': None,
            'Enable IPMI Over LAN': None,
            'Enable VLAN ID': None,
            'VLAN ID': None,
            'Enable Hot Spare': None
        },
        'System BIOS': {
            'System Memory Size': None,
            'Logical Processor': None,
            'Virtualization Technology': None,
            'CPU Brand': {},
            'CPU Number Of Cores': {},
            'Boot Mode': None,
            'SR-IOV Global Enable': None,
            'OS Watchdog Timer': None,
            'System Profile': None,
            'System Time': None
        }
    }

    with open(file_name, encoding='utf8') as log_fh:
        results = [chunk.split('\n') for chunk in log_fh.read().split('\n\n')]

    for result in results:
        # RAID layout and volume size
        if 'racadm storage get vdisks -o' in result[0]:
            kv = _parse_kv(result)
            data['Device Settings']['RAID'] = kv['Layout']
            data['Device Settings']['Volume Size'] = kv['Size']
        # NIC virtualization (SR-IOV) mode, keyed by the slot location
        if 'Key=NIC.Slot' in result[0]:
            location = None
            for line in result:
                if 'Key' in line:
                    location = re.search(r'NIC.Slot.\d+-\d+-\d+', line).group(0)
            kv = _parse_kv(result)
            data['Device Settings']['NIC Virtualization Mode'][location] = kv['VirtualizationMode']
        # IPv4 configuration
        if 'racadm get iDRAC.IPv4' in result[0]:
            kv = _parse_kv(result)
            data['iDRAC Settings']['Enable IPv4'] = kv['Enable']
            data['iDRAC Settings']['Enable DHCP'] = kv['DHCPEnable']
            data['iDRAC Settings']['Static IP Address'] = kv['Address']
            data['iDRAC Settings']['Static Gateway'] = kv['Gateway']
            data['iDRAC Settings']['Static Subnet Mask'] = kv['Netmask']
        # IPMI over LAN
        if 'racadm get idrac.IPMILan.Enable' in result[0]:
            kv = _parse_kv(result)
            data['iDRAC Settings']['Enable IPMI Over LAN'] = kv['Enable']
        # VLAN enabled flag
        if 'racadm get idrac.NIC.VLanEnable' in result[0]:
            kv = _parse_kv(result)
            data['iDRAC Settings']['Enable VLAN ID'] = kv['VLanEnable']
        # VLAN ID
        if 'racadm get iDRAC.NIC.VLanID' in result[0]:
            kv = _parse_kv(result)
            data['iDRAC Settings']['VLAN ID'] = kv['VLanID']
        # PSU hot spare
        if 'racadm get System.ServerPwr' in result[0]:
            kv = _parse_kv(result)
            data['iDRAC Settings']['Enable Hot Spare'] = kv['PSRapidOn']
        # memory size
        if 'racadm get bios.MemSettings' in result[0]:
            kv = _parse_kv(result)
            data['System BIOS']['System Memory Size'] = kv['#SysMemSize']
        # CPU settings (assumes a two-socket system: #Proc1*/#Proc2* keys)
        if 'racadm get bios.ProcSettings' in result[0]:
            kv = _parse_kv(result)
            data['System BIOS']['Logical Processor'] = kv['LogicalProc']
            data['System BIOS']['Virtualization Technology'] = kv['ProcVirtualization']
            data['System BIOS']['CPU Brand']['CPU#1'] = kv['#Proc1Brand']
            data['System BIOS']['CPU Brand']['CPU#2'] = kv['#Proc2Brand']
            data['System BIOS']['CPU Number Of Cores']['CPU#1'] = kv['#Proc1NumCores']
            data['System BIOS']['CPU Number Of Cores']['CPU#2'] = kv['#Proc2NumCores']
        # boot mode
        if 'racadm get bios.BiosBootSettings.BootMode' in result[0]:
            kv = _parse_kv(result)
            data['System BIOS']['Boot Mode'] = kv['BootMode']
        # SR-IOV global enable
        if 'racadm get bios.IntegratedDevices.SriovGlobalEnable' in result[0]:
            kv = _parse_kv(result)
            data['System BIOS']['SR-IOV Global Enable'] = kv['SriovGlobalEnable']
        # OS watchdog timer
        if 'racadm get bios.IntegratedDevices.OsWatchdogTimer' in result[0]:
            kv = _parse_kv(result)
            data['System BIOS']['OS Watchdog Timer'] = kv['OsWatchdogTimer']
        # system profile
        if 'bios.SysProfileSettings.SysProfile' in result[0]:
            kv = _parse_kv(result)
            data['System BIOS']['System Profile'] = kv['SysProfile']
        # system time: second line of the getractime output
        if 'racadm getractime' in result[0]:
            data['System BIOS']['System Time'] = result[1]

    # print the report as a nested bullet list
    for title, items in data.items():
        print('- {}'.format(title))
        for key, value in items.items():
            if isinstance(value, dict):
                print(' - {}:'.format(key))
                for inner_key, inner_value in value.items():
                    print(' - {}: {}'.format(inner_key, inner_value))
            else:
                print(' - {}: {}'.format(key, value))
        print()
if __name__ == '__main__':
    # prompt until an existing path is supplied, then run the report once;
    # main() reads the module-global file_name set here
    while True:
        file_name = input('Enter full file path: ')
        if not os.path.exists(file_name):
            print('[ERROR] {} is not a valid path!'.format(file_name))
        else:
            main()
            break
    # keep the console window open (e.g. when double-clicked on Windows)
    input()
|
#
# Script to download a sample of DR10
#
import fitsio
import numpy as np
import seaborn as sns
from redshift_utils import load_sdss_fluxes_clean_split
import urllib, os, sys
def download_spec_file(plate, mjd, fiberid, redownload=False):
    """ grabs the spec file given plate, mjd and fiber id """
    # NOTE(review): the template ends with '\n', which is why every use below
    # calls .strip(); dropping the newline from the template would simplify
    # this. (This module is Python 2: print statements, urllib.urlretrieve.)
    spec_url_template = "http://data.sdss3.org/sas/dr12/boss/spectro/redux/v5_7_0/spectra/%04d/spec-%04d-%05d-%04d.fits\n"
    spec_url = spec_url_template%(plate, plate, mjd, fiberid)
    # check if ../../data/DR10QSO/spec/<FNAME> exists! if so, skip it!
    bname = os.path.basename(spec_url.strip())
    fpath = "../../data/DR10QSO/specs/%s"%bname
    if not redownload and os.path.exists(fpath):
        print " already there, skipping", bname
        return fpath
    # otherwise, download it
    def dlProgress(count, blockSize, totalSize):
        # urlretrieve reporthook: redraw a percentage progress line in place
        percent = int(count*blockSize*100/totalSize)
        sys.stdout.write("\r " + bname + "...%d%%" % (percent))
        sys.stdout.flush()
    urllib.urlretrieve(spec_url.strip(), fpath, reporthook=dlProgress)
    print ""
    return fpath
if __name__=="__main__":
    ## scrape values corresponding to sampled quasars
    # NOTE(review): `glob`, `to_inspect` and `load_redshift_samples` are not
    # defined or imported anywhere in this file — this block cannot run as-is.
    #qso_sample_files = glob('cache_remote/photo_experiment0/redshift_samples*chain_0.npy')
    qso_sample_files = glob('cache_remote/temper_experiment/redshift_samples*.npy')
    qso_ids = []
    for i in to_inspect:
        _, _, _, qso_info, _ = load_redshift_samples(qso_sample_files[i])
        qso_ids.append([qso_info[b] for b in ['PLATE', 'MJD', 'FIBERID']])
    ## load up the DR10QSO file
    #dr10qso = fitsio.FITS('../../data/DR10QSO/DR10Q_v2.fits')
    #qso_df = dr10qso[1].read()
    #remove those with zwarning nonzero
    #qso_df = qso_df[ qso_df['ZWARNING']==0 ]
    #randomly select 100 quasars
    #Nquasar = len(qso_df)
    #np.random.seed(42)
    #perm = np.random.permutation(Nquasar)
    #idx = perm[0:1000]
    # get their PLATE-MJD-FIBER, and assemble filenames
    #qso_ids = qso_df[['PLATE', 'MJD', 'FIBERID']][idx]
    # Trailing \n doubles as the line terminator for the wget list below.
    spec_url_template = "http://data.sdss3.org/sas/dr12/boss/spectro/redux/v5_7_0/spectra/%04d/spec-%04d-%05d-%04d.fits\n"
    qso_lines = [spec_url_template%(qid[0], qid[0], qid[1], qid[2]) for qid in qso_ids]
    # write to little file for wget...
    f = open('qso_list.csv', 'w')
    f.writelines(qso_lines)
    f.close()
    # zip through and download spec files
    for i, qso_url in enumerate(qso_lines):
        # check if ../../data/DR10QSO/spec/<FNAME> exists! if so, skip it!
        bname = os.path.basename(qso_url.strip())
        fpath = "../../data/DR10QSO/specs/%s"%bname
        if os.path.exists(fpath):
            print " already there, skipping", bname
            continue
        # otherwise, download it, with an in-place progress line on stdout
        def dlProgress(count, blockSize, totalSize):
            percent = int(count*blockSize*100/totalSize)
            sys.stdout.write("\r " + bname + "...%d%% (%d of %d)" % (percent, i, len(qso_lines)) )
            sys.stdout.flush()
        urllib.urlretrieve(qso_url.strip(), fpath, reporthook=dlProgress)
        print ""
##data.sdss3.org/sas/dr10/boss/spectro/redux/v5_5_12/spectra/
#
##spec_file = "/Users/acm/Downloads/spec-3586-55181-0003.fits" # DR10 Spec
#spec_file = "/Users/acm/Downloads/spec-0685-52203-0467.fits" # DR7 spec
#dfh = fitsio.read_header(spec_file)
#df = fitsio.read(spec_file)
#dfits = fitsio.FITS(spec_file)
#
## get the coadd (what's coadd?) spectra and it's sample locations
#spec = dfits[1]['flux'].read()
#spec_ivar = dfits[1]['ivar'].read()
#spec_model = dfits[1]['model'].read()
#lam = 10 ** dfits[1]['loglam'].read()
#
## get the red-shift
#z = dfits[2]['Z'].read()[0]
#
################################################################################
#### Spectro Flux Information
##SPECTROFLUX float32[5] Spectrum projected onto ugriz filters (nanomaggies)
##SPECTROFLUX_IVAR float32[5] Inverse variance of spectrum projected onto ugriz filters (nanomaggies)
##SPECTROSYNFLUX float32[5] Best-fit template spectrum projected onto ugriz filters (nanomaggies)
##SPECTROSYNFLUX_IVAR float32[5] Inverse variance of best-fit template spectrum projected onto ugriz filters (nanomaggies)
##SPECTROSKYFLUX float32[5] Sky flux in each of the ugriz imaging filters (nanomaggies)
################################################################################
#sp_flux = dfits[2]['SPECTROFLUX'].read()
#sp_flux_ivar = dfits[2]['SPECTROFLUX_IVAR'].read()
#sp_mod_flux = dfits[2]['SPECTROSKYFLUX'].read()
#sp_skyflux = dfits[2]['SPECTROSKYFLUX'].read()
#psf_flux = dfits[2]['PSFFLUX'].read()
#
## get the
#qso_nanomaggies = 10 ** ((qso_mags - 22.5)/-2.5)
#
#psf_mags = dfits[2]['PSFMAG'].read()
#
#10**((psf_mags - 22.5) / -2.5)
#
#
#plt.plot(lam, spec)
#plt.plot(lam, spec_model)
#plt.show()
#
|
# Declarative UI layout: maps UI ids ("uid*") to typed widget descriptors.
# Strings ("sid*") are string-resource ids, "cid*" are CSS class ids, and
# "aid*" are API ids resolved elsewhere by the W3 framework.
{
    # Root layout: header / main / footer, plus the default page routing.
    W3Const.w3UIBody: {
        W3Const.w3PropType: W3Const.w3TypePanel,
        W3Const.w3PropSubUI: [
            "uidHeader",
            "uidMain",
            "uidFooter"
        ],
        W3Const.w3PropDefaultPage: "uidPageLogin",
        W3Const.w3PropDefaultErrorPage: "uidPageError",
        W3Const.w3PropDefaultAuthenticationPage: "uidPageLogin"
    },
    "uidHeader": {
        W3Const.w3PropType: W3Const.w3TypePanel,
        W3Const.w3PropSubUI: [
            "uidTitle",
            "uidLine"
        ],
        W3Const.w3PropCSS: {
            "text-align": "center",
        }
    },
    "uidTitle": {
        W3Const.w3PropType: W3Const.w3TypeHeadline,
        W3Const.w3PropAttr: {
            W3Const.w3AttrHeadlineLevel : "1"
        },
        W3Const.w3PropString: "sidTitle"
    },
    "uidFooter": {
        W3Const.w3PropType: W3Const.w3TypePanel,
        W3Const.w3PropSubUI: [
            "uidLine",
            "uidCopyright"
        ],
        W3Const.w3PropCSS: {
            "text-align": "center",
            "clear": "both",
            "padding-top": "5px",
        }
    },
    "uidCopyright": {
        W3Const.w3PropType: W3Const.w3TypeParagraph,
        W3Const.w3PropString: "sidCopyright"
    },
    "uidMain": {
        W3Const.w3PropType: W3Const.w3TypePanel,
        W3Const.w3PropSubUI: [
            "uidNavigation",
            "uidPage"
        ]
    },
    "uidNavigation": {
        W3Const.w3PropType: W3Const.w3TypePanel,
        W3Const.w3PropSubUI: [
            "uidNaviFinance",
            "uidLineBreak",
            "uidNaviNote",
            "uidLineBreak",
            "uidNaviCalendar",
            "uidLineBreak",
            "uidNaviJourney",
            "uidLineBreak"
        ],
        W3Const.w3PropClass: "cidLeftBorder",
        W3Const.w3PropCSS: {
            "padding-right": "15px"
        }
    },
    "uidPage": {
        W3Const.w3PropType: W3Const.w3TypePage,
        W3Const.w3PropClass: "cidLeftBorder",
        W3Const.w3PropCSS: {
            "padding-left": "5px",
            "padding-right": "5px"
        }
    },
    # Navigation
    # Each navigation entry is a link whose click event calls the "aidPage"
    # API with the target page's uid as its single string parameter.
    "uidNaviFinance": {
        W3Const.w3PropType: W3Const.w3TypeLink,
        W3Const.w3PropString: "sidNaviFinance",
        W3Const.w3PropTriggerApi: [
            {
                W3Const.w3ApiID: "aidPage",
                W3Const.w3ApiCall: W3Const.w3ApiDirect,
                W3Const.w3ApiParams: [
                    {
                        W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
                        W3Const.w3ApiDataValue: "uidPageFinance"
                    }]
            }],
        W3Const.w3PropEvent: {
            W3Const.w3EventClick: [
                W3Const.w3PlaceHolder_1
            ]
        }
    },
    "uidNaviNote": {
        W3Const.w3PropType: W3Const.w3TypeLink,
        W3Const.w3PropString: "sidNaviNote",
        W3Const.w3PropTriggerApi: [
            {
                W3Const.w3ApiID: "aidPage",
                W3Const.w3ApiCall: W3Const.w3ApiDirect,
                W3Const.w3ApiParams: [
                    {
                        W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
                        W3Const.w3ApiDataValue: "uidPageNote"
                    }]
            }],
        W3Const.w3PropEvent: {
            W3Const.w3EventClick: [
                W3Const.w3PlaceHolder_1
            ]
        }
    },
    "uidNaviCalendar": {
        W3Const.w3PropType: W3Const.w3TypeLink,
        W3Const.w3PropString: "sidNaviCalendar",
        W3Const.w3PropTriggerApi: [
            {
                W3Const.w3ApiID: "aidPage",
                W3Const.w3ApiCall: W3Const.w3ApiDirect,
                W3Const.w3ApiParams: [
                    {
                        W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
                        W3Const.w3ApiDataValue: "uidPageCalendar"
                    }]
            }],
        W3Const.w3PropEvent: {
            W3Const.w3EventClick: [
                W3Const.w3PlaceHolder_1
            ]
        }
    },
    "uidNaviJourney": {
        W3Const.w3PropType: W3Const.w3TypeLink,
        W3Const.w3PropString: "sidNaviJourney",
        W3Const.w3PropTriggerApi: [
            {
                W3Const.w3ApiID: "aidPage",
                W3Const.w3ApiCall: W3Const.w3ApiDirect,
                W3Const.w3ApiParams: [
                    {
                        W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
                        W3Const.w3ApiDataValue: "uidPageJourney"
                    }]
            }],
        W3Const.w3PropEvent: {
            W3Const.w3EventClick: [
                W3Const.w3PlaceHolder_1
            ]
        }
    },
    # Error Page
    "uidPageError": {
        W3Const.w3PropType: W3Const.w3TypePanel,
        W3Const.w3PropSubUI: [
            "uidErrorContent",
            "uidLineBreak",
            "uidButtonBack"
        ],
        W3Const.w3PropCSS: {
            "text-align": "center",
        }
    },
    "uidErrorContent": {
        W3Const.w3PropType: W3Const.w3TypeParagraph,
        W3Const.w3PropString: "sidErrorContent"
    }
}
|
#!/usr/bin/python
import sublime
import sublime_plugin
def openNewDocAndFill(context):
    """Placeholder — not implemented.

    NOTE(review): `context` is unused; presumably meant to open a new view
    and populate it with the extracted text — confirm intended behavior.
    """
    pass
def getLineText(view):
    """Concatenate the full-line text under every selection in *view*."""
    return "".join(view.substr(view.line(sel)) for sel in view.sel())
def getLineRegion(view):
    """Return the full-line region of the first selection (None when empty)."""
    for sel in view.sel():
        return view.line(sel)
def getMoreLinesFromDoc(view, line_cnt):
    """Extend each selection's line region downward by up to line_cnt lines
    and return the text of the extended region.

    NOTE(review): when there are multiple selections, only the region of the
    *last* selection is returned (the loop mutates `line` each pass).
    """
    sels = view.sel()
    # Scratch region reused for every selection.
    tmpLine = sublime.Region(0,0)
    for a in sels:
        line = view.line(a)
        tmpLine.a = line.a
        tmpLine.b = line.b
        #print("old:%u %u\n"%(line.a, line.b))
        for i in range(line_cnt):
            # Step one char past the region end, then re-snap to a full line.
            tmpLine.b = tmpLine.b + 1
            tmpLine = view.line(tmpLine)
            if line.b == tmpLine.b:
                # No growth — presumably already at the last line; stop early.
                break
        line.a = tmpLine.a
        line.b = tmpLine.b
        #print("new:%u %u\n"%(line.a, line.b))
        #print("%s"%(view.substr(line)))
    return view.substr(line)
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
"""
Functions for enriching timeseries data.
Includes transformations which operate in the time domain, and ones which
operate in the frequency domain. Organization of this module and basic set
of functionality are inspired by Wen et al.'s review, "Timeseries data
Augmentation for Deep Learning" (arXiv:2002.12478_).
Importable functions include:
Time domain
-----------
* add_slope_trend
* add_spike
* add_step_trend
* add_warp
* crop_and_stretch
* flip
* reverse
Frequency domain
----------------
* add_discrete_phase_shifts
* add_high_frequency_noise
* add_random_frequency_noise
* remove_random_frequency
.. _arXiv:2002.12478 : https://arxiv.org/abs/2002.12478
"""
from .freq import (add_discrete_phase_shifts, add_high_frequency_noise, add_random_frequency_noise, remove_random_frequency)
from .time import (add_slope_trend, add_spike, add_step_trend, add_warp, crop_and_stretch, flip, reverse) |
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 18 20:24:23 2018
@author: shams
"""
# -*- coding: utf-8 -*-
"""
this file is for local evaluation of the models trained on HPC
"""
# importing libraries
import pandas as pd
import numpy as np
import numpy as np
import pandas as pd
from keras.preprocessing import sequence
from keras.models import load_model
from keras.layers import Dense, Input, LSTM
from keras.models import Model
import h5py
import matplotlib.pyplot as plt
from sklearn.preprocessing import RobustScaler
# Model/run identifiers used to locate the trained model file on disk.
Info = 'B6'
loss_func = 'mse'
#~~~~~~~~~~~~~~~~ loading data and model ~~~~~~~~~~~~~~
df_data_1 = pd.read_json('C:/Users/shams/OneDrive/Documents/Projects/Insight/datasets/adminData.json')
df_data_1 = df_data_1.drop('usr_tag',axis =1)
regressor = load_model(filepath='C:/Users/shams/OneDrive/Documents/Projects/Insight/models/graham/'+ Info +'_'+loss_func+'_trained.h5')
#~~~~~~~~~~~~~~~ create test set ~~~~~~~~~~~~~~~~~~~~~~~
# Stateful-LSTM hyperparameters; set lengths below are aligned to batch_size.
batch_size = 20
epochs = 100
timesteps = 2
def get_train_length(dataset, batch_size, test_percent):
    """Return the largest batch-aligned training length.

    Scans the 100 values just below ``len(dataset) * (1 - test_percent)``
    (the rows reserved for testing are excluded) and returns the largest one
    divisible by ``batch_size`` — stateful LSTM training requires the sample
    count to be an exact multiple of the batch size.

    Raises:
        ValueError: if no multiple of ``batch_size`` falls in that window.
    """
    # substract test_percent to be excluded from training, reserved for testset
    length = int(len(dataset) * (1 - test_percent))
    return max(x for x in range(length - 100, length) if x % batch_size == 0)
# Slice the head of the frame for training; the extra timesteps*2 rows give
# the sliding-window loop room for its look-back/look-ahead offsets.
length = get_train_length(df_data_1, batch_size, 0.2)
upper_train = length + timesteps*2
df_data_1_train = df_data_1[0:upper_train]
# NaNs are zero-filled; 'user_ts' is the regression target column.
training_set_y = np.nan_to_num(df_data_1_train['user_ts'].values)
training_set = np.nan_to_num(df_data_1_train.loc[:,:].values)
def get_test_length(dataset, batch_size):
    """Largest test-set end index whose offset from ``upper_train`` is a
    multiple of ``batch_size`` (scans the last ~200 rows; prints candidates).

    Relies on the module-level globals ``timesteps`` and ``upper_train``.
    """
    candidates = []
    start = len(dataset) - 200
    stop = len(dataset) - timesteps * 2
    for candidate in range(start, stop):
        if (candidate - upper_train) % batch_size == 0:
            candidates.append(candidate)
            print (candidate)
    return max(candidates)
# Batch-aligned test window bounds.
test_length = get_test_length(df_data_1, batch_size)
print(test_length)
upper_test = test_length + timesteps*2
testset_length = test_length - upper_train
print (testset_length)
print (upper_train, upper_test, len(df_data_1))
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~construct test set
#subsetting
df_data_1_test = df_data_1[upper_train:upper_test]
test_set_y = np.nan_to_num(df_data_1_test['user_ts'].values)
test_set = np.nan_to_num(df_data_1_test.loc[:,:].values)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ scaling
sc = RobustScaler(with_centering=True, with_scaling=True, quantile_range=(25.0, 75.0))
scaled_test_values = sc.fit_transform(np.float64(test_set))
# NOTE(review): the np.sign(...) result on the next line is immediately
# overwritten by the fit_transform call that follows — dead assignment?
scaled_test_values_y = np.sign(test_set_y.reshape(-1,1))
scaled_test_values_y = sc.fit_transform(np.float64(test_set_y.reshape(-1,1)))
#scaled_test_values = np.tanh(np.float64(test_set))
#scaled_test_values_y = np.tanh(np.float64(test_set_y.reshape(-1,1)))
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~creating input data
# Sliding windows: each sample is `timesteps` rows of features with the
# matching `timesteps` target values.
x_test = []
y_test = []
for i in range(timesteps, testset_length + timesteps):
    x_test.append(scaled_test_values[i-timesteps:i, :])
    y_test.append(scaled_test_values_y[i:timesteps+i])# this is for the last timestep (7th)
    #y_test.append(scaled_test_values_y[i:1+i])
x_test = np.array(x_test)
y_test = np.array(y_test)
#reshaping
x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], x_test.shape[2]))
y_test = np.reshape(y_test, (y_test.shape[0], y_test.shape[1], 1))
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#predicted_values = regressor.predict(x_test, batch_size=batch_size)
# Predictions were precomputed on the HPC cluster and are loaded from disk.
predicted_values = np.load('C:/Users/shams/OneDrive/Documents/Projects/Insight/models/graham/predvals.npy')
#regressor_mse.reset_states()
predicted_values = np.reshape(predicted_values,
                              (predicted_values.shape[0],
                               predicted_values.shape[1]))
predicted_values = sc.inverse_transform(predicted_values)
pred_mse = []
for j in range(0, testset_length - timesteps):
    pred_mse = np.append(pred_mse, predicted_values[j, timesteps-1]) # this is for target = last time step
    #pred_mse = np.append(pred_mse, predicted_values[j, 0])
pred_mse = np.reshape(pred_mse, (pred_mse.shape[0], 1))
# ~~~~~~~~~~~~~~~~~~~~~~~~~Visualising the results, plotting the ROC, calculating rmse
plt.figure()
#plt.plot(np.sign(scaled_test_values[timesteps:len(pred_mse),0]), color = 'red', label = 'Real')
plt.plot(y_test[timesteps:len(pred_mse),0], color = 'red', label = 'Real')
#plt.figure()
plt.plot(pred_mse[0:len(pred_mse) - timesteps], color = 'green', label = 'Predicted')
#plt.plot(predicted_values[0:len(predicted_values) - timesteps], color = 'green', label = 'Predicted')
plt.xlabel('Time')
plt.ylabel('posts per day')
plt.legend()
plt.show()
# NOTE(review): roc_curve and auc are used below but never imported — this
# line raises NameError; add `from sklearn.metrics import roc_curve, auc`.
fpr, tpr, threshs = roc_curve(y_test[timesteps:len(pred_mse),0],pred_mse[0:len(pred_mse) - timesteps])
roc_auc = auc(fpr, tpr)
from scipy import stats
pred_mse = stats.zscore(pred_mse)
y_test = np.reshape(y_test, (y_test.shape[0], y_test.shape[1]))
from sklearn.metrics import mean_squared_error
# NOTE(review): `math` is used here but never imported — add `import math`.
rmse = math.sqrt(mean_squared_error(y_test[timesteps:len(pred_mse)], pred_mse[0:len(pred_mse) - timesteps]))
print(rmse)
mean = np.mean(np.float64(y_test[timesteps:len(pred_mse)]))
print (mean)
# Relative error in percent (expression value is discarded in script mode).
rmse/mean * 100
import math
import torch
import torch.nn as nn
class PositionEncoding(nn.Module):
    """Additive sinusoidal position encoding (as in "Attention Is All You Need").

    Precomputes a (max_len, n_filters) table with sine values on even
    channels and cosine values on odd channels, and adds its first
    ``seq_len`` rows to the input on the forward pass.
    """
    def __init__(self, n_filters=128, max_len=500):
        super(PositionEncoding, self).__init__()
        pe = torch.zeros(max_len, n_filters)
        position = torch.arange(0, max_len).float().unsqueeze(1)
        # Geometric frequency ladder: wavelengths from 2*pi to 10000*2*pi.
        div_term = torch.exp(torch.arange(0, n_filters, 2).float() * - (math.log(10000.0) / n_filters))
        pe[:, 0::2] = torch.sin(position * div_term)  # even channels: sine
        pe[:, 1::2] = torch.cos(position * div_term)  # odd channels: cosine
        # Buffer (not a Parameter): follows .to()/.cuda() but is not trained.
        self.register_buffer('pe', pe)
    def forward(self, x):
        """Add position encodings to ``x`` of shape (..., seq_len, n_filters)."""
        # (seq_len, n_filters) broadcasts against x's trailing dimensions, so
        # the original loop of per-leading-dim unsqueeze calls was redundant;
        # direct buffer access also replaces the outdated `.data` idiom.
        return x + self.pe[:x.size(-2), :]
|
##
#
#
#
# sayheya@qq.com
# 2019-05-29
# Redis connection string for the bloom-filter store (db index 6).
BLOOM_REDIS_URI = 'redis://10.0.0.48:6379/6'
BIT_SIZE = 1 << 31  # size of the bit array (2**31 bits)
BLOCK_NUM = 1  # redis block num to store (number of keys the filter spans)
BLOOM_KEY_NAME = 'bloom_for_hanzo_%(no)s'  # redis bloom key name; %(no)s is presumably the block index
TEST_BLOOM_KEY_NAME = 'test_bloom_for_hanzo_%(no)s'  # redis bloom key name (test environment)
|
from django.shortcuts import render
from django.shortcuts import HttpResponse
# Create your views here.
from django.template import loader
from django.urls import NoReverseMatch, reverse
from django.views.decorators.cache import never_cache
from django.shortcuts import HttpResponseRedirect, render_to_response
from django.contrib import auth
from django.template.response import TemplateResponse
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.utils.translation import ugettext as _, ugettext_lazy
# URL namespace used when reversing this app's views (e.g. 'wvpn:index').
app_name = 'wvpn'
class WvpnSite(object):
    """Admin-like site object.

    NOTE(review): several references are unresolved — `self.name` is never
    assigned, `get_urls` is never defined, `login` imports django's login
    view without using it and implicitly returns None for non-GET requests.
    Looks like an unfinished copy of django.contrib.admin.sites.AdminSite;
    the module-level functions below are what the URLs actually use.
    """
    def has_permission(self, request):
        """
        Returns True if the given HttpRequest has permission to view
        *at least one* page in the admin site.
        """
        return request.user.is_active and request.user.is_staff
    @never_cache
    def login(self, request):
        # Unauthorized GETs are bounced to the admin index; everything else
        # falls through (returns None).
        if request.method == 'GET' and not self.has_permission(request):
            index_path = reverse('admin:index', current_app=self.name)
            return HttpResponseRedirect(index_path)
        from django.contrib.auth.views import login
    @never_cache
    def index(self, request):
        # Stub — not implemented.
        pass
    @property
    def urlss(self) if False else None  # placeholder removed
def has_permission(request):
    """True when the requesting user is both active and a staff member."""
    user = request.user
    return user.is_active and user.is_staff
def index(request):
    """Render the landing page.

    BUGFIX: render() already returns an HttpResponse; the previous
    HttpResponse(render(...)) double-wrap rebuilt the response from its
    content iterator, discarding the original headers/content type.
    """
    return render(request, 'index.html')
    #return render_to_response('index.html',locals())
def logout(request):
    """Send the user back to the site root."""
    # return HttpResponse("Hello logout world!")
    return HttpResponseRedirect('/')
def login(request, extra_context=None):
    """
    Displays the login form for the given HttpRequest.

    A GET from an already-authorized user is redirected to the index.
    Otherwise the POSTed credentials (username or email + password) are
    tried directly; on failure the request is delegated to
    django.contrib.auth's login view with this app's form and template.

    BUGFIX: removed a leftover `import pdb;pdb.set_trace()` that froze
    every request into the debugger.
    """
    if request.method == 'GET' and has_permission(request):
        index_path = reverse('wvpn:index', current_app=app_name)
        return HttpResponseRedirect(index_path)
    username = request.POST.get('username', '')
    if not username:
        username = request.POST.get('email')
    password = request.POST.get('password', '')
    user = auth.authenticate(username=username, password=password)
    if user is not None and user.is_active:
        auth.login(request, user)
        index_path = reverse('wvpn:index', current_app=app_name)
        return HttpResponseRedirect(index_path)
    # Deliberately shadows this view: the final call below dispatches to
    # django.contrib.auth.views.login, not back into this function.
    from django.contrib.auth.views import login
    # Since this module gets imported in the application's root package,
    # it cannot import models from other applications at the module level,
    # and django.contrib.admin.forms eventually imports User.
    from django.contrib.admin.forms import AdminAuthenticationForm
    context = dict(
        # self.each_context(request),
        title=_('Log in'),
        app_path=request.get_full_path(),
        username=request.user.get_username(),
    )
    if (REDIRECT_FIELD_NAME not in request.GET and
            REDIRECT_FIELD_NAME not in request.POST):
        context[REDIRECT_FIELD_NAME] = reverse('wvpn:index', current_app='wvpn')
    context.update(extra_context or {})
    defaults = {
        'extra_context': context,
        'authentication_form': AdminAuthenticationForm,
        'template_name': 'login.html',
    }
    request.current_app = "wvpn"
    return login(request, **defaults)
|
"""
作者 xupeng
邮箱 874582705@qq.com / 15601598009@163.com
github主页 https://github.com/xupeng1206
"""
import os
import shutil
os.system('python setup.py bdist_wheel')
shutil.rmtree('./build/', ignore_errors=True)
shutil.rmtree('./Flanger.egg-info/', ignore_errors=True)
os.system('pip install -U dist/Flanger-0.0.1-py3-none-any.whl')
|
"""JupyterLab Metadata Service Server"""
import os
def start():
"""Start JupyterLab Metadata Service Server Start
Returns:
dict -- A dictionary with the node command that will start the
Metadata Service Server
"""
path = os.path.dirname(os.path.abspath(__file__))
return {
'command': [
'node', os.path.join(path, 'src', 'index.js'), '{port}'
],
'timeout': 60,
'port': 40000
}
|
import os
import cv2
# Sequential counter naming the resized outputs 0.jpg, 1.jpg, ...
count = 0
# NOTE(review): os.scandir order is filesystem-dependent, so the output
# numbering is not guaranteed to be stable across runs.
for file in os.scandir('./project/team/filtered_img'):
    print(file)
    path = os.path.abspath(file)
    jpg = cv2.imread(path)
    # Resize every image to 1280x720 (cv2 takes (width, height)).
    jpg = cv2.resize(jpg, (1280,720))
    # cv2.imshow('jpg', jpg)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()
    cv2.imwrite('./project/team/resize_img/' + str(count) + '.jpg', jpg)
    count += 1
|
#! /usr/bin/python3
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import atexit
import logging
import logging.handlers
import os
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.interval import IntervalTrigger
from datetime import datetime, timedelta
from flask import Flask, request
from flask import jsonify
from flask_sqlalchemy import SQLAlchemy
# Amount of minutes we are waiting to consider a ponger death.
DEATH_TIMER = 30
# Log to the local syslog daemon at DEBUG level and above.
syslog = logging.handlers.SysLogHandler(address='/dev/log')
syslog.setLevel(logging.DEBUG)
syslog.setFormatter(logging.Formatter('pong_logger: %(levelname)s - %(message)s'))
logger = logging.getLogger('pong-logger')
logger.setLevel(logging.DEBUG)
logger.addHandler(syslog)
basedir = os.path.abspath(os.path.dirname(__file__))
app = Flask(__name__)
"""
With this configuration, the app expects the sqlite db to be in the same base
directory, this might not be your desired setup, change the SQLALCHEMY_DATABASE_URI
to a proper location.
"""
app.config.update(dict(
    SQLALCHEMY_DATABASE_URI='sqlite:///' + os.path.join(basedir, 'app.sqlite'),
    SQLALCHEMY_TRACK_MODIFICATIONS=False,
))
# Optional overrides: environment-pointed settings file, then this module.
app.config.from_envvar('FLASKR_SETTINGS', silent=True)
app.config.from_object(__name__)
db = SQLAlchemy(app)
scheduler = BackgroundScheduler()
scheduler.start()
# Shut down the scheduler when exiting the app
atexit.register(lambda: scheduler.shutdown())

class Pong(db.Model):
    """
    Definition of a Pong.
    host: IP address of a pong device
    region:
        String that represent the geographic Datacenter or region location
        of the ponger. Example: MAD, Madrid, Spain, DC01.MADRID
    cluster:
        String that define the cluster name, this is usually the
        division name used within a DC. Example: CL10, CLUSTER1, etc
    rack:
        String, name of the rack where the ponger is located. Racks usually
        live within a cluster. Example: RS10AA, RACK01, etc.
    is_active:
        This boolean value represent if the ponger is alive (True) or no longer
        active (False).
    updated_datetime:
        Date value, represent the last time we had news from a ponger
    """
    host = db.Column(db.String(80), primary_key=True)
    region = db.Column(db.String(100))
    cluster = db.Column(db.String(100))
    rack = db.Column(db.String(100))
    is_active = db.Column(db.Boolean)
    updated_datetime = db.Column(db.DateTime)

def disable_old_hosts():
    """
    Runs periodically via the scheduler. Checks if the pongers are
    contacting the application periodically (keepalive). If we didn't hear
    from a ponger for DEATH_TIMER minutes we disable that host.
    If the ponger is disabled its no longer served as a valid target to pingers.
    """
    time_tresh = datetime.now() - timedelta(minutes=DEATH_TIMER)
    with app.app_context():
        servers = db.session.query(Pong).filter(
            Pong.is_active == True,
            Pong.updated_datetime < time_tresh
        )
        for ponger in servers.all():
            logger.info(
                'Disabling:{} last_updated:{}'.format(
                    ponger.host, ponger.updated_datetime
                )
            )
            ponger.is_active = False
            db.session.commit()

# BUGFIX: the cleanup job must be registered *after* disable_old_hosts is
# defined — the original module called add_job first, which raised a
# NameError at import time.
scheduler.add_job(
    func=disable_old_hosts,
    trigger=IntervalTrigger(minutes=1),
    id='clean_job',
    name='Clean inactive hosts',
    replace_existing=True
)
def serialize_server(obj):
    """Map a Pong row onto a plain JSON-serializable dict."""
    fields = ('host', 'rack', 'cluster', 'region', 'is_active',
              'updated_datetime')
    return {name: getattr(obj, name) for name in fields}
def str_to_bool(s):
    """Return True only for the literal form-encoded flag string '1'."""
    # Idiomatic form of the original if/else; any other value is False.
    return s == '1'
@app.cli.command('initdb')
def initdb_command():
    """Initializes the database (run with `flask initdb`): creates all tables."""
    db.create_all()
    logger.info('Database initialized')
@app.route('/')
def home():
    """
    Returns the list of active pongers to the caller (usually pingers):
    Example:
    OUTPUT:
    {
        "json_list": [
            {
                "cluster": "CLUSTER_1",
                "host": "10.142.0.4",
                "is_active": true,
                "rack": "RACK_1",
                "region": "REGION_1",
                "updated_datetime": "Sun, 20 Aug 2017 14:53:01 GMT"
            },
            {
                "cluster": "CLUSTER_2",
                "host": "10.142.0.2",
                "is_active": true,
                "rack": "RACK_1",
                "region": "REGION_1",
                "updated_datetime": "Sun, 20 Aug 2017 14:53:01 GMT"
            }
        ]
    }
    """
    logger.info('Home...')
    # Only active pongers are served as valid targets.
    servers = db.session.query(Pong).filter_by(is_active=True)
    return jsonify(json_list=[serialize_server(ponger) for ponger in servers.all()])
@app.route('/servers/update', methods=['POST'])
def update_log():
    """
    Pongers calls this service constantly as a keepalive mechanism, and to
    "unregister" (set is_alive to False) when they are no longer alive.
    Every time a ponger calls this method the active state is updated
    (is_alive) and the updated_datetime field is set to the current time.
    """
    logger.info('update_log: A ponger is calling the update service...')
    # NOTE(review): 'sucess' is misspelled, but it is part of the JSON API
    # surface — renaming it would break existing clients; coordinate first.
    response = {'action': 'update', 'sucess': False}
    try:
        # The caller's source IP is the ponger's identity (primary key).
        ip_addr = request.remote_addr
        logger.info(
            'Ponger identified with IP {}: Updating status'.format(ip_addr)
        )
        exists = db.session.query(
            db.session.query(Pong).filter_by(host=ip_addr).exists()
        ).scalar()
        logger.debug('request:{}'.format(request.form))
        is_active = str_to_bool(request.form['is_active'])
        logger.debug('is_active:{}'.format(is_active))
        curr_date = datetime.now()
        if exists:
            column = db.session.query(Pong).filter_by(host=ip_addr).first()
            column.is_active = is_active
            column.updated_datetime = curr_date
            db.session.commit()
            response = {
                'action': 'update',
                'sucess': True,
                'host': ip_addr,
                'is_active': is_active,
                'rack': column.rack,
                'cluster': column.cluster,
                'region': column.region,
                'updated_datetime': curr_date
            }
    except Exception as e:
        # Best-effort endpoint: failures are reported in the JSON body
        # rather than surfacing a 5xx to the ponger.
        logger.debug('Could not register the ponger status: {}'.format(e))
    return jsonify(response)
@app.route('/servers/create', methods=['POST'])
def create_log():
    """
    When a new ponger is activated, this method is called.
    This will record basic ponger characteristics like: ip address, region,
    cluster and rack. The info is sent to the db.
    """
    # NOTE(review): 'sucess' is misspelled but is part of the JSON API
    # surface; see update_log.
    response = {'action': 'create', 'sucess': False}
    logger.info('create_log: called by a ponger')
    ponger_info = request.form
    ip_addr = request.remote_addr
    logger.info('create_log: {} Attempting to register'.format(ip_addr))
    # All three location fields are mandatory.
    if not {'region', 'cluster', 'rack'} <= set(ponger_info):
        logger.error(
            'create_log: {} missing geographic location info.'.format(ip_addr)
        )
        return jsonify(response)
    for key in ponger_info:
        logger.debug('{}: {}'.format(key, ponger_info[key]))
    region = request.form['region']
    cluster = request.form['cluster']
    rack = request.form['rack']
    exists = db.session.query(
        db.session.query(Pong).filter_by(host=ip_addr).exists()
    ).scalar()
    is_active = True
    curr_date = datetime.now()
    if exists:
        # Known ponger: refresh its metadata and reactivate it.
        logger.info(
            "create_log: We already know about this ponger {}. Refreshing info".
            format(ip_addr)
        )
        try:
            column = db.session.query(Pong).filter_by(host=ip_addr).first()
            column.is_active = is_active
            column.updated_datetime = curr_date
            column.region = region
            column.cluster = cluster
            column.rack = rack
            db.session.commit()
        except Exception as e:
            logger.error(
                "create_log: could not update the ponger {} information: {}".
                format(ip_addr, e)
            )
            return jsonify(response)
    else:
        # First contact: insert a fresh row for this ponger.
        logger.info(
            "create_log: {} is a new ponger. Registering".format(ip_addr)
        )
        try:
            serv = Pong(
                host=ip_addr,
                is_active=is_active,
                updated_datetime=curr_date,
                region=region,
                cluster=cluster,
                rack=rack
            )
            db.session.add(serv)
            db.session.commit()
        except Exception as e:
            logger.error(
                "create_log: could not register the ponger {}: {}".
                format(ip_addr, e)
            )
            return jsonify(response)
    response = {
        'action': 'update',
        'sucess': True,
        'region': region,
        'cluster': cluster,
        'rack': rack,
        'host': ip_addr,
        'is_active': is_active,
        'updated_datetime': curr_date
    }
    return jsonify(response)
if __name__ == '__main__':
    # Development entry point; run under a WSGI server in production.
    app.run()
|
import os
from flask import (
Flask, flash, render_template,
redirect, request, session, url_for)
from flask_pymongo import PyMongo
from bson.objectid import ObjectId
from werkzeug.security import generate_password_hash, check_password_hash
if os.path.exists("env.py"):
import env
app = Flask(__name__)
app.config["MONGO_DBNAME"] = os.environ.get("MONGO_DBNAME")
app.config["MONGO_URI"] = os.environ.get("MONGO_URI")
app.secret_key = os.environ.get("SECRET_KEY")
mongo = PyMongo(app)
# Dictionary with key of countries with value of Country Flags.
# Keys are lowercase country/region names; values are Wikimedia flag PNG
# URLs. The first entry is the regional ASEAN fallback, removed from the
# COUNTRIES dropdown list below.
COUNTRY_FLAGS = {
    "south east asia": ('https://upload.wikimedia.org/'
                        'wikipedia/en/thumb/8/87/'
                        'Flag_of_ASEAN.svg/'
                        '510px-Flag_of_ASEAN.svg.png'),
    "thailand": ('https://upload.wikimedia.org/'
                 'wikipedia/commons/thumb/a/a9/'
                 'Flag_of_Thailand.svg/510px-'
                 'Flag_of_Thailand.svg.png'),
    "cambodia": ('https://upload.wikimedia.org/'
                 'wikipedia/commons/thumb/8/83/'
                 'Flag_of_Cambodia.svg/510px-'
                 'Flag_of_Cambodia.svg.png'),
    "vietnam": ('https://upload.wikimedia.org/'
                'wikipedia/commons/thumb/2/21/'
                'Flag_of_Vietnam.svg/500px-'
                'Flag_of_Vietnam.svg.png'),
    "laos": ('https://upload.wikimedia.org/'
             'wikipedia/commons/thumb/5/56/'
             'Flag_of_Laos.svg/510px-'
             'Flag_of_Laos.svg.png'),
    "indonesia": ('https://upload.wikimedia.org/'
                  'wikipedia/commons/thumb/9/9f/'
                  'Flag_of_Indonesia.svg/510px-'
                  'Flag_of_Indonesia.svg.png'),
    "malaysia": ('https://upload.wikimedia.org/'
                 'wikipedia/commons/thumb/6/66/'
                 'Flag_of_Malaysia.svg/510px-'
                 'Flag_of_Malaysia.svg.png'),
    "philippines": ('https://upload.wikimedia.org/'
                    'wikipedia/commons/thumb/9/99/'
                    'Flag_of_the_Philippines.svg/510px-'
                    'Flag_of_the_Philippines.svg.png'),
    "singapore": ('https://upload.wikimedia.org/'
                  'wikipedia/commons/thumb/4/48/'
                  'Flag_of_Singapore.svg/510px-'
                  'Flag_of_Singapore.svg.png'),
    "myanmar": ('https://upload.wikimedia.org/'
                'wikipedia/commons/thumb/8/8c/'
                'Flag_of_Myanmar.svg/510px-'
                'Flag_of_Myanmar.svg.png'),
    "east timor": ('https://upload.wikimedia.org/'
                   'wikipedia/commons/thumb/2/26/'
                   'Flag_of_East_Timor.svg/510px-'
                   'Flag_of_East_Timor.svg.png'),
    "brunei": ('https://upload.wikimedia.org/'
               'wikipedia/commons/thumb/9/9c/'
               'Flag_of_Brunei.svg/510px-'
               'Flag_of_Brunei.svg.png')
}
# Country names for the recipe form: every COUNTRY_FLAGS key except the
# leading "south east asia" region entry, with the first letter capitalized.
COUNTRIES = [name.capitalize() for name in list(COUNTRY_FLAGS)[1:]]
@app.route("/")
@app.route("/home")
def get_home():
# find recipes by Latest created date limit to 4
recipes = mongo.db.recipes.find().sort("created_date", -1).limit(4)
# find countries flag for Carousel on Home page
flags = COUNTRY_FLAGS.values()
return render_template("index.html", recipes=recipes, flags=flags)
@app.route("/sign_up", methods=["GET", "POST"])
def sign_up():
if request.method == "POST":
# Check if username exists
existing_user = mongo.db.users.find_one(
{"username": request.form.get("username").lower()})
# Displays flash message if exists
if existing_user:
flash("Username already exists!")
return redirect(url_for("sign_up"))
sign_up = {
"username": request.form.get("username").lower(),
"password": generate_password_hash(request.form.get("password"))
}
mongo.db.users.insert_one(sign_up)
# New user into session cookie
session["user"] = request.form.get("username").lower()
flash("Sign Up Successful!")
return redirect(url_for("profile", username=session["user"]))
return render_template("/user/signup.html")
@app.route("/sign_in", methods=["GET", "POST"])
def sign_in():
if request.method == "POST":
# check if username exists in db
existing_user = mongo.db.users.find_one(
{"username": request.form.get("username").lower()})
if existing_user:
# ensure hashed password matches user input
if check_password_hash(
existing_user["password"], request.form.get("password")):
session["user"] = request.form.get("username").lower()
flash("Welcome, {}".format(
request.form.get("username")))
return redirect(url_for(
"profile", username=session["user"]))
else:
# invalid password match
flash("Incorrect Username and/or Password")
return redirect(url_for("sign_in"))
else:
# username doesn't exist
flash("Incorrect Username and/or Password")
return redirect(url_for("sign_in"))
return render_template("/user/signin.html")
# Return recipe & flags by country
@app.route("/recipes")
def recipes():
    """Show recipes for each country of origin.

    Query-string parameters:
        country -- country filter; defaults to the whole region.
        query   -- free-text search (recipe name or ingredient).

    FIX: the original used bare string literals as comments inside the
    body; those are no-op expression statements and have been converted
    to real comments. Logic is unchanged.
    """
    country = request.args.get("country")
    query = request.args.get("query")
    # Default to the whole region, normalized for dict lookups.
    country = country if country else "south east asia"
    country = country.lower()
    # Pick the matching flag, falling back to the regional ASEAN flag.
    if country in COUNTRY_FLAGS:
        flags = COUNTRY_FLAGS[country]
    else:
        flags = COUNTRY_FLAGS["south east asia"]
    if query:
        # When the search text itself names a country, show that flag.
        if (query.lower() in COUNTRY_FLAGS) and COUNTRY_FLAGS[query.lower()]:
            flags = COUNTRY_FLAGS[query.lower()]
            country = query
        # Full-text search over recipe name / ingredients.
        recipes = list(mongo.db.recipes.find({"$text": {"$search": query}}))
    else:
        if country == "south east asia":
            # Regional view: no filter, show everything.
            recipes = list(mongo.db.recipes.find())
        else:
            # Otherwise restrict to the selected country.
            recipes = list(mongo.db.recipes.find(
                {"$text": {"$search": country}}))
    return render_template(
        "/recipes/recipes.html", recipes=recipes, country=country, flags=flags)
# Search Function
@app.route("/search", methods=["GET", "POST"])
def search():
    """Full-text search over the recipes text index; renders the result list."""
    query = request.form.get("query")
    recipes = list(mongo.db.recipes.find({"$text": {"$search": query}}))
    return render_template(
        "/recipes/recipes.html", recipes=recipes)
# find one recipe to show return recipe description
@app.route("/recipe_description/<recipe_id>")
def recipe_description(recipe_id):
    """Detail page for a single recipe, looked up by its ObjectId string."""
    recipe = mongo.db.recipes.find_one({"_id": ObjectId(recipe_id)})
    return render_template("/recipes/recipe.html", recipe=recipe)
@app.route("/profile/<username>", methods=["GET", "POST"])
def profile(username):
    """Show the signed-in user's profile page with their recipes.

    Admin sees every recipe; other users see only recipes they created.
    Redirects to the sign-in page when nobody is logged in.

    Fix: the original subscripted session["user"] BEFORE checking it, so an
    anonymous visitor got a KeyError and the redirect was unreachable.
    """
    # Guard first, using .get so a missing session key cannot raise.
    if not session.get("user"):
        return redirect(url_for("sign_in"))
    # grab session username from db
    username = mongo.db.users.find_one(
        {"username": session["user"]})["username"]
    # Admin sees all recipes; everyone else only their own.
    if session["user"] == "admin":
        recipes = list(mongo.db.recipes.find())
    else:
        recipes = list(
            mongo.db.recipes.find({"created_by": session["user"]}))
    return render_template(
        "/user/profile.html", username=username, recipes=recipes)
@app.route("/signout")
def sign_out():
    """Log the current user out and send them back to the sign-in page."""
    # Flash first (writes into the session), then drop the user cookie.
    flash("You have been logged out")
    del session["user"]
    return redirect(url_for("sign_in"))
# Insert new recipe into Mongodb using request POST method
@app.route("/add_recipe", methods=["GET", "POST"])
def add_recipe():
    """Create a new recipe document from the submitted form.

    GET renders the empty form; POST inserts the recipe and redirects to
    the creator's profile.
    """
    if request.method == "POST":
        form_fields = (
            "country", "recipe_name", "description", "serves", "image_url",
            "prep_time", "cook_time", "ingredients", "method", "created_date",
        )
        # Copy each form field verbatim, then stamp the creator on the end.
        recipe = {name: request.form.get(name) for name in form_fields}
        recipe["created_by"] = session["user"]
        mongo.db.recipes.insert_one(recipe)
        flash("Recipe Successfully Added!")
        return redirect(url_for('profile', username=session['user']))
    # Countries Variable - list of Countries for the form's dropdown.
    return render_template("/recipes/add_recipe.html", countries=COUNTRIES)
# Edit recipe into Mongodb using request POST method/targeted by ObjectID
@app.route("/edit_recipe/<recipe_id>", methods=["GET", "POST"])
def edit_recipe(recipe_id):
    """Replace an existing recipe document with the submitted form values.

    GET renders the edit form pre-filled with the current document; POST
    replaces the whole document and redirects to the user's profile.
    """
    if request.method == "POST":
        # Renamed from "edit_recipe" so the local no longer shadows this view.
        updated_recipe = {
            "country": request.form.get("country"),
            "recipe_name": request.form.get("recipe_name"),
            "description": request.form.get("description"),
            "serves": request.form.get("serves"),
            "image_url": request.form.get("image_url"),
            "prep_time": request.form.get("prep_time"),
            "cook_time": request.form.get("cook_time"),
            "ingredients": request.form.get("ingredients"),
            "method": request.form.get("method"),
            "created_date": request.form.get("created_date"),
            "created_by": session["user"]
        }
        # replace_one supersedes the deprecated Collection.update()
        # (removed in PyMongo 4); full-document replacement is unchanged.
        mongo.db.recipes.replace_one(
            {"_id": ObjectId(recipe_id)}, updated_recipe)
        flash("Recipe Successfully Updated!")
        return redirect(url_for('profile', username=session['user']))
    recipe = mongo.db.recipes.find_one({"_id": ObjectId(recipe_id)})
    # Countries Variable - list of Countries for the form's dropdown.
    countries = COUNTRIES
    return render_template(
        "/recipes/edit_recipe.html", recipe=recipe, countries=countries)
# Delete recipe from Mongodb by targeting ObjectId
@app.route("/delete_recipe/<recipe_id>")
def delete_recipe(recipe_id):
    """Delete a single recipe by its ObjectId, then return to the profile.

    Fix: delete_one supersedes the deprecated Collection.remove()
    (removed in PyMongo 4); deleting a single matched document is unchanged.
    """
    mongo.db.recipes.delete_one({"_id": ObjectId(recipe_id)})
    flash("Recipe Successfully Deleted")
    return redirect(url_for('profile', username=session['user']))
# Error Handler - 403
@app.errorhandler(403)
def page_forbidden(e):
    """Serve the custom 403 (forbidden) page with its status code."""
    body = render_template("/error_handlers/403.html")
    return body, 403
# Error Handler - 404
@app.errorhandler(404)
def page_not_found(e):
    """Serve the custom 404 (not found) page with its status code."""
    body = render_template("/error_handlers/404.html")
    return body, 404
# Error Handler - 500
@app.errorhandler(500)
def page_server_error(e):
    """Serve the custom 500 (server error) page with its status code."""
    body = render_template("/error_handlers/500.html")
    return body, 500
if __name__ == "__main__":
    # Fix: int(os.environ.get("PORT")) raised TypeError when PORT was unset;
    # default to Flask's conventional 5000 so a bare local run still works.
    # host=None (IP unset) makes Flask bind its default host.
    app.run(host=os.environ.get("IP"),
            port=int(os.environ.get("PORT", "5000")),
            debug=False)
|
#####################################################################################
#
# software: Python 2.7
# file: constellation2x2.py, meaning that digital step is 2
# author: Aleksandar Vukovic
# mail: va183034m@student.etf.bg.ac.rs
#
#####################################################################################
import csv # for importing data
import os # for pwd operation
import math # for log10() function
# for plotting TODO:
# import numpy as np
# import matplotlib.pyplot as plt
# Resolve the CSV of measured constellation points relative to this script.
script_dir = os.path.dirname(__file__) #<-- absolute dir the script is in
rel_path = "constellation2x2.csv"
abs_file_path = os.path.join(script_dir, rel_path)
# print abs_file_path
# input information from Cadence Virtuoso
SIMULATION_STEP = 2 # step through the digital control
DIGITAL_CTRL = 256 # total digital control of the I and Q path
ERROR_dB = 0.5 # magnitude error allowed in dB
NO_BITS_PRECISION = 6 # bits of precision to test
# NOTE: '/' is integer division under Python 2 (this file uses py2 print
# statements); under Python 3 this would become a float and break range().
NO_POINTS = DIGITAL_CTRL**2/SIMULATION_STEP**2
# Width includes 8 extra leading columns that are skipped when the points
# are read back (presumably row labels -- TODO confirm against the CSV).
RAW_MATRIX_WIDTH = NO_POINTS + 8
RAW_MATRIX_HEIGHT = 7
TOTAL_DEG = 360.0  # full circle, used to derive the ideal phase step
# point in the polar coordinate system
class Point:
    """One measured constellation point: the digital I/Q control codes plus
    the resulting phase (degrees) and magnitude (linear and dB)."""

    def __init__(self, I_CTRL, Q_CTRL, phase_deg, magnitude):
        self.I_CTRL = I_CTRL
        self.Q_CTRL = Q_CTRL
        self.phase_deg = phase_deg
        self.magnitude = magnitude
        # Cache the magnitude in dB so consumers do not recompute the log.
        self.mag_dB = 20 * math.log10(magnitude)

    def print_elements(self):
        """Print the point's fields in the original report layout."""
        print("Magnitude: %s and in dB: %s\nPhase in degrees:%s\nI_CTRL:%s & Q_CTRL:%s\n"
              % (self.magnitude, self.mag_dB, self.phase_deg,
                 self.I_CTRL, self.Q_CTRL))
def generate_list_phase_degs(no_bits, total_deg=360.0):  # generate the list of ideal phase degrees for the phase shifter
    """Return the 2**no_bits + 1 ideal phase settings from -180.0 upward.

    no_bits   -- bits of phase precision; the step is total_deg / 2**no_bits
    total_deg -- span covered by the settings; generalizes the hard-coded
                 TOTAL_DEG constant (default 360.0, backward compatible)
    """
    no_phase_points = 2 ** no_bits
    deg_step = total_deg / no_phase_points
    # Multiply instead of repeatedly adding: no accumulated float drift.
    return [-180.0 + k * deg_step for k in range(no_phase_points + 1)]
# empty list of points
list_of_points = []
# how to create and populate a list
# lists = [0 for x in range(4)]
# how to create and [p]
# input raw matrix: RAW_MATRIX_HEIGHT rows read from the Virtuoso CSV export
raw_matrix = [[0 for x in range(RAW_MATRIX_HEIGHT)] for y in range(RAW_MATRIX_WIDTH)]
i = 0;
# HARDCODED with open('C:\\Users\\aleksandarv\\Documents\\Python_scripts\\constellation8x8.csv','rt') as f:
with open(abs_file_path,'rt') as f:
    data = csv.reader(f)
    for row in data:
        raw_matrix[i] = row
        i = i + 1
# print i
# Rebuild Point objects from the CSV rows; rows 1/2 hold the I/Q control
# codes and rows 5/6 the phase/magnitude. The first
# RAW_MATRIX_WIDTH - NO_POINTS (= 8) columns are skipped (labels).
for i in range(NO_POINTS):
    list_of_points.append(Point(int(raw_matrix[1][i + RAW_MATRIX_WIDTH - NO_POINTS]), int(raw_matrix[2][i + RAW_MATRIX_WIDTH - NO_POINTS]), float(raw_matrix[5][i + RAW_MATRIX_WIDTH - NO_POINTS]), float(raw_matrix[6][i + RAW_MATRIX_WIDTH - NO_POINTS])));
# get the average total gain of phase shifter
sum_of_mag_dB = 0;
for i in range(NO_POINTS):
    sum_of_mag_dB = sum_of_mag_dB + list_of_points[i].mag_dB
average_mag_dB = sum_of_mag_dB/NO_POINTS;
# extract the phase degrees into a list witch magnitude is close to the average, not important to be near the average
# (keeps only points within +/- ERROR_dB/2 of the average gain)
count_points = 0
phase_deg_list = []
for i in range(NO_POINTS):
    if list_of_points[i].mag_dB > average_mag_dB - ERROR_dB/2 and list_of_points[i].mag_dB < average_mag_dB + ERROR_dB/2:
        count_points = count_points + 1
        phase_deg_list.append(list_of_points[i].phase_deg)
# sort the list for no real reason
phase_deg_list.sort()
# print raw_matrix
# print phase_deg_list
max_diff = 0;
# print len(phase_deg_list)
# show the steps between the phase points
# for i in range(len(phase_deg_list)-1):
# print phase_deg_list[i+1] - phase_deg_list[i];
print "List of discrete ideal phase shifts (phase deg): "
print generate_list_phase_degs(NO_BITS_PRECISION);
list_of_ideal_points = generate_list_phase_degs(NO_BITS_PRECISION);
# calculate minimum derivation for every ideal phase from the phase shifter
# (for each ideal setting, distance to the nearest achievable phase)
min_dev_list = []
for i in range(len(list_of_ideal_points)):
    min_dev = 180;
    for j in range(len(phase_deg_list)):
        if abs(list_of_ideal_points[i] - phase_deg_list[j]) < min_dev:
            min_dev = abs(list_of_ideal_points[i] - phase_deg_list[j])
    min_dev_list.append(round(min_dev,2))
print "List of errors (phase deg):"
print min_dev_list
# and among them maximum deviation is:
print "Maximum error (phase deg): " + str(max(min_dev_list))
# RMS of the per-setting errors, the overall quality figure of the shifter
rms_sum = 0;
for i in range(len(min_dev_list)):
    rms_sum = rms_sum + min_dev_list[i]**2
min_dev_rms = math.sqrt(rms_sum/len(min_dev_list));
print "RMS error (phase deg): " + str(min_dev_rms) |
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from django.db import models
from django.template.defaultfilters import truncatewords_html
from django.utils.safestring import mark_safe
from django.utils.timezone import now
from autoslug import AutoSlugField
from taggit.managers import TaggableManager
def upload_location(instance, filename):
    """Build the upload path "<category>/<filename>" for an image field.

    Fix: the original had the % operator INSIDE the string literal, so it
    returned the literal text '%s/%s % (instance.category, filename)'
    instead of a formatted path.

    NOTE(review): Post defines a `categories` M2M, not `category` -- confirm
    this attribute exists on whichever model uses this upload callable.
    """
    return "%s/%s" % (instance.category, filename)
class Category(models.Model):
    """A named grouping for blog posts, addressed by a unique slug."""
    title = models.CharField(max_length=100)
    slug = models.SlugField(unique=True)

    class Meta:
        verbose_name = ('Category')
        verbose_name_plural = ('Categories')
        ordering = ('title',)

    def __str__(self):
        return str(self.title)
class Post(models.Model):
    """A blog entry with draft/published workflow, media, tags and categories."""
    STATUS_CHOICES = (
        ('d', 'Draft'),
        ('p', 'Published'),
    )
    title = models.CharField(max_length=200)
    # Slug is generated from the title automatically (django-autoslug).
    slug = AutoSlugField(populate_from ='title')
    author = models.ForeignKey(User, blank=True, null=True)
    author_alias = models.CharField(max_length=100, blank=True, null=True, help_text='Ex me ipsa (Out of myself)')
    tease = models.TextField(blank=True)
    body = models.TextField()
    # NOTE(review): upload_location reads instance.category, but Post defines
    # a "categories" M2M -- confirm image uploads work for Post instances.
    image = models.ImageField(upload_to=upload_location,
        blank=True, null=True,
        width_field="width_field",
        height_field="height_field")
    # Kept in sync by the ImageField width/height bookkeeping above.
    height_field = models.IntegerField(default=0)
    width_field = models.IntegerField(default=0)
    media_url = models.URLField(blank=True, null=True)
    reference = models.TextField(blank=True)
    reference_url = models.URLField(blank=True, null=True)
    visits = models.IntegerField(default=0)
    status = models.CharField(max_length=1, choices=STATUS_CHOICES, default='d')
    allow_comments = models.BooleanField(default=False)
    # publish defaults to now() but stays editable, unlike created/modified.
    publish = models.DateTimeField(default=now)
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)
    categories = models.ManyToManyField(Category, blank=True)
    tags = TaggableManager(blank=True)
    meta_description = models.CharField(max_length=150, blank=True, null=True)

    class Meta:
        verbose_name = ('Post')
        verbose_name_plural = ('Posts')
        ordering = ('-publish',)
        get_latest_by = 'publish'

    def __str__(self):
        return str(self.title)

    def save(self, *args, **kwargs):
        # NOTE(review): this override only delegates to the parent -- it can
        # likely be removed unless a hook is planned here.
        super(Post, self).save(*args, **kwargs)

    def get_absolute_url(self):
        """Canonical URL for this post (blog:post keyed by slug)."""
        return reverse("blog:post", kwargs={"slug": self.slug})

    def display_categories(self):
        """Comma-separated category titles, for the admin list display."""
        return ', '.join([ category.title for category in self.categories.all() ])
    display_categories.short_description = "Categories"

    def display_tags(self):
        """Comma-separated tag names, for the admin list display."""
        return ', '.join([ tag.name for tag in self.tags.all() ])
    display_tags.short_description = "Tags"
class BlogRoll(models.Model):
    """Other blogs you follow."""
    name = models.CharField(max_length=100)
    url = models.URLField()
    # Manual ordering key; lower sorts first (see Meta.ordering).
    sort_order = models.PositiveIntegerField(default=0)
    description = models.TextField(max_length=500, blank=True)
    relationship = models.CharField(max_length=200, blank=True)

    class Meta:
        ordering = ('sort_order', 'name',)
        verbose_name = ('blog roll')
        verbose_name_plural = ('blog roll')

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        # External link: the roll entry points at the other blog itself.
        return self.url
|
from django.conf.urls import re_path
from djoser import views as djoser_views
from rest_framework_jwt import views as jwt_views
from user import views
# User-account URL routes: auth endpoints come from Djoser and DRF-JWT,
# account detail/update endpoints from the local user.views module.
urlpatterns = [
    # Views are defined in Djoser, but we're assigning custom paths.
    re_path(r'^view/$', djoser_views.UserView.as_view(), name='user-view'),
    re_path(r'^delete/$', djoser_views.UserDeleteView.as_view(), name='user-delete'),
    re_path(r'^create/$', djoser_views.UserCreateView.as_view(), name='user-create'),
    re_path(r'^password/$', djoser_views.SetPasswordView.as_view(), name='user-change-password'),
    # Views are defined in Rest Framework JWT, but we're assigning custom paths.
    re_path(r'^login/$', jwt_views.ObtainJSONWebToken.as_view(), name='user-login'),
    re_path(r'^login/refresh/$', jwt_views.RefreshJSONWebToken.as_view(), name='user-login-refresh'),
    re_path(r'^logout/all/$', views.UserLogoutAllView.as_view(), name='user-logout-all'),
    # NOTE(review): the two patterns below are unanchored (no trailing $),
    # so they also match longer paths -- confirm that is intended.
    re_path(r'^details/(?P<user_id>\d+)/', views.UserRetrieveAPIView.as_view(), name='user-details'),
    re_path(r'^change/', views.UserChangeInfoAPIView.as_view(), name='user-update')
] |
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 20 15:40:59 2021
@author: Gustavo
@mail: gustavogodoy85@gmail.com
"""
# =============================================================================
# costo_camion.py enumerate
# =============================================================================
import csv
def costo_camion(ruta_archivo):
    """Return the total cost of a shipment CSV: sum of qty*price per row.

    ruta_archivo -- path to a CSV whose columns are (name, qty, price)
                    with one header row. Rows that cannot be parsed are
                    reported and skipped.

    Fixes: the file is now closed deterministically via ``with``, and the
    header is skipped with next(rows) instead of next(f) -- the original
    author's own comment noted it should have been next(rows).
    """
    prod = 0.0
    with open(ruta_archivo) as f:
        rows = csv.reader(f)
        header = next(rows)  # skip the header row
        for i, row in enumerate(rows, start=1):
            try:
                prod += int(row[1])*float(row[2])
            except ValueError:
                # Keep the original report format for unparseable rows.
                print(f'Fila {i}: no pude interpretar: {row}')
    return prod
# Exercise driver: total a camion CSV. missing.csv presumably contains rows
# with missing values to exercise the error path -- confirm against the data.
#costo = costo_camion('../Data/camion.csv')
costo = costo_camion('../Data/missing.csv')
print('Costo Total: ', costo) |
# Module-level dunders are placed before the imports per PEP 8 (only
# __future__ imports may precede them); __version__ previously sat at the
# bottom of the file.
__version__ = "0.3.0d1"

# Explicit public API re-exported from the submodules below.
__all__ = [
    "__version__",
    "Endpoint", "Entry", "Fetcher",
    "Filter", "RequiresPythonFilter", "VersionFilter",
    "FlatHTMLRepository", "LocalDirectoryRepository", "SimpleRepository",
    "guess_encoding", "match_egg_info_version",
]

from .endpoints import Endpoint
from .entries import Entry
from .fetchers import Fetcher
from .filters import Filter, RequiresPythonFilter, VersionFilter
from .repositories import (
    FlatHTMLRepository, LocalDirectoryRepository, SimpleRepository,
)
from .utils import guess_encoding, match_egg_info_version
|
import uuid, subprocess, os, shutil, json, requests, time
from datetime import datetime
import portality.models as models
from portality.core import app
from xml.etree import ElementTree as ET
from lxml import etree
class callers(object):
    """Thin wrappers around the external ContentMine command-line tools
    (quickscrape, ami-*, norma): build a command line, run it via subprocess,
    and push the results into the Catalogue / Fact models.

    NOTE: this is Python 2 code (print statements, ``except X, e`` syntax).
    """
    def __init__(self,scraperdir=False,storagedir=False,speciesdir=False,speciesoutput=False):
        # Derive the install route from this file's location; each directory
        # can be overridden by argument, otherwise it comes from app config,
        # and the storage/species directories are created if absent.
        route = os.path.dirname(os.path.abspath(__file__)).replace('/portality','/')
        if scraperdir:
            self.scraperdir = scraperdir
        else:
            self.scraperdir = route.replace('/site','') + app.config['SCRAPER_DIRECTORY']
        if storagedir:
            self.storagedir = storagedir
        else:
            self.storagedir = route + app.config['STORAGE_DIRECTORY']
        if not os.path.exists(self.storagedir):
            os.makedirs(self.storagedir)
        if speciesdir:
            self.speciesdir = speciesdir
        else:
            self.speciesdir = route + app.config['SPECIES_DIRECTORY']
        if not os.path.exists(self.speciesdir):
            os.makedirs(self.speciesdir)
        if speciesoutput:
            self.speciesoutput = speciesoutput
        else:
            self.speciesoutput = route + app.config['SPECIES_OUTPUT']
        if not os.path.exists(self.speciesoutput):
            os.makedirs(self.speciesoutput)

    @property
    def scrapers(self):
        # Names of the available scraper definitions: the .json files in
        # scraperdir with the extension stripped, dotfiles ignored.
        try:
            return [ f.replace('.json','') for f in os.listdir(self.scraperdir) if (os.path.isfile(os.path.join(self.scraperdir,f)) and not f.startswith('.')) ]
        except:
            # Presumably scraperdir is missing/unreadable; surface a hint.
            return ["check the route to the scrapers folder!"]

    def quickscrape(self,scraper=False,urls=[],update=False):
        """Scrape each URL with quickscrape and upsert its metadata into the
        catalogue; returns a list of {metadata, id, catalogued} dicts, or an
        error dict on failure."""
        # TODO: there should be a check to see if this is already in the catalogue
        # and if the files are already extracted
        # and if they have already been processed
        # then some sort of concept of when they are worth refreshing - if ever?
        # the publication should not change except as a re-print so would get picked up in a new cycle
        # TODO: add some sanitation of user input here
        # NOTE(review): ';' in scraper raises TypeError when scraper is left
        # as the default False -- confirm callers always pass a string.
        if len(urls) == 0 or ';' in scraper:
            return {"error": "You need to provide some URLs"}
        output = []
        for url in urls:
            # have a look and see if this url already exists in the catalogue
            check = models.Catalogue.query(q='url.exact:"' + url + '"')
            if check.get('hits',{}).get('total',0) > 0 and not update:
                res = check['hits']['hits'][0]['_source']
            else:
                res = self._process(url,scraper)
            print res
            if 'errors' in res:
                return res
            else:
                # look for duplicates, first by DOI then by exact title
                f = None
                if 'doi' in res:
                    check = models.Catalogue.query(q='doi.exact:"' + res['doi'] + '"')
                    if check.get('hits',{}).get('total',0) > 0:
                        f = models.Catalogue.pull(check['hits']['hits'][0]['_source']['id'])
                if 'title' in res and f is None:
                    check = models.Catalogue.query(q='title.exact:"' + res['title'] + '"')
                    if check.get('hits',{}).get('total',0) > 0:
                        f = models.Catalogue.pull(check['hits']['hits'][0]['_source']['id'])
                # send the metadata to the catalogue API
                if f is not None:
                    # Duplicate found: merge the new scrape's files into the
                    # existing record's storage directory and reuse its id.
                    nres = res['id']
                    res['id'] = f.id
                    # TODO: move the extracted content files to proper storage
                    for fl in os.listdir(self.storagedir + '/' + nres):
                        try:
                            shutil.copy(os.path.join(self.storagedir + '/' + nres + '/', fl), self.storagedir + '/' + f.id)
                        except:
                            pass
                    shutil.rmtree(self.storagedir + '/' + nres)
                else:
                    f = models.Catalogue()
                # Copy scraped keys onto the record; existing keys are kept
                # unless update=True, and bookkeeping keys are never copied.
                for k,v in res.items():
                    if (update or k not in f.data) and k not in ['submit','created_date']:
                        f.data[k] = v
                f.save()
                print f.id
                output.append({"metadata":res,"id":res['id'],"catalogued":"https://contentmine.org/api/catalogue/" + res['id']})
        return output

    def _process(self,url,scraper):
        """Run quickscrape on one URL into a fresh ident directory and return
        the parsed bib.json metadata, or {"errors": [...]}."""
        print url, scraper
        # make an ident for this proces and create a dir to put the output
        d = self.storagedir
        ident = uuid.uuid4().hex
        # NOTE(review): assumes storagedir ends with '/' -- confirm config.
        outputdirectory = d + ident
        if not os.path.exists(outputdirectory): os.makedirs(outputdirectory)
        # look for quickscrape: config value, then the usual install paths
        qs = app.config.get('QUICKSCRAPE_COMMAND','/usr/bin/quickscrape')
        if not os.path.exists(qs): qs = '/usr/bin/quickscrape'
        if not os.path.exists(qs): qs = '/usr/local/bin/quickscrape'
        if not os.path.exists(qs): return {"errors":['cannot find quickscrape']}
        # run quickscrape with provided params
        co = [
            qs,
            '--output',
            outputdirectory,
            '--outformat',
            'bibjson'
        ]
        # Either a single named scraper definition, or let quickscrape pick
        # from the whole scraper directory.
        if scraper:
            co.append('--scraper')
            co.append(self.scraperdir + scraper.replace('.json','') + '.json')
        else:
            co.append('--scraperdir')
            co.append(self.scraperdir)
        co.append('--url')
        co.append(url)
        p = subprocess.Popen(co, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = p.communicate()
        if err:
            print 'quickscrape error ', err
            return {"errors": err}
        # find and read the metadata file; quickscrape writes into a
        # subdirectory named after a slugified form of the URL
        try:
            slug = url.replace('://','_').replace('/','_').replace(':','')
            b = json.load(open(outputdirectory + '/' + slug + '/bib.json','r'))
            b["output"] = "http://contentmine.org/static/scraping/" + slug + '/' + ident
            b['slug'] = slug
            b["id"] = ident
            # TODO: move the extracted content files to proper storage
            for fl in os.listdir(outputdirectory + '/' + slug):
                shutil.copy(os.path.join(outputdirectory + '/' + slug, fl), outputdirectory)
            shutil.rmtree(outputdirectory + '/' + slug)
            # return the result
            return b
        except Exception, e:
            return {"errors": [str(e)]}

    def ami(self, cmd='species', input_file_location=False, ident=False, filetype='xml'):
        """Run an ami-<cmd> plugin over an input file (or the stored fulltext
        of a catalogue ident), save each extracted fact, and return a result
        dict (with "errors" on failure)."""
        if not input_file_location and not ident:
            return {"errors": "You need to provide an input file or a contentmine catalogue id"}
        # make an ident for this proces and create a dir
        d = self.speciesdir
        if not ident:
            ident = uuid.uuid4().hex
        outputdirectory = d + ident
        if not os.path.exists(outputdirectory): os.makedirs(outputdirectory)
        # make a result object to populate
        result = {
            "output": "http://contentmine.org/" + app.config['SPECIES_DIRECTORY'].replace('portality/','') + ident
        }
        # TODO: if input file is a web address, get it. If a file address, get it from local storage
        if input_file_location:
            infile = input_file_location
        else:
            try:
                # TODO: there should be a check for this folder existence
                infile = self.storagedir + ident + '/fulltext.' + filetype
            except:
                # TODO: if the folder does not exist check the catalogue, maybe return more useful info or re-run quickscrape
                return {"errors": "The provided contentmine catalogue ID no longer matches any stored files to process"}
        # run code with provided params
        co = [
            'ami-' + cmd,
            '-i',
            infile,
            '-e',
            filetype
        ]
        p = subprocess.Popen(co, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = p.communicate()
        if err:
            return {"errors": err}
        # find and read the output file
        # NOTE(review): the output path is fixed regardless of ident --
        # concurrent runs would clobber each other; confirm intended.
        outputfile = self.speciesoutput + 'fulltext.xml/results.xml'
        #result['raw'] = open(outputfile,'r').read()
        try:
            result['facts'] = self._ami_species_xml_to_json(outputfile)
            for fact in result['facts']:
                # send the fact to the fact api
                fact['source'] = ident
                f = models.Fact()
                f.data = fact
                f.save()
        except Exception, e:
            result["errors"] = [str(e)]
        # then tidy up by removing the ident directory
        #shutil.rmtree(outputdirectory)
        # return the result object
        return result

    def _ami_species_xml_to_json(self,infile):
        """Parse an ami-species results.xml into a list of fact dicts
        (pre / fact / post context plus provenance fields)."""
        results = []
        tree = ET.parse(infile).getroot()
        # Skips the first child of <results> -- presumably a header element;
        # TODO confirm against actual ami-species output.
        elems = tree.find('results')[1:]
        for sub in elems:
            part = sub.find('eic')
            doc = {
                'retrieved_by': 'ami-species',
                'retrieved_date': datetime.now().strftime("%Y-%m-%d %H%M"),
                'eic': part.get('xpath')
            }
            doc["pre"] = part.find("pre").text
            doc["fact"] = part.find("value").text
            doc["post"] = part.find("post").text
            results.append(doc)
        return results

    def regex(self, urls=[], regexfile=[], tags=[], getkeywords=False):
        """Run ami-regex over every url x regexfile pair and store each hit
        as a Fact, optionally enriched with keywords from the cottagelabs
        parser service."""
        # Accept comma-separated strings as well as lists for all three.
        if not isinstance(urls,list): urls = urls.split(',')
        if not isinstance(tags,list): tags = tags.split(',')
        if not isinstance(regexfile,list): regexfile = regexfile.split(',')
        for url in urls:
            for regex in regexfile:
                url = url.strip().replace('\n','')
                # Bare names resolve to the bundled ami-regexes directory.
                if not regex.startswith('http'): regex = '/opt/contentmine/src/site/portality/ami-regexes/' + regex + '.xml'
                co = [
                    'ami-regex',
                    '-i',
                    url,
                    '-g',
                    regex
                ]
                p = subprocess.Popen(co, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                out, err = p.communicate()
                if err:
                    return {"errors": err}
                else:
                    # find and read the output file (fixed path written by
                    # ami-regex; see concurrency note on ami() above)
                    outputfile = '/opt/contentmine/src/site/target/null.xml/results.xml'
                    ns = etree.FunctionNamespace("http://www.xml-cml.org/ami")
                    ns.prefix = "zf"
                    tree = etree.parse(outputfile)
                    hits = tree.xpath('//zf:hit')
                    for hit in hits:
                        doc = {
                            'tags': tags,
                        }
                        doc["pre"] = hit.get("pre")
                        doc["fact"] = hit.get("word")
                        doc["post"] = hit.get("post")
                        doc['source'] = url
                        if getkeywords:
                            doc['keywords'] = requests.get('http://cottagelabs.com/parser?blurb="' + doc['pre'] + ' ' + doc['fact'] + ' ' + doc['post'] + '"').json()
                            # brief pause so the keyword service is not hammered
                            time.sleep(0.05)
                        f = models.Fact()
                        f.data = doc
                        f.save()
        return {"processing": "please check the facts API for results"}

    def norma(self, url, xsl='/opt/contentmine/src/norma/src/main/resources/org/xmlcml/norma/pubstyle/nlm/toHtml.xsl', output='/opt/contentmine/src/norma/target/normatest'):
        """Run the norma converter on url with the given XSL stylesheet,
        writing into output; returns {"output": ...} or {"errors": ...}."""
        co = [
            'norma',
            '-i',
            url,
            '-x',
            xsl,
            '-o',
            output
        ]
        p = subprocess.Popen(co, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = p.communicate()
        if err:
            return {"errors": err}
        else:
            return {"output": output }
|
import redis
class Base(object):
    """Shared base: opens a StrictRedis connection to localhost:6379 db0.

    Note: no decode_responses, so subclasses get raw bytes back.
    """
    def __init__(self):
        self.r = redis.StrictRedis(host='localhost', port=6379, db=0)
class StringTest(object):
    """Exercises redis string commands (set/get/mset/mget/delete) and list
    push/pop. Does NOT inherit Base: it opens its own connection with
    decode_responses=True so replies come back as str, not bytes.
    """
    def __init__(self):
        # redis.Redis() keeps backwards compatibility with old clients;
        # redis.StrictRedis() does not carry the legacy behavior.
        # self.r = redis.Redis(host='localhost', port=6379, db=0)
        self.r = redis.StrictRedis(host='localhost', port=6379, db=0, decode_responses=True)
    def test_set(self):
        ''' set -- store a value '''
        rest = self.r.set('user2', 'amy')
        print(rest)
        return rest
    def test_get(self):
        '''get -- fetch a value'''
        rest = self.r.get('user2')
        print(rest)
        return rest
    def test_mset(self):
        ''' mset -- store several key/value pairs at once '''
        d = {
            'user3': 'Bob',
            'user4': 'Bobx'
        }
        rest = self.r.mset(d)
        print(rest)
        return rest
    def test_mget(self):
        ''' mget -- fetch several keys at once '''
        d = ['user3','user4']
        rest = self.r.mget(d)
        print(rest)
        return rest
    def test_del(self):
        ''' delete -- remove a key '''
        rest = self.r.delete('user3')
        print(rest)
    def test_push(self):
        ''' lpush/rpush -- push items onto the left/right end of a list '''
        t = ['Amy', 'Jhon']
        # Without * unpacking the whole list would be stored as one element.
        rest = self.r.lpush('l_eat3', *t)
        print(rest)
        rest = self.r.lrange('l_eat3', 0, -1)
        print(rest)
    def test_pop(self):
        ''' lpop/rpop -- pop and return the leftmost/rightmost element '''
        rest = self.r.lpop('l_eat3')
        print(rest)
        rest = self.r.lrange('l_eat3', 0, -1)
        print(rest)
class SetTest(Base):
    """Exercises redis set commands (sadd / srem / sinter)."""
    def test_sadd(self):
        ''' sadd -- add members to a set '''
        l = ['cat', 'dog', 'monkey']
        # rest = self.r.sadd('zoo2', l)
        rest = self.r.sadd('zoo2', *l)
        print(rest)
        rest = self.r.smembers('zoo2')
        print(rest)
    def test_srem(self):
        ''' srem -- remove a member from a set '''
        rest = self.r.srem('zoo2', 'monkey')
        print(rest)
        rest = self.r.smembers('zoo2')
        print(rest)
    def test_sinter(self):
        ''' sinter -- return the intersection of two sets '''
        rest = self.r.sinter('zoo2', 'zoo1')
        print(rest)
class HashTest(Base):
    """Exercises redis hash commands (hset/hexists/hget/hmset/hkeys/hvals/
    hgetall). Inherits Base, so replies are raw bytes (no decode_responses).
    """
    def hset_test(self):
        # The triple-quoted block below is disabled seed code (kept verbatim)
        # that originally loaded two sample news entries into hashes 1 and 2.
        ''' hset设置新闻内容
        self.r.hset(1,'title','朝鲜特种部队视频公布展示士兵身体素质与意志')
        self.r.hset(1,'content','content01')
        self.r.hset(1,'img_url','/static/img/news/01.png')
        self.r.hset(1,'is_valid','true')
        self.r.hset(1,'news_type','推荐')
        self.r.hset(2,'title','男子长得像\"祁同伟\"挨打 打人者:为何加害检察官')
        self.r.hset(2,'content','因与热门电视剧中人物长相相近,男子竟然招来一顿拳打脚踢。4月19日,打人男子周某被抓获。半个月前,酒后的周某看到KTV里有一名男子很像电视剧中的反派。二话不说,周某冲上去就问你为什么要加害检察官?男子莫名其妙,回了一句神经病。周某一听气不打一处来,对着男子就是一顿拳打脚踢,嘴里面还念叨着,“叫你加害检察官,我打死你!”随后,周某趁机逃走。受伤男子立即报警,周某被上海警方上网通缉')
        self.r.hset(2,'img_url','/static/img/news/02.png')
        self.r.hset(2,'is_valid','true')
        self.r.hset(2,'news_type','百家')
        '''
        # hset one field, check existence with hexists, read back with hget.
        ''' mset/mget -- 设置/获取散列值'''
        rest = self.r.hset('stu:002','name','tom')
        print(rest)
        rest = self.r.hexists('stu:002','name')
        print(rest)
        rest = self.r.hget('stu:002', 'name')
        print(rest)
    def mset_test(self):
        ''' 获取新闻的数据 '''
        # Disabled: reading back the seed news hashes field by field.
        # rest = self.r.hget(1,'title')
        # print(rest.decode('utf-8'))
        # rest = self.r.hget(1, 'news_type')
        # print(rest.decode('utf-8'))
        # rest = self.r.hget(3,'title')
        # print(rest.decode('utf-8'))
        # rest = self.r.hget(3, 'news_type')
        # print(rest.decode('utf-8'))
        # hmset stores a whole mapping; hkeys/hvals list fields and values.
        m = {
            'name':'lily',
            'age':18,
            'grade':90
        }
        rest = self.r.hmset('stu:003', m)
        print(rest)
        rest = self.r.hkeys('stu:003')
        print(rest)
        rest = self.r.hvals('stu:003')
        print(rest)
    def test_hgetall(self):
        # hgetall -- fetch the entire hash stored at news:3.
        data = self.r.hgetall('news:3')
        # NOTE(review): without decode_responses the keys come back as
        # bytes, so data['title'] likely needs b'title' -- confirm.
        print(data['title'].decode('utf-8'))
        print(data['content'].decode('utf-8'))
def main():
    """Ad-hoc driver: uncomment whichever scenario should run against the
    local redis server. Currently exercises HashTest.test_hgetall only."""
    # String scenarios:
    # st = StringTest()
    # st.test_set(); st.test_get(); st.test_mset(); st.test_mget()
    # st.test_del(); st.test_push(); st.test_pop()
    # Set scenarios:
    # set_test = SetTest()
    # set_test.test_sadd(); set_test.test_srem(); set_test.test_sinter()
    hash_demo = HashTest()
    # hash_demo.mset_test()
    # hash_demo.hset_test()
    hash_demo.test_hgetall()
if __name__ == "__main__":
main() |
from operator import attrgetter
from six.moves import map
from portia_api.jsonapi.serializers import JsonApiSerializer
from portia_orm.base import AUTO_PK
from portia_orm.exceptions import ProtectedError
from portia_orm.models import (Project, Schema, Field, Extractor, Spider,
Sample, Item, Annotation, RenderedBody,
OriginalBody)
from portia_api.utils.projects import unique_name
from portia_api.utils.annotations import choose_field_type
def clear_auto_created(instance):
    """Mark *instance* as user-managed: drop its auto_created flag and
    persist only that field. No-op when the flag is already clear."""
    if not instance.auto_created:
        return
    instance.auto_created = False
    instance.save(only=('auto_created',))
class SpiderListSerializer(JsonApiSerializer):
    """Compact JSON-API serializer for spiders in list views (only a link
    back to the owning project, no samples relationship)."""
    class Meta:
        model = Spider
        url = '/api/projects/{self.project.id}/spiders/{self.id}'
        links = {
            'project': {
                'related': '/api/projects/{self.project.id}',
            },
        }
class ProjectSerializer(JsonApiSerializer):
    """JSON-API serializer for Project with links to its spiders (using the
    compact list serializer), schemas and extractors."""
    class Meta:
        model = Project
        url = '/api/projects/{self.id}'
        links = {
            'spiders': {
                'related': '/api/projects/{self.id}/spiders',
                'serializer': SpiderListSerializer,
            },
            'schemas': {
                'related': '/api/projects/{self.id}/schemas',
            },
            'extractors': {
                'related': '/api/projects/{self.id}/extractors',
            },
        }
class SchemaSerializer(JsonApiSerializer):
    """JSON-API serializer for Schema; sideloads its fields by default."""
    class Meta:
        model = Schema
        url = '/api/projects/{self.project.id}/schemas/{self.id}'
        links = {
            'project': {
                'related': '/api/projects/{self.project.id}',
            },
            'fields': {
                'related': '/api/projects/{self.project.id}/schemas'
                           '/{self.id}/fields',
            },
        }
        default_kwargs = {
            'include_data': [
                'fields',
            ],
            'exclude_map': {
                'schemas': [
                    'auto-created',
                    'items',
                ]
            }
        }

    def update(self, instance, validated_data):
        """Apply the update, then mark the schema as user-managed."""
        instance = super(SchemaSerializer, self).update(
            instance, validated_data)
        clear_auto_created(instance)
        return instance
class FieldSerializer(JsonApiSerializer):
    """JSON-API serializer for Field; any user edit also marks the owning
    schema (and on update, the field itself) as user-managed."""
    class Meta:
        model = Field
        url = ('/api/projects/{self.schema.project.id}/schemas'
               '/{self.schema.id}/fields/{self.id}')
        links = {
            'schema': {
                'related': '/api/projects/{self.schema.project.id}/schemas'
                           '/{self.schema.id}',
            },
        }
        default_kwargs = {
            'exclude_map': {
                'fields': [
                    'auto-created',
                    'annotations',
                ]
            }
        }

    def create(self, validated_data):
        """Create the field and mark its schema as user-managed."""
        field = super(FieldSerializer, self).create(validated_data)
        clear_auto_created(field.schema)
        return field

    def update(self, instance, validated_data):
        """Apply the update; both field and schema become user-managed."""
        instance = super(FieldSerializer, self).update(instance, validated_data)
        clear_auto_created(instance)
        clear_auto_created(instance.schema)
        return instance

    def delete(self):
        """Deleting a field is a user edit: un-flag the schema first."""
        clear_auto_created(self.instance.schema)
        super(FieldSerializer, self).delete()
class ExtractorSerializer(JsonApiSerializer):
    """JSON-API serializer for Extractor (annotations excluded by default)."""
    class Meta:
        model = Extractor
        url = '/api/projects/{self.project.id}/extractors/{self.id}'
        links = {
            'project': {
                'related': '/api/projects/{self.project.id}',
            },
        }
        default_kwargs = {
            'exclude_map': {
                'extractors': [
                    'annotations',
                ]
            }
        }
class SpiderSerializer(JsonApiSerializer):
    """Full JSON-API serializer for Spider (samples linked, not embedded)."""
    class Meta:
        model = Spider
        url = '/api/projects/{self.project.id}/spiders/{self.id}'
        links = {
            'project': {
                'related': '/api/projects/{self.project.id}',
            },
            'samples': {
                'related': '/api/projects/{self.project.id}/spiders/{self.id}'
                           '/samples',
            },
        }
        default_kwargs = {
            'exclude_map': {
                'spiders': [
                    'samples',
                ]
            }
        }

    def delete(self):
        """Delete the spider; schemas/fields are touched first so they are
        loaded before the cascade runs."""
        project = self.instance.project
        project.schemas  # preload schemas and fields
        super(SpiderSerializer, self).delete()
class SampleSerializer(JsonApiSerializer):
    """JSON-API serializer for Sample; creating a sample guarantees it has
    an item bound to the project's default (or a fresh auto-created) schema."""
    class Meta:
        model = Sample
        url = ('/api/projects/{self.spider.project.id}/spiders'
               '/{self.spider.id}/samples/{self.id}')
        links = {
            'spider': {
                'related': '/api/projects/{self.spider.project.id}/spiders'
                           '/{self.spider.id}',
            },
            'items': {
                'related': '/api/projects/{self.spider.project.id}/spiders'
                           '/{self.spider.id}/samples/{self.id}/items'
                           '?filter[parent]=null',
            },
        }
        default_kwargs = {
            'exclude_map': {
                'samples': [
                    'page-id',
                    'page-type',
                    'original-body',
                    'annotated-body',
                ]
            }
        }

    def create(self, validated_data):
        """Create the sample plus a root item; reuse the project's default
        schema or mint an auto-created one named after the sample."""
        sample = super(SampleSerializer, self).create(validated_data)
        project = sample.spider.project
        schemas = project.schemas
        schema = next((s for s in schemas if s.default), None)
        if schema is None:
            # No default schema: create one with a project-unique name.
            schema_names = map(attrgetter('name'), schemas)
            schema_name = unique_name(sample.name, schema_names)
            schema = Schema(self.storage, id=AUTO_PK, name=schema_name,
                            project=project, auto_created=True)
            schema.save()
        item = Item(self.storage, id=AUTO_PK, sample=sample, schema=schema)
        item.save()
        return sample

    def update(self, instance, validated_data):
        """Apply the update and re-save every project schema so dependent
        schema state is persisted alongside the sample."""
        sample = super(SampleSerializer, self).update(instance, validated_data)
        for schema in sample.spider.project.schemas:
            schema.save()
        return sample
class ItemSerializer(JsonApiSerializer):
    """JSON-API serializer for Item; manages the auto-created schema/field
    bookkeeping when items are created, re-typed or deleted."""
    class Meta:
        model = Item
        url = ('/api/projects/{self.owner_sample.spider.project.id}/spiders'
               '/{self.owner_sample.spider.id}/samples/{self.owner_sample.id}'
               '/items/{self.id}')
        links = {
            'sample': {
                'related': '/api/projects/{self.sample.spider.project.id}'
                           '/spiders/{self.sample.spider.id}/samples'
                           '/{self.sample.id}',
            },
            'parent': {
                'related': '/api/projects/{self.owner_sample.spider.project.id}'
                           '/spiders/{self.owner_sample.spider.id}/samples'
                           '/{self.owner_sample.id}/items/{self.parent.id}',
            },
            'schema': {
                'related': '/api/projects/{self.owner_sample.spider.project.id}'
                           '/schemas/{self.schema.id}',
            },
            'annotations': {
                'related': '/api/projects/{self.owner_sample.spider.project.id}'
                           '/spiders/{self.owner_sample.spider.id}/samples'
                           '/{self.owner_sample.id}/annotations'
                           '?filter[parent]={self.id}',
            },
        }

    def create(self, validated_data):
        """Create the item; when no schema was given, attach a fresh
        auto-created schema, and give nameless sub-items a unique name."""
        item = super(ItemSerializer, self).create(validated_data)
        if item.schema is None:
            sample = item.owner_sample
            project = sample.spider.project
            schema_names = map(attrgetter('name'), project.schemas)
            schema_name = unique_name(sample.name, schema_names,
                                      initial_suffix=1)
            schema = Schema(self.storage, id=AUTO_PK, name=schema_name,
                            project=project, auto_created=True)
            schema.items.add(item)
            schema.save()
        if item.parent and item.name is None:
            sample = item.owner_sample
            item_names = map(attrgetter('name'), sample.ordered_items)
            item.name = unique_name('subitem', item_names, initial_suffix=1)
            item.save(only=('name',))
        return item

    def update(self, instance, validated_data):
        """Apply the update; when the item's schema changed, migrate every
        annotation's field into the new schema (reusing same-named fields,
        creating auto-created ones otherwise) and drop orphaned
        auto-created fields and schemas."""
        current_schema = instance.schema
        instance = super(ItemSerializer, self).update(instance, validated_data)
        new_schema = instance.schema
        if new_schema != current_schema:
            field_map = {field.name: field for field in new_schema.fields}
            for annotation in instance.annotations:
                current_field = annotation.field
                if current_field.name in field_map:
                    # Same-named field exists on the target schema: reuse it
                    # and mark it user-managed.
                    new_field = field_map[current_field.name]
                    clear_auto_created(new_field)
                else:
                    # Otherwise clone the field onto the new schema.
                    new_field = Field(self.storage, id=AUTO_PK,
                                      name=current_field.name,
                                      type=current_field.type,
                                      schema=new_schema,
                                      auto_created=True)
                    field_map[new_field.name] = new_field
                    new_field.save()
                annotation.field = new_field
                annotation.save(only=('field',))
                # Old auto-created fields are now orphaned: delete them.
                if current_field.auto_created:
                    self.deleted.extend(current_field.delete())
            if current_schema.auto_created:
                self.deleted.extend(current_schema.delete())
            clear_auto_created(new_schema)
        return instance

    def delete(self):
        """Refuse to delete the last remaining item of a sample."""
        instance = self.instance
        sample = instance.owner_sample
        items = sample.items
        if len(items) == 1 and items[0] == instance:
            raise ProtectedError(
                u"Cannot delete item {} because it is the only item in the "
                u"sample {}".format(instance, sample))
        super(ItemSerializer, self).delete()
class AnnotationSerializer(JsonApiSerializer):
    """JSON API serializer for Annotation resources.

    Annotations created without an explicit field get an auto-created
    'fieldN' field on their item's schema; re-pointing an annotation at a
    different field cleans up the old auto-created one.
    """

    class Meta:
        model = Annotation
        url = ('/api/projects/{self.owner_sample.spider.project.id}/spiders'
               '/{self.owner_sample.spider.id}/samples/{self.owner_sample.id}'
               '/annotations/{self.id}')
        links = {
            'parent': {
                'related': '/api/projects'
                           '/{self.owner_sample.spider.project.id}/spiders'
                           '/{self.owner_sample.spider.id}/samples'
                           '/{self.owner_sample.id}/items/{self.parent.id}',
            },
            'field': {
                'related': '/api/projects'
                           '/{self.owner_sample.spider.project.id}/schemas'
                           '/{self.parent.schema.id}/fields/{self.field.id}',
            },
        }

    def create(self, validated_data):
        """Create an annotation; auto-create a uniquely named field on the
        parent item's schema when the client supplied none."""
        annotation = super(AnnotationSerializer, self).create(validated_data)
        if annotation.field is None:
            project = annotation.owner_sample.spider.project
            project.schemas  # preload schemas and fields
            item = annotation.parent
            schema = item.schema
            field_names = map(attrgetter('name'), schema.fields)
            field_name = unique_name('field', field_names, initial_suffix=1)
            # choose_field_type infers the type from the annotation content.
            field = Field(self.storage, id=AUTO_PK, name=field_name,
                          type=choose_field_type(annotation), schema=schema,
                          auto_created=True)
            field.annotations.add(annotation)
            field.save()
        return annotation

    def update(self, instance, validated_data):
        """Update an annotation; if it moved to another field, delete the
        old field when it was auto-created and claim the new one."""
        current_field = instance.field
        instance = super(AnnotationSerializer, self).update(
            instance, validated_data)
        new_field = instance.field
        if new_field != current_field:
            if current_field.auto_created:
                self.deleted.extend(current_field.delete())
            clear_auto_created(new_field)
        return instance
class RenderedBodySerializer(JsonApiSerializer):
    """JSON API serializer for a sample's rendered (post-JS) page body."""

    class Meta:
        model = RenderedBody
        url = ('/api/projects/{self.sample.spider.project.id}/'
               'spiders/{self.sample.spider.id}/samples/'
               '{self.sample.id}/rendered_body')
        links = {
            'sample': {
                'related': ('/api/projects/{self.sample.spider.project.id}/'
                            'spiders/{self.sample.spider.id}/samples/'
                            '{self.sample.id}'),
            },
        }


class OriginalBodySerializer(JsonApiSerializer):
    """JSON API serializer for a sample's original (as-fetched) page body."""

    class Meta:
        model = OriginalBody
        url = ('/api/projects/{self.sample.spider.project.id}/'
               'spiders/{self.sample.spider.id}/samples/'
               '{self.sample.id}/original_body')
        links = {
            'sample': {
                'related': ('/api/projects/{self.sample.spider.project.id}/'
                            'spiders/{self.sample.spider.id}/samples/'
                            '{self.sample.id}'),
            },
        }
|
from game_engine.table import Table
import pytest
# A fresh two-seat table starts with both seats empty and the blinds
# taken from the constructor.
def test_init_two_users():
    table = Table(2, (10, 20))
    assert table._small_blind == 10
    assert table._big_blind == 20
    assert len(table._seats) == 2
    assert table._seats[1] is None
    assert table._seats[2] is None


# A user can take any empty seat.
def test_sit_at_table_empty_seat():
    table = Table(2, (10, 20))
    mock_user = 'MOCK USER'
    chosen_seat = 1
    ok, msg = table.sit_at_table(mock_user, chosen_seat)
    assert table._seats[chosen_seat] == 'MOCK USER'
    assert msg == 'User seated at seat: ' + str(chosen_seat)
    assert ok is True


# A user cannot take a seat that is already occupied.
def test_sit_at_table_occupied():
    table = Table(2, (10, 20))
    first_user = 'MOCK USER 1'
    second_user = 'MOCK USER 2'
    chosen_seat = 1
    table.sit_at_table(first_user, chosen_seat)
    ok, msg = table.sit_at_table(second_user, chosen_seat)
    assert table._seats[chosen_seat] != second_user
    assert msg == 'Seat occupied, user not seated'
    assert ok is False
# User should not be able to sit at seat that does not exist
def test_sit_at_table_not_exist():
    table = Table(2, (10, 20))
    # MOCK USER
    user = 'MOCK USER'
    seat = 5 # Random number that does not exist
    return_value, return_msg = table.sit_at_table(user, seat)
    # Checks if user is seated at seat 5 (should be KeyError)
    with pytest.raises(KeyError):
        assert table._seats[seat] != user
    # NOTE(review): these two asserts must sit OUTSIDE the `with` block --
    # if indented inside it they would never run once the KeyError fires.
    assert return_msg == 'Invalid seat, user not seated'
    assert return_value is False
# With only one player seated the game stays pending.
def test_sit_at_table_pending():
    table = Table(2, (10, 20))
    table.sit_at_table('MOCK USER', 1)
    assert table._state == 'PENDING'


# Once both seats are filled the game starts running and the button
# lands on one of the occupied seats.
def test_sit_at_table_running():
    table = Table(2, (10, 20))
    table.sit_at_table('MOCK USER 1', 1)
    table.sit_at_table('MOCK USER 2', 2)
    assert table._state == 'RUNNING'
    assert table._button in (1, 2)


# Emptying a seat drops the game back to pending and frees the seat.
def test_empty_seat():
    table = Table(2, (10, 20))
    table.sit_at_table('MOCK USER 1', 1)
    table.sit_at_table('MOCK USER 2', 2)
    table.empty_seat(1)
    assert table._state == 'PENDING'
    assert table._seats[1] is None
# The button advances to the next occupied seat.
def test_move_button():
    table = Table(9, (10, 20))
    occupant = 'MOCK USER 1'
    table.sit_at_table(occupant, 3)
    table.sit_at_table(occupant, 7)
    table._button = 3
    table.move_button()
    assert table._button == 7


# The button wraps around past the highest occupied seat.
def test_move_button_high():
    table = Table(9, (10, 20))
    occupant = 'MOCK USER 1'
    table.sit_at_table(occupant, 3)
    table.sit_at_table(occupant, 9)
    table._button = 9
    table.move_button()
    assert table._button == 3


# get_button reports the seat the button landed on after a move.
def test_get_button():
    table = Table(9, (10, 20))
    occupant = 'MOCK USER 1'
    table.sit_at_table(occupant, 3)
    table.sit_at_table(occupant, 9)
    table._button = 3
    table.move_button()
    assert table.get_button() == 9


# Updated blinds are read back unchanged.
def test_update_blinds_and_get_blinds():
    table = Table(9, (10, 20))
    new_blinds = (20, 40)
    table.update_blinds(new_blinds)
    assert table.get_blinds() == new_blinds
|
#!/usr/bin/env python
#!-*-coding:utf-8 -*-
# Time :2020/5/14 13:00
# Author : zhoudong
# File : g_tool.py
"""
该文件放一些工具函数
"""
import numpy as np
import itertools
myfloat = np.float32


# One multivariate-Gaussian mixture component, stored per target so its
# density can be evaluated cheaply and repeatedly.
class GmphdComponent:
    def __init__(self, weight, loc, cov):
        """
        :param weight: component weight
        :param loc: mean vector
        :param cov: covariance matrix
        """
        self.weight = myfloat(weight)
        self.loc = np.array(loc, dtype=myfloat, ndmin=2)
        self.cov = np.array(cov, dtype=myfloat, ndmin=2)
        # Normalise shapes: the mean becomes an (n, 1) column vector and
        # the covariance an (n, n) matrix.
        self.loc = np.reshape(self.loc, (np.size(self.loc), 1))
        self.n = np.size(loc)  # state dimension
        self.cov = np.reshape(self.cov, (self.n, self.n))
        # Precompute the pieces of the Gaussian density that do not depend
        # on the evaluation point.
        self.part1 = (2 * np.pi) ** (-self.n * 0.5)
        self.part2 = np.linalg.det(self.cov) ** (-0.5)
        self.invcov = np.linalg.inv(self.cov)  # inverse covariance

    def dmvnorm(self, x):
        """
        Evaluate this component's multivariate Gaussian density at x.
        :param x: state vector (column-shaped input recommended)
        :return: density value
        """
        deviation = np.array(x, dtype=myfloat) - self.loc
        exponent = np.exp(-0.5 * np.dot(np.dot(deviation.T, self.invcov),
                                        deviation))
        return self.part1 * self.part2 * exponent
def dmvnorm(loc, cov, x):
    """
    Evaluate a multivariate Gaussian density at x for inputs that are not
    necessarily wrapped in a GmphdComponent.

    :param loc: mean vector
    :param cov: covariance matrix
    :param x: state vector
    :return: density value as a (1, 1) array

    Fixes vs. the original: x was hard-coded to shape (2, 1), so any state
    dimension other than 2 crashed; loc was never reshaped to a column, so
    a 1-D mean broadcast `x - loc` into an (n, n) matrix and produced a
    matrix-valued "density".
    """
    loc = np.array(loc, dtype=np.float32)
    cov = np.array(cov, dtype=np.float32)
    n = np.size(loc)
    # Work with (n, 1) column vectors throughout.
    loc = np.reshape(loc, (n, 1))
    x = np.reshape(np.array(x, dtype=np.float32), (n, 1))
    part1 = (2 * np.pi) ** (-n * 0.5)
    part2 = np.linalg.det(cov) ** (-0.5)
    dev = x - loc
    part3 = np.exp(-0.5 * np.dot(np.dot(dev.T, np.linalg.inv(cov)), dev))
    return part1 * part2 * part3
# Factorial.
def factorial(n):
    """Return n! for a non-negative integer n (0! == 1)."""
    result = 1
    for factor in range(2, n + 1):
        result *= factor
    return result


# Poisson distribution: probability of exactly k events at rate lambda_c.
def poisspdf(k, lambda_c):
    """Return the Poisson pmf P(X = k) for rate lambda_c."""
    numerator = np.power(np.e, -lambda_c) * np.power(lambda_c, k)
    return numerator / factorial(k)
# Elementary symmetric function.
def delta(L, j):
    """
    Return the j-th elementary symmetric function of L: the sum of the
    products of every j-element combination of L.

    delta(L, 0) == 1 by convention, and delta(L, j) == 0 when j exceeds
    len(L). (The original indexed np.shape(...)[1] on the empty combination
    list in that case and crashed with an IndexError; it also shadowed the
    parameter j with an inner loop variable.)
    """
    if j == 0:
        return 1
    total = 0.0
    for combo in itertools.combinations(L, j):
        product = 1.0
        for value in combo:
            product *= value
        total += product
    return total
# Ad-hoc smoke test left at module scope -- it runs on import.
a = np.array([[2, 2],[2,2]])
# NOTE(review): [[2, 2], [2, 2]] is singular (det == 0), so
# np.linalg.cholesky raises LinAlgError here -- confirm this is intended
# or guard/remove this debug code.
print(np.linalg.cholesky(a))
for i in range(4):
    print(factorial(i))
"""
Given a 32-bit signed integer, reverse digits of an integer.
"""
class Solution(object):
    def reverse(self, x):
        """Reverse the digits of a 32-bit signed integer.

        :type x: int
        :rtype: int -- the reversed value, or 0 if it overflows 32 bits.
        """
        reversed_digits = str(abs(x))[::-1]
        result = int(reversed_digits)
        if x < 0:
            result = -result
        # Clamp to 0 on signed 32-bit overflow, per the problem statement.
        if result > 2 ** 31 - 1 or result < -(2 ** 31):
            return 0
        return result
|
from StringIO import StringIO
from textwrap import dedent
from twitter.checkstyle.iterators import diff_lines
class Blob(object):
    """Minimal stand-in for a git blob: exposes the raw text as a stream
    plus a dummy sha, which is all diff_lines needs."""

    def __init__(self, blob):
        self._blob = blob
        # diff_lines ignores the sha, so any placeholder value works.
        self.hexsha = 'ignore me'

    @property
    def data_stream(self):
        """Return a fresh file-like object over the blob's contents."""
        return StringIO(self._blob)
def make_blob(stmt):
    """Build a Blob from a triple-quoted literal, dropping the leading
    blank line and de-indenting the remainder."""
    body_lines = stmt.splitlines()[1:]
    return Blob(dedent('\n'.join(body_lines)))
def test_diff_lines():
    """diff_lines(a, b) yields the 1-based line numbers of b that differ
    from a (additions/changes in b; deletions from a are not reported)."""
    # Identical blobs: no diff.
    blob_a = make_blob("""
    001 herp derp
    """)
    assert list(diff_lines(blob_a, blob_a)) == []
    # Changed line is reported.
    blob_b = make_blob("""
    001 derp herp
    """)
    assert list(diff_lines(blob_a, blob_b)) == [1]
    # Added line is reported; a removed line is not.
    blob_c = make_blob("""
    001 herp derp
    002 derp derp
    """)
    assert list(diff_lines(blob_a, blob_c)) == [2]
    assert list(diff_lines(blob_c, blob_a)) == []
    # Lines re-added relative to the shorter blob are reported.
    blob_d = make_blob("""
    001
    002
    003
    004
    """)
    blob_e = make_blob("""
    001
    004
    """)
    assert list(diff_lines(blob_d, blob_e)) == []
    assert list(diff_lines(blob_e, blob_d)) == [2, 3]
    # Reordered lines are reported symmetrically.
    blob_f = make_blob("""
    001
    002
    003
    004
    """)
    blob_g = make_blob("""
    002
    001
    004
    003
    """)
    assert list(diff_lines(blob_f, blob_g)) == [1, 3]
    assert list(diff_lines(blob_g, blob_f)) == [1, 3]
|
from django.contrib.auth import get_user_model
from rest_framework import serializers
from webapp.models import Article, Tag
class TagSerializer(serializers.ModelSerializer):
    """Serializes every field of a Tag."""

    class Meta:
        model = Tag
        fields = '__all__'


class UserSerializer(serializers.ModelSerializer):
    """Read-only view of the auth user, with a hyperlink to its detail page."""

    url = serializers.HyperlinkedIdentityField(read_only=True,
                                               view_name='api_v1:user-detail')

    class Meta:
        model = get_user_model()
        fields = ['id', 'url', 'username', 'first_name', 'last_name', 'email']


class ArticleSerializer(serializers.ModelSerializer):
    """Article serializer exposing the author both nested (author) and as a
    hyperlink (author_url), and tags both as PKs (tags) and nested
    (tags_display)."""

    url = serializers.HyperlinkedIdentityField(read_only=True,
                                               view_name='api_v1:article-detail')
    author_url = serializers.HyperlinkedRelatedField(read_only=True, source='author',
                                                     view_name='api_v1:user-detail')
    author = UserSerializer(read_only=True)
    tags_display = TagSerializer(many=True, read_only=True, source='tags')

    class Meta:
        model = Article
        fields = ['id', 'url', 'title', 'text', 'author', 'author_url', 'status',
                  'created_at', 'updated_at', 'tags', 'tags_display']
        read_only_fields = ('author',)
|
import time
def init():
    """ initial state: abort unless the start screen is visible, then
    click through to the main activity (Sikuli screenshot-driven). """
    if not exists("1450617504600.png"):
        exit()
    click("1450617512741.png")
    wait("1450617981095.png")
    click("1450617981095.png")

def backHome():
    """ back to city select activity: keep pressing back until the
    city-select marker shows, then park the mouse on the status bar. """
    while not exists("1450618092142.png"):
        click("1450618178686.png")
        wait("1450619546749.png", 6)
    # Offset above the found anchor lands the cursor on the status bar,
    # keeping hover popups away from the buttons.
    statusBar = find("1450621142544.png").getCenter().offset(0,-64)
    hover(statusBar)
def lottery(item):
    """ lottery: open the given lottery item and double-click through the
    draw three times (8 s apart), plus a final confirmation click.
    Returns home if the draw screen never appears. """
    if exists(item):
        click(item)
        try:
            wait("1450618588432.png", 6)
        # FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; Sikuli's FindFailed is an Exception subclass,
        # so `except Exception` keeps the original recovery behaviour.
        except Exception:
            backHome()
            return
        for x in range(3):
            doubleClick("1450618588432.png")
            time.sleep(8)
        doubleClick("1450618588432.png")
def loopCity():
    """Run the lottery for every known item, then scroll the city page
    until the end-of-list marker appears."""
    while True:
        for item in items:
            lottery(item)
            backHome()
        if exists("1450619672356.png"):
            # End-of-list marker reached: this city is done.
            return
        else:
            # slide up
            #wheel(WHEEL_DOWN, 3)
            statusBar = find("1450621142544.png").getCenter().offset(0,-64)
            wheel(WHEEL_DOWN, 1)
            dragDrop(statusBar, statusBar.offset(Location(10,-100)))

def changeCity(item):
    """Open the city drop-down (when present) and select the given city,
    then wait for the city-select screen to settle."""
    if exists(Pattern("1450658612825.png").similar(0.80)):
        click(Pattern("1450657885056.png").similar(0.80))
    if exists(item):
        click(item)
        wait("1450618092142.png")
# Reference screenshots of each selectable city in the drop-down.
cityList = [
    Pattern("1450658042078.png").similar(0.80),
    Pattern("1450658049729.png").similar(0.80),
    Pattern("1450658055885.png").similar(0.80),
    Pattern("1450658060710.png").similar(0.80),
    Pattern("1450658065534.png").similar(0.80),
    Pattern("1450658070482.png").similar(0.80),
    Pattern("1450658101190.png").similar(0.80),
    Pattern("1450658108132.png").similar(0.80),
    Pattern("1450658114163.png").similar(0.80),
    Pattern("1450658119756.png").similar(0.80),
    Pattern("1450658125427.png").similar(0.80),
    Pattern("1450658130116.png").similar(0.80),
    Pattern("1450658136808.png").similar(0.80),
]
# Screenshots of the lottery entry buttons to click in each city.
items = [Pattern("1450661458586.png").similar(0.91), Pattern("1450926366175.png").similar(0.91)]

# Entry point: start the app, go home, then run every city's lotteries.
if __name__ == "__main__":
    init()
    backHome()
    for city in cityList:
        changeCity(city)
        loopCity()
|
import os
import sys
import warnings
# _ROOT = os.path.abspath(os.path.dirname(__file__))
#
#
# def get_data_path(path):
# return os.path.join(_ROOT, 'data', path)
# Setup warnings to simpler one line warning
def warning_on_one_line(message, category, filename, lineno, file=None, line=None):
    """Format a warning as one line: 'pyrealm/<file>:<line>: <Category>: <msg>'."""
    short_name = os.path.join('pyrealm', os.path.basename(filename))
    return '{}:{}: {}: {}\n'.format(short_name, lineno, category.__name__, message)


warnings.formatwarning = warning_on_one_line
# # And provide a decorator to catch warnings in doctests
# # https://stackoverflow.com/questions/2418570/
# def stderr_to_stdout(func):
# def wrapper(*args):
# stderr_bak = sys.stderr
# sys.stderr = sys.stdout
# try:
# return func(*args)
# finally:
# sys.stderr = stderr_bak
# return wrapper |
#from uccal import Modules
from uccal import Students
#print Modules.addModule("big big big cal", "my big calendar")
#Modules.deleteModule("jeromakay.com_s4gob184n9742hvbvbo4d32t7g@group.calendar.google.com")
#Modules.updateModule("jeromakay.com_csjh4188v45j5pclojlt6784ck@group.calendar.google.com", "even way huge bigger cal", "caling it up")
# Python 2 script (print statement): dump all calendar members.
print Students.getAllMembers()
from flask_wtf import FlaskForm
from wtforms import (
BooleanField,
DateTimeField,
IntegerField,
PasswordField,
RadioField,
SelectField,
StringField,
TextAreaField,
ValidationError,
SubmitField
)
from wtforms.validators import InputRequired, Email, Length, Optional
class BackupFirstForm(FlaskForm):
    """First-run backup form: the database source and interval are
    mandatory because no previous settings exist yet."""

    source = StringField("Database Source", [Length(max=260), InputRequired()], render_kw={"placeholder": "Please enter your database file location (including file extension)"})
    interval = IntegerField("Interval", [InputRequired()], render_kw={"placeholder": "Please select the interval type and enter the duration"})
    interval_type = RadioField("Interval Type", choices=[("min", "Minute"), ("hr", "Hour"), ("d", "Day"), ("wk", "Week"), ("mth", "Month")], default="wk")
    submit = SubmitField("Backup & Save Settings")


class BackupForm(FlaskForm):
    """Subsequent backup form: every field is optional so blanks mean
    'keep the existing setting'; offers manual and settings-updating runs."""

    source = StringField("Database Source", [Length(max=260), Optional()], render_kw={"placeholder": "Leave empty if no changes"})
    interval = IntegerField("Interval", [Optional()], render_kw={"placeholder": "Leave empty if no changes"})
    interval_type = RadioField("Interval Type", choices=[("min", "Minute"), ("hr", "Hour"), ("d", "Day"), ("wk", "Week"), ("mth", "Month")], default="wk")
    manual = SubmitField("Manual Backup")
    update = SubmitField("Backup & Update Settings")
|
import requests
from bs4 import BeautifulSoup as bs
import re
from urllib.request import urlopen
# Scrape pages 1-9 of the adidas KR shoe listing: for each product card,
# collect the name, price and product code, and download the product image.
for pages in range(1,10):
    url = "https://shop.adidas.co.kr/PF020201.action?command=LIST&ALL=ALL&S_CTGR_CD=01001001&CONR_CD=10&S_ORDER_BY=1&S_PAGECNT=100&PAGE_CUR={}&S_SIZE=&S_TECH=&S_COLOR=&S_COLOR2=&CATG_CHK=&CATG_CLK=&STEP_YN=N&S_QUICK_DLIVY_YN=&S_PRICE=&S_STATE1=&S_STATE2=&S_STATE3=&NFN_ST=Y".format(pages)
    headers ={"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.102 Safari/537.36"}
    res = requests.get(url, headers=headers)
    soup = bs(res.text, "lxml")
    items = soup.find_all("div", attrs={"class":"inner"}) # every product card is a <div class="inner">
    for idx, item in enumerate(items): # iterate over this page's product cards
        name = item.find("div", attrs={"class":"info_title"}).get_text() # product name
        price = item.find("div", attrs={"class":"sale"}) # discounted price, if any
        if price: # a sale price exists, so use its text
            price = price.get_text()
        else: # no sale: fall back to the regular price in the info_price div
            price = item.find("div", attrs={"class":"info_price"}).get_text()
        re_price = " ".join(price.split()) # collapse the surplus whitespace in the scraped price
        code = item.find("a") # the product link tag carries the product code
        codes = code['href'] # just the href value
        re_code = codes[20:26] # slice out the 6-character product code
        images = item.find("img")["src"] # image URL from the img tag's src
        if images.startswith("//"): # protocol-relative URL:
            images = "https:" + images # prefix the https scheme
        img_res = requests.get(images)
        with open('shoes_{}_{}.jpg'.format(pages, idx+1),"wb",) as f:
            f.write(img_res.content)
        print(name,re_price,re_code)
|
# Euler's totient: the count of integers in 1..n that are coprime to n.
def Oula():
    """Read an integer n from stdin and print Euler's totient φ(n).

    FIX: the original counted every b in 1..n with n % b != 0 -- i.e. the
    non-divisors of n -- which is not the totient (it gave φ(10) = 6
    instead of 4). Coprimality means gcd(n, b) == 1.
    """
    n = int(input("请输入一个数:"))
    count = 0
    for b in range(1, n + 1):
        if _gcd(n, b) == 1:
            count = count + 1
    print("求得欧拉结果φ(n)=", count)


def _gcd(a, b):
    """Greatest common divisor via Euclid's algorithm."""
    while b:
        a, b = b, a % b
    return a
Oula()
|
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.models import User as DjangoUser
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
from django.utils.translation import ugettext as _
from django.views import View
from .forms import AddUserForm, LoginUserForm
from .models import User
class UserListView(View):
    """Staff-only list of all profile users, ordered by username."""

    def dispatch(self, request, *args, **kwargs):
        # Gate the whole view: non-staff users are bounced to the main page.
        if not (request.user.is_staff or request.user.is_superuser):
            messages.add_message(request, messages.WARNING, _('Sorry, you can\'t do that'))
            return HttpResponseRedirect(reverse('news_api:mainpage'))
        return super().dispatch(request, *args, **kwargs)

    def get(self, request):
        return render(
            request,
            template_name='user_list.html',
            context={'all_users': User.objects.all().order_by('user__username')}
        )
class UserCreateView(View):
    """Render the add-user form (GET) and, on a valid POST, create both
    the Django auth user and the linked profile User row."""

    def get(self, request):
        return render(
            request,
            template_name='user_create.html',
            context={'form': AddUserForm().as_p()}
        )

    def post(self, request):
        form = AddUserForm(request.POST)
        if form.is_valid():
            username = form.cleaned_data['username']
            password = form.cleaned_data['password']
            first_name = form.cleaned_data['first_name']
            last_name = form.cleaned_data['last_name']
            email = form.cleaned_data['email']
            if DjangoUser.objects.filter(username=username).exists():
                messages.add_message(request, messages.WARNING, _('User with this name already exists'))
                return HttpResponseRedirect(reverse('users:user-create'))
            django_user = DjangoUser.objects.create_user(
                username=username,
                password=password,
                first_name=first_name,
                last_name=last_name,
                email=email
            )
            User.objects.create(user=django_user)
            messages.add_message(request, messages.INFO, _('User: {} created successfully').format(username))
            # FIX: was reverse('users:user_list'), which does not match the
            # dash-separated route names used everywhere else in this module
            # ('users:user-list', 'users:user-create') and would raise
            # NoReverseMatch on the success path.
            return HttpResponseRedirect(reverse('users:user-list'))
        messages.add_message(request, messages.ERROR, _('Form invalid'))
        return HttpResponseRedirect(reverse('users:user-create'))
class UserDeleteView(View):
    """Staff-only deletion of a profile user by primary key."""

    def dispatch(self, request, *args, **kwargs):
        # Same staff gate as UserListView.
        if not (request.user.is_staff or request.user.is_superuser):
            messages.add_message(request, messages.WARNING, _('Sorry, you can\'t do that'))
            return HttpResponseRedirect(reverse('news_api:mainpage'))
        return super().dispatch(request, *args, **kwargs)

    def get(self, request, user_id):
        # NOTE(review): User.objects.get raises DoesNotExist (-> 500) for an
        # unknown id; consider get_object_or_404. Deletion happens on GET,
        # which is unsafe against prefetchers/CSRF -- confirm intended.
        user = User.objects.get(id=user_id)
        user.delete()
        return HttpResponseRedirect(reverse('users:user-list'))
class LoginView(View):
    """Show the login form, or clear the recorded session user; POST
    authenticates and logs the user in."""

    def get(self, request):
        if request.session.get('loggedUser') is None:
            return render(
                request,
                template_name='user_login.html',
                context={'form': LoginUserForm().as_p()}
            )
        # A user marker is present: treat GET as "clear it" and go home.
        del request.session['loggedUser']
        return HttpResponseRedirect('/')

    def post(self, request):
        form = LoginUserForm(request.POST)
        if form.is_valid():
            username = form.cleaned_data['username']
            password = form.cleaned_data['password']
            user = authenticate(username=username, password=password)
            if user is not None:
                # FIX: the session marker used to be set BEFORE
                # authentication, so a wrong password still recorded the
                # user as logged in. Record it only on success.
                request.session['loggedUser'] = username
                login(request, user)
                return HttpResponseRedirect('/')
            else:
                messages.add_message(request, messages.ERROR, _('Wrong password'))
                return HttpResponseRedirect(reverse('users:user-create'))
        messages.add_message(request, messages.ERROR, _('Form invalid'))
        return HttpResponseRedirect(reverse('users:user-create'))
class LogoutView(View):
    """Log the current user out and redirect to the site root."""

    def get(self, request):
        logout(request)
        return HttpResponseRedirect('/')
|
def prime_digit(number):
    """Return the list of prime numbers less than or equal to `number`.

    :param number: upper bound (a positive integer)
    :raises TypeError: if `number` is not an integer
    :raises ValueError: if `number` is less than 1

    FIX: the original raised ValueError for number == 1, even though 1 is a
    positive integer; the correct result there is simply an empty list.
    Trial division now stops at sqrt(num), which is both sufficient and
    faster, without changing any result.
    """
    if not isinstance(number, int):
        raise TypeError('please input an integer')
    if number < 1:
        raise ValueError('Please input a positive integer')
    prime_numbers = []
    for num in range(2, number + 1):
        for i in range(2, int(num ** 0.5) + 1):
            if num % i == 0:
                break  # a divisor was found, so num is composite
        else:
            # No divisor found: the for/else confirms num is prime.
            prime_numbers.append(num)
    return prime_numbers
|
# Generated by Django 2.1.2 on 2019-02-02 16:25
from django.db import migrations, models
class Migration(migrations.Migration):
    """Replace review.user_name with an integer user_id column.

    NOTE(review): dropping user_name discards existing reviewer names, and
    pre-existing rows get user_id = 0, which presumably matches no real
    user -- confirm a data migration is not needed.
    """

    dependencies = [
        ('review', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='review',
            name='user_name',
        ),
        migrations.AddField(
            model_name='review',
            name='user_id',
            field=models.IntegerField(default=0),
        ),
    ]
|
import re
# Split a mixed file of mobile ad IDs into ~10 MB chunk files: Google Play
# ad IDs are lowercase hex UUIDs, IDFAs are uppercase hex UUIDs.
gpid = re.compile('[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}')
idfa = re.compile('[0-9A-F]{8}-[0-9A-F]{4}-[0-9A-F]{4}-[0-9A-F]{4}-[0-9A-F]{12}')
ifile = open('tpr_inactive_maids.txt','r')
tenMB = 10*1000*1000
# ~37 bytes per line (36-char UUID + newline), minus a 100-line safety margin.
limit = int(tenMB/37) - 100
gpidindex = 0
idfaindex = 0
gpidcount = 0
idfacount = 0
gpidfile = open('output/validGPIDs-%i.txt' % gpidindex, 'w')
idfafile = open('output/validIDFAs-%i.txt' % idfaindex, 'w')
for line in ifile:
    # Letter case decides the family; a line matching neither is dropped.
    if gpid.match(line):
        gpidfile.write(line)
        gpidcount+=1
        if gpidcount>limit:
            # Chunk full: roll over to the next numbered output file.
            gpidfile.close()
            gpidindex+=1
            gpidfile=open('output/validGPIDs-%i.txt' % gpidindex, 'w')
            gpidcount=0
    if idfa.match(line):
        idfafile.write(line)
        idfacount+=1
        if idfacount>limit:
            idfafile.close()
            idfaindex+=1
            idfafile=open('output/validIDFAs-%i.txt' % idfaindex, 'w')
            idfacount=0
gpidfile.close()
idfafile.close()
|
import unittest
def zeroMatrix(M):
    """Zero out, in place, every row and column of M that contains a zero.

    Returns M itself (also mutated) for convenience; an empty matrix is
    returned untouched.
    """
    if not M or not M[0]:
        return M
    rows_to_zero = set()
    cols_to_zero = set()
    # First pass: record which rows/columns hold at least one zero.
    for r, row in enumerate(M):
        for c, value in enumerate(row):
            if value == 0:
                rows_to_zero.add(r)
                cols_to_zero.add(c)
    # Second pass: blank out every marked row and column.
    for r, row in enumerate(M):
        for c in range(len(row)):
            if r in rows_to_zero or c in cols_to_zero:
                row[c] = 0
    return M
class Test(unittest.TestCase):
    """Single end-to-end check: a zero anywhere wipes its row and column."""

    def test(self):
        matrix = [
            [1, 2, 3, 10 ,0],
            [-1, 1, 2, 1, 3],
            [-1, 0, 2, 1, 3],
        ]
        expect = [
            [0, 0, 0, 0 ,0],
            [-1, 0, 2, 1, 0],
            [0, 0, 0, 0, 0],
        ]
        self.assertEqual(zeroMatrix(matrix), expect)

if __name__ == '__main__':
    unittest.main()
|
from myhdl import *
class Add_shift_top(object):
    """MyHDL signal bundle plus an APB3 bus-functional driver for the
    add/shift top-level design.

    NOTE(review): several latent defects in this class are flagged inline
    rather than changed, because the surrounding MyHDL design is not
    visible here:
      * reset() and transmit() read self.kwargs, but the assignment is
        commented out in __init__ -- both would raise AttributeError.
      * reset() uses self.resetn, which is also commented out (only
        self.presetn exists).
      * setSig_we_odd/we_even/addr_even/addr_odd/din_odd target attributes
        that __init__ never creates.
      * assigning Signal(...) to a .next (rather than a plain value) wraps
        a Signal in a Signal -- confirm against MyHDL semantics.
      * `async=True` (ResetSignal) and the bare `print` statements are
        Python 2 only; `async` is a keyword from Python 3.7.
    """

    def __init__(self):
        # Signal value bounds for the data-path signals.
        DATA_WIDTH = 65536
        ACTIVE_LOW = bool(0)
        self.even_odd = Signal(bool(0))
        self.fwd_inv = Signal(bool(0))
        # Sample / left / right / result data ports (signed, 17-bit range).
        self.din_sam = Signal(intbv(0, min = -DATA_WIDTH, max = DATA_WIDTH))
        self.dout_sam = Signal(intbv(0, min = -DATA_WIDTH, max = DATA_WIDTH))
        self.din_left = Signal(intbv(0, min = -DATA_WIDTH, max = DATA_WIDTH))
        self.dout_left = Signal(intbv(0, min = -DATA_WIDTH, max = DATA_WIDTH))
        self.din_right = Signal(intbv(0, min = -DATA_WIDTH, max = DATA_WIDTH))
        self.dout_right = Signal(intbv(0, min = -DATA_WIDTH, max = DATA_WIDTH))
        self.din_res = Signal(intbv(0, min = -DATA_WIDTH, max = DATA_WIDTH))
        self.dout_res = Signal(intbv(0, min = -DATA_WIDTH, max = DATA_WIDTH))
        # Write enables and 8-bit addresses for each memory port.
        self.we_res = Signal(bool(0))
        self.addr_res = Signal(intbv(0)[8:])
        self.we_sam = Signal(bool(0))
        self.addr_sam = Signal(intbv(0)[8:])
        self.we_left = Signal(bool(0))
        self.addr_left = Signal(intbv(0)[8:])
        self.we_right = Signal(bool(0))
        self.addr_right = Signal(intbv(0)[8:])
        # APB3 bus signals.
        self.pslverr = Signal(bool(0))
        self.prdata = Signal(intbv(0, 0, 2**32))
        self.pready = Signal(bool(0))
        self.pwdata = Signal(intbv(0, 0, 2**32))
        self.paddr = Signal(intbv(0, 0, 2**32))
        self.presetn = ResetSignal(1, ACTIVE_LOW, async=True)
        #self.kwargs = kwargs
        self.transoutrdy = Signal(bool(0))
        self.transinrdy = Signal(bool(0))
        #self.resetn = Signal(bool(0))
        self.penable = Signal(bool(0))
        self.psel = Signal(bool(0))
        self.pwrite = Signal(bool(0))
        self.full = Signal(bool(0))
        self.pclk = Signal(bool(0))
        self.sam = Signal(intbv(0)[8:])
        self.updated = Signal(bool(0))
        # Controller FSM state encoding and current state.
        self.state_t = enum('IDLE', 'UPDATE_SAMPLE', 'TRANSFER_OUT','TRANSFER_IN')
        self.state = Signal(self.state_t.IDLE)
        self.noupdate = Signal(bool(0))

    # FSM state setters.
    def setSig_state_update_sample(self):
        self.state.next = Signal(self.state_t.UPDATE_SAMPLE)
    def setSig_state_transfer_out(self):
        self.state.next = Signal(self.state_t.TRANSFER_OUT)
    def setSig_state_transfer_in(self):
        self.state.next = Signal(self.state_t.TRANSFER_IN)
    def setSig_state_idle(self):
        self.state.next = Signal(self.state_t.IDLE)

    def __str__(self):
        return " %s " % self.state

    def reset(self):
        """Pulse the (active-low) reset for one delay unit each way."""
        # NOTE(review): self.kwargs and self.resetn are both commented out
        # in __init__, so this generator raises AttributeError as written.
        duration = self.kwargs['duration']
        print '-- Resetting --'
        self.resetn.next = False
        yield delay(1)
        print '-- Reset --'
        self.resetn.next = True
        yield delay(1)

    # Per-signal setters used by the test bench.
    def setSig_presetn(self,val):
        self.presetn.next = Signal(bool(val))
    def setSig_we_odd(self,val):
        self.we_odd.next = Signal(bool(val))
    def setSig_we_even(self,val):
        self.we_even.next = Signal(bool(val))
    def setSig_we_left(self,val):
        self.we_left.next = (bool(val))
    def setSig_we_sam(self,val):
        self.we_sam.next = Signal(bool(val))
    def setSig_we_right(self,val):
        self.we_right.next = Signal(bool(val))
    def setSig_addr_sam(self,val):
        self.addr_sam.next = Signal(intbv(val))
    def setSig_addr_left(self,val):
        self.addr_left.next = Signal(intbv(val))
    def setSig_addr_right(self,val):
        self.addr_right.next = Signal(intbv(val))
    def setSig_addr_even(self,val):
        self.addr_even.next = Signal(intbv(val))
    def setSig_addr_odd(self,val):
        self.addr_odd.next = Signal(intbv(val))
    def setSig_din_odd(self,val):
        DATA_WIDTH = 65536
        self.din_odd.next = Signal(intbv(val, min = -DATA_WIDTH, max = DATA_WIDTH))
    def setSig_din_sam(self,val):
        DATA_WIDTH = 65536
        self.din_sam.next = Signal(intbv(val, min = -DATA_WIDTH, max = DATA_WIDTH))
    def setSig_din_left(self,val):
        DATA_WIDTH = 65536
        self.din_left.next = Signal(intbv(val, min = -DATA_WIDTH, max = DATA_WIDTH))
    def setSig_din_right(self,val):
        DATA_WIDTH = 65536
        self.din_right.next = Signal(intbv(val, min = -DATA_WIDTH, max = DATA_WIDTH))
    def setSig_even_odd(self,val):
        self.even_odd.next = Signal(bool(val))
    def setSig_fwd_inv(self,val):
        self.fwd_inv.next = Signal(bool(val))
    def setSig_updated(self,val):
        self.updated.next = Signal(bool(val))
    def setSig_noupdate(self,val):
        self.noupdate.next = Signal(bool(val))
    def setSig_transoutrdy(self,val):
        self.transoutrdy.next = Signal(bool(val))
    def setSig_sam(self,val):
        DATA_WIDTH = 256
        self.sam.next = Signal(intbv(val, min = 0, max = DATA_WIDTH))

    def transmit(self, addr, data):
        """Drive one APB3 write transaction (setup, enable, wait for
        pready, stop), toggling pclk by hand with `duration`-long phases.
        Raises Apb3TimeoutError if pready never asserts within `timeout`.
        """
        duration = self.kwargs['duration']
        timeout = self.kwargs.get('timeout') or 5 * duration
        print '-- Transmitting addr=%s data=%s --' % (hex(addr), hex(data))
        print 'TX: start'
        # Setup phase: address, data and select asserted, enable low.
        self.pclk.next = True
        self.paddr.next = intbv(addr)
        self.pwrite.next = True
        self.psel.next = True
        self.pwdata.next = intbv(data)
        yield delay(duration // 2)
        self.pclk.next = False
        yield delay(duration // 2)
        print 'TX: enable'
        # Access phase: enable asserted, then clock until pready.
        self.pclk.next = True
        self.penable.next = True
        yield delay(duration // 2)
        timeout_count = 0
        while not self.pready:
            print 'TX: wait'
            timeout_count += duration
            if timeout_count > timeout:
                raise Apb3TimeoutError
            self.pclk.next = False
            yield delay(duration // 2)
            self.pclk.next = True
            yield delay(duration // 2)
        self.pclk.next = False
        yield delay(duration // 2)
        print 'TX: stop'
        # Teardown: deassert write, select and enable.
        self.pclk.next = True
        self.pwrite.next = False
        self.psel.next = False
        self.penable.next = False
        yield delay(duration // 2)
        self.pclk.next = False
        yield delay(duration // 2)
import unittest
class TestApb3BusFunctionalModel(unittest.TestCase):
    """Drive one reset + one APB3 write through the bus-functional model."""

    def test_simulate(self):
        import myhdl
        duration=1
        def _sim():
            # NOTE(review): Add_shift_top.__init__ accepts no arguments, so
            # passing duration=duration raises TypeError here; reset() and
            # transmit() also expect a self.kwargs that __init__ never sets.
            pix = Add_shift_top(duration=duration)
            pix_presetn = pix.presetn
            pix_pclk = pix.pclk
            pix_paddr = pix.paddr
            pix_psel = pix.psel
            pix_penable = pix.penable
            pix_pwrite = pix.pwrite
            pix_pwdata = pix.pwdata
            pix_pready = pix.pready
            pix_prdata = pix.prdata
            pix_pslverr = pix.pslverr
            @myhdl.instance
            def __sim():
                yield pix.reset()
                yield pix.transmit(0x4000, 0x0110)
            return __sim
        s = myhdl.Simulation(myhdl.traceSignals(_sim))
        s.run(10000)

if __name__ == '__main__':
    unittest.main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Factorial.
def factorial(num):
    """Return num! for a non-negative integer num (0! == 1)."""
    result = 1
    for factor in range(2, num + 1):
        result *= factor
    return result
# Yang Hui (Pascal's) triangle.
def triangles(rows):
    """Print the first `rows` rows of Pascal's triangle, centred."""
    for n in range(rows):
        # Row entries are the binomial coefficients C(n, m).
        row = [factorial(n) // (factorial(m) * factorial(n - m))
               for m in range(n + 1)]
        formatPrintRow(rows, row)
# Pretty-print a single triangle row.
def formatPrintRow(cols, l):
    """Print row `l`, left-padded with spaces so the triangle is centred."""
    print(' ' * (cols - len(l)), end='')
    for value in l:
        print('%d ' % value, end='')
    print()
triangles(6)
|
import os, sys, time
# Select GPU 0. (NOTE(review): the original comment asked "force run on
# CPU?" -- '0' actually exposes GPU 0; an empty string would hide all GPUs.)
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
caffe_root = os.path.dirname(os.path.abspath(__file__))+'/../../'
sys.path.insert(0, caffe_root+'python')
#os.environ['GLOG_minloglevel'] = '2'
import numpy as np
np.set_printoptions(linewidth=200)
import cv2
import caffe
# First run only: build a one-image LMDB from examples/images/cat.jpg
# using caffe's convert_imageset tool.
if not os.path.isdir(caffe_root+'examples/images/CatLMDB'):
    import subprocess
    with open(caffe_root+'examples/images/cat.txt','w') as listfile:
        listfile.write('cat.jpg 0')
    subprocess.check_output([caffe_root+'build/tools/convert_imageset',
                             '--encoded=1',
                             '--encode_type=png',
                             caffe_root+'examples/images/',
                             caffe_root+'examples/images/cat.txt',
                             caffe_root+'examples/images/CatLMDB'])
caffe.set_mode_gpu()
nnet = caffe.Net(caffe_root+'examples/BILATERAL_TEST/Test_meanfield.prototxt', caffe.TEST)
def displayable(caffeimage):
    """Convert a (1, C, H, W) caffe blob into an (H, W, C) image scaled to [0, 1]."""
    chw = caffeimage[0, :, :, :]
    return np.transpose(chw, (1, 2, 0)) / 255.0
# Repeatedly forward the net, timing each pass and displaying the input
# image next to the bilateral-filtered prediction (blocks on a keypress).
for ii in range(10000):
    beftime = time.time()
    nnet.forward()
    afttime = time.time()
    caffeim = nnet.blobs['data_rgb'].data
    #filt_space = nnet.blobs['filt_space'].data
    filt_bilat = nnet.blobs['pred'].data / 3.0
    # divide by 3 because: 1 from orig, + 2 iterations, all summed without softmax
    print("forward time: "+str(afttime - beftime)+" seconds")
    cv2.imshow('caffeim', displayable(caffeim))
    #cv2.imshow('filt_space', displayable(filt_space))
    cv2.imshow('filt_bilat', displayable(filt_bilat))
    cv2.waitKey(0)
|
import cv2
import numpy as np
# Demo of OpenCV drawing primitives on a loaded image.
img = cv2.imread("1.jpg") # Reading image.
shape = img.shape # Gets the dimensions of the image.
print('Shape =', shape)
cv2.line(img, (50, 50), (430, 802), (255, 0, 0), thickness=2)
# Draws a line on 1st arg, from 1st point (2nd arg) to 2nd point (3rd arg) in the color of 4th arg.
cv2.circle(img, (640, 426), 25, (0, 0, 255), -1)
# Draws a circle on 1st arg at pixel 2nd arg of radius 3rd arg of color 4th arg.
# 5th arg is optional, negative integers will fill the circle.
cv2.rectangle(img, (20, 20), (140, 140), (0, 255, 255), -1)
# Draws a rectangle on 1st arg, from pixel in 2nd arg to pixel in 3rd arg, in 4th arg color.
# 5th arg is optional, negative integers will fill the rectangle.
cv2.ellipse(img, (470, 270), (100, 50), 0, 120, 240, (0, 120, 120), -1)
# Draws an ellipse on 1st arg, with centre as pixel in 2nd arg, axis lengths in 3rd arg,
# and the angle of the ellipse wrt the X-axis as the 4th arg.
# Starting angle of the arc is the 5th arg and ending angle the 6th (a partial ellipse can be drawn).
# 7th arg is color; 8th arg is optional, negative integers will fill the ellipse.
points = np.array([[[550, 200], [650, 200], [550, 300], [650, 300]]], np.int32)
# Next code explains this line properly.
cv2.polylines(img, [points], True, (50, 100, 150), thickness=5)
# Draws a polygon on 1st arg, 2nd arg defines the corners in order.
# 3rd arg decides whether the polygon should be closed or not, 4th arg is the colour.
font = cv2.FONT_HERSHEY_COMPLEX_SMALL
# Selecting a font.
cv2.putText(img, 'Ground', (450, 50), font, 2, (200, 200, 200))
# Writes text on image (1st arg), the text is 2nd arg, 3rd arg is the starting pixel of the text.
# 4th arg is the font, 5th arg is font-size, 6th arg is colour of text.
cv2.imshow('Image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
"""
Project version mutations
"""
from dataclasses import dataclass
from typing import Optional
from typeguard import typechecked
from ...helpers import Compatible, format_result
from .queries import (GQL_UPDATE_PROPERTIES_IN_PROJECT_VERSION)
@dataclass
class MutationsProjectVersion:
"""
Set of ProjectVersion mutations
"""
# pylint: disable=too-many-arguments,too-many-locals
def __init__(self, auth):
"""
Initializes the subclass
Parameters
----------
auth : KiliAuth object
"""
self.auth = auth
@Compatible(['v2'])
@typechecked
def update_properties_in_project_version(
self,
project_version_id: str,
content: Optional[str]):
"""
Update properties of a project version
Parameters
----------
project_version_id :
Identifier of the project version
content :
Link to download the project version
Returns
-------
dict
A result object which indicates if the mutation was successful.
Examples
-------
>>> kili.update_properties_in_project_version(
project_version_id=project_version_id,
content='test')
"""
variables = {
'content': content,
'id': project_version_id,
}
result = self.auth.client.execute(
GQL_UPDATE_PROPERTIES_IN_PROJECT_VERSION, variables)
return format_result('data', result)
|
import os

# Fixed length (in bytes) of every record in the "cities" file; all offsets
# below are derived from this single constant instead of a hard-coded 20.
reclen = 20

with open("cities", "r+b") as f:
    size = os.path.getsize("cities")
    print("Size Of The File Is : ", size)
    record = size // reclen
    print("No Of Records Are : ", record)

    city = input("Enter City Name : ")
    city = city.encode()
    newcity = input("Enter Renamed Name : ")
    # Pad the replacement to the full record width so the record stays aligned.
    newcity = newcity + (reclen - len(newcity)) * " "
    newcity = newcity.encode()

    # Scan every fixed-width record; overwrite each one containing the name.
    # (Renamed the read buffer from `str` — it shadowed the builtin.)
    found = False
    for i in range(1, record + 1):
        f.seek((i - 1) * reclen)
        rec = f.read(reclen)
        if city in rec:
            print("Found At Record : ", i)
            found = True
            f.seek(-reclen, 1)  # rewind to the start of this record
            f.write(newcity)
    if not found:
        print("City Not Found")
|
class Singleton(object):
    """__new__-based singleton: every construction yields one shared instance."""

    def __new__(cls):
        # Create and cache the sole instance the first time only.
        cached = getattr(cls, 'instance', None)
        if cached is None:
            cached = super(Singleton, cls).__new__(cls)
            cls.instance = cached
        return cached
# Demo (Python 2 print syntax): both constructions return the same object.
singleton = Singleton()
another_singleton = Singleton()
print singleton is another_singleton
# State set through one reference is visible through the other.
singleton.only_one_var = "I'm only one var"
print another_singleton.only_one_var
# NOTE: hasattr() in __new__ finds the attribute inherited from Singleton,
# so a subclass reuses the parent's cached instance instead of its own.
class Child(Singleton):
    pass
child = Child()
print child is singleton
print child.only_one_var
child.only_one_var += " edit in Child"
print singleton.only_one_var
|
from django.db import models
#from filefieldtools import upload_to
class Book(models.Model):
    """Book record with an uploaded PDF and an optional cover image."""
    title = models.CharField(max_length=100)
    author = models.CharField(max_length=100)
    # Files are stored under MEDIA_ROOT/books/pdfs/ and books/covers/.
    pdf = models.FileField(upload_to='books/pdfs/')
    cover = models.ImageField(upload_to='books/covers/', null=True, blank=True)
    # Upload files into a directory named after the current date
    #   uploads/books/2012/04/27/<filename>
    #picture = models.ImageField(upload_to=upload_to('books/%Y/%m/%d'))
    # Normalise the file name's case
    #   Upload filename: 'My Picture.JPG'
    #   Path: 'uploads/books/pictures/My-Picture.JPG'
    #picture1 = models.ImageField(upload_to=upload_to('books/pictures', to_lower=False))
    # Generate an anonymised (hashed) file name
    #   uploads/books/pictures/fb999b0773ba7cd946a708aea.<extension>
    #picture = models.ImageField(upload_to=upload_to('books/pictures', to_hash=True))
    # Control the length of the file path
    #   default max_length for ImageField is 100.
    #
    #   Upload filename: '1-2-3-4-5-6-7-8-9-10-11-12-13-14-15-16-17-18-19-20-21-22-23-24-25-26-27-28-29-30-31-32-33-34-35-36-37-38-39-40.xls'
    #   Upload filename length: 110
    #
    #   Path: 'books/pictures/1-2-3-4-5-6-7-8-9-10-11-12-13-14-15-16-17-18-19-20-21-22-23-24-25-26-27-28-29-30.xls'
    #picture = models.ImageField(upload_to=upload_to('books/pictures', field_name='picture'))

    def __str__(self):
        """Human-readable representation used by the Django admin."""
        return self.title

    def delete(self, *args, **kwargs):
        """Delete the stored files first, then the database row."""
        self.pdf.delete()
        self.cover.delete()
        super().delete(*args, **kwargs)

# Bulk-cleanup example (delete() above also removes the stored files):
#books = Book.objects.all()
#for book in books:
    #book.delete()
import pandas as pd
def preprocess():
    """Load the classification spreadsheet, clean the free-text descriptions,
    encode the class labels and build train/test document-term matrices.

    Returns
    -------
    tuple
        (X_test, y_test, X_train_dtm, y_train, X_test_dtm, codeclass) where
        codeclass maps each integer class code back to its label string.
    """
    # -----( read )-----
    raw1 = pd.read_excel("./static/Classification.xlsx")
    print(len(raw1))
    print(raw1.Classification.value_counts())

    # -----( clean )-----
    # Diagnostic: character distribution of the lower-cased corpus.
    # (The original computed raw1_p.lower() and discarded the result.)
    from collections import Counter
    corpus = "".join(raw1.LONGDESC).lower()
    print(Counter(corpus))

    # Lower-case every text column, then strip unwanted punctuation from the
    # descriptions.  The original compared each character against the literal
    # string 'j' and called DataFrame.replace (full-cell match), so no
    # cleaning ever happened; str.translate maps every listed character to a
    # space in one pass.
    raw1 = raw1.apply(lambda col: col.str.lower())
    noneed = "-+'&/()*%\xa0’$"
    table = str.maketrans({ch: ' ' for ch in noneed})
    raw1["LONGDESC"] = raw1["LONGDESC"].str.translate(table)

    # -----( preprocess )-----
    # Assign each class label a stable integer code and keep the inverse map.
    classlable = list(set(raw1.Classification))
    classcode = {label: code for code, label in enumerate(classlable)}
    codeclass = {code: label for label, code in classcode.items()}
    raw1["clas"] = raw1.Classification.map(classcode)

    X = raw1.LONGDESC
    y = raw1.clas
    print(X.shape)
    print(y.shape)

    from sklearn.model_selection import train_test_split
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.3, random_state=1)
    print(X_train.shape)
    print(X_test.shape)
    print(y_train.shape)
    print(y_test.shape)

    # -----( vectorize )-----
    from sklearn.feature_extraction.text import CountVectorizer
    vect = CountVectorizer()
    vect.fit(X_train)
    X_train_dtm = vect.transform(X_train)
    X_test_dtm = vect.transform(X_test)
    return X_test, y_test, X_train_dtm, y_train, X_test_dtm, codeclass
|
import threading
import time


class FileWriter(threading.Thread):
    """Thread that appends ten marker strings to a file, one per second."""

    def __init__(self, fileName, num):
        # The original used "fileWriter"+bytes(num): on Python 3, bytes(num)
        # creates `num` zero-bytes and str+bytes raises TypeError.  str(num)
        # produces the intended "fileWriter<n>" thread name (and matches the
        # Python 2 behaviour, where bytes is an alias for str).
        threading.Thread.__init__(self, name="fileWriter" + str(num))
        self.__fileName__ = fileName
        self.__num__ = num

    def run(self):
        for _ in range(10):
            # Re-open in append mode each pass so several writers interleave;
            # the `with` block guarantees flush+close even on error.
            with open(self.__fileName__, 'a') as output:
                output.write("teststr" + str(self.__num__))
            time.sleep(1)
'''
author: juzicode
address: www.juzicode.com
公众号: juzicode/桔子code
date: 2020.6.11
'''
# Demo of printf-style format specifiers for padding and precision.
print('\n')
print('-----欢迎来到www.juzicode.com')
print('-----公众号: juzicode/桔子code\n')
print('格式化符号对齐控制')
a = 100
# %08x: hexadecimal, zero-padded on the left to 8 characters.
print('整数100 16进制左侧补0显示:a=%08x'%(a))
b = 3.1415925
# %.2f: keep two digits after the decimal point.
print('浮点数3.1415925 保留小数点后2位显示:b=%.2f'%(b))
|
# removes all predicted Terminators with bitscores higher than 30
# writes file of predicted terminators of certain length (default 100)
# writes files for embedding the terminator sequences in (500 before and after predicted terminator)
import argparse
import os.path
import math
#######################################################################
#######################################################################
# methods for checking parsed file types
def checkBedFormat(v):
    """argparse type-checker: accept *v* only when its extension is .bed
    (case-insensitive); otherwise raise ArgumentTypeError."""
    extension = os.path.splitext(v)[1][1:].lower()
    if extension == 'bed':
        return v
    raise argparse.ArgumentTypeError('bed format file type expected')
def checkInt(v):
    """argparse type-checker: parse *v* as a non-negative integer.

    Raises argparse.ArgumentTypeError for negative values.  A non-numeric
    string still raises ValueError from int(), which argparse also reports
    as an invalid argument.
    """
    v = int(v)
    if v < 0:
        raise argparse.ArgumentTypeError('positive Integer value expected')
    # After int(v) succeeds, v is always an int, so the original
    # isinstance() fallback branch was unreachable and has been removed.
    return v
#######################################################################
#######################################################################
# CLI: required input/output paths plus an optional terminator length.
parser = argparse.ArgumentParser(description= 'Filter predicted positives with BLAST bitscores over 30' + '\n'
    'Usage:' + '\t' + 'filterBLAST.py <options> -term -blast -o' +'\n'
    'optional:' + '\t' + '-l')
#required files:
parser.add_argument('-term', dest='predictedTerminators', help='input predicted Terminators directory', type=checkBedFormat, required=True)
parser.add_argument('-blast', dest='blastFile', help='input BLAST directory', required=True)
parser.add_argument('-o', dest='outpath', help='output path and filename prefix', required=True)
#optional
parser.add_argument('-l', dest='lengthTerminator', help='length of artificial terminator, default:100', type=checkInt, nargs='?', default=100)
args = parser.parse_args()
predictedTerminators = args.predictedTerminators
blastFile = args.blastFile
outpath = args.outpath
lengthTerminator = args.lengthTerminator
# Split the terminator length into ~1/6 (l1) and ~5/6 (l2) parts, used as
# up-/downstream extents around the predicted coordinate.
l1 = lengthTerminator * 0.16666666666666664
l2 = lengthTerminator - l1
l1 = int(math.ceil(l1))
l2 = int(math.floor(l2))
# Genome metadata (accession, replicon, length, file-name prefix) is inferred
# from substrings of the input file name.
organism = ''
chrom = ''
chrom2 = ''
plasmid = ''
lengthGenome = 0
if "BS" in predictedTerminators:
    organism = 'B.subtilis'
    chrom = 'NC_000964.3/1-4215606'
    lengthGenome = 4215606
    brev = 'BS'
if "EF" in predictedTerminators:
    organism = 'E.faecalis'
    if 'chrom' in predictedTerminators:
        chrom = "NC_004668.1"
        plasmid = 'Chromosome'
        lengthGenome = 3218031
        brev = 'EF_chrom'
    if 'pl1' in predictedTerminators:
        chrom = "NC_004669.1"
        plasmid = 'Plasmid1'
        lengthGenome = 66320
        brev = 'EF_pl1'
    if 'pl2' in predictedTerminators:
        chrom = "NC_004671.1"
        plasmid = 'Plasmid2'
        lengthGenome = 57660
        brev = 'EF_pl2'
    if 'pl3' in predictedTerminators:
        chrom = "NC_004670.1"
        plasmid = 'Plasmid3'
        lengthGenome = 17963
        brev = 'EF_pl3'
if "LM" in predictedTerminators:
    organism = 'L.monocytogenes'
    chrom = "NC_003210.1"
    lengthGenome = 2944528
    brev = 'LM'
if 'SP' in predictedTerminators:
    organism = 'S.pneumoniae'
    chrom = "NC_003028.3"
    chrom2 = chrom
    lengthGenome = 2160842
    brev = 'SP'
print '\n' + str(organism) + ' ' + str(plasmid)
outfiles = [outpath + brev + '_predTerm_BLAST_predictedTerminators_NO_knownTerminators_NO_genes_long.bed',
    outpath + brev + '_500front_BLAST_predictedTerminators_NO_knownTerminators_NO_genes_long.bed',
    outpath + brev + '_500back_BLAST_predictedTerminators_NO_knownTerminators_NO_genes_long.bed']
#######################################################################
#######################################################################
# Collect BLAST hit ids with bitscore > 30 (column 12 of tabular BLAST
# output) and all predicted terminator ids, then keep only terminators
# without such a hit.
bitscoreOver30Coords = set()
distanceCoords = set()
with open(predictedTerminators,'r') as term, open(blastFile,'r') as blast:
    for line1 in blast:
        if float(line1.split()[11]) > 30.0:
            coordBLAST = str(line1.split()[0])
            bitscoreOver30Coords.add(coordBLAST)
    for line2 in term:
        coordDistance = line2.split()[3]
        distanceCoords.add(coordDistance)
# print sorted(distanceCoords)
print "Predicted Terminators before: " + str(len(distanceCoords))
print "With Bitscores over 30: " + str(len(bitscoreOver30Coords))
# difference of two sets (A-B): elements only in A but not in B
withoutOver30 = (distanceCoords - bitscoreOver30Coords)
print "Predicted Terminators Without Bitscores over 30: " + str(len(withoutOver30))
outfiles1 = [open(i, 'w') for i in outfiles]
# For each kept terminator, write the strand-aware terminator interval plus
# the 500 bp windows immediately up- and downstream of it, skipping entries
# whose windows would fall outside the genome.
for item in withoutOver30:
    coord = int(item.split()[-1].split('_')[0])
    strand = item.split()[-1].split('_')[-1]
    if coord-l2+500 > 0 and coord+l2+500 <= lengthGenome:
        if strand == '-':
            outfiles1[0].write(str(chrom) + '\t' + str(coord-l2) + '\t' + str(coord+l1) + '\t' + str(item) +'\n')
            outfiles1[1].write(str(chrom) + '\t' + str(coord-l2-1-500) + '\t' + str(coord-1-l2) + '\t' + str(item) +'\n')
            outfiles1[2].write(str(chrom) + '\t' + str(coord+l1+1) + '\t' + str(coord+l1+1+500) + '\t' + str(item) +'\n')
        else:
            outfiles1[0].write(str(chrom) + '\t' + str(coord-l1) + '\t' + str(coord+l2) + '\t' + str(item) +'\n')
            outfiles1[1].write(str(chrom) + '\t' + str(coord-l1-1-500) + '\t' + str(coord-1-l1) + '\t' + str(item) +'\n')
            outfiles1[2].write(str(chrom) + '\t' + str(coord+l2+1) + '\t' + str(coord+l2+1+500) + '\t' + str(item) +'\n')
import unittest
from mlpnn.Structure.Neuron import Neuron
from mlpnn.Structure.Synapse import Synapse
class LayerTest(unittest.TestCase):
    """Unit tests for Synapse wiring and weight updates."""

    def test_synapse_creation(self):
        # A synapse should link its two neurons and keep the given weight.
        src, dst = Neuron(1), Neuron(2)
        link = Synapse(src, dst, initial_weight=0.5)
        self.assertEqual(src.id, link.previous.id)
        self.assertEqual(dst.id, link.next.id)
        self.assertEqual(0.5, link.weight)

    def test_synapse_updates_weight(self):
        # Storing a delta then applying it should add to the current weight.
        src, dst = Neuron(1), Neuron(2)
        link = Synapse(src, dst, initial_weight=0.5)
        link.store_weight(0.5)
        link.update_weight()
        self.assertEqual(1.0, link.weight)
# Allow running this test module directly: `python <this_file>.py`.
if __name__ == '__main__':
    unittest.main()
|
# Smoke-test prints used to verify that the Jenkins CI pipeline picks up
# commits and runs scheduled builds; output strings are the only behaviour.
print('Test Jenkins of Integration')
print('Secondary Modification Test')
print('xintianjia')
print('gaizhenghaode')
print('自动构建定时任务')
from __future__ import unicode_literals
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn import model_selection, naive_bayes, svm
from sklearn.metrics import accuracy_score
from os.path import dirname, abspath ,join
import matplotlib.pyplot as plt
import pickle
# Project root (three directory levels up from this file) is prepended to
# sys.path so the local `Log` module imported below can be resolved.
d = dirname(dirname(dirname(abspath(__file__)))) #set files directory path
import sys
# insert at position 1 in the path, as 0 is the path of this file.
sys.path.insert(1, d)
print(sys.path)
import Log
'''
####################################################################
###########################LEARNING SECTION#########################
####################################################################
'''
def fit(train_x , train_y, test_x, test_y, save_model=True, plotting=False, print_out=True, models="ALL"):
    """Train the requested classifier(s) and report accuracy.

    Parameters
    ----------
    train_x, train_y, test_x, test_y : vectorized features and labels.
    save_model : pickle each trained model under trained_models/.
    plotting : show a bar chart comparing accuracies (only for models="ALL").
    print_out : echo accuracies to stdout.
    models : "ALL", "NAIVE_BAYES" or "SVM".

    Returns
    -------
    (nb_accuracy, svm_accuracy) for "ALL", a single accuracy for a specific
    model, or None when `models` is not recognised.
    """
    # str() the flag parameters: the original concatenated bools directly,
    # which raises TypeError at the very first call.
    Log.log("fit process started with parameteres : save_model : " + str(save_model) +
            " - plotting : " + str(plotting) + " - models : " + models, "train.py")
    if models == "ALL":
        Log.log("starting Naive Bayes training.", "train.py")
        nb_accuarcy = fit_nb(train_x, train_y, test_x, test_y, save_model=save_model, print_out=print_out)
        Log.log("starting SVM training.", "train.py")
        svm_accuracy = fit_svm(train_x, train_y, test_x, test_y, save_model=save_model, print_out=print_out)
        if plotting:
            # Plotting: side-by-side accuracy bars for the two models.
            names = ['Naive Bayes = ' + str(nb_accuarcy), 'SVM = ' + str(svm_accuracy)]
            values = [nb_accuarcy, svm_accuracy]
            plt.figure(figsize=(9, 3))
            plt.bar(names, values, color=['red', 'blue'])
            plt.suptitle('Accuracy')
            plt.show()
        # Return for "ALL" regardless of plotting (the original only returned
        # from inside the plotting branch as laid out).
        return nb_accuarcy, svm_accuracy
    elif models == "NAIVE_BAYES":
        Log.log("starting Naive Bayes training.", "train.py")
        nb_accuarcy = fit_nb(train_x, train_y, test_x, test_y, save_model=save_model, print_out=print_out)
        return nb_accuarcy
    elif models == "SVM":
        Log.log("starting SVM training.", "train.py")
        svm_accuracy = fit_svm(train_x, train_y, test_x, test_y, save_model=save_model, print_out=print_out)
        return svm_accuracy
    else:
        Log.error("no model(s) found.", "train.py")
        # `return null` was a NameError; None is the intended sentinel.
        return None
def fit_svm(train_x , train_y, test_x, test_y, save_model=True,print_out=True):
    """Fit a linear-kernel SVC, report its accuracy on the test split and
    optionally pickle it to trained_models/svm_model.pkl.

    Returns
    -------
    float : accuracy as a percentage.
    """
    # SVM Learning: fit the classifier on the vectorized training data.
    SVM = svm.SVC(C=1.0, kernel='linear', degree=3, gamma='auto')
    SVM.fit(train_x, train_y)
    # Predict on the held-out split and score it.
    predictions_SVM = SVM.predict(test_x)
    svm_accuracy = accuracy_score(predictions_SVM, test_y) * 100
    if print_out:
        print("SVM Accuracy Score -> ", svm_accuracy)
    # str() the float: the original concatenated it directly, raising TypeError.
    Log.log("SVM model trained with accuarcy : " + str(svm_accuracy), "train.py")
    # Save SVM to file in the current working directory
    if save_model:
        with open(join(d, "trained_models", "svm_model.pkl"), 'wb') as filesvm:
            pickle.dump(SVM, filesvm)
        # Escaped backslash: '\s' is an invalid escape sequence warning.
        Log.log("SVM Model Saved to trained_models\\svm_model.pkl", "train.py")
    else:
        Log.warning("SVM Model Not Saved, save_model parameter not setted true. unable use pretrained model in feature.", "train.py")
    return svm_accuracy
def fit_nb(train_x , train_y, test_x, test_y, save_model=True,print_out=True):
    """Fit a multinomial Naive Bayes classifier, report its accuracy on the
    test split and optionally pickle it to trained_models/naive_bayes_model.pkl.

    Returns
    -------
    float : accuracy as a percentage.
    """
    # NAIVE BAYES: fit the classifier on the vectorized training data.
    Naive = naive_bayes.MultinomialNB()
    Naive.fit(train_x, train_y)
    # Predict on the held-out split and score it.
    predictions_NB = Naive.predict(test_x)
    nb_accuarcy = accuracy_score(predictions_NB, test_y) * 100
    if print_out:
        print("Naive Bayes Accuracy Score -> ", nb_accuarcy)
    # str() the float: the original concatenated it directly, raising TypeError.
    Log.log("Naive Bayes model trained with accuarcy : " + str(nb_accuarcy), "train.py")
    # Save NB to file in the current working directory
    if save_model:
        with open(join(d, "trained_models", "naive_bayes_model.pkl"), 'wb') as filenaive:
            pickle.dump(Naive, filenaive)
        Log.log("NB Model Saved to trained_models\\naive_bayes_model.pkl", "train.py")
    else:
        Log.warning("NB Model Not Saved, save_model parameter not setted true. unable use pretrained model in feature.", "train.py")
    return nb_accuarcy
"""Script for identifying wavelength regions containing interstellar features"""
import apoNN.src.data as apoData
import apoNN.src.utils as apoUtils
import apoNN.src.vectors as vectors
import apoNN.src.fitters as fitters
import apoNN.src.evaluators as evaluators
import apoNN.src.occam as occam_utils
import numpy as np
import random
import pathlib
import pickle
from ppca import PPCA
import apogee.tools.path as apogee_path
from apogee.tools import air2vac, atomic_number,apStarWavegrid
import matplotlib.pyplot as plt
# Use APOGEE data release 16 for all apogee.tools lookups below.
apogee_path.change_dr(16)
###Setup
# All pickled inputs/outputs live under <repo>/outputs/data.
root_path = pathlib.Path(__file__).resolve().parents[2]/"outputs"/"data"
#root_path = pathlib.Path("/share/splinter/ddm/taggingProject/tidyPCA/apoNN/scripts").parents[1]/"outputs"/"data"
def standard_fitter(z, z_occam):
    """Build a StandardFitter: change-of-basis to a basis better suited for
    scaling (relative scaling, pooled, robust)."""
    fitter = fitters.StandardFitter(z, z_occam, use_relative_scaling=True,
                                    is_pooled=True, is_robust=True)
    return fitter
def simple_fitter(z, z_occam):
    """Baseline fitter that only scales the dimensions of the input
    representation (relative scaling, pooled, robust)."""
    fitter = fitters.SimpleFitter(z, z_occam, use_relative_scaling=True,
                                  is_pooled=True, is_robust=True)
    return fitter
###Hyperparameters
z_dim = 30 #PCA dimensionality
###
# Load pre-pickled spectra (with/without interstellar bands), stellar labels
# and the allStar catalogue from outputs/data.
with open(root_path/"spectra"/"without_interstellar"/"cluster.p","rb") as f:
    Z_occam = pickle.load(f)
with open(root_path/"spectra"/"without_interstellar"/"pop.p","rb") as f:
    Z = pickle.load(f)
with open(root_path/"spectra"/"with_interstellar"/"cluster.p","rb") as f:
    Z_occam_interstellar = pickle.load(f)
with open(root_path/"spectra"/"with_interstellar"/"pop.p","rb") as f:
    Z_interstellar = pickle.load(f)
with open(root_path/"labels"/"core"/"cluster.p","rb") as f:
    Y_occam = pickle.load(f)
with open(root_path/"labels"/"core"/"pop.p","rb") as f:
    Y = pickle.load(f)
with open(root_path/"allStar.p","rb") as f:
    allStar = pickle.load(f)
### generate the two datasets
# Split stars by heliocentric radial velocity: |v| < 5 vs v > 80 km/s samples.
low_vrad_cut = np.abs(allStar["VHELIO_AVG"])<5
high_vrad_cut = allStar["VHELIO_AVG"]>80
data_low = apoData.Dataset(allStar[np.where(low_vrad_cut)])
# Keep only wavelength bins that are non-zero for at least one low-vrad star.
nonzero_idxs = np.where(np.sum(~(data_low.masked_spectra.data==0),axis=0)!=0)[0]
spectra_low = apoData.infill_masked_spectra(data_low.masked_spectra[:,nonzero_idxs]
        ,data_low.masked_spectra[0:500,nonzero_idxs])
data_high = apoData.Dataset(allStar[np.where(high_vrad_cut)])
spectra_high = apoData.infill_masked_spectra(data_high.masked_spectra[:,nonzero_idxs]
        ,data_low.masked_spectra[0:500,nonzero_idxs])
### Apply PCA using the low extinction spectra and find the residuals on the high extinction residuals
from sklearn.decomposition import PCA
pca = PCA(n_components=30)
pca.fit(spectra_low)
rec_spec_low = pca.inverse_transform(pca.transform(spectra_low))
rec_spec_high = pca.inverse_transform(pca.transform(spectra_high))
#diff_res = np.mean((np.abs(spectra_high-rec_spec_high)),axis=0)-np.mean((np.abs(spectra_low-rec_spec_low)),axis=0)
# Mean reconstruction residual of the high-vrad sample, per wavelength bin.
diff_res = np.mean((spectra_high-rec_spec_high),axis=0)
### Create figure
mask_interstellar, interstellar_locs = apoUtils.get_interstellar_bands()
# Re-embed the residuals onto the full apStar wavelength grid (bins that were
# dropped by nonzero_idxs stay at zero).
diff_res_full = np.zeros(apStarWavegrid().shape) #diff_res_full adds the nan values in nonzero_idxs to
diff_res_full[nonzero_idxs] = diff_res
save_path = root_path.parents[0]/"figures"/"vrad"
save_path.mkdir(parents=True, exist_ok=True)
plt.figure(figsize=[14,3])
plt.plot(apStarWavegrid(),diff_res_full,color="black",linewidth=0.5)
alpha_loc = 0.2
plt.ylabel("residuals",fontsize=14)
plt.xlabel(r"wavelength $\lambda$ $(\AA)$",fontsize=14)
plt.gcf().subplots_adjust(bottom=0.25)
plt.savefig(save_path/"residuals_vrad_spectra.pdf",format="pdf",bbox_inches='tight')
|
def merge_sort(arr):
    """Return a new ascending-sorted list via top-down merge sort."""
    if len(arr) <= 1:
        return arr
    mid = len(arr) // 2
    # Sort each half recursively, then merge the two sorted runs.
    return merge(merge_sort(arr[:mid]), merge_sort(arr[mid:]))
def merge(left, right):
    """Merge two sorted lists into one sorted list (stable: ties take from
    *left* first)."""
    merged = []
    i = j = 0
    # Consume the smaller head element until one side runs out.
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    # At most one of these extends with anything.
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged
# Quick demonstration on a reversed list.
sample = [5,4,3,2,1]
print(merge_sort(sample))
#!/usr/bin/env python3
##
## EPITECH PROJECT, 2020
## 107transfer
## File description:
## maths python
##
import sys
import error
import function
def main():
    """Entry point: handle -h/--help, validate argv, then run the transfer
    function computation on the two parsed polynomials."""
    wants_help = "-h" in sys.argv or "--help" in sys.argv
    if wants_help:
        error.usage()
        return 0
    error.all_error(len(sys.argv))
    numerator = function.func_in_tab(1)
    denominator = function.func_in_tab(2)
    function.func_calc(numerator, denominator)
    return 0
if __name__ == "__main__":
main()
|
# Exercise
# Re-implemented with Conv1D
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
print(x_train.shape, y_train.shape)  # (60000, 28, 28), (60000,) <- grayscale
print(x_test.shape, y_test.shape)    # (10000, 28, 28), (10000,)
# Scale pixel intensities to [0, 1].
x_train = x_train.astype('float32')/255.
x_test = x_test/255. # (x_test.reshape(x_test.shape[0], x_test.shape[1], x_test.shape[2], 1)
# One-hot encode the digit labels.
from tensorflow.keras.utils import to_categorical
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
print(y_train)
print(y_test)
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv1D, MaxPooling1D, Flatten, Dropout
# Each 28x28 image is fed to Conv1D as a length-28 sequence of 28-channel rows.
model = Sequential()
model.add(Conv1D(filters=256, kernel_size=2, padding='same',
                 strides=1, input_shape=(28,28)))
model.add(MaxPooling1D(pool_size=2))
model.add(Dropout(0.2))
model.add(Conv1D(64, 2, padding='same', strides=1))
model.add(MaxPooling1D(pool_size=2))
model.add(Dropout(0.2))
model.add(Conv1D(64, 2, padding='same', strides=1))
model.add(MaxPooling1D(pool_size=2))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.summary()
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
# Stop early once the training loss stalls for 10 epochs.
from tensorflow.keras.callbacks import EarlyStopping
es = EarlyStopping(monitor='loss', patience=10, mode='auto')
model.fit(x_train, y_train, epochs=4000, batch_size=32, validation_split=0.2, verbose=2, callbacks=[es])
loss, acc = model.evaluate(x_test, y_test)
print('loss :', loss)
print('acc :', acc)
# Result
# loss : 0.05688846483826637
# acc : 0.9869999885559082
# Show expected vs predicted digit for ten sample images.
y_pred = model.predict(x_test)
print('==========================')
print(' 예상 ' ,'|',' 예측 ')
for i in range(10):
    print(' ', np.argmax(y_test[i+40]), ' |', np.argmax(y_pred[i+40]))
|
#!/usr/bin/env python
# coding: utf-8
### Extraction of topics from Wikipedia pages ###
import sys
import os
import numpy as np
import networkx as nx
import requests
import pandas as pd
import csv
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import time
import community
from fonctions import *
### Parameters for the following pipeline ###
# Silence pandas' SettingWithCopyWarning for chained assignment globally.
pd.set_option('mode.chained_assignment', None)
### Making a class to englobe all variables ###
class Parameters:
    """Bundle of settings for one region/date-range topic-extraction run.

    Parameters
    ----------
    region : Wikipedia language edition, e.g. 'en'.
    date_beg, date_end : period bounds as 'YYYYMMDD' strings.
    path : results directory; derived from region/dates when empty.
    graph_type : 'gexf' or 'graphml' input graph format.
    list_ignored_topics : topic labels excluded from cluster summaries.
    prob_threshold : minimum topic-score kept when picking a page's topic.
    save / plot : whether intermediate CSVs are written / figures shown.
    """

    # Default topic labels excluded from cluster summaries.
    ignored_topics = ['Culture.Biography*', 'Compilation.List_Disambig', 'Geography', 'STEM.STEM*']

    def __init__(self, region, date_beg, date_end, path='', graph_type='gexf',
                 list_ignored_topics=None, prob_threshold=0.1, save=True, plot=False):
        # Guard against the shared-mutable-default pitfall: the original used
        # the class-level list as the default argument, so edits on one
        # instance leaked into every other. Copy the default instead.
        if list_ignored_topics is None:
            list_ignored_topics = list(Parameters.ignored_topics)
        self.region = region
        self.date_beg = date_beg
        self.date_end = date_end
        self.graph_type = graph_type
        self.list_ignored_topics = list_ignored_topics
        self.prob_threshold = prob_threshold
        self.save = save
        self.plot = plot
        # Default output folder: Results/<region>/<region>_<beg>_<end>/
        if (path == ''):
            self.path = "Results/"+region+"/"+region+"_"+date_beg+"_"+date_end+"/"
        else:
            self.path = path
### Importing the nodes as a DataFrame for easier manipulations ###
def init_graph(param):
    """Load the peaks graph, compute per-node statistics (degree, Louvain
    community, betweenness centrality) and return them as a DataFrame
    indexed by page id; written to <path>/nodes.csv when param.save is True."""
    print("Initializing graph...", end="\r")
    region = param.region
    date_beg = param.date_beg
    date_end = param.date_end
    path = param.path
    graph_type = param.graph_type
    save = param.save
    # Try with default name from SparkWiki
    try:
        graph = nx.read_gexf(path+'peaks_graph_'+date_beg+'_'+date_end+'.gexf')
        graph = nx.Graph(graph)
    except:
        ### With .GEXF ###
        if (graph_type == 'gexf'):
            graph = nx.read_gexf(path+'graph.gexf')
            graph = nx.Graph(graph)
        ### With .GRAPHML ###
        if (graph_type == 'graphml'):
            graph = nx.read_graphml(path+'graph.graphml')
    dataFrame = pd.DataFrame.from_dict(dict(graph.nodes(data=True)), orient='index')
    dataFrame['Id'] = dataFrame.index
    dataFrame.rename(columns={'label': 'Label'}, inplace=True)
    dataFrame['Label'] = dataFrame['Label'].astype('string')
    dataFrame['Id'] = dataFrame['Id'].astype('int64')
    # NOTE(review): nx.degree's `weight` expects an edge-attribute NAME (a
    # string); passing the literal 10 looks unintended — confirm whether
    # plain (unweighted) degree was meant here.
    degree = dict(nx.degree(graph, nbunch=None, weight=10))
    partition = community.best_partition(graph, randomize=True, weight='Weight', resolution=1.5)
    btw_cent = nx.betweenness_centrality(graph,normalized=False)
    dataFrame['Degree'] = pd.DataFrame.from_dict(degree, orient='index')[0]
    dataFrame['modularity_class'] = pd.DataFrame.from_dict(partition, orient='index')[0]
    dataFrame['betweenesscentrality'] = pd.DataFrame.from_dict(btw_cent, orient='index')[0]
    # The second sort supersedes the first; final order is by Id ascending.
    dataFrame.sort_values(by = ['modularity_class'], inplace=True, ascending = [True])
    dataFrame.sort_values(by = ['Id'], inplace=True, ascending = [True])
    if (save == True):
        dataFrame.to_csv(path + 'nodes.csv', encoding='utf-8')
    sys.stdout.write("\033[K")
    print("Initializing graph: Done")
    return dataFrame
### Associate the 'Qid' value of Wikipedia pages in the DataFrame ###
### Give '-1' value if an error has occured during the query ###
def match_Qids(param):
    """Query the MediaWiki API for every page id in nodes.csv and attach its
    Wikidata Qid (and canonical title); pages that fail get Qid '-1'.
    Saves the result to <path>/pages_Qids.csv when param.save is True."""
    region = param.region
    date_beg = param.date_beg
    date_end = param.date_end
    path = param.path
    graph_type = param.graph_type
    save = param.save
    dataFrame = pd.read_csv(path + 'nodes.csv')
    # URL for the queries
    urls = "https://"+region+".wikipedia.org/w/api.php?action=query&prop=pageprops&format=json&pageids="
    i=0
    Nb_pages = len(dataFrame)
    for pageId in np.int64(dataFrame['Id']):
        response = requests.get(urls + str(pageId)).json()
        try:
            # findkeys (from fonctions) walks the nested JSON for these keys.
            Qid = (list(findkeys(response, 'wikibase_item'))[0])
            Title = (list(findkeys(response, 'title'))[0])
        except IndexError:
            # Key absent from the response: mark the page as unresolved.
            dataFrame.loc[dataFrame['Id'] == pageId, 'Qid'] = '-1'
            i+=1
            # Loading display
            print("Matching Qids:", i,"/", Nb_pages, dataFrame.loc[dataFrame['Id'] == pageId, 'Label'], "\t error", end="\r")
            sys.stdout.flush()
        else:
            dataFrame.loc[dataFrame['Id'] == pageId, 'Qid'] = Qid
            dataFrame.loc[dataFrame['Id'] == pageId, 'Label'] = Title
            i+=1
            # Loading display
            sys.stdout.write("\033[K")
            print("Matching Qids:", i,"/", Nb_pages, dataFrame.loc[dataFrame['Id'] == pageId, 'Label'].values[0], end="\r")
    # Save the DataFrame with their associated Qids
    if (save ==True):
        dataFrame.to_csv(path + 'pages_Qids.csv', encoding='utf-8')
    sys.stdout.write("\033[K")
    print("Matching Qids: Done")
    return dataFrame
### Extracting topic pages from database API ###
def match_topic(param):
    """For every page Qid in pages_Qids.csv, query the topic-classification
    API and append one row per (page, topic, score) pair; the result is
    saved to <path>/pages_topic.csv when param.save is True."""
    region = param.region
    date_beg = param.date_beg
    date_end = param.date_end
    path = param.path
    graph_type = param.graph_type
    save = param.save
    dataFrame = pd.read_csv(path + 'pages_Qids.csv')
    Match_topic_API = pd.DataFrame()
    ### The API's URL from the topic is extracted ###
    urls = "http://86.119.25.229:5000/api/v1/wikidata/topic?qid="
    a=0
    n = dataFrame.index[-1]
    for pageIndex in dataFrame.index:
        pageQid = dataFrame.at[pageIndex, 'Qid']
        pageModularity = dataFrame.at[pageIndex, 'modularity_class']
        response = requests.get(urls + pageQid + "&threshold=" + '0.1').json()
        scores = list(findkeys(response, 'score'))
        topics = list(findkeys(response, 'topic'))
        try:
            page_title = response['name']
        except KeyError:
            # Fall back on the stored label when the API omits the name.
            page_title = dataFrame.at[pageIndex, 'Label']
        # One output row per returned (topic, score) pair for this page.
        for i in range(len(scores)):
            page = dataFrame.iloc[pageIndex]
            page['Topic'] = topics[i]
            page['Probability'] = scores[i]
            Match_topic_API = Match_topic_API.append(page, ignore_index=True)
        a+=1
        # Loading display
        sys.stdout.write("\033[K")
        print("Matching topics:", a, "/",n, " ",page_title, end="\r")
    # Drop the stale CSV index column carried over from pages_Qids.csv.
    Match_topic_API.drop(columns = ['Unnamed: 0'], inplace = True )
    # Save the results with their associated topics
    if (save==True):
        Match_topic_API.to_csv(path + 'pages_topic.csv', encoding='utf-8')
    sys.stdout.write("\033[K")
    print("Matching topics: Done")
    return Match_topic_API
### Counting the number of views per page using pageviews.toolforge.org API ###
def count_views(param):
    """Sum daily pageview counts between date_beg and date_end for each page
    via the Wikimedia REST API; pages whose query fails get 0 views.
    Saves the result to <path>/pages_views.csv when param.save is True."""
    region = param.region
    date_beg = param.date_beg
    date_end = param.date_end
    path = param.path
    graph_type = param.graph_type
    save = param.save
    df = pd.read_csv(path + 'pages_topic.csv', index_col = 'Unnamed: 0')
    urls = "https://wikimedia.org/api/rest_v1/metrics/pageviews/per-article/"+region+".wikipedia/all-access/all-agents/"
    i=0
    # De-duplicate page ids while preserving their order.
    list_id = list(dict.fromkeys(df['Id']))
    Nb_pages = len(list_id)
    for pageId in list_id:
        label = df.loc[df['Id'] == pageId, 'Label'].max()
        # API article titles use underscores instead of spaces.
        label = label.replace(" ", "_")
        try:
            response = requests.get(urls+label+"/daily/"+date_beg+"/"+date_end ).json()
            views = np.sum(list(findkeys(response, 'views')))
        except KeyError:
            df.loc[df['Id'] == pageId, 'Views'] = 0
            i+=1
            # Loading display
            sys.stdout.write("\033[K")
            print("Counting views:", i,"/", Nb_pages, "Error", end="\r")
        else:
            df.loc[df['Id'] == pageId, 'Views'] = views
            i+=1
            # Loading display
            sys.stdout.write("\033[K")
            print("Counting views:", i,"/", Nb_pages, label," views: ", views, end="\r")
    # Save the DataFrame with their associated views
    if (save==True):
        df.to_csv(path + 'pages_views.csv', encoding='utf-8')
    sys.stdout.write("\033[K")
    print("Counting views: Done")
    return df
### Saving to nodes attributes into the graph ###
def save_graph_attributes(param):
    """Re-load the graph, keep one dominant topic per page (highest Weight
    among topics above prob_threshold, else the most probable one), copy the
    computed attributes onto the graph nodes and write filled_graph.gexf
    (plus filled_nodes.csv when param.save is True)."""
    print("Saving graph attributes...", end="\r")
    region = param.region
    date_beg = param.date_beg
    date_end = param.date_end
    path = param.path
    graph_type = param.graph_type
    save = param.save
    list_ignored_topics = param.list_ignored_topics
    prob_threshold = param.prob_threshold
    # Try with default name from SparkWiki
    try:
        graph = nx.read_gexf(path+'peaks_graph_'+date_beg+'_'+date_end+'.gexf')
        graph = nx.Graph(graph)
    except:
        ### With .GEXF ###
        if (graph_type == 'gexf'):
            graph = nx.read_gexf(path+'graph.gexf')
            graph = nx.Graph(graph)
        ### With .GRAPHML ###
        if (graph_type == 'graphml'):
            graph = nx.read_graphml(path+'graph.graphml')
    df = pd.read_csv(path + 'pages_views.csv', index_col = 'Unnamed: 0')
    df.rename_axis("Index", inplace=True)
    nodes = pd.DataFrame()
    # weight_topic / string_topic_separator come from `fonctions` (star import).
    df = weight_topic(df, list_ignored_topics)
    string_topic_separator(df)
    df.sort_values(by = ['Id', 'Weight'], inplace=True, ascending = [True, False])
    list_id = list(dict.fromkeys(df['Id']))
    # Keeping only the node with the higher Weight and threshold them with lower 'Probability' (~confidence)
    for Id in list_id:
        page_max = df.loc[(df['Id'] == Id) & (df['Probability'] > prob_threshold), 'Weight']
        if (page_max.empty == True):
            # No topic above the threshold: fall back to the most probable one.
            id_max = df.loc[df['Id'] == Id, 'Probability'].idxmax()
        else :
            id_max = page_max.idxmax()
        nodes = nodes.append(df.loc[df.index == id_max], ignore_index=True)
    # Copy each per-page column onto the graph's node attributes.
    add_graph_attribute(graph, nodes, 'Degree')
    add_graph_attribute(graph, nodes, 'modularity_class')
    add_graph_attribute(graph, nodes, 'betweenesscentrality')
    add_graph_attribute(graph, nodes, 'Qid')
    add_graph_attribute(graph, nodes, 'Topic')
    add_graph_attribute(graph, nodes, 'Main topic')
    add_graph_attribute(graph, nodes, 'Subtopic')
    add_graph_attribute(graph, nodes, 'Weight')
    add_graph_attribute(graph, nodes, 'Views')
    # Save the graph
    nx.write_gexf(graph, path + 'filled_graph.gexf')
    # Save the result
    if (save==True):
        nodes.to_csv(path + 'filled_nodes.csv', encoding='utf-8')
    sys.stdout.write("\033[K")
    print("Saving graph attributes: Done")
    return graph
### Display a bar chart of the repartition of topics per cluster ###
### Save the number of each topic per cluster and their ratio ###
### Note : here only ignore 'Culture.Biography' topic ###
def count_topic_per_cluster(param):
    """Count topics per graph cluster and plot their distribution.

    Reads ``filled_nodes.csv`` from ``param.path``, ignores the
    'Culture.Biography' topic (unless a cluster has nothing else), and for
    every cluster writes a horizontal bar chart of subtopic percentages to
    ``<path>/Figures/Cluster_<id>.png``.

    Parameters
    ----------
    param : object
        Needs attributes ``path`` (str), ``save`` (bool) and ``plot`` (bool).

    Returns
    -------
    pandas.DataFrame
        One row per (cluster, subtopic) with 'Subtopic', 'Percentage' and
        'Count' columns, indexed by 'modularity_class'. Also written to
        ``list_cluster_topic.csv`` when ``param.save`` is true.
    """
    print("Counting topics per clusters...", end="\r")
    path = param.path
    save = param.save
    plot = param.plot
    # Create a folder for the figures (replaces the old bare try/except
    # around os.mkdir, which also swallowed unrelated OS errors).
    os.makedirs(path + 'Figures', exist_ok=True)
    nodes = pd.read_csv(path + 'filled_nodes.csv', index_col='Unnamed: 0')
    per_cluster = []
    nb_cluster = int(nodes['modularity_class'].max()) + 1
    for cluster_id in range(nb_cluster):
        df = nodes.loc[(nodes['modularity_class'] == cluster_id)
                       & (~nodes['Topic'].str.contains('Culture.Biography'))]
        # Avoid clusters without topics
        if df.empty:
            df = nodes.loc[nodes['modularity_class'] == cluster_id]
        # Count each topic and make a ratio over the total. Work on a copy
        # so we do not mutate a view of `nodes` (SettingWithCopyWarning).
        df = df.copy()
        df['Count'] = 1
        cluster = df.groupby('Subtopic')['Count'].sum()
        df_cluster = pd.DataFrame(cluster)
        df_cluster['modularity_class'] = cluster_id
        df_cluster['Percentage'] = (cluster / cluster.sum() * 100).round(decimals=1)
        df_cluster['Subtopic'] = df_cluster.index
        df_cluster.set_index('modularity_class', inplace=True)
        df_cluster.sort_values(by=['Count'], ascending=[False], inplace=True)
        df_cluster = df_cluster[['Subtopic', 'Percentage', 'Count']]
        # DataFrame.append was removed in pandas 2.0; collect and concat once.
        per_cluster.append(df_cluster)
        # Making descending lists for plotting
        labels = list(df_cluster['Subtopic'])[::-1]
        ratio = list(df_cluster['Percentage'])[::-1]
        ind = np.arange(len(ratio))
        fig1, ax1 = plt.subplots()
        ax1.barh(ind, ratio, height=0.8, align='center')
        # Labeling everything
        ax1.set_yticks(ind)
        ax1.set_yticklabels(labels)
        for i, v in enumerate(ratio):
            ax1.text(v, i, " " + str(v), color='black', va='center')
        ax1.set_xlabel('Percentage')
        ax1.set_ylabel('Topics')
        ax1.set_title('Cluster ' + str(cluster_id))
        fig1.savefig(path + 'Figures/Cluster_' + str(cluster_id) + '.png',
                     bbox_inches='tight', transparent=False, pad_inches=0.1)
        if plot:
            plt.show()
        # Close the figure we actually drew on (the original closed an empty
        # extra figure and leaked one subplots figure per cluster).
        plt.close(fig1)
    list_cluster_topic = (
        pd.concat(per_cluster) if per_cluster else pd.DataFrame()
    )
    # Save the result
    if save:
        list_cluster_topic.to_csv(path + 'list_cluster_topic.csv', encoding='utf-8')
    sys.stdout.write("\033[K")
    print("Counting topics per clusters: Done")
    return list_cluster_topic
def translate_label(param):
    """Translate node labels to English and store them on the graph.

    Loads ``filled_graph.gexf`` and ``filled_nodes.csv`` from ``param.path``,
    translates every 'Label' with googletrans into a new 'Label_en' column,
    attaches it to the graph and saves both as ``filled_graph_translated.gexf``
    and ``filled_nodes_translated.csv``.

    Parameters
    ----------
    param : object
        Needs attributes ``path`` (str) and ``region`` (language/region code,
        'ZH' is mapped to googletrans' 'zh-cn').

    Returns
    -------
    pandas.DataFrame or None
        The nodes with the added 'Label_en' column, or None when the graph
        file is missing.
    """
    print("Translating label...", end="\r")
    from googletrans import Translator
    translator = Translator()
    region = param.region
    path = param.path
    # Try with default name from SparkWiki
    try:
        graph = nx.read_gexf(path + 'filled_graph.gexf')
        graph = nx.Graph(graph)
    except Exception:
        print("Error: filled_graph.gexf not found")
        return
    df = pd.read_csv(path + 'filled_nodes.csv', index_col='Unnamed: 0')
    n = df['Label'].size
    # Chinese needs googletrans' 'zh-cn' code; every other region maps to its
    # lowercase code. This collapses the two previously duplicated loops.
    src_lang = 'zh-cn' if region == 'ZH' else region.lower()
    for i, label in enumerate(df['Label'], start=1):
        label_en = translator.translate(label, src=src_lang, dest='en').text
        df.loc[df['Label'] == label, 'Label_en'] = label_en
        # Loading display
        sys.stdout.write("\033[K")
        print("Translating label:", i, "/", n, label, '->', label_en, end="\r")
    add_graph_attribute(graph, df, 'Label_en')
    # Save the graph
    nx.write_gexf(graph, path + 'filled_graph_translated.gexf')
    df.to_csv(path + 'filled_nodes_translated.csv', encoding='utf-8')
    sys.stdout.write("\033[K")
    print("Translating label: Done")
    return df
|
import pytest
import transaction
from datetime import datetime, timedelta, date
from webob.multidict import MultiDict
from onegov.activity import ActivityCollection
from onegov.activity import AttendeeCollection
from onegov.activity import BookingCollection
from onegov.activity import InvoiceCollection
from onegov.activity import OccasionCollection
from onegov.activity import PeriodCollection
from onegov.core.utils import Bunch
from onegov.feriennet.collections import BillingCollection
from onegov.feriennet.forms import NotificationTemplateSendForm, PeriodForm
from onegov.feriennet.forms import VacationActivityForm
from onegov.user import UserCollection
def create_form(session, confirmable, start, delta=None):
    """Build a PeriodForm whose five consecutive dates step by *delta*.

    prebooking_start is *start*; every following date field is one *delta*
    (default ten days) after the previous one.
    """
    step = delta or timedelta(days=10)
    fmt = "%Y-%m-%d"
    cursor = start

    def next_date():
        # advance the running date and render it for the form payload
        nonlocal cursor
        cursor = cursor + step
        return cursor.strftime(fmt)

    form = PeriodForm(MultiDict([
        ('title', 'My Period'),
        ('confirmable', confirmable),
        ('finalizable', False),
        ('prebooking_start', start.strftime(fmt)),
        ('prebooking_end', next_date()),
        ('booking_start', next_date()),
        ('booking_end', next_date()),
        ('execution_start', next_date()),
        ('execution_end', next_date())
    ]))
    form.request = Bunch(translate=lambda txt: txt, include=lambda src: None)
    form.model = PeriodCollection(session)
    return form
def add_period_by_form(form, session):
    """Persist a new period from *form*, mirroring the view with name='new'."""
    collection = PeriodCollection(session)
    return collection.add(
        title=form.title.data,
        prebooking=form.prebooking,
        booking=form.booking,
        execution=form.execution,
        minutes_between=form.minutes_between.data,
        confirmable=form.confirmable.data,
        finalizable=form.finalizable.data,
        active=False,
    )
def edit_period_by_form(form, period):
    """Point *form* at an existing *period*, simulating the edit view."""
    # simulated the edit view
    form.model = period
@pytest.mark.parametrize('confirmable,start, delta', [
    (True, date(2020, 4, 1), timedelta(days=10)),
    (False, date(2020, 4, 1), timedelta(days=10)),
])
def test_period_form(session, confirmable, start, delta):
    """Regression test for FER-861: non-confirmable periods must collapse
    their (hidden) prebooking dates onto the booking start."""
    # Fixes issue FER-861
    booking_start = start + 2 * delta
    form = create_form(session, confirmable, start, delta)
    assert form.confirmable.data is confirmable
    assert form.booking_start.data == booking_start
    assert form.validate()
    assert form.booking_start.data == booking_start
    period = add_period_by_form(form, session)
    if not confirmable:
        # The prebooking fields are hidden in the ui, but the user still could
        # have filled in some dates, so check if the are resetted
        assert form.prebooking == (None, None)
        assert period.prebooking_end == booking_start
        assert period.prebooking_start == booking_start
    # Generate form from model and simulate get request
    form.model = period
    assert not form.is_new
    form.process(obj=period)
    # Bug fix: the original `assert X == start if confirmable else
    # booking_start` parsed as a conditional expression, so the
    # non-confirmable branch only asserted that a date is truthy.
    assert form.prebooking_start.data == (
        start if confirmable else booking_start
    )
    # Start earlier, no intersections
    new_booking_start = form.booking_start.data
    form.validate()
    assert not form.errors
    if not confirmable:
        assert form.prebooking_start.data == new_booking_start
        assert form.prebooking_end.data == new_booking_start
    form.populate_obj(period)
    session.flush()
    # Start earlier, adjust prebooking
    new_booking_start = start - timedelta(days=100)
    form.booking_start.data = new_booking_start
    validated = form.validate()
    if confirmable:
        assert form.errors
        return
    assert validated
    assert form.prebooking_start.data == new_booking_start
    assert form.prebooking_end.data == new_booking_start
    form.populate_obj(period)
    session.flush()
    assert period.prebooking_start == new_booking_start
    assert period.prebooking_end == new_booking_start
def test_vacation_activity_form(session, test_password):
    """The username choices only include admins plus the current organiser,
    and the field disappears entirely for non-admins."""
    users = UserCollection(session)
    for username, realname, role in (
        ('admin@example.org', 'Robert Baratheon', 'admin'),
        ('editor@example.org', None, 'editor'),
        ('member@example.org', None, 'member'),
    ):
        users.add(
            username=username,
            realname=realname,
            password='foobar',
            role=role)

    form = VacationActivityForm()
    form.request = Bunch(
        is_admin=True,
        current_username='editor@example.org',
        session=session
    )
    form.on_request()
    assert form.username.data == 'editor@example.org'
    assert form.username.choices == [
        ('editor@example.org', 'editor@example.org'),
        ('admin@example.org', 'Robert Baratheon')
    ]

    # non-admins do not get a username field at all
    form.request.is_admin = False
    form.on_request()
    assert form.username is None
def test_notification_template_send_form(session):
    """Exercise NotificationTemplateSendForm recipient selection end to end.

    Builds two activities with one occasion and one accepted booking each,
    then checks how the occasion choices and the recipient queries react to
    period confirmation, booking states, user states and unpaid bills.
    """
    activities = ActivityCollection(session, type='vacation')
    attendees = AttendeeCollection(session)
    periods = PeriodCollection(session)
    occasions = OccasionCollection(session)
    bookings = BookingCollection(session)
    users = UserCollection(session)
    admin = users.add(
        username='admin@example.org',
        realname='Robert Baratheon',
        password='foobar',
        role='admin')
    organiser = users.add(
        username='organiser@example.org',
        realname=None,
        password='foobar',
        role='editor')
    users.add(
        username='member@example.org',
        realname=None,
        password='foobar',
        role='member')
    # prebooking window straddles "now", execution lies in the future
    prebooking = tuple(d.date() for d in (
        datetime.now() - timedelta(days=1),
        datetime.now() + timedelta(days=1)
    ))
    execution = tuple(d.date() for d in (
        datetime.now() + timedelta(days=10),
        datetime.now() + timedelta(days=12)
    ))
    period = periods.add(
        title="Ferienpass 2016",
        prebooking=prebooking,
        booking=(prebooking[1], execution[0]),
        execution=execution,
        active=True
    )
    foo = activities.add("Foo", username='admin@example.org')
    foo.propose().accept()
    bar = activities.add("Bar", username='organiser@example.org')
    bar.propose().accept()
    o1 = occasions.add(
        start=datetime(2016, 11, 25, 8),
        end=datetime(2016, 11, 25, 16),
        age=(0, 10),
        spots=(0, 2),
        timezone="Europe/Zurich",
        activity=foo,
        period=period,
    )
    o1.username = admin.username
    o2 = occasions.add(
        start=datetime(2016, 11, 25, 17),
        end=datetime(2016, 11, 25, 20),
        age=(0, 10),
        spots=(0, 2),
        timezone="Europe/Zurich",
        activity=bar,
        period=period,
    )
    o2.username = organiser.username
    a1 = attendees.add(admin, 'Dustin', date(2000, 1, 1), 'male')
    a2 = attendees.add(organiser, 'Mike', date(2000, 1, 1), 'female')
    b1 = bookings.add(admin, a1, o1)
    b1.state = 'accepted'
    b1.cost = 100
    b2 = bookings.add(organiser, a2, o2)
    b2.state = 'accepted'
    b2.cost = 100
    transaction.commit()

    # create a mock request
    def invoice_collection(user_id=None, period_id=None):
        return InvoiceCollection(session, user_id=user_id, period_id=period_id)

    def request(admin):
        # admin=True mimics an admin request, otherwise an organiser request
        return Bunch(
            app=Bunch(
                active_period=periods.active(),
                org=Bunch(
                    geo_provider='geo-mapbox',
                    open_files_target_blank=True
                ),
                invoice_collection=invoice_collection,
                periods=periods.query().all(),
                schema='',
                websockets_private_channel='',
                websockets_client_url=lambda *args: '',
                version='1.0',
                sentry_dsn=None
            ),
            session=session,
            include=lambda *args: None,
            model=Bunch(period_id=period.id),
            is_admin=admin,
            is_organiser_only=not admin and True or False,
            is_manager=admin and True or False,
            translate=lambda text, *args, **kwargs: text,
            locale='de_CH',
            current_username=(
                admin and 'admin@example.org' or 'organiser@example.org'
            )
        )

    # in the beginning there are no recipients
    form = NotificationTemplateSendForm()
    form.model = Bunch(period_id=period.id)
    form.request = request(admin=True)
    assert form.has_choices  # we still have choices (like send to users)
    assert not form.occasion.choices
    # once the request is processed, the occasions are added
    form.on_request()
    assert form.has_choices
    assert len(form.occasion.choices) == 2
    assert len(form.send_to.choices) == 7
    # if the period is not confirmed, we send to attendees wanting the occasion
    periods.query().one().confirmed = False
    bookings.query().filter_by(username=admin.username)\
        .one().state = 'denied'
    transaction.commit()
    form = NotificationTemplateSendForm()
    form.model = Bunch(period_id=period.id)
    form.request = request(admin=True)
    form.on_request()
    assert len(form.occasion.choices) == 2
    occasions = [c[0] for c in form.occasion.choices]
    assert len(form.recipients_by_occasion(occasions, False)) == 2
    assert len(form.recipients_by_occasion(occasions, True)) == 2
    # if the period is confirmed, we send to attendees with accepted bookings
    periods.query().one().confirmed = True
    transaction.commit()
    form = NotificationTemplateSendForm()
    form.model = Bunch(period_id=period.id)
    form.request = request(admin=True)
    form.on_request()
    assert len(form.occasion.choices) == 2
    occasions = [c[0] for c in form.occasion.choices]
    assert len(form.recipients_by_occasion(occasions, False)) == 1
    assert len(form.recipients_by_occasion(occasions, True)) == 2
    # the number of users is independent of the period
    assert len(form.recipients_by_role(('admin', 'editor', 'member'))) == 3
    assert len(form.recipients_by_role(('admin', 'editor'))) == 2
    assert len(form.recipients_by_role(('admin',))) == 1
    # if the period is confirmed, there are accepted recipients
    period = periods.query().one()
    period.active = True
    period.confirmed = True
    transaction.commit()
    assert len(form.recipients_by_occasion(occasions)) == 2
    # only accepted bookings are counted
    bookings.query().filter_by(username=admin.username)\
        .one().state = 'cancelled'
    transaction.commit()
    occasions = [c[0] for c in form.occasion.choices]
    # without organisers
    assert len(form.recipients_by_occasion(occasions, False)) == 1
    # with
    assert len(form.recipients_by_occasion(occasions, True)) == 2
    # inactive users may be exluded
    form.state.data = ['active']
    assert len(form.recipients_pool) == 3
    form.state.data = ['active', 'inactive']
    assert len(form.recipients_pool) == 3
    form.state.data = ['inactive']
    assert len(form.recipients_pool) == 0
    # bookings count towards the wishlist if the period is active,
    period = periods.query().one()
    period.active = True
    period.confirmed = False
    transaction.commit()
    form.request = request(admin=True)
    # do not count cancelled bookings...
    form.__dict__['period'] = period
    assert len(form.recipients_with_wishes()) == 2
    assert len(form.recipients_with_accepted_bookings()) == 0
    # otherwise they count towards the bookings
    period = periods.query().one()
    period.confirmed = True
    transaction.commit()
    form.request = request(admin=True)
    form.__dict__['period'] = period
    assert len(form.recipients_with_wishes()) == 0
    assert len(form.recipients_with_accepted_bookings()) == 1
    # count the active organisers
    form.request = request(admin=True)
    assert len(form.recipients_which_are_active_organisers()) == 2
    # count the users with unpaid bills
    form.request = request(admin=True)
    assert len(form.recipients_with_unpaid_bills()) == 0
    period = periods.query().one()
    billing = BillingCollection(request=Bunch(
        session=session,
        app=Bunch(invoice_collection=invoice_collection)
    ), period=period)
    billing.create_invoices()
    transaction.commit()
    form.request = request(admin=True)
    assert len(form.recipients_with_unpaid_bills()) == 1
    # organisers are not counted as active if the occasion has been cancelled
    occasions = OccasionCollection(session)
    occasions.query().first().cancelled = True
    transaction.commit()
    form.request = request(admin=True)
    assert len(form.recipients_which_are_active_organisers()) == 1
    for occasion in occasions.query():
        occasion.cancelled = False
    transaction.commit()
    form.request = request(admin=True)
    assert len(form.recipients_which_are_active_organisers()) == 2
@pytest.mark.parametrize('recipient_count,roles,states', [
    (2, ['admin', 'editor'], ['active']),
    (4, ['admin', 'editor'], ['active', 'inactive']),
    (1, ['editor'], ['inactive']),
])
def test_notification_send_template_by_role(
        scenario, recipient_count, roles, states):
    """The by_role recipient query honours both role and active/inactive
    state filters."""
    # Check by_role with inactive users
    # in the beginning there are no recipients
    session = scenario.session
    users = UserCollection(session)
    # add each role active and not active
    for username in (
        'admin@example.org',
        'editor@example.org',
    ):
        role = username.split('@')[0]
        users.add(username=username, password='foobar', role=role)
        # the 'ex_' twin account is the deactivated counterpart
        users.add(username=f'ex_{username}', password='foobar', role=role,
                  active=False)
    scenario.add_period()
    scenario.commit()
    scenario.refresh()
    period = scenario.latest_period
    # simulate the POSTed form payload with repeated roles/state keys
    form = NotificationTemplateSendForm(MultiDict([
        ('send_to', 'by_role'),
        *(('roles', role) for role in roles),
        *(('state', s) for s in states)
    ]))
    assert form.send_to.data == 'by_role'
    assert form.roles.data == roles
    assert form.state.data == states
    form.request = Bunch(session=session)
    form.model = Bunch(period_id=period.id)
    assert len(form.recipients) == recipient_count
|
from fact import fact

# Print the factorial of 6. The original computed fact(6), discarded the
# result and then printed the function object itself — show the value.
print(fact(6))
from django.test import TestCase
from posts.models import Group
class GroupModelTest(TestCase):
    """Model-level checks for the Group model."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        Group.objects.create(
            title='Название сообщества',
            slug='test-group',
            description='Описание'
        )
        cls.group = Group.objects.get(slug='test-group')

    def test_verbose_name(self):
        """verbose_name of each field matches the expected value."""
        expected_verbose_names = {
            'title': 'Название группы',
            'slug': 'Уникальное имя',
            'description': 'Описание'
        }
        for field_name, expected in expected_verbose_names.items():
            with self.subTest(value=field_name):
                self.assertEqual(
                    self.group._meta.get_field(field_name).verbose_name,
                    expected)

    def test_help_text(self):
        """help_text of each field matches the expected value."""
        expected_help_texts = {
            'title': 'Дайте короткое название группе',
            'slug': 'Укажите адрес для страницы задачи.',
            'description': 'Дайте описание группе'
        }
        for field_name, expected in expected_help_texts.items():
            with self.subTest(value=field_name):
                self.assertEqual(
                    self.group._meta.get_field(field_name).help_text,
                    expected)

    def test_name_is_title(self):
        """str(group) renders the group title."""
        self.assertEqual(self.group.title, str(self.group))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Copyright (c) 2013 Qin Xuye <qin@qinxuye.me>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Created on 2013-5-31
@author: Chine
'''
import logging.handlers
import SocketServer
import struct
try:
import cPickle as pickle
except ImportError:
import pickle
def get_logger(name='cola', filename=None, server=None, is_master=False):
    """Return the logger *name* configured at INFO with attached handlers.

    A StreamHandler is always added. When *filename* is given, a formatted
    FileHandler is added as well (restricted to ERROR when *is_master*).
    When *server* ('host' or 'host:port') is given, a SocketHandler is
    attached so records are forwarded over TCP.

    Note: each call adds fresh handlers to the (shared) named logger.
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)
    logger.addHandler(logging.StreamHandler())

    if filename is not None:
        file_handler = logging.FileHandler(filename)
        file_handler.setFormatter(logging.Formatter(
            '%(asctime)s - %(module)s.%(funcName)s.%(lineno)d - %(levelname)s - %(message)s'))
        if is_master:
            # the master only records errors to its file
            file_handler.setLevel(logging.ERROR)
        logger.addHandler(file_handler)

    if server is not None:
        host, _, port_text = server.partition(':')
        port = (int(port_text) if port_text
                else logging.handlers.DEFAULT_TCP_LOGGING_PORT)
        socket_handler = logging.handlers.SocketHandler(host, port)
        socket_handler.setLevel(logging.INFO)
        logger.addHandler(socket_handler)

    return logger
def add_log_client(logger, client):
    """Attach a TCP SocketHandler forwarding *logger*'s records to *client*.

    *client* is 'host' or 'host:port'; without an explicit port the standard
    logging TCP port is used. Returns the handler so callers can detach it.
    """
    host, _, port_text = client.partition(':')
    port = (int(port_text) if port_text
            else logging.handlers.DEFAULT_TCP_LOGGING_PORT)
    socket_handler = logging.handlers.SocketHandler(host, port)
    socket_handler.setLevel(logging.INFO)
    logger.addHandler(socket_handler)
    return socket_handler
class LogRecordStreamHandler(SocketServer.StreamRequestHandler):
    """Handle a stream of pickled log records sent by SocketHandler peers.

    Wire format: a 4-byte big-endian length prefix followed by a pickled
    dict describing one log record (the format logging's SocketHandler
    emits).
    """

    def handle(self):
        # Keep reading framed records until the server is asked to abort
        # or the peer closes the connection.
        while not self.server.abort:
            chunk = self.connection.recv(4)
            if len(chunk) < 4:
                # NOTE(review): a short read of the 4-byte header is treated
                # as EOF; a fragmented header would end the connection early.
                break
            slen = struct.unpack('>L', chunk)[0]
            chunk = self.connection.recv(slen)
            # the payload may arrive fragmented; read until complete
            while len(chunk) < slen:
                chunk = chunk + self.connection.recv(slen - len(chunk))
            obj = self.unPickle(chunk)
            record = logging.makeLogRecord(obj)
            self.handleLogRecord(record)

    def unPickle(self, data):
        # SECURITY: unpickling network data is only safe with trusted peers
        # (pickle can execute arbitrary code).
        return pickle.loads(data)

    def handleLogRecord(self, record):
        # Prefer the server-wide logger when one was configured, otherwise
        # dispatch to the logger named inside the record itself.
        if self.server.logger is not None:
            logger = self.server.logger
        else:
            logger = logging.getLogger(record.name)
        logger.handle(record)
class LogRecordSocketReceiver(SocketServer.ThreadingTCPServer):
    """Threaded TCP server receiving pickled log records.

    Each connection is served by *handler* (LogRecordStreamHandler by
    default); stop() flips ``abort`` which the handler loops poll.
    """

    # allow quick restarts on the same address after a shutdown
    allow_reuse_address = 1

    def __init__(self, logger=None, host='localhost',
                 port=logging.handlers.DEFAULT_TCP_LOGGING_PORT,
                 handler=LogRecordStreamHandler):
        SocketServer.ThreadingTCPServer.__init__(self, (host, port), handler)
        self.abort = False   # polled by handler loops to know when to stop
        self.timeout = 1     # presumably the serve/handle timeout — see SocketServer docs
        self.logger = logger  # optional logger overriding per-record names

    def stop(self):
        # Signal all handler loops to exit after their current record.
        self.abort = True
import arxiv
import json

# iterative=True: `result` is a callable that yields result dicts lazily
# (see the `result()` call in the loop below).
result = arxiv.query(query="cs", max_chunk_results=10, iterative=True)
print('[')
for paper in result():
    # json_str = json.dumps(paper, indent=2)
    json_str = json.dumps(paper)
    # NOTE(review): this textual replace turns JSON literals into Python
    # ones, but it also rewrites 'true'/'null' occurring inside string
    # values — confirm that is acceptable for the downstream consumer.
    print(json_str.replace('true', 'True').replace('null', 'None'))
    # a trailing separator is printed after every entry, including the last
    print(', ')
print(']')
|
import zmq
from random import random
from time import time, sleep
def get_random(lo=0, hi=1):
    """Sleep a uniformly random duration in [lo, hi) seconds and return
    the elapsed wall-clock time actually measured."""
    began = time()
    duration = lo + (hi - lo) * random()
    sleep(duration)
    return time() - began
# REP socket bound to a local IPC endpoint; clients send [lo, hi] pairs.
ctx = zmq.Context.instance()
sock = ctx.socket(zmq.REP)
sock.bind('ipc:///tmp/random')

# Serve forever: reply to each JSON [lo, hi] request with the measured
# sleep duration produced by get_random.
while True:
    lo, hi = sock.recv_json()
    sock.send_json(get_random(lo, hi))
|
def decorated_patterns(wrapping_functions, patterns):
    """Wrap every URL pattern in *patterns* with the given decorator(s).

    *wrapping_functions* may be a single callable or a list/tuple of them.
    Adapted from: https://gist.github.com/1378003
    """
    wrappers = (
        wrapping_functions
        if isinstance(wrapping_functions, (list, tuple))
        else (wrapping_functions,)
    )
    return [_wrap_resolver(wrappers, entry) for entry in patterns]
def _wrap_resolver(wrapping_functions, url_instance): # noqa
resolve_func = getattr(url_instance, 'resolve', None)
if resolve_func is None:
return url_instance
def _wrap_resolved_func(*args, **kwargs):
result = resolve_func(*args, **kwargs)
view_func = getattr(result, 'func', None)
if view_func is None:
return result
for _f in reversed(wrapping_functions):
view_func = _f(view_func)
setattr(result, 'func', view_func)
return result
setattr(url_instance, 'resolve', _wrap_resolved_func)
return url_instance
|
#!/usr/bin/python
# coding=utf-8
""" """
import argparse
import os, sys
from .pipeline_tools import make_perfect_path
def create_VOC_dirs(dir_name):
    """Create an empty Pascal-VOC style directory tree under *dir_name*.

    Creates Annotations, JPEGImages, SegmentationClass, SegmentationObject
    and the three ImageSets subfolders (Layout/Main/Segmentation). Like the
    original, creation is skipped entirely when the root already exists.

    Replaces the previous ``os.system("mkdir ...")`` calls: no shell is
    spawned (so unusual path names cannot inject commands) and parent
    directories are created in one step.
    """
    dir_name_ = make_perfect_path(dir_name)
    if not os.path.exists(dir_name_):
        for sub in (
            "Annotations",
            "JPEGImages",
            "SegmentationClass",
            "SegmentationObject",
            os.path.join("ImageSets", "Layout"),
            os.path.join("ImageSets", "Main"),
            os.path.join("ImageSets", "Segmentation"),
        ):
            os.makedirs(os.path.join(dir_name_, sub), exist_ok=True)
    print("create_VOC_dirs Done!")
if __name__ == "__main__":
    # Command-line entry point: one positional argument names the root
    # directory of the VOC tree to create.
    parser = argparse.ArgumentParser()
    parser.add_argument("dir_name_arg", help="Path&Name of directories", type=str)
    args = parser.parse_args()
    create_VOC_dirs(args.dir_name_arg)
#____________________________________
# Demo
# python ./create_VOC_dirs.py /home/zhehua/data/VOC
#____________________________________
from enum import Enum
from pydantic import BaseModel
from typing import List, Optional
class ErrorMessage(BaseModel):
    """Generic error payload returned by the API."""
    detail: str
class MilkEnum(str, Enum):
    """Allowed milk options for a drink."""
    none = 'none'
    skim = 'skim'
    semi = 'semi'
    whole = 'whole'
class Milk(BaseModel):
    """Milk option record (id + name)."""
    id: int
    name: MilkEnum

    class Config:
        orm_mode = True  # allow construction from ORM objects
class SizeEnum(str, Enum):
    """Allowed drink sizes."""
    small = 'small'
    medium = 'medium'
    large = 'large'
class Size(BaseModel):
    """Size record (id + name)."""
    id: int
    name: SizeEnum

    class Config:
        orm_mode = True  # allow construction from ORM objects
class EspressoShotEnum(str, Enum):
    """Allowed espresso shot counts."""
    none = 'none'
    single = 'single'
    double = 'double'
    triple = 'triple'
class EspressoShot(BaseModel):
    """Espresso shot record (id + name)."""
    id: int
    name: EspressoShotEnum

    class Config:
        orm_mode = True  # allow construction from ORM objects
class ConsumeLocationEnum(str, Enum):
    """Where the order will be consumed."""
    take_away = 'take away'
    in_shop = 'in shop'
class ConsumeLocation(BaseModel):
    """Consume location record (id + name)."""
    id: int
    name: ConsumeLocationEnum

    class Config:
        orm_mode = True  # allow construction from ORM objects
class OrderStatusEnum(str, Enum):
    """Lifecycle states of an order."""
    pending = 'pending'
    paid = 'paid'
    served = 'served'
    collected = 'collected'
    cancelled = 'cancelled'
class OrderStatus(BaseModel):
    """Order status record (id + name)."""
    id: int
    name: OrderStatusEnum

    class Config:
        orm_mode = True  # allow construction from ORM objects
class ProductEnum(str, Enum):
    """Orderable products."""
    latte = 'latte'
    cappuccino = 'cappuccino'
    espresso = 'espresso'
    tea = 'tea'
class Product(BaseModel):
    """Product record (id + name)."""
    id: int
    name: ProductEnum

    class Config:
        orm_mode = True  # allow construction from ORM objects
class OrderItemBase(BaseModel):
    """Shared base for order item schemas."""
    quantity: int = 1
class OrderItemCreate(OrderItemBase):
    """Payload for creating an order item; options referenced by enum name."""
    product_name: ProductEnum
    size: SizeEnum
    milk: MilkEnum = MilkEnum.none
    shot: EspressoShotEnum = EspressoShotEnum.none
class OrderItemUpdate(OrderItemBase):
    """Payload for updating an existing order item (requires its id)."""
    id: int
class OrderItem(OrderItemBase):
    """Order item as returned by the API, with nested option objects."""
    id: int
    product: Product
    size: Size
    milk: Milk
    espresso_shot: EspressoShot

    class Config:
        orm_mode = True  # allow construction from ORM objects
class OrderBase(BaseModel):
    """Shared base for order schemas (currently no common fields)."""
    pass
class OrderCreate(OrderBase):
    """Payload for creating an order."""
    location: ConsumeLocationEnum
    items: List[OrderItemCreate]
    status: Optional[OrderStatusEnum] = OrderStatusEnum.pending
class OrderUpdate(OrderBase):
    """Payload for updating an order (items carry their ids)."""
    location: ConsumeLocationEnum
    items: List[OrderItemUpdate]
    status: Optional[OrderStatusEnum] = OrderStatusEnum.pending
class Order(OrderBase):
    """Full order as returned by the API."""
    id: int
    location: ConsumeLocation
    items: List[OrderItem]
    status: Optional[OrderStatus]
    total: Optional[int]

    class Config:
        orm_mode = True  # allow construction from ORM objects
|
"""
File: algorithms.py
Algorithms configured for profiling.
"""
def selectionSort(lyst, profiler):
    """Sort *lyst* in place with selection sort, recording statistics.

    *profiler* must provide comparison() and exchange() callbacks; one
    comparison is recorded per element pair inspected and one exchange per
    actual swap.

    Bug fix: the inner scan previously stopped at ``len(lyst) - 1`` and so
    never considered the LAST element as a minimum candidate — e.g. [2, 1]
    was left unsorted. The scan now covers the whole remaining slice.
    """
    i = 0
    while i < len(lyst) - 1:
        minIndex = i
        j = i + 1
        while j < len(lyst):  # include the final element in the scan
            profiler.comparison()
            if lyst[j] < lyst[minIndex]:
                minIndex = j
            j += 1
        if i != minIndex:
            # Inlined swap (self-contained); still counts one exchange per
            # swap exactly like the module's swap() helper does.
            profiler.exchange()
            lyst[i], lyst[minIndex] = lyst[minIndex], lyst[i]
        i += 1
def swap(lyst, i, j, profiler):
    """Exchange the elements at positions i and j, recording one exchange
    on *profiler*."""
    profiler.exchange()
    lyst[i], lyst[j] = lyst[j], lyst[i]
from inspect import getmembers
from wtforms.validators import DataRequired
from onegov.form.fields import UploadField
from onegov.form.validators import StrictOptional
from typing import overload, Any, Literal, TypeVar, TYPE_CHECKING
if TYPE_CHECKING:
from collections.abc import Collection, Iterator
from onegov.form import Form
from typing_extensions import TypeGuard
from wtforms.fields.core import UnboundField
_FormT = TypeVar('_FormT', bound='Form')
def prepare_for_submission(
    form_class: type[_FormT],
    for_change_request: bool = False,
    force_simple: bool = True,
) -> type[_FormT]:
    """Adjust the upload fields of *form_class* for the submission workflow.

    With *force_simple*, every upload field is rendered with the simple
    widget — we do not support the more complex add/keep/replace widget,
    which is hard to properly support and not super useful in submissions.
    With *for_change_request*, DataRequired validators are swapped for a
    StrictOptional so users are not stuck re-uploading an unchanged file.
    Returns the (mutated) form class.
    """
    def _is_upload_field(
        attribute: object
    ) -> 'TypeGuard[UnboundField[UploadField]]':
        return (
            hasattr(attribute, 'field_class')
            and issubclass(attribute.field_class, UploadField)
        )

    for _, unbound in getmembers(form_class, predicate=_is_upload_field):
        if force_simple:
            unbound.kwargs.setdefault('render_kw', {})['force_simple'] = True
        if for_change_request:
            # Otherwise the user gets stuck when in form validation not
            # changing the file
            kept = [
                validator
                for validator in (unbound.kwargs['validators'] or [])
                if not isinstance(validator, DataRequired)
            ]
            unbound.kwargs['validators'] = [StrictOptional(), *kept]
    return form_class
@overload
def get_fields(
    form_class: type['Form'],
    names_only: Literal[False] = False,
    exclude: 'Collection[str] | None' = None
) -> 'Iterator[tuple[str, UnboundField[Any]]]': ...
@overload
def get_fields(
    form_class: type['Form'],
    names_only: Literal[True],
    exclude: 'Collection[str] | None' = None
) -> 'Iterator[str]': ...
def get_fields(
    form_class: type['Form'],
    names_only: bool = False,
    exclude: 'Collection[str] | None' = None
) -> 'Iterator[str | tuple[str, UnboundField[Any]]]':
    """ Takes an unbound form and yields its fields (or just their names
    when *names_only* is set), skipping any listed in *exclude*. """
    skip = exclude or ()
    for field_name, unbound in getmembers(
        form_class,
        predicate=lambda attribute: hasattr(attribute, 'field_class')
    ):
        if field_name in skip:
            continue
        yield field_name if names_only else (field_name, unbound)
|
r"""
Logging objects (:mod: `qiita_db.logger`)
====================================
..currentmodule:: qiita_db.logger
This module provides objects for recording log information
Classes
-------
..autosummary::
:toctree: generated/
LogEntry
"""
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from __future__ import division
from json import loads, dumps
import qiita_db as qdb
class LogEntry(qdb.base.QiitaObject):
    """
    Attributes
    ----------
    severity
    time
    info
    msg

    Methods
    -------
    clear_info
    add_info
    """

    # Name of the backing table in the qiita schema.
    _table = 'logging'

    @classmethod
    def newest_records(cls, numrecords=100):
        """Return a list of the newest records in the logging table

        Parameters
        ----------
        numrecords : int, optional
            The number of records to return. Default 100

        Returns
        -------
        list of LogEntry objects
            list of the log entries
        """
        with qdb.sql_connection.TRN:
            sql = """SELECT logging_id
                     FROM qiita.{0}
                     ORDER BY logging_id DESC LIMIT %s""".format(cls._table)
            qdb.sql_connection.TRN.add(sql, [numrecords])
            return [cls(i)
                    for i in qdb.sql_connection.TRN.execute_fetchflatten()]

    @classmethod
    def create(cls, severity, msg, info=None):
        """Creates a new LogEntry object

        Parameters
        ----------
        severity : str {Warning, Runtime, Fatal}
            The level of severity to use for the LogEntry. Refers to an entry
            in the SEVERITY table.
        msg : str
            The message text
        info : dict, optional
            Defaults to ``None``. If supplied, the information will be added
            as the first entry in a list of information dicts. If ``None``,
            an empty dict will be added.

        Notes
        -----
        - When `info` is added, keys can be of any type, but upon retrieval,
          they will be of type str
        """
        if info is None:
            info = {}
        # information is stored as a JSON list of dicts
        info = dumps([info])
        with qdb.sql_connection.TRN:
            sql = """INSERT INTO qiita.{} (time, severity_id, msg, information)
                     VALUES (NOW(), %s, %s, %s)
                     RETURNING logging_id""".format(cls._table)
            severity_id = qdb.util.convert_to_id(severity, "severity")
            qdb.sql_connection.TRN.add(sql, [severity_id, msg, info])
            return cls(qdb.sql_connection.TRN.execute_fetchlast())

    @property
    def severity(self):
        """Returns the severity_id associated with this LogEntry

        Returns
        -------
        int
            This is a key to the SEVERITY table
        """
        with qdb.sql_connection.TRN:
            sql = """SELECT severity_id FROM qiita.{}
                     WHERE logging_id = %s""".format(self._table)
            qdb.sql_connection.TRN.add(sql, [self.id])
            return qdb.sql_connection.TRN.execute_fetchlast()

    @property
    def time(self):
        """Returns the time that this LogEntry was created

        Returns
        -------
        datetime
        """
        with qdb.sql_connection.TRN:
            sql = "SELECT time FROM qiita.{} WHERE logging_id = %s".format(
                self._table)
            qdb.sql_connection.TRN.add(sql, [self.id])
            return qdb.sql_connection.TRN.execute_fetchlast()

    @property
    def info(self):
        """Returns the info associated with this LogEntry

        Returns
        -------
        list of dict
            Each entry in the list is information that was added (the info
            added upon creation will be index 0, and if additional info
            was supplied subsequently, those entries will occupy subsequent
            indices)

        Notes
        -----
        - When `info` is added, keys can be of any type, but upon retrieval,
          they will be of type str
        """
        with qdb.sql_connection.TRN:
            sql = """SELECT information FROM qiita.{} WHERE
                     logging_id = %s""".format(self._table)
            qdb.sql_connection.TRN.add(sql, [self.id])
            rows = qdb.sql_connection.TRN.execute_fetchlast()
            if rows:
                results = loads(rows)
            else:
                # Bug fix: return an empty *list* (was ``{}``) so the value
                # matches the documented "list of dict" return type and
                # add_info's ``.append`` keeps working on an empty entry.
                results = []
            return results

    @property
    def msg(self):
        """Gets the message text for this LogEntry

        Returns
        -------
        str
        """
        with qdb.sql_connection.TRN:
            sql = "SELECT msg FROM qiita.{0} WHERE logging_id = %s".format(
                self._table)
            qdb.sql_connection.TRN.add(sql, [self.id])
            return qdb.sql_connection.TRN.execute_fetchlast()

    def clear_info(self):
        """Resets the list of info dicts to be an empty list
        """
        with qdb.sql_connection.TRN:
            sql = """UPDATE qiita.{} SET information = %s
                     WHERE logging_id = %s""".format(self._table)
            qdb.sql_connection.TRN.add(sql, [dumps([]), self.id])
            qdb.sql_connection.TRN.execute()

    def add_info(self, info):
        """Adds new information to the info associated with this LogEntry

        Parameters
        ----------
        info : dict
            The information to add.

        Notes
        -----
        - When `info` is added, keys can be of any type, but upon retrieval,
          they will be of type str
        """
        with qdb.sql_connection.TRN:
            current_info = self.info
            current_info.append(info)
            new_info = dumps(current_info)
            sql = """UPDATE qiita.{} SET information = %s
                     WHERE logging_id = %s""".format(self._table)
            qdb.sql_connection.TRN.add(sql, [new_info, self.id])
            qdb.sql_connection.TRN.execute()
|
# Learning about objects
# A list is an object
l = [1,2,3,4]
print(l)
# methods can be called on the object
l.append(1)
print(l)
# type() reveals the object's class
print(type(l))
import os
import cv2 as cv
import random
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
shuffle_data = True  # shuffle the records before the train/test split
# Root folder of the dataset; the two bbox annotation files live inside it.
PATH_TO_FILES = '/home/young-joo/Desktop/Dataset/'
JUMPSUIT = PATH_TO_FILES + 'bbox_Jumpsuit.txt'
DRESS = PATH_TO_FILES + 'bbox_Dress.txt'
#Save into a list
def return_lists(JUMPSUIT, DRESS):
    """Parse the jumpsuit and dress annotation files into parallel lists.

    Each input line is: "<addr> <xmin> <xmax> <ymin> <ymax>".

    Parameters
    ----------
    JUMPSUIT, DRESS : str
        Paths to the two annotation text files.

    Returns
    -------
    list
        [0]: addr, [1]: bbox (xmin, xmax, ymin, ymax), [2]: category name,
        [3]: numeric label (1 = Jumpsuit, 2 = Dress).

    Raises
    ------
    ValueError
        If an addr contains neither 'Jumpsuit' nor 'Dress'.
    """
    # BUG FIX: files were opened via open(...).read() and never closed;
    # use context managers instead.
    with open(JUMPSUIT, 'r') as fh:
        jump_lines = fh.read().splitlines()
    with open(DRESS, 'r') as fh:
        dress_lines = fh.read().splitlines()
    all_lines = jump_lines + dress_lines
    our_list = [[], [], [], []]
    for line in all_lines:
        fields = line.split()
        addr = fields[0]
        our_list[0].append(addr)
        xmin, xmax, ymin, ymax = (float(v) for v in fields[1:5])
        our_list[1].append((xmin, xmax, ymin, ymax))
        # The category is encoded in the image path.
        if 'Jumpsuit' in addr:
            our_list[2].append('Jumpsuit')
            our_list[3].append(1)
        elif 'Dress' in addr:
            our_list[2].append('Dress')
            our_list[3].append(2)
        else:
            raise ValueError('Something is wrong')
    return our_list
# Build the combined annotation lists and sanity-check a few entries.
# FIX: the bare Python-2 `print x` statements are a SyntaxError on Python 3;
# single-argument print(...) behaves identically on both versions.
this_is_the_list = return_lists(JUMPSUIT, DRESS)
print(this_is_the_list[0][0])
print(this_is_the_list[1][0])
print(this_is_the_list[2][0])
print(this_is_the_list[3][0])
print(this_is_the_list[0][1999])
print(this_is_the_list[1][1999])
print(this_is_the_list[2][1999])
print(this_is_the_list[3][1999])
addrs = this_is_the_list[0]
bboxs = this_is_the_list[1]
string_names = this_is_the_list[2]
labels = this_is_the_list[3]
# Shuffle all four fields in lockstep so records stay aligned.
if shuffle_data:
    c = list(zip(addrs, bboxs, string_names, labels))
    random.shuffle(c)
    addrs, bboxs, string_names, labels = zip(*c)
print(addrs[0])
print(bboxs[0])
print(string_names[0])
print(labels[0])
print(addrs[1999])
print(bboxs[1999])
print(string_names[1999])
print(labels[1999])
# Divide into train and test sets (80/20 split).
train_addrs = addrs[0:int(0.8*len(addrs))]
train_bboxs = bboxs[0:int(0.8*len(bboxs))]
train_string_names = string_names[0:int(0.8*len(string_names))]
train_labels = labels[0:int(0.8*len(labels))]
test_addrs = addrs[int(0.8*len(addrs)):]
test_bboxs = bboxs[int(0.8*len(bboxs)):]
test_string_names = string_names[int(0.8*len(string_names)):]
test_labels = labels[int(0.8*len(labels)):]
#Helper functions for converting data types
def int64_feature(value):
    """Wrap a scalar integer in a tf.train.Feature holding an Int64List."""
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def bytes_feature(value):
    """Wrap a bytes value in a tf.train.Feature holding a BytesList."""
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def float_list_feature(value):
    """Wrap an iterable of floats in a tf.train.Feature holding a FloatList."""
    return tf.train.Feature(float_list=tf.train.FloatList(value=value))
#Saving to tfrecord
def save_to_record(filename, addrs_addrs, bboxs_bboxs, string_string, labels_labels):
    """Serialize the given examples into a TFRecord file.

    Parameters
    ----------
    filename : str
        Output TFRecord path.
    addrs_addrs : sequence of str
        Image paths relative to PATH_TO_FILES.
    bboxs_bboxs : sequence of (xmin, xmax, ymin, ymax)
        Boxes in absolute pixel units of the original image.
    string_string : sequence of str
        Category name of each example (parallel to addrs_addrs).
    labels_labels : sequence of int
        Numeric label of each example.

    Returns
    -------
    The last tf.train.Example written.
    """
    writer = tf.python_io.TFRecordWriter(filename)
    length = len(addrs_addrs)
    count = 0
    print("this is the original length: %d" % (length))
    for i in range(length):
        if count % 100 == 0:
            print(length)
            print("%d out of %d saved" % (count, length))
        # Original image is only read to recover (h, w) for normalization;
        # the encoded bytes come from the pre-resized "_revised" jpg below.
        image = cv.imread(PATH_TO_FILES + addrs_addrs[i])
        image = cv.cvtColor(image, cv.COLOR_BGR2RGB)
        (h, w) = image.shape[:2]
        split = addrs_addrs[i].split(".")
        with tf.gfile.GFile(PATH_TO_FILES + split[0] + "_revised.jpg", 'rb') as fid:
            encoded_jpg = fid.read()
        label_num = labels_labels[i]
        example = tf.train.Example(features=tf.train.Features(feature={
            'image/height': int64_feature(299),
            'image/width': int64_feature(299),
            'image/encoded': bytes_feature(encoded_jpg),
            'image/format': bytes_feature(b'jpg'),
            'image/object/bbox/xmin': float_list_feature([bboxs_bboxs[i][0] / w]),
            'image/object/bbox/xmax': float_list_feature([bboxs_bboxs[i][1] / w]),
            'image/object/bbox/ymin': float_list_feature([bboxs_bboxs[i][2] / h]),
            'image/object/bbox/ymax': float_list_feature([bboxs_bboxs[i][3] / h]),
            # BUG FIX: the class text of example i is string_string[i];
            # indexing by label_num (1 or 2) picked an unrelated entry.
            'image/object/class/text': bytes_feature(string_string[i].encode()),
            'image/object/class/label': int64_feature(label_num)
        }))
        count = count + 1
        writer.write(example.SerializeToString())
    writer.close()
    return example
# Entry point: write both TFRecord files from the shuffled 80/20 split.
if __name__ == '__main__':
    # Generating training data
    save_to_record('train', train_addrs, train_bboxs, train_string_names, train_labels)
    # Generating testing data
    save_to_record('test', test_addrs, test_bboxs, test_string_names, test_labels)
|
def f(x):
    """The function whose root is sought: f(x) = x - 0.01 * x**2."""
    return x - 0.01 * (x ** 2)
# Bisection search for a root of f on the integer interval [a, b], down to
# tolerance e.  Assumes f changes sign on [a, b] (standard bisection).
a, b = map (int, input ().split ())
e = float (input ())
# Report an endpoint immediately if it is already an exact root.
if f (a) == 0: print (a)
elif f (b) == 0: print (b)
else:
    flag = 0  # set to 1 when an exact root is hit mid-search
    while b - a > e:
        c = (a + b) / 2
        if f (c) == 0:
            flag = 1
            break
        # Keep the half of the interval where the sign change remains.
        elif f (c) * f (a) > 0: a = c
        else: b = c
    if flag == 0: print (a, b)  # bracketing interval narrowed to tolerance
    else: print (c)  # exact root found
from django.contrib import admin
from .models import Plat, PlatConstante, PlatOrganisation
@admin.register(PlatConstante)
class PlatConstanteAdmin(admin.ModelAdmin):
    """Admin list view for the PlatConstante quota model.

    Renamed from PlatAdmin: the original name was shadowed by the Plat
    admin class declared right below it.
    """
    # NOTE(review): the original listed 'mx_plat_midi' twice; presumably
    # 'mx_plat_soir' was intended — confirm against the model fields
    # before adding it.
    list_display = ('id', 'mx_plat', 'mx_plat_matin', 'mx_plat_midi')
@admin.register(Plat)
class PlatAdmin(admin.ModelAdmin):
    """Admin list view for meals (Plat), filterable by date/team/day/period."""
    list_display = ('id','id_participant','participant','jour','periode','equipe','date')
    list_filter = ('date','participant__equipe','jour','periode',)
    ordering = ('date',)
    def id_participant(self, obj):
        # Computed column: primary key of the related participant.
        return obj.participant.id
    def equipe(self, obj):
        # Computed column: the participant's team.
        return obj.participant.equipe
@admin.register(PlatOrganisation)
class PlatOrganisationAdmin(admin.ModelAdmin):
    """Admin list view for per-organisation meal slots across the weekend."""
    list_display = ('nom','vendredi_soir','samedi_matin','samedi_midi','samedi_soir','dimanche_matin','dimanche_midi','date')
    list_filter = ('date','vendredi_soir','samedi_matin','samedi_midi','samedi_soir','dimanche_matin','dimanche_midi')
    ordering = ('nom',)
|
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 17 18:00:10 2020
@author: vicma
"""
import json
from time import sleep
from kafka import KafkaConsumer
# Consume parsed recipes from Kafka and alert on high-calorie entries.
if __name__ == '__main__':
    parsed_topic_name = 'parsed_recipes'
    # Notify if a recipe has more than 200 calories
    calories_threshold = 200
    # Read the topic from the beginning; stop after 1s without messages.
    consumer = KafkaConsumer(parsed_topic_name, auto_offset_reset='earliest',
                             bootstrap_servers=['localhost:9092'], api_version=(0, 10), consumer_timeout_ms=1000)
    for msg in consumer:
        record = json.loads(msg.value)
        if record == {}:
            # Empty payload: dump the raw message and skip it.
            print(msg)
            continue
        calories = int(record['calories'])
        title = record['title']
        if calories > calories_threshold:
            print('Alert: {} calories count is {}'.format(title, calories))
        sleep(3)  # throttle processing between messages
    if consumer is not None:
        consumer.close()
from articles import views
from django.urls import path
from django.conf.urls import url, include
# URL routes for the articles app (legacy regex-based url() routing).
urlpatterns = [
    url(r'^add$', views.addArticle),  # create a new article
    url(r'^edit/pass/(?P<pk>\d+)$', views.editPassArticle),  # password-gated edit
    url(r'^edit/(?P<pk>\d+)$', views.editArticle),  # edit article <pk>
    url(r'^search/(?P<username>\w{0,50})/$', views.searchArticle),  # search by author
]
|
# Read a string, then "<index> <word>", replace the character at <index>
# with <word>, and print the result.
text = input()
pos, word = input().split(" ")
chars = list(text)
# BUG FIX: the index read from input is a string — it must be converted to
# int before indexing (the original raised TypeError here), and the modified
# list must be joined and printed (the original printed the unchanged input
# and also shadowed the builtin name `str`).
chars[int(pos)] = word
print("".join(chars))
|
"""Alter AnswerRule column default NOT NULL
Revision ID: 12b6ae6ce692
Revises: 4fecacd2f5e8
Create Date: 2018-11-27 00:48:32.750384
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = '12b6ae6ce692'       # this migration's id
down_revision = '4fecacd2f5e8'  # parent migration
branch_labels = None
depends_on = None
def upgrade():
    """Make the five AnswerRule flag columns NOT NULL."""
    # ### commands auto generated by Alembic - please adjust! ###
    for column_name in ('cold_sensitive', 'constipation', 'digestion',
                        'gluttony', 'menopause'):
        op.alter_column('answer_rule', column_name,
                        existing_type=mysql.TINYINT(display_width=1),
                        nullable=False,
                        existing_server_default=sa.text("'0'"))
    # ### end Alembic commands ###
def downgrade():
    """Revert the five AnswerRule flag columns to nullable (reverse order)."""
    # ### commands auto generated by Alembic - please adjust! ###
    for column_name in ('menopause', 'gluttony', 'digestion',
                        'constipation', 'cold_sensitive'):
        op.alter_column('answer_rule', column_name,
                        existing_type=mysql.TINYINT(display_width=1),
                        nullable=True,
                        existing_server_default=sa.text("'0'"))
    # ### end Alembic commands ###
|
"""
Django settings for vss project.
Generated by 'django-admin startproject' using Django 1.8.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
import os
import datetime
# Project root, two levels above this settings module.
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Accept any Host header; tighten before production deployment.
ALLOWED_HOSTS = ['*']
# Include the right settings: prefer local dev settings, fall back to production.
try:
    from .local import *
except ImportError as e:
    from .production import *
TEMPLATE_DEBUG = True
# Custom user model provided by the `user` app.
AUTH_USER_MODEL = 'user.User'
# Application definition
INSTALLED_APPS = (
    # Django contrib apps
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Third-party
    'rest_framework',
    # Project apps
    'core',
    'utils',
    'user',
    'movie_category',
)
# FIX: this identical middleware tuple was assigned twice in a row; the
# redundant duplicate assignment has been removed.
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Templates and static sources live two levels above this settings module.
TEMPLATE_DIRS = (
    os.path.join(BASE_DIR, '..', '..', 'templates'),
)
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, '..', '..', 'static'),
)
STATIC_URL = '/static/'
# Target of `collectstatic`.
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
ROOT_URLCONF = 'core.urls'
WSGI_APPLICATION = 'core.wsgi.application'
# BUG FIX: REST_FRAMEWORK was assigned twice; the second dict completely
# replaced the first, silently discarding the authentication classes.
# The two dicts are merged into one.
REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework.authentication.BasicAuthentication',
        'rest_framework.authentication.SessionAuthentication',
    ),
    'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
    'PAGE_SIZE': 10,
}
CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOW_CREDENTIALS = True
|
"""
Test for Pendulum class.
"""
import nose.tools as nt
import numpy as np
from math import pi, sqrt
from pendulum import NotSolvedError
from pendulum import Pendulum
def test_pendulum():
    """Check if pendulum call gives derivatives."""
    # dtheta/dt equals the angular velocity; domega/dt = -(g/L)*sin(theta),
    # here with g = 9.81, L = 2.2 and sin(pi/4) = 1/sqrt(2).
    test_pend = Pendulum(L=2.2)
    test_dth, test_dom = test_pend(0, (pi/4, 0.1))
    ex_dth, ex_dom = 0.1, (-9.81/(2.2*sqrt(2)))
    nt.assert_almost_equal(test_dth, ex_dth)
    nt.assert_almost_equal(test_dom, ex_dom)
def test_pendulum_rest():
    """Check if pendulum rests in initial position 0 with velocity 0."""
    # Both derivatives must be exactly zero at the stable equilibrium.
    test_pend = Pendulum(L=2.2)
    test_dth, test_dom = test_pend(0, (0, 0))
    ex_dth, ex_dom = 0, 0
    nt.assert_equal(test_dth, ex_dth)
    nt.assert_equal(test_dom, ex_dom)
def test_pendulum_solve_Exceptions():
    """Check that every result property raises NotSolvedError before solve().

    FIX: the original decorated the whole test with @nt.raises, so only the
    first property access was ever exercised (the rest of the function was
    unreachable).  Each property is now checked individually.
    """
    test_pend = Pendulum(L=2.2)
    for name in ('t', 'theta', 'omega', 'x', 'y',
                 'potential', 'vx', 'vy', 'kinetic'):
        nt.assert_raises(NotSolvedError, getattr, test_pend, name)
def test_pendulum_solve_zero():
    """Check if start in rest ((angle, angular vel) == (0,0)) returns zeros."""
    test_T = 5
    test_dt = 0.1
    # FIX: np.linspace requires an integer `num`; the original passed the
    # raw float test_T/test_dt, which raises a TypeError on modern numpy.
    # round() guards against float division landing just below the integer.
    n_points = int(round(test_T/test_dt))
    test_pend = Pendulum(L=2.2)
    test_pend.solve((0, 0), test_T, test_dt)
    test_ts = np.linspace(0, test_T, n_points)
    test_zeros = np.zeros(n_points)
    np.testing.assert_array_equal(test_pend.t, test_ts)
    np.testing.assert_array_equal(test_pend.theta, test_zeros)
    np.testing.assert_array_equal(test_pend.omega, test_zeros)
def test_pendulum_xy_transformation():
    """Check if motion is transformed to xy-coordinates."""
    # On a rigid pendulum, x^2 + y^2 must always equal L^2.
    duration, step, length = 5, 0.1, 2
    pend = Pendulum(L=length)
    pend.solve((pi/2, 0), duration, step)
    radius_squared = pend.x**2 + pend.y**2
    np.testing.assert_array_almost_equal(radius_squared, length**2)
# Run the test suite directly with nose when executed as a script.
if __name__ == "__main__":
    import nose
    nose.run()
|
import re
from nmmd.base import Dispatcher, try_delegation
class RegexDispatcher(Dispatcher):
    """Dispatcher that routes input text to handler methods by regex match."""

    @try_delegation
    def prepare(self):
        """Compile each registered invocation into a (regex, method) pair."""
        data = []
        for invoc, method in self.registry:
            args, kwargs = self.loads(invoc)
            rgx = re.compile(*args, **kwargs)
            data.append((rgx, method))
        return data

    @try_delegation
    def get_text(self, text):
        """Hook for extracting the text to match against (identity here)."""
        return text

    @try_delegation
    def gen_methods(self, *args, **kwargs):
        """Yield (method, (text, matchobj)) for each regex that matches."""
        text = self.get_text(*args, **kwargs)
        for rgx, methodname in self.dispatch_data:
            matchobj = rgx.match(text)
            if matchobj:
                method = getattr(self.inst, methodname)
                yield method, (text, matchobj)
        # Fall back to inst.generic_handler when it is defined.
        generic = getattr(self.inst, 'generic_handler', None)
        if generic is not None:
            yield generic

    @try_delegation
    def apply_handler(self, method_data, *args, **kwargs):
        '''Call the dispatched function, optionally with other data
        stored/created during .register and .prepare. Assume the arguments
        passed in by the dispatcher are the only ones required.
        '''
        # method_data is either a bare callable or a
        # (method[, args[, kwargs]]) tuple produced by gen_methods.
        if isinstance(method_data, tuple):
            len_method = len(method_data)
            method = method_data[0]
            if 1 < len_method:
                args = method_data[1]
            if 2 < len_method:
                kwargs = method_data[2]
        else:
            method = method_data
        return method(*args, **kwargs)
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import classification_report,accuracy_score,confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.preprocessing import StandardScaler
# read the dataset into the spyder
dataset=pd.read_csv(r'C:\Users\saikumar\Desktop\AMXWAM data science\class24_nov 14,2020\Social_Network_Ads.csv')
# seperate dependent and independent variables (columns 2..n-2 vs last column)
X=dataset.iloc[:,2:-1].values
y=dataset.iloc[:,-1].values
# check the dataset for any null values
# NOTE(review): the result of this expression is discarded, so nothing is
# actually validated here — assign or assert it if the check matters.
dataset.notnull().sum()
# do a feature scaling
# NOTE(review): scaling is fit on the full dataset before the split, which
# leaks test statistics into training — consider fitting on X_train only.
sc=StandardScaler()
X=sc.fit_transform(X)
# split my dataset into train and test (80/20, fixed seed)
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2,random_state=0)
# fit gaussianNB to my training model
Gnb=GaussianNB()
Gnb.fit(X_train,y_train)
# predict
y_pred=Gnb.predict(X_test)
# check the measures using confusion matrix
cm=confusion_matrix(y_test,y_pred)
acc=accuracy_score(y_test,y_pred)
cr=classification_report(y_test,y_pred)
# Visualising the Test set results: decision regions on a dense 2-D grid.
from matplotlib.colors import ListedColormap
X_set, y_set = X_test, y_test
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, Gnb.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
    plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
    c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Naive Bayes (age vs salary)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
# Training-set predictions and their confusion matrix.
X_pred=Gnb.predict(X_train)
X_cm=confusion_matrix(y_train,X_pred)
# Visualising the Training set results (same plot, training data)
from matplotlib.colors import ListedColormap
X_set, y_set = X_train, y_train
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, Gnb.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
    plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
    c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Naive Bayes ( age vs salary)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
|
from abc import abstractmethod
class ServiceBase:
    """Base interface for services.

    NOTE(review): without inheriting abc.ABC (or metaclass=ABCMeta),
    @abstractmethod is not enforced — subclasses can still be instantiated
    without overriding info().  Confirm whether enforcement is wanted.
    """
    @abstractmethod
    def info(self):
        """Return service information; subclasses are expected to override."""
        pass
|
import random
from characters import BaseCharacer
from formulas import mentor_successful_interaction
from widgets import Popup
class Mentor(BaseCharacer):
    """A mentor character that answers questions and hops between spots."""

    # Number of candidate locations the mentor can move between.
    MENTORS_COUNT = 4

    def __init__(self, location, locations, *groups):
        super(Mentor, self).__init__(location, *groups)
        self.locations = locations

    def change_place(self, new_x, new_y):
        """Move the sprite so its top-left corner sits at (new_x, new_y)."""
        self.rect.left = new_x
        self.rect.top = new_y

    def change_to_random_place(self):
        """Jump to one of the first MENTORS_COUNT known locations."""
        destination = self.locations[random.randint(0, self.MENTORS_COUNT - 1)]
        self.change_place(destination.px, destination.py)

    def visited(self, player):
        """Resolve a player visit: maybe grant a bonus, then relocate.

        Returns a Popup describing the outcome.
        """
        success_chance = mentor_successful_interaction(player.soft_skills)
        if success_chance > random.randint(1, 100):
            player.soft_skills += 1
            bonus = player.get_random_bonus()
            text = 'You got %d points in your %s.' % (bonus[0], bonus[1])
        else:
            text = 'You asked your mentor a stupid question.'
        self.change_to_random_place()
        return Popup((255, 128, 0), text, show=True)

    def update(self, dt, game):
        # Mentors have no per-frame behaviour.
        pass
class Member(BaseCharacer):
    """A team-member character carrying three skill stats."""
    def __init__(self, location, programming=0, design=0, soft_skills=0,
                 *groups):
        # NOTE(review): the defaulted skill params sit before *groups, so
        # extra positional args would fill the skills first — confirm that
        # call sites pass groups only after explicit skills.
        super(Member, self).__init__(location, *groups)
        self.programming = programming  # programming skill level
        self.design = design            # design skill level
        self.soft_skills = soft_skills  # soft-skills level
    def update(self, dt, game):
        # Members have no per-frame behaviour.
        pass
|
# Proyecto compilador Python
# Maquina Virtual de Fight Compilers 2016
# Hecho por Jaime Neri y Mike Grimaldo
#!env/bin/python
import simplejson
import sys
import pprint
import os
import logging
# Log every interpreter step to a file.
logging.basicConfig(filename='Execution_log.log',level=logging.DEBUG)
# Sample source programs that the compiler can process.
file1 = "ejemplos/dimensionada.txt"
file2 = "ejemplos/example1.txt"
file3 = "ejemplos/suma_funciones.txt"
file4 = "ejemplos/factorial_loop.txt"
file5 = "ejemplos/fibonacci_loop.txt"
compilador_1 = "python FightCompilersYacc.py -f " + file5
os.system(compilador_1)  # Runs the external compiler script
# The compiler leaves its output (quadruples, function tables, ...)
# in executable.json, which the VM loads here.
with open('executable.json') as data_file:
    data = simplejson.load(data_file)
globalfuncs = data['funcs_Global']
localfuncs = data['funcs_Local']
instructions = data['quadruples']  # the quadruples to interpret
codeLines = data['code']
pp = pprint.PrettyPrinter(indent=4)  # Pretty-prints dicts with indentation
# VM memory: arithmetic values and boolean temporaries, keyed by name.
dir_stack = {'variable' : {},
             'var_booleans' : {}
             }
# Stacks used by the VM
currentFunctionId = []  # stack of names of functions being set up / called
recuperar_linea = []    # return addresses saved while executing a GOSUB (function call)
listValue = []
stack = []
messageHTML = ""
gosub_list = []
contador_k = [0]        # parameter counter for the current call
def load_program(argv):
    """Read a program file and return its whitespace-separated tokens.

    Parameters
    ----------
    argv : str
        Path of the file to read.

    Returns
    -------
    list of str
        The file content with newlines folded to spaces, split on spaces.
    """
    # FIX: use a context manager so the file is closed even if read() raises.
    with open(argv) as f:
        lines = f.read().replace("\n", " ")
    return lines.split(" ")
def save_function_name(functionId):
    """Push a function name onto the current-function stack."""
    currentFunctionId.append(functionId)
def recover_function_name():
    """Pop and return the most recently saved function name."""
    return currentFunctionId.pop()
def op1_obtain_value(the_quad):
    """Resolve the first operand (the_quad[1]) of a quadruple to a number.

    the_quad[1] is either a plain name/literal token, or a [base, index]
    pair for a dimensioned (array) access whose storage key is
    base + str(resolved index).  A KeyError from the lookup chain is used
    as the signal to fall back to the literal form.
    """
    if type(the_quad[1]) is list:
        try:
            # Array access: resolve the index variable, then the cell value.
            auxIndex = str(return_value_from_variable(the_quad[1][1]))
            op1 = cast_to_int_or_float(str(return_value_from_variable(the_quad[1][0]+auxIndex)))
        except KeyError:
            # Index token is already a literal; concatenate it verbatim.
            auxIndex = the_quad[1][0]+the_quad[1][1]
            op1 = cast_to_int_or_float(str(return_value_from_variable(auxIndex)))
    else:
        try:
            op1 = cast_to_int_or_float(return_value_from_variable(the_quad[1]))
        except KeyError:
            # Unknown name: treat the token itself as a numeric literal.
            op1 = cast_to_int_or_float(the_quad[1])
    return op1
def op2_obtain_value(the_quad):
    """Resolve the second operand (the_quad[2]) of a quadruple to a number.

    Mirror of op1_obtain_value, operating on index 2 of the quadruple.
    """
    if type(the_quad[2]) is list:
        try:
            # Array access: resolve the index variable, then the cell value.
            auxIndex = str(return_value_from_variable(the_quad[2][1]))
            op2 = cast_to_int_or_float(str(return_value_from_variable(the_quad[2][0]+auxIndex)))
        except KeyError:
            # Index token is already a literal; concatenate it verbatim.
            auxIndex = the_quad[2][0]+the_quad[2][1]
            op2 = cast_to_int_or_float(str(return_value_from_variable(auxIndex)))
    else:
        try:
            op2 = cast_to_int_or_float(return_value_from_variable(the_quad[2]))
        except KeyError:
            # Unknown name: treat the token itself as a numeric literal.
            op2 = cast_to_int_or_float(the_quad[2])
    return op2
def cast_to_int_or_float(theNumber):
    """Coerce a numeric token to int when integral, otherwise to float."""
    as_float = float(theNumber)
    truncated = int(as_float)
    # No fractional remainder means the value is a whole number.
    if as_float - truncated == 0:
        return truncated
    return as_float
# Returns True when `var` appears (as a substring) in some key of the
# variable dictionary.
def var_exists_in_dict(var):
    keys = dir_stack['variable'].keys()
    x = str(var)
    for y in keys:
        if x in y:
            # NOTE(review): this is a substring test, not equality — 'a'
            # also matches key 'ab'.  It appears intended for concatenated
            # array keys (e.g. base+'0'), but a partial match lets
            # return_value_from_variable raise KeyError afterwards; verify.
            return True
    return False
# Returns the variable's value when it exists in the dictionary; otherwise
# the token itself is returned unchanged.
def return_value_from_variable(var):
    x = var
    if (var_exists_in_dict(var)):
        # NOTE(review): var_exists_in_dict does a substring match, so this
        # exact lookup can still raise KeyError after a positive check;
        # callers rely on that KeyError to treat the token as a literal.
        x = dir_stack['variable'][var]
    return x
def dictionary_update(op1, op2, operator):
    """Store op2 under key op1 in the VM variable memory and log it.

    Parameters
    ----------
    op1 : str
        Destination variable name.
    op2
        Value to store.
    operator : str
        The quadruple operator, only used for the log line.
    """
    # (The large block of commented-out per-operator prints was removed.)
    dir_stack['variable'].update({op1 : op2})
    logging.info('EXPRESION Aritmetica: "%s %s %s"',op1, operator, op2 )
# Returns True when `var` appears (as a substring) in some key of the
# boolean-variable dictionary.
def var_exists_in_dict_bool(var):
    keys = dir_stack['var_booleans'].keys()
    for y in keys:
        # NOTE(review): substring match, not equality — see var_exists_in_dict.
        if var in y:
            return True
    return False
# Returns the boolean variable's value; unknown tokens are returned unchanged.
def return_value_from_variable_bool(var):
    x = var
    if (var_exists_in_dict_bool(var)):
        x = dir_stack['var_booleans'][var]
    return x
def dictionary_update_bool(op1, op2):
    """Store a boolean result op2 under key op1 and log the expression.

    (A dead commented-out duplicate of this function was removed.)
    """
    dir_stack['var_booleans'].update({op1 : op2})
    logging.info('EXPRESION Logica: "%s = %s"',op1, op2 )
def regresa_variable_de_funcion(nombre_proc):
    """Return the name of the current parameter variable of a function.

    Pops the parameter counter from contador_k, indexes the function's
    Var_Table with it, and pushes the incremented counter back.
    """
    count = contador_k.pop()
    # FIX: dict.keys() is not indexable on Python 3 — wrap in list().
    # NOTE(review): relies on the Var_Table preserving insertion order.
    var_id = list(localfuncs[nombre_proc]['Var_Table'].keys())[count]
    # FIX: the old `count = contador_k.append(...)` pointlessly rebound
    # count to None (append returns None); just push the next counter.
    contador_k.append(count + 1)
    return var_id
def execute_program(l):
    """Interpret the loaded quadruples until an END quad is reached.

    `i` is the instruction pointer into the module-level `instructions`
    list; jump quads (GOTO/GOTOF/GOSUB/ENDPROC) rewrite it directly.
    Quadruple targets are 1-based while `i` is 0-based, hence the `- 1`
    adjustments.  The parameter `l` is unused (kept for the original
    call signature).
    """
    global contador_k
    loop = len(instructions)
    i = 0
    logging.info('<----COMIENZA LA EJECUCION DEL PROGRAMA "%s"---->',file1)
    logging.info('')
    print ("<----Comienza a ejecutar el programa---->" )
    while i < loop:
        quad_actual = instructions[i]
        if quad_actual[0] == 'GOTO':
            # Unconditional jump.
            i = quad_actual[3] - 1
            logging.info('GOTO encontrado; cambiando posicion a: "%s"',i+1)
        elif quad_actual[0] == 'GOTOF':
            # Jump only when the boolean temporary is false.
            condition = return_value_from_variable_bool(quad_actual[1])
            if not condition:
                i = quad_actual[3] - 1
                logging.info('GOTOF Decision de salida, cambiando posicion a: "%s"',i+1)
        elif quad_actual[0] == '=':
            # Assignment.  Either side may be a [base, index] array access:
            # ['=', ['b', 'i'], 0, 'y']  or  ['=', '0', 0, ['b', '0']]
            if type(quad_actual[1]) is list:
                try:
                    auxIndex = str(return_value_from_variable(quad_actual[1][1]))
                    op2 = cast_to_int_or_float(str(return_value_from_variable(quad_actual[1][0]+auxIndex)))
                except KeyError:
                    op2 = cast_to_int_or_float(str(return_value_from_variable(quad_actual[1][0] + str(quad_actual[1][1]))))
            else:
                try:
                    op2 = return_value_from_variable(quad_actual[1])
                except KeyError:
                    op2 = quad_actual[1]
            # Destination: resolve array index to a concrete storage key.
            if type(quad_actual[3]) is list:
                try:
                    op1 = quad_actual[3][0] + str(return_value_from_variable(quad_actual[3][1]))
                except KeyError:
                    op1 = quad_actual[3][0] + str(quad_actual[3][1])
            else:
                op1 = quad_actual[3]
            dictionary_update(op1, op2, quad_actual[0])
            stack.append(op1)
        elif quad_actual[0] == 'QCF':
            print ("HADOUUUUUKEN!!")
        elif quad_actual[0] == '+':  # e.g. ['+', ['b', 'y'], '1', 'temp6']
            # Inlined operand resolution (mirrors op1/op2_obtain_value),
            # branching on which operands are array accesses.
            if type(quad_actual[1]) is list:
                try:
                    auxIndex = str(return_value_from_variable(quad_actual[1][1]))
                    op1 = cast_to_int_or_float(str(return_value_from_variable(quad_actual[1][0]+auxIndex)))
                except KeyError:
                    auxIndex = quad_actual[1][0]+quad_actual[1][1]
                    op1 = cast_to_int_or_float(str(return_value_from_variable(auxIndex)))
                if type(quad_actual[2]) is list:
                    try:
                        auxIndex = str(return_value_from_variable(quad_actual[2][1]))
                        op2 = cast_to_int_or_float(str(return_value_from_variable(quad_actual[2][0]+auxIndex)))
                    except KeyError:
                        auxIndex = quad_actual[2][0]+quad_actual[2][1]
                        op2 = cast_to_int_or_float(str(return_value_from_variable(auxIndex)))
                else:
                    try:
                        op2 = cast_to_int_or_float(return_value_from_variable(quad_actual[2]))
                    except KeyError:
                        op2 = cast_to_int_or_float(quad_actual[2])
            elif type(quad_actual[2]) is list:
                try:
                    op1 = cast_to_int_or_float(return_value_from_variable(quad_actual[1]))
                except KeyError:
                    op1 = cast_to_int_or_float(quad_actual[1])
                try:
                    auxIndex = str(return_value_from_variable(quad_actual[2][1]))
                    op2 = cast_to_int_or_float(str(return_value_from_variable(quad_actual[2][0]+auxIndex)))
                except KeyError:
                    auxIndex = quad_actual[2][0]+quad_actual[2][1]
                    op2 = cast_to_int_or_float(str(return_value_from_variable(auxIndex)))
            else:
                try:
                    op1 = cast_to_int_or_float(return_value_from_variable(quad_actual[1]))
                except KeyError:
                    op1 = cast_to_int_or_float(quad_actual[1])
                try:
                    op2 = cast_to_int_or_float(return_value_from_variable(quad_actual[2]))
                except KeyError:
                    op2 = cast_to_int_or_float(quad_actual[2])
            dictionary_update(quad_actual[3], (op1 + op2), quad_actual[0])
        elif quad_actual[0] == '-':
            op1 = op1_obtain_value(quad_actual)
            op2 = op2_obtain_value(quad_actual)
            dictionary_update(quad_actual[3], (op1 - op2), quad_actual[0])
        elif quad_actual[0] == '*':
            op1 = op1_obtain_value(quad_actual)
            op2 = op2_obtain_value(quad_actual)
            dictionary_update(quad_actual[3], (op1 * op2), quad_actual[0])
        elif quad_actual[0] == '/':
            op1 = op1_obtain_value(quad_actual)
            op2 = op2_obtain_value(quad_actual)
            dictionary_update(quad_actual[3], (op1 / op2), quad_actual[0])
        elif quad_actual[0] == 'ERA':
            # Activation record: remember the callee and reset the
            # parameter counter.
            save_function_name(quad_actual[3])
            contador_k = [0]
        elif quad_actual[0] == 'PARAMETER':  # ['PARAMETER', '5', -1, 'Param0']
            op2 = op1_obtain_value(quad_actual)
            op1 = quad_actual[3]
            dictionary_update(op1, op2, quad_actual[0])
            stack.append(op1)
            # Peek at the current function name (pop, then push back).
            nombre = recover_function_name()
            save_function_name(nombre)
            # Bind the value to the callee's next parameter variable.
            var_id = regresa_variable_de_funcion(nombre)
            dictionary_update(var_id, op2, quad_actual[0])
        elif quad_actual[0] == 'GOSUB':
            # Function call: save the return address and jump.
            save_function_name(quad_actual[3])
            recuperar_linea.append(i)
            i = quad_actual[1] - 1
            logging.info('GOSUB encontrado, cambiando posicion a: "%s"',i+1)
        elif quad_actual[0] == '<':
            boolAux = False
            op1 = op1_obtain_value(quad_actual)
            op2 = op2_obtain_value(quad_actual)
            logging.info('COMPARACION: "%s < %s"',op1, op2)
            if (op1 < op2):
                boolAux = True
            dictionary_update_bool(quad_actual[3], boolAux)
        elif quad_actual[0] == '>':
            boolAux = False
            op1 = op1_obtain_value(quad_actual)
            op2 = op2_obtain_value(quad_actual)
            logging.info('COMPARACION: "%s > %s"',op1, op2)
            if (op1 > op2):
                boolAux = True
            dictionary_update_bool(quad_actual[3], boolAux)
        elif quad_actual[0] == '<=':
            boolAux = False
            op1 = op1_obtain_value(quad_actual)
            op2 = op2_obtain_value(quad_actual)
            logging.info('COMPARACION: "%s <= %s"',op1, op2)
            if (op1 <= op2):
                boolAux = True
            dictionary_update_bool(quad_actual[3], boolAux)
        elif quad_actual[0] == '>=':
            boolAux = False
            op1 = op1_obtain_value(quad_actual)
            op2 = op2_obtain_value(quad_actual)
            if (op1 >= op2):
                boolAux = True
            dictionary_update_bool(quad_actual[3], boolAux)
            logging.info('COMPARACION: "%s >= %s"',op1, op2)
        elif quad_actual[0] == '==':
            boolAux = False
            op1 = op1_obtain_value(quad_actual)
            op2 = op2_obtain_value(quad_actual)
            if (op1 == op2):
                boolAux = True
            dictionary_update_bool(quad_actual[3], boolAux)
            logging.info('COMPARACION: "%s == %s"',op1, op2)
        elif quad_actual[0] == 'ENDPROC':
            # Return from a function: resume right after the GOSUB.
            i = recuperar_linea.pop()
        elif quad_actual[0] == 'INPUT':
            op1 = quad_actual[3]
            logging.info('ENTRADA desde la Consola: "%s"', op1)
        elif quad_actual[0] == 'OUTPUT':
            if type(quad_actual[3]) is list:
                # Dimensioned (array) output is not implemented.
                dimvalue = 0
                pass
            else:
                print ("SALIDA desde la Consola: " )
                print (" "+ str(return_value_from_variable(quad_actual[3])) )
                logging.info('SALIDA desde la Consola: "%s"', str(return_value_from_variable(quad_actual[3])))
        elif quad_actual[0] == 'RETURN':
            # Store the return value under the caller-visible function name.
            op2 = return_value_from_variable(quad_actual[3])
            op1 = recover_function_name()
            dictionary_update(op1, op2, quad_actual[0])
        elif quad_actual[0] == 'END':
            print ("<----Termino de ejecutar el programa---->" )
            logging.info('')
            # Force the main loop to terminate.
            i = len(instructions)
            logging.info('El diccionario de variables usado fue: %s ',dir_stack)
            logging.info('Los cuadruplos utilizados fue: %s ',instructions)
            logging.info('<----TERMINA LA EJECUCION DEL PROGRAMA "%s"---->',file1)
        i+=1
#execute_program(1)
def run_program(argv):
    """Entry helper: forward the argument to the quadruple interpreter."""
    execute_program(argv)
def main(argv):
    """CLI entry point; argv[0] is forwarded to the interpreter."""
    run_program(argv[0])
    return 0
def target(*args):
    # Exposes `main` as the entry point — presumably the RPython
    # translation hook convention; confirm before removing.
    return main, None
# Run the VM directly from the command line.
if __name__ == '__main__':
    main(sys.argv)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.