text stringlengths 8 6.05M |
|---|
#!/usr/bin/env python3
# coding=utf8
"""Batch-process Firefox .xpi files in the current directory, raising em:maxVersion.

For every *.xpi here: read its install.rdf, find the maxVersion declared for the
Firefox application GUID, and if it is below the target, rewrite the archive with
the target version substituted in.
"""
import os
import re
import sys
import zipfile

# Target maxVersion; overridable via the first CLI argument.
maxversion = 100
# Matches the Firefox app GUID, then (lazily) "em:maxVersion", and captures the
# version token that follows (any run of chars not in >< ="/). DOTALL+IGNORECASE
# so it works across lines and attribute/element RDF styles.
ff_maxversion_reg = re.compile(
    br'(ec8030f7-c20a-464f-9b0e-13a3a9e97384.*?em:maxVersion.*?)([^>< ="/]+)',
    re.S | re.I)

if __name__ == '__main__':
    if len(sys.argv) > 1:
        maxversion = int(sys.argv[1])
    print('Target Maxversion: %s\n' % maxversion)
    for filename in os.listdir():
        print('%s ' % filename, end='')
        if os.path.isdir(filename) or not filename.lower().endswith('.xpi'):
            print('skip.')
            continue
        with zipfile.ZipFile(filename) as zin:
            rdf = zin.read('install.rdf')
            # Highest maxVersion currently declared for the Firefox GUID.
            version = 0
            for item in ff_maxversion_reg.finditer(rdf):
                match_obj = re.search(br'\d+', item.groups()[1])
                if match_obj and int(match_obj.group()) > version:
                    version = int(match_obj.group())
            if version >= maxversion:
                print('skip.')
                continue
            rdf = ff_maxversion_reg.sub(
                br'\g<1>' + str(maxversion).encode('utf8'), rdf)
            # Rebuild the archive with the patched install.rdf; copy all other members.
            with zipfile.ZipFile('new.xpi', 'w') as zout:
                zout.writestr('install.rdf', rdf)
                for item in zin.infolist():
                    if item.filename.lower() == 'install.rdf':
                        continue
                    zout.writestr(item, zin.read(item.filename))
        # os.replace is atomic where the OS supports it (remove+rename was not).
        os.replace('new.xpi', filename)
        print('done!')
|
#!/usr/bin/python2.7
# -*- coding:utf-8 -*-
'''
在一个字符串(0<=字符串长度<=10000,全部由字母组成)中找到第一个只出现一次的字符,
并返回它的位置, 如果没有则返回 -1(需要区分大小写).
'''
class Solution:
    """Find the first non-repeating character in a string (case-sensitive)."""

    def FirstNotRepeatingChar(self, s):
        """Return the index of the first character occurring exactly once in
        ``s``, or -1 if there is none (including the empty string).
        """
        # Count occurrences; dict.get avoids the O(n) `in res.keys()` lookup
        # pattern of the original.
        counts = {}
        for ch in s:
            counts[ch] = counts.get(ch, 0) + 1
        # First index whose character is unique. Empty s falls through to -1,
        # so no special-case length check is needed.
        for idx, ch in enumerate(s):
            if counts[ch] == 1:
                return idx
        return -1
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-29 02:33
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration (1.11.5, 2017-09-29).

    Re-points the four comment/message foreign keys at the login_reg.User
    model and drops user_dash's own User model.
    """

    dependencies = [
        ('user_dash', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='comment',
            name='message',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='m_comments', to='user_dash.Message'),
        ),
        migrations.AlterField(
            model_name='comment',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='u_comments', to='login_reg.User'),
        ),
        migrations.AlterField(
            model_name='message',
            name='poster',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='p_messages', to='login_reg.User'),
        ),
        migrations.AlterField(
            model_name='message',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='m_messages', to='login_reg.User'),
        ),
        # Removes the now-redundant local User model (auth moved to login_reg).
        migrations.DeleteModel(
            name='User',
        ),
    ]
|
import pandas as pd
#############################################
### Supply label and channel information: ###
#############################################
# 14-channel Emotiv-style EEG montage (electrode names, in recording order).
channels = ['F3','FC5','AF3','F7','T7','P7','O1','O2','P8','T8','F8','AF4','FC6','F4']
# Phoneme/vowel condition labels used to slice trials out of each stage.
trial_labels = ['fleece', 'trap', 'sh', 'v', 'p', 'n', 'm', 'z', 'goose', 'k', 's', 'zh', 't', 'ng', 'f', 'thought']
# Column layout OpenViBE expects on export (written by misc()/the overwrites below).
header_openvibe = ['Time:256Hz','Epoch','F3','FC5','AF3','F7','T7','P7','O1','O2','P8','T8','F8','AF4','FC6','F4','Event Id','Event Date','Event Duration']
# Column layout of the raw input CSVs.
header = ['Time:256Hz','Epoch','F3','FC5','AF3','F7','T7','P7','O1','O2','P8','T8','F8','AF4','FC6','F4','Label','Stage','Flag']
# Raw recordings per experiment stage; 'stimuli.csv' holds the hearing stage.
hearing = pd.read_csv('stimuli.csv')
thinking = pd.read_csv('thinking.csv')
speaking = pd.read_csv('speaking.csv')
hearing.columns = header
thinking.columns = header
speaking.columns = header
#################################################################################################
### Set functions to process OpenVibe event-related columns to relevant time/epoch/event data ###
#################################################################################################
def average_data(stage, seconds_per_epoch=0):
    """Average the stage's trials sample-by-sample.

    For each of the 256*seconds_per_epoch sample offsets, takes every
    (len/10)-th row starting at that offset and averages them, producing one
    averaged row per offset.
    # NOTE(review): the stride divisor 10 is hard-coded (presumably 10 trials
    # per condition) while the sample count uses seconds_per_epoch — confirm.
    """
    stride = int(len(stage['F3']) / 10)
    averaged = pd.DataFrame(columns=channels)
    for offset in range(256 * seconds_per_epoch):
        row_mean = pd.DataFrame(stage[offset::stride].mean(axis=0)).T
        averaged = pd.concat([averaged, row_mean])
    return averaged
def time(stage):
    """Return a 256 Hz timestamp column: one value per row of ``stage``,
    starting at 0 and stepping by 1/256 s.
    """
    # 1/256 is exactly representable in binary, so idx/256 equals the
    # original running sum bit-for-bit.
    return [idx / 256 for idx in range(len(stage['F3']))]
def epoch(stage):
    """Return an epoch-label column for ``stage``: each epoch index is
    repeated 32 times, for len(stage)//32 epochs (any remainder rows get no
    label — the list may be shorter than the frame).
    """
    # Kept from the original: the element-wise /32 does not change the length,
    # so this is len//32 epochs for a numeric column.
    n_epochs = int(len(stage['F3'] / 32) / 32)
    return [label for label in range(n_epochs) for _ in range(32)]
def misc(df, p, s):
    """Decorate ``df`` in place with OpenViBE bookkeeping columns.

    Prepends Time:256Hz and Epoch, then appends constant Event Id (``p``, the
    phoneme), Event Date (``s``, the stage name) and an 'n/a' Event Duration.
    """
    n_rows = len(df['F3'])
    df.insert(0, 'Time:256Hz', time(df))
    df.insert(1, 'Epoch', epoch(df))
    df['Event Id'] = [p] * n_rows
    df['Event Date'] = [s] * n_rows
    df['Event Duration'] = ['n/a'] * n_rows
################################################################################
### Create separate dataframes (.csvs) for each separate condition (phoneme) ###
################################################################################
# Seed the accumulators with the first condition ('fleece') for each stage.
hearing_label = hearing.loc[hearing['Label'] == 'fleece', channels]
thinking_label = thinking.loc[thinking['Label'] == 'fleece', channels]
speaking_label = speaking.loc[speaking['Label'] == 'fleece', channels]
hearing_average = average_data(hearing_label, seconds_per_epoch = 5)
thinking_average = average_data(thinking_label, seconds_per_epoch = 5)
speaking_average = average_data(speaking_label, seconds_per_epoch = 5)
misc(hearing_average, 'fleece', 'stimuli')
misc(thinking_average, 'fleece', 'thinking')
misc(speaking_average, 'fleece', 'speaking')
#########################################################################################
### Create total dataframe (.csv) combining all separate condition (phoneme) averages ###
#########################################################################################
# Append the remaining conditions to each stage's accumulator.
for phoneme in trial_labels[1:]:
    hearing_label = hearing.loc[hearing['Label'] == phoneme, channels]
    thinking_label = thinking.loc[thinking['Label'] == phoneme, channels]
    speaking_label = speaking.loc[speaking['Label'] == phoneme, channels]
    hearing_average_next = average_data(hearing_label, seconds_per_epoch = 5)
    thinking_average_next = average_data(thinking_label, seconds_per_epoch = 5)
    speaking_average_next = average_data(speaking_label, seconds_per_epoch = 5)
    misc(hearing_average_next, phoneme, 'stimuli')
    misc(thinking_average_next, phoneme, 'thinking')
    misc(speaking_average_next, phoneme, 'speaking')
    hearing_average = pd.concat([hearing_average, hearing_average_next])
    thinking_average = pd.concat([thinking_average, thinking_average_next])
    speaking_average = pd.concat([speaking_average, speaking_average_next])
################################################################################################
### Reconstruct experiment with false time and epoch values for OpenViBE processing only:    ###
################################################################################################
hearing_average['Time:256Hz'] = time(hearing_average)  # Overwrite the timestamps (easier for OpenVibe to process contiguous timestamps)
thinking_average['Time:256Hz'] = time(thinking_average)
speaking_average['Time:256Hz'] = time(speaking_average)
hearing_average['Epoch'] = epoch(hearing_average)  # Overwrite the epoch labels (easier for OpenVibe to process 1 epoch per second)
thinking_average['Epoch'] = epoch(thinking_average)
speaking_average['Epoch'] = epoch(speaking_average)
# Final per-stage exports for OpenViBE.
hearing_average.to_csv('hearing_average.csv', index=False)
thinking_average.to_csv('thinking_average.csv', index=False)
speaking_average.to_csv('speaking_average.csv', index=False)
from tkinter import *
import mysqlFunctions
import datetime
from tkinter import messagebox
from tkinter import ttk
class AdminWindow(mysqlFunctions.Common):
    """Tk admin control panel: register users, add lookup rows, view history.

    Each action replaces the right-hand form area (via self.destroyer(),
    inherited from mysqlFunctions.Common) and records its input widgets in
    self.variables / self.removable_widgets for submit() and later cleanup.
    """

    def __init__(self, master):
        mysqlFunctions.Common.__init__(self)
        self.master = master
        master.title("Admin control panel")
        master.geometry("500x500")
        self.create_widgets()
        self.grid_widgets()

    def create_widgets(self):
        """Build the left-hand action buttons (not yet gridded)."""
        self.upper_left_space = Label(self.master)  # spacer label
        self.register_candidate_button = Button(self.master, text='Register a candidate',
                                                command=self.register_candidate)
        self.register_recruiter_button = Button(self.master, text='Register a recruiter',
                                                command=self.register_recruiter)
        self.add_antikeim_button = Button(self.master, text='Add antikeim',
                                          command=self.add_antikeim)
        self.add_business_areas_button = Button(self.master, text='Add business areas',
                                                command=self.add_business_areas)
        self.changes_history_button = Button(self.master, text='Changes history', command=self.changes_history)

    def grid_widgets(self):
        """Lay the action buttons out in column 3, one per row."""
        self.upper_left_space.grid(padx=10, pady=0)
        self.register_candidate_button.grid(row=2, column=3, sticky=NSEW, ipady=2, ipadx=20, pady=5)
        self.register_recruiter_button.grid(row=3, column=3, sticky=NSEW, ipady=2)
        self.add_antikeim_button.grid(row=4, column=3, sticky=NSEW, ipady=2, pady=5)
        self.add_business_areas_button.grid(row=5, column=3, sticky=NSEW, ipady=2)
        self.changes_history_button.grid(row=6, column=3, sticky=NSEW, ipady=2, pady=5)

    def add_antikeim(self):
        """Show the 'add antikeim' form: title, description, parent combobox."""
        self.destroyer()
        # Variables
        input_title = StringVar()
        input_description = StringVar()
        # Labels
        title = Label(self.master, text=' Title')
        description = Label(self.master, text='Description')  # TODO child of belongs_to is applied automatically is this wrong?
        belongs = Label(self.master, text='Belongs to')
        # Entries and List boxes
        title_entry = Entry(self.master, textvariable=input_title)
        title_entry.insert(END, 'antikeim')  # pre-filled default title
        description_entry = Entry(self.master, textvariable=input_description)
        # Grid stuff
        title.grid(row=2, column=5, padx=10, sticky=E)
        title_entry.grid(row=2, column=6, ipady=1, sticky=E+W)
        description.grid(row=3, column=5, padx=10, sticky=E)
        description_entry.grid(row=3, column=6, sticky=E + W)
        belongs.grid(row=4, column=5, padx=10, sticky=E)
        belongs_list = mysqlFunctions.fetch_belongs()
        belongs_list.append('None')
        # Adding values by iterating belongs_list
        belongs_combobox = ttk.Combobox(self.master, state="readonly", values=[value for value in belongs_list])
        belongs_combobox.grid(row=4, column=6)
        submit_button = Button(self.master, text='Submit',
                               command=lambda: self.submit('antikeim', title_entry.get()))
        submit_button.grid(row=5, column=6, sticky=NSEW)
        # Widgets submit() reads values from, in insertion order.
        self.variables = [title_entry,
                          description_entry,
                          belongs_combobox]
        # Everything destroyer() must remove before the next form is shown.
        self.removable_widgets = [submit_button,
                                  title,
                                  title_entry,
                                  description,
                                  belongs,
                                  belongs_combobox,
                                  description_entry]

    def add_business_areas(self):
        """Show the 'add business area' form (same shape as add_antikeim)."""
        self.destroyer()
        # Variables
        input_title = StringVar()
        input_description = StringVar()
        # Labels
        title = Label(self.master, text='Title')
        description = Label(self.master, text='Description')
        belongs_to = Label(self.master, text='Belongs to')
        # Entries
        title_entry = Entry(self.master, textvariable=input_title)
        title_entry.insert(END, 'business area')  # pre-filled default title
        description_entry = Entry(self.master, textvariable=input_description)
        # Grid stuff
        title.grid(row=2, column=5, padx=10, sticky=E)
        title_entry.grid(row=2, column=6, ipady=1, sticky=E+W)
        description.grid(row=3, column=5, padx=10, sticky=E)
        description_entry.grid(row=3, column=6, sticky=E+W)
        belongs_to.grid(row=4, column=5, padx=10, sticky=E)
        submit_button = Button(self.master, text='Submit',
                               command=lambda: self.submit('business_areas', title_entry.get()))
        #submit_button = Button(self.master, text='Submit', command=submit)
        submit_button.grid(row=5, column=6, sticky=NSEW)
        belongs_list = mysqlFunctions.fetch_business_areas()
        belongs_list.append('None')
        # Adding values by iterating belongs_list
        belongs_to_combobox = ttk.Combobox(self.master, state="readonly", values=[value for value in belongs_list])
        belongs_to_combobox.grid(row=4, column=6)
        self.variables = [title_entry,
                          description_entry,
                          belongs_to_combobox]
        self.removable_widgets = [submit_button,
                                  title,
                                  title_entry,
                                  description,
                                  description_entry,
                                  belongs_to,
                                  belongs_to_combobox,
                                  ]

    def changes_history(self):
        """Show the history filters (table + user comboboxes). No query yet."""
        # TODO implement this
        self.destroyer()
        self.change_h_for_table = Label(self.master, text='Show history for table')
        self.change_h_for_table.grid(row=2, column=5)
        self.change_h_for_table_combobox = ttk.Combobox(self.master, state='readonly',
                                                        values=['candidate',
                                                                'recruiter',
                                                                'user',
                                                                'etaireia',
                                                                'job'])
        self.change_h_for_table_combobox.grid(row=2, column=6)
        self.change_h_for_user = Label(self.master, text='Show history for user')
        self.change_h_for_user.grid(row=3, column=5)
        users = mysqlFunctions.fetch_users()
        self.change_h_for_user_combobox = ttk.Combobox(self.master, state='readonly', values=[user for user in users])
        self.change_h_for_user_combobox.grid(row=3, column=6)
        self.removable_widgets = [self.change_h_for_user,
                                  self.change_h_for_user_combobox,
                                  self.change_h_for_table,
                                  self.change_h_for_table_combobox]

    def submit(self, table_name, primary_key):
        """Collect the current form's values and insert them into table_name.

        primary_key is only used in the success message. Recruiter/candidate
        rows get the current timestamp spliced in at position 4.
        """
        # TODO maybe reg_date is automatic
        self.info_list = []
        for var in self.variables:
            self.info_list.append(var.get())
        self.current_datetime = datetime.datetime.now()
        if table_name == 'recruiter' or table_name == 'candidate':
            self.info_list.insert(4, self.current_datetime.strftime("%Y-%m-%d %H:%M:%S"))
            result = mysqlFunctions.register(self.info_list, table_name)
        elif table_name == 'antikeim':
            result = mysqlFunctions.register(self.info_list, table_name)
        elif table_name == 'business_areas':
            result = mysqlFunctions.register(self.info_list, table_name)
        else:
            result = 'Error: no table %s exists' % table_name
        if result == 'Success':
            self.destroyer()
            messagebox.showinfo("Success", f'Registration of {primary_key} was a success')
        else:
            messagebox.showerror("Error", result)
if __name__ == '__main__':
    # This is to help debugging without the need to log in each time:
    # launch the admin panel directly as a standalone Tk application.
    root = Tk()
    app = AdminWindow(root)
    root.mainloop()
|
# !/usr/bin/env python3
# coding: utf-8
# -*- coding: utf-8 -*-
from PyQt5.QtWidgets import QFrame, QLineEdit, QTextEdit
class Card(QFrame):
    """Movable note card: a colored QFrame holding a title line-edit and,
    optionally, a content text-edit. Supports color cycling, focus borders,
    and simple rectangle collision tests.
    """

    def __init__(self, parent, id, has_text_field=True):
        super(Card, self).__init__()
        self.setParent(parent)
        self.id = id
        self.DEFAULT_COLOR = "rgb(85, 170, 255)"
        self.DEFAULT_BORDER = "none"
        # Palette cycled by next_color()/previous_color().
        self.available_colors = [self.DEFAULT_COLOR, '#c0392b', '#2ecc71', '#f1c40f', '#1abc9c']
        self.color_index = 0
        self.color = self.available_colors[self.color_index]
        self.border = self.DEFAULT_BORDER
        # Border to restore on unfocus(). NOTE: attribute keeps the original
        # (misspelled) name so any external users are unaffected.
        self.unfocued_border = "none"
        self.has_text_field = has_text_field
        self.is_focused = False
        self.setup_ui()

    def setup_ui(self):
        """Create child widgets and apply the initial layout and style."""
        self.title_field = QLineEdit()
        self.content_field = QTextEdit()
        self.setup_default_card()
        # Fixed: was `is False` (identity test); use plain boolean logic.
        if not self.has_text_field:
            self.setup_title_only_card()
        self.update_stylesheet()
        self.setMouseTracking(True)

    # Switches between default and only_title card types
    def toggle_type(self):
        self.has_text_field = not self.has_text_field
        if not self.has_text_field:  # fixed: was `is not True`
            self.setup_title_only_card()
        else:
            self.setup_default_card()

    # Sets card to card with only a title textbox
    def setup_title_only_card(self):
        self.content_field.setParent(None)  # detach the content box
        self.resize(281, 80)
        self.title_field.resize(230, 60)
        self.title_field.move(25, 10)
        self.set_title_font(20)

    # Sets card to default card style
    def setup_default_card(self):
        self.setup_frame()
        self.setup_title()
        self.setup_content()
        self.set_title_font(12)

    # Sets font size of title textbox to passed size
    def set_title_font(self, font_size):
        self.title_field.selectAll()
        font = self.title_field.font()
        font.setPointSize(font_size)
        self.title_field.setFont(font)

    def next_color(self):
        """Cycle forward through available_colors (wraps at the end)."""
        self.color_index = self.color_index + 1
        # Fixed: was `len(...) is self.color_index` — `is` on ints relies on
        # interpreter caching and is not an equality test.
        if len(self.available_colors) == self.color_index:
            self.color_index = 0
        self.set_background_color(self.available_colors[self.color_index])

    def previous_color(self):
        """Cycle backward through available_colors (wraps at the start)."""
        self.color_index = self.color_index - 1
        if self.color_index == -1:  # fixed: was `is -1`
            self.color_index = len(self.available_colors) - 1
        self.set_background_color(self.available_colors[self.color_index])

    # Sets card background to passed color.
    def set_background_color(self, color):
        self.color = color
        self.update_stylesheet()

    # Sets border of card to passed border style
    def set_border(self, border):
        self.border = border
        self.update_stylesheet()

    def update_stylesheet(self):
        """Re-apply the stylesheet from the current color and border."""
        self.setStyleSheet("background-color: " + self.color +
                           "; border: " + self.border +
                           "; border-radius: 5px;")

    # Sets up size of card
    def setup_frame(self):
        self.resize(281, 181)
        self.setVisible(True)

    # Sets up title textbox
    def setup_title(self):
        self.title_field.resize(146, 29)
        self.title_field.move(67, 10)
        self.title_field.setParent(self)
        self.title_field.setStyleSheet('background-color: white')
        self.title_field.setVisible(True)

    # Sets up content textbox
    def setup_content(self):
        self.content_field.resize(261, 121)
        self.content_field.move(10, 50)
        self.content_field.setParent(self)
        self.content_field.setStyleSheet('background-color: white; font-size: 12px;')
        self.content_field.setVisible(True)

    # Source:
    # https://stackoverflow.com/questions/5899826/pyqt-how-to-remove-a-widget
    # Deletes card.
    def delete(self):
        self.setParent(None)

    # Gives card focus.
    def focus(self):
        self.unfocued_border = self.border  # remember border to restore later
        self.set_border("2px solid #f39c12")
        self.title_field.setFocus()
        self.raise_()
        self.is_focused = True

    # Removes focus from card.
    def unfocus(self):
        self.set_border(self.unfocued_border)
        self.is_focused = False

    # Returns center of card.
    def center(self):
        x = self.pos().x() + (self.size().width() / 2)
        y = self.pos().y() + (self.size().height() / 2)
        return x, y

    # Moves card to passed coordinates.
    def move_to(self, x, y):
        self.setGeometry(x, y, self.size().width(), self.size().height())

    # Checks if given point collides with passed widget.
    def collides_with(self, widget, new_x, new_y):
        x1 = widget.pos().x()
        x2 = x1 + widget.size().width()
        y1 = widget.pos().y()
        y2 = y1 + widget.size().height()
        # Point-in-rectangle test, returned directly instead of if/else.
        return x1 <= new_x <= x2 and y1 <= new_y <= y2

    # Source:
    # https://stackoverflow.com/questions/23302698/java-check-if-two-rectangles-overlap-at-any-point
    # Checks if card collides with widget.
    def collides(self, widget):
        x = self.pos().x()
        y = self.pos().y()
        width = self.size().width()
        height = self.size().height()
        width_fits = x < widget.pos().x() + widget.size().width() and x + width > widget.pos().x()
        height_fits = y < widget.pos().y() + widget.size().height() and y + height > widget.pos().y()
        return width_fits and height_fits

    # Checks if given point collides with passed window frame.
    def hits_window_frame(self, window_frame, new_x, new_y):
        height_fits = new_y <= 0 or new_y + self.size().height() >= window_frame.size().height()
        # NOTE(review): `new_x >= 0 or ...` is nearly always true — the left
        # edge check looks like it should be `new_x <= 0`; confirm intent
        # before changing (behavior kept as-is).
        width_fits = new_x >= 0 or new_x + self.size().width() >= window_frame.size().width()
        return height_fits and width_fits
|
# Send Email module
|
# Comprehensions
# List comprehension
# Format: [expression for variable in iterable]
# Building a 0..9 list the conventional way:
# list1 = []
# for i in range(10):
#     list1.append(i)
# print(list1)
# With a list comprehension:
# list2 = [x for x in range(10)]
# print(list2)
#
# list3 = [x for x in range(10) if x % 2 == 0]  # with an `if` filter
# print(list3)
#
# list4 = [x*x for x in range(5)]  # x*x is the produced element
# print(list4)
#
# list5 = [i+j for i in range(5) for j in range(5)]  # two nested fors (three also work)
# print(list5)
# Dict comprehension
dict1 = {k: v for k, v in {"name": "xiaoming", "age": 20}.items()}
print(dict1)
# Generator expression — note: there is no "tuple comprehension"; parentheses
# produce a lazy generator, not a tuple.
tuple1 = (x for x in range(10))
print(tuple1)  # <generator object <genexpr> at 0x...>; must be iterated to get values
for i in tuple1:
    print(i, end=" ")
# A for loop works on any iterable regardless of its concrete type; test for
# iterability with collections.abc.Iterable.
# Fixed: importing Iterable straight from `collections` was deprecated and
# removed in Python 3.10 — it must come from collections.abc.
from collections.abc import Iterable
print(isinstance('abc', Iterable))  # is str iterable?
|
# simple HTTP to OSC routing (Python 2 / Flask)
import OSC
import logging
import time
import datetime
from flask import Flask, Response, jsonify, json, request

app = Flask(__name__)
# Shared OSC client; connected to localhost:9998 under __main__ at the bottom
# of this file.
c = OSC.OSCClient()
# Log to a per-day file, e.g. "oschttp2019-01-01.log".
file_handler = logging.FileHandler('oschttp'+str(datetime.datetime.today().date())+'.log')
app.logger.addHandler(file_handler)
app.logger.setLevel(logging.INFO)
@app.route("/")
def hello():
print "gott'd"
oscmsg = OSC.OSCMessage()
oscmsg.setAddress("/startup")
oscmsg.append('HELLO')
c.send(oscmsg)
return "Hello World!"
@app.route("/meow")
@app.route("/meow/")
def goodbye():
print "bye'd"
return "Good bye!"
@app.route("/json")
def jsonny():
print "json'd"
list = [
{'state': 0, 'row': 2, 'col': 3},
{'param': 'ahoy', 'val': 2}
]
return jsonify(results=list)
@app.route('/messages', methods=['GET', 'POST'])
def message_time():
    """Dispatch on Content-Type: echo text, forward JSON as OSC, or dump binary."""
    print "received json request: " + str(datetime.datetime.now())
    print json.dumps(request.json)
    # NOTE(review): informational messages are logged at ERROR level throughout.
    app.logger.error("received json request: " + str(datetime.datetime.now()))
    app.logger.error(json.dumps(request.json))
    print request.headers
    print request.data
    print request.json
    # app.logger.error("was there an error?")
    if request.headers['Content-Type'] == 'text/plain':
        return "Text Message: " + request.data
    elif request.headers['Content-Type'] == 'application/json':
        print request.json['row']
        print request.json['col']
        print request.json['state']
        print request.json['device_id']
        # Forward row/col/state/device_id to the OSC endpoint.
        oscmsg = OSC.OSCMessage()
        oscmsg.setAddress("/startup")
        oscmsg.append(request.json['row'])
        oscmsg.append(request.json['col'])
        oscmsg.append(request.json['state'])
        oscmsg.append(int(request.json['device_id']))
        c.send(oscmsg)
        print "sent OSC message at: " + str(datetime.datetime.now())
        app.logger.error("sent OSC message at: " + str(datetime.datetime.now()))
        resp = Response(json.dumps(request.json), status=200, mimetype='application/json')
        # return "JSON Message: " + json.dumps(request.json)
        return resp
    elif request.headers['Content-Type'] == 'application/octet-stream':
        f = open('./binary', 'wb')
        f.write(request.data)
        f.close()
        return "Binary message written!"
    else:
        return "415 Unsupported Media Type ;)"
    return "crap"  # NOTE(review): unreachable — every branch above returns
@app.route('/wowee/<int:_row>/<int:_col>/<int:_state>', methods=['GET', 'POST'])
def wowter(_row, _col, _state):
    """Forward row/col/state from the URL as one '/startup' OSC message."""
    print _row
    oscmsg = OSC.OSCMessage()
    oscmsg.setAddress("/startup")
    oscmsg.append(_row)
    oscmsg.append(_col)
    oscmsg.append(_state)
    c.send(oscmsg)
    return "GOOD JOB@"
@app.route('/opencol/<int:_col>/', methods=['GET', 'POST'])
def colopen(_col):
    """Send an 'open' message (state=2) for each of the 9 rows in column _col."""
    print _col
    if(_col >= 0 and _col < 17):
        for i in range(9):
            oscmsg = OSC.OSCMessage()
            oscmsg.setAddress("/startup")
            oscmsg.append(i)
            oscmsg.append(_col)
            oscmsg.append(2)
            c.send(oscmsg)
            time.sleep(0.02)  # throttle so the OSC receiver keeps up
            print "opened row " + str(i) + " of col " + str(_col)
        return "opened col " + str(_col)
    else:
        return "col: " + str(_col) + " is out of range"
@app.route('/openrow/<int:_row>/', methods=['GET', 'POST'])
def rowopen(_row):
print _row
if(_row >= 0 and _row < 9):
for i in range(17):
oscmsg = OSC.OSCMessage()
oscmsg.setAddress("/startup")
oscmsg.append(_row)
oscmsg.append(i)
oscmsg.append(2)
c.send(oscmsg)
time.sleep(0.02)
print "opened col " + str(i) + " of row " + str(_row)
return "opened col " + str(_row)
else:
return "row: " + str(_row) + " is out of range"
if __name__ == "__main__":
c.connect(('127.0.0.1', 9998))
print "started flask server: " + str(datetime.datetime.now())
app.logger.error("started flask server: " + str(datetime.datetime.now()))
app.run(host='0.0.0.0', port=5000, debug=True)
|
from PIL import Image
import math
import colorsys
import sys, os, struct
def konwertuj(path):
    """Convert a .jpg/.png image into a text file of RGB triples (Python 2).

    The output file shares the input's basename with a .txt extension and
    contains "<width> <height> " followed by zero-padded "RRR GGG BBB "
    triples, written column by column.
    """
    print path
    if (os.path.splitext(path)[1][1:] != "jpg" and os.path.splitext(path)[1][1:] != "png"):
        print("\tBledny format pliku")  # Polish: "invalid file format"
    else:
        im = Image.open(path)
        img = im.convert('RGB')
        baseWidth, baseHeight = img.size
        height = 60  # fixed output height; width scaled to keep aspect ratio
        # NOTE(review): relies on Python 2 integer division — under Python 3
        # this is a float and resize() would reject it.
        width = (height * baseWidth) / baseHeight
        img = img.resize((width,height), Image.ANTIALIAS)
        try:
            f = open(os.path.splitext(path)[0]+".txt", "w")
            try:
                f.write("%s %s " % (width, height))
                for x in range(width):
                    for y in range(height):
                        r, g, b = img.getpixel((x, y))
                        f.write("%03d %03d %03d " %(r, g, b))
            finally:
                f.close()
        except IOError:
            # Best effort: silently skip files that cannot be written.
            pass
def main():
    """Convert every file (or every file in each directory) named on argv."""
    if len(sys.argv) == 1:
        print('Podaj pliki')  # Polish: "provide files"
        sys.exit(1)
    else:
        for path in sys.argv[1:]:
            if (os.path.isfile(path)):
                konwertuj(path)
            elif (os.path.isdir(path)):
                # Non-recursive: only the directory's direct entries.
                for files in os.listdir(path):
                    konwertuj(path + "/" + files)
            else:
                print("Brak pliku/katalogu " + path)  # Polish: "no such file/dir"
    print(" ")

if __name__ == '__main__':
    main()
|
# -*-coding:Utf-8 -*
"""Ce module contient la classe Labyrinthe."""
class Labyrinthe:
    """Represent a maze: robot position, game grid, the player's last
    instruction and its repeat count.
    """

    def __init__(self, map):
        # NOTE: the `map` parameter shadows the builtin; the name is kept for
        # caller compatibility. It must expose .labyrinthe (grid) and .nom.
        self.robot_x = -1
        self.robot_y = -1
        self.robot_x_old = -1
        # Fixed: robot_x_old was assigned twice and robot_y_old never set.
        self.robot_y_old = -1
        self.grille_labyrinthe = map.labyrinthe
        self.grille_name = map.nom
        self.porte_passe = False
        self.rep_instruction = 1
        # Character the robot is currently standing on (restored when it moves).
        self.old_case_replace_by_robot = ' '
        self.instruction = 'A'
        self.robot_x = recup_x_robot(self.grille_labyrinthe)
        self.robot_y = recup_y_robot(self.grille_labyrinthe)
        if self.robot_x == -1 or self.robot_y == -1:
            print("ERREUR LE PRGM N'A PAS TROUVE LA POSITION DU ROBOT (x, y)")
def recup_x_robot(grille):
    """Return the 0-based column of the robot marker 'X' in the grid, or -1
    if no row contains it.
    """
    for ligne in grille:
        for colonne, case in enumerate(ligne):
            if case == 'X':
                return colonne
    return -1
def recup_y_robot(grille):
    """Return the 0-based row containing the robot marker 'X', or -1 if the
    grid has no robot.
    """
    for numero_ligne, ligne in enumerate(grille):
        if 'X' in ligne:
            return numero_ligne
    return -1
|
from django.shortcuts import render_to_response
from django.db.models import Q
from django.template import RequestContext
from listing.models import Listing
from accounts.models import UserProfile
from geogeld.settings import DISPLAY_LISTINGS_PER_PAGE
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
NEIGHBOURHOOD_RADIUS = 3000 # 3 km
def home(request, template_name='geogeld/index.html'):
    """Paginated listing overview; authenticated users get listings ordered
    by distance from their profile location.

    NOTE(review): written against an old Django/GeoDjango API —
    ``request.user.is_authenticated()`` as a call and ``QuerySet.distance()``
    are both gone in modern Django; would need updating to run today.
    """
    try:
        page = int(request.GET.get('page', 0))
    except ValueError:
        page = 0
    # Earlier manual slicing, superseded by Paginator below:
    # start_listing = page*DISPLAY_LISTINGS_PER_PAGE
    # end_listing = start_listing + DISPLAY_LISTINGS_PER_PAGE
    # listings = Listing.objects.all()[start_listing:end_listing]
    listings = Listing.objects.all()
    if request.user.is_authenticated():
        userprofile = UserProfile.objects.get(id=request.user.id)
        loc = userprofile.location
        # Circle of NEIGHBOURHOOD_RADIUS around the user's location.
        neighbourhood = loc.buffer(NEIGHBOURHOOD_RADIUS)
        # within | disjoint covers everything; the point is the distance
        # annotation and ordering, not the filter itself.
        listings = Listing.objects.filter(
            Q(location__within=neighbourhood) |
            Q(location__disjoint=neighbourhood))\
            .distance(userprofile.location)\
            .order_by('location')  # [start_listing:end_listing]
        # listings = Listing.objects.filter(Q(location__within=neighbourhood) | Q(location__disjoint=neighbourhood)).distance(userprofile.location).order_by('location')[start_listing:end_listing]
    paginator = Paginator(listings, DISPLAY_LISTINGS_PER_PAGE)  # Show 25 contacts per page
    try:
        listings = paginator.page(page)
    except PageNotAnInteger:
        # If page is not an integer, deliver first page.
        listings = paginator.page(1)
    except EmptyPage:
        # If page is out of range (e.g. 9999), deliver last page of results.
        listings = paginator.page(paginator.num_pages)
    context = {
        'listings': listings,
        'paginator': paginator,
        'before_last_page': paginator.num_pages - 2,
    }
    return render_to_response(template_name, context, RequestContext(request))
def login(request, template_name='geogeld/login.html'):
    """Render the static login page with an empty template context."""
    return render_to_response(template_name, {}, RequestContext(request))
import random
import secrets
import string
import time
from collections import defaultdict

import boto3
region = 'us-west-2'
def passwordGenerator(stringLength=20):
    """Generate a random password of ``stringLength`` characters drawn from
    letters, digits and punctuation.

    Fixed: uses secrets.choice instead of random.choice — the `random` module
    is not cryptographically secure and must not be used for passwords.
    """
    password_characters = string.ascii_letters + string.digits + string.punctuation
    return ''.join(secrets.choice(password_characters) for _ in range(stringLength))
print("RDS Instance has been created with a random Master Password.")
print(f"Please give this to the customer via LastPass: {passwordGenerator(20)}")
# AMI
#ami_id = 'ami-0f2176987ee50226e' # Amazon Linux AMI
ami_id = 'ami-082b5a644766e0e6f' # Amazon Linux 2
#ami_id = 'ami-02deb4589e0f0d95e' # Rhel 7.6 ami-02deb4589e0f0d95e
# ami_id = 'ami-0d705356e2616369c' # Windows Server 2016
#keyname = 'aws-corpinfo-msp'
keyname = 'oregon'
instance_type = 't2.small'
# subnet_id = 'subnet-594fb32f' # Shared Services VPC Protected A
# sg_1 = 'sg-85375fe3' # SG-SS-MGMT-ALLTRAFFIC-OUT
# sg_2 = 'sg-ed08608b' #SG-SS-MGMT-CORESERVICES
# sg_3 = 'sg-c00961a6' #SG-SS-MGMT-RDPSSH-IN
# EBS
root_drive = '/dev/sda1'
root_drive_size = 80
root_drive_type = 'gp2'
# second_drive = 'xvdd'
# second_drive_size = 100
# second_drive_type = 'gp2'
ec2 = boto3.resource('ec2', region_name=region)
instance = ec2.create_instances(
ImageId=ami_id,
MinCount=1,
MaxCount=1,
InstanceType=instance_type,
BlockDeviceMappings=[
{
'DeviceName': root_drive,
'Ebs': {
'VolumeSize': root_drive_size,
'VolumeType': root_drive_type,
'Encrypted': True,
},
},
# {
# 'DeviceName': second_drive,
# 'Ebs': {
# 'VolumeSize': second_drive_size,
# 'VolumeType': second_drive_type,
# 'Encrypted': True,
# },
#},
],
KeyName=keyname,
# SecurityGroupIds=[
# ],
# NetworkInterfaces=[
# {
# 'AssociatePublicIpAddress': False,
# 'DeviceIndex': 0,
# 'SubnetId': subnet_id,
# 'Groups': [
# sg_1,
# sg_2,
# sg_3,
# ]
# }
# ],
TagSpecifications=[
{
'ResourceType': 'instance',
'Tags': [
{
"Key": "Application",
"Value": "Windchill"
},
{
"Key": "ApplicationTier",
"Value": "Application"
},
{
"Key": "ApplicationTierLevel",
"Value": "No Tier"
},
{
"Key": "Managed",
"Value": "Yes"
},
{
"Key": "Environment",
"Value": "Development"
},
{
"Key": "Name",
"Value": passwordGenerator(20)
},
{
"Key": "CorpInfoMSP:TakeNightlySnapshot",
"Value": "No"
},
{
"Key": "FileBackup",
"Value": "No"
},
{
"Key": "MonitoredServices",
"Value": "No"
},
{
"Key":"RequestNumber",
"Value":"RITM0032252"
},
{
"Key": "OperationalHours",
"Value": "24x7"
},
{
"Key": "ReviewDate",
"Value": "6/25/2019"
},
{
"Key": "CostCenter",
"Value": "1001596013"
},
{
"Key": "ServiceLocation",
"Value": "Irvine"
},
{
"Key": "ServiceOwner",
"Value": "Amir Memaran"
},
{
"Key": "TechnicalOwner",
"Value": "Alek Slavuk"
},
{
"Key": "ContactPreference",
"Value": "Email"
},
{
"Key": "PatchGroup",
"Value": "PilotAutoReboot"
},
{
"Key": "Schedule",
"Value": "24x7"
},
{
"Key": "Purpose",
"Value": "N/A"
},
{
"Key": "Validated",
"Value": "No"
}
]
},
{
'ResourceType': 'volume',
'Tags': [
{
"Key": "Application",
"Value": "Windchill"
},
{
"Key": "ApplicationTier",
"Value": "Application"
},
{
"Key": "ApplicationTierLevel",
"Value": "No Tier"
},
{
"Key": "Managed",
"Value": "Yes"
},
{
"Key": "Environment",
"Value": "Development"
},
{
"Key": "Name",
"Value": "AWOR-SBPDMAPP01"
},
{
"Key": "CorpInfoMSP:TakeNightlySnapshot",
"Value": "No"
},
{
"Key": "FileBackup",
"Value": "No"
},
{
"Key": "MonitoredServices",
"Value": "No"
},
{
"Key":"RequestNumber",
"Value": "RITM0032252"
},
{
"Key": "OperationalHours",
"Value": "24x7"
},
{
"Key": "ReviewDate",
"Value": "6/25/2019"
},
{
"Key": "CostCenter",
"Value": "1001596013"
},
{
"Key": "ServiceLocation",
"Value": "Irvine"
},
{
"Key": "ServiceOwner",
"Value": "Amir Memaran"
},
{
"Key": "TechnicalOwner",
"Value": "Alek Slavuk"
},
{
"Key": "ContactPreference",
"Value": "Email"
},
{
"Key": "PatchGroup",
"Value": "PilotAutoReboot"
},
{
"Key": "Schedule",
"Value": "24x7"
},
{
"Key": "Purpose",
"Value": "Windchill 11.2 Sandbox System"
},
{
"Key": "Validated",
"Value": "No"
}
]
}
]
)
time.sleep(2)
instance_status = ec2.instances.filter(Filters=[{
'Name': 'instance-state-name',
'Values': ['running']}])
ec2info = defaultdict()
for instance in instance_status:
ec2info[instance.id] = {
'Type': instance.instance_type,
'ID': instance.id,
'Private IP': instance.private_ip_address,
'State': instance.state['Name'],
}
attributes = ['Type', 'ID', 'Private IP', 'State']
for instance_id, instance in ec2info.items():
for key in attributes:
print("{0}: {1}".format(key, instance[key]))
print("-------------------------") |
from django.shortcuts import render
from django.http import HttpResponse
from photo.models import MyPhoto
# Create your views here.
def photo_test(request):
    """Smoke-test view: returns a plain 'hello world!' response."""
    return HttpResponse('hello world!')
def photo_view(request):
    """Render the photo index template with every MyPhoto row."""
    photo_list = MyPhoto.objects.all()
    return render(request, 'photo/index.html', {'photo_list': photo_list})
    # Fixed: removed a stray unreachable `pass` after the return.
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
import time
import pandas as pd
# Mercado Livre login entry point (redirects to the WMS console on success).
login_url = 'https://www.mercadolivre.com/jms/mlb/lgz/login?platform_id=ML&go=https%3A%2F%2Fwms.mercadolivre.com.br%2F&loginType=explicit'
# SECURITY NOTE(review): credentials are placeholders here — never commit real
# values; load them from environment variables or a secret store instead.
data = {
    'username': 'USER_ACESS',
    'password': 'PASSWORD_ACESS',
}
def openDriver():
    """Return a headless Chrome WebDriver with basic anti-bot tweaks.

    Uses the Chrome DevTools Protocol to inject scripts on every new page
    that hide the `navigator.webdriver` flag and fake a plugin list, so the
    site is less likely to detect Selenium.
    """
    # i put some security measures so website can't detect that u use selenium, just incase!
    options = webdriver.ChromeOptions()
    options.add_experimental_option("useAutomationExtension", False)
    options.add_argument("--headless")
    options.add_argument('--no-sandbox')
    options.add_argument('--disable-dev-shm-usage')
    driver = webdriver.Chrome(options=options)
    # Implicit wait: element lookups retry for up to 10s before failing.
    driver.implicitly_wait(10)
    # Mask navigator.webdriver (a common Selenium fingerprint).
    driver.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
        "source": """
            Object.defineProperty(navigator, 'webdriver', {
                get: () => undefined
            })
        """
    })
    # Fake a non-empty plugin list (headless Chrome normally reports none).
    driver.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
        "source": """
            Object.defineProperty(navigator, 'plugins', {
                get: () => '[1,2,3]'
            })
        """
    })
    return driver
# open driver
driver = openDriver()

# open login page
driver.get(login_url)

# type username (wait until the field is clickable)
userBox = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH ,'//*[@id="user_id"]')))
userBox.send_keys(data['username'])

# click on the continue button
# NOTE(review): find_element_by_xpath is removed in Selenium 4;
# driver.find_element(By.XPATH, ...) is the replacement.
contineButton = driver.find_element_by_xpath('//*[@id="login_user_form"]/div[2]/button')
contineButton.click()

# type password
passBox = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH ,'//*[@id="password"]')))
passBox.send_keys(data['password'])

# click on the login button
loginButton = driver.find_element_by_xpath('//*[@id="action-complete"]')
loginButton.click()

# scrape url: movements report, one record per page (limit=1), offset paginated
report_final = []
url = 'https://wms.mercadolivre.com.br/reports/movements?process_name=transfer_multi_warehouse&external_references.transfer_plan_id=1951,2198,2233,2171,2134,2190,2172,2102,2041,2043,2191,2067,2011,2040,2162,2049,2008,1990,1991,1992,1952,2078,1765,1790,1823,1811,1810,1748,1777,1747,1764,1737,1729,1766,1708,831,843,854,638,703,704,746,774,804,821,832,829,830,845,848&date_from=2019-10-01&date_to=2021-03-17&limit=1&offset='
for x in range(1,291112):
    driver.get(url+str(x))
    html = driver.page_source
    soup = BeautifulSoup(html, 'html.parser')
    content = soup.find_all('table', class_='andes-table table table-sticky')
    for dados in content:
        # NOTE(review): every soup.find/find_all below rescans the whole page;
        # the five identical find_all(...--center) calls could be hoisted into
        # one list to avoid repeated scans.
        td = soup.find('td', class_='andes-table__column andes-table__column--left').text
        process= soup.find('td', class_='andes-table__column andes-table__column--center single-line').text
        inventory_id = soup.find('a', class_='inventory-id-code').text
        tp = [my_tag.text for my_tag in soup.find_all(class_="andes-table__column andes-table__column--center")][0]
        qty = [my_tag.text for my_tag in soup.find_all(class_="andes-table__column andes-table__column--center")][2]
        origem = [my_tag.text for my_tag in soup.find_all(class_="andes-table__column andes-table__column--center")][3]
        destino = [my_tag.text for my_tag in soup.find_all(class_="andes-table__column andes-table__column--center")][4]
        user = [my_tag.text for my_tag in soup.find_all(class_="andes-table__column andes-table__column--center")][6]
        # NOTE(review): 'data' here is the login-credentials dict defined
        # above, so usernames/passwords end up in the CSV -- a date value was
        # probably intended; confirm.
        movements_report = {
            'td':td,
            'process':process,
            'inventory_id':inventory_id,
            'tp':tp,
            'qty':qty,
            'origem':origem,
            'destino':destino,
            'data':data,
            'user':user
        }
        report_final.append(movements_report)

# Persist and summarize the scraped rows.
df = pd.DataFrame(report_final)
df.to_csv('report_erros_sistemicos_multiwhse_pt4.csv', index=False)
print(df.head())
print(len(report_final))
print(soup.title)

# close driver
driver.close()
import board
import neopixel
import time

# Drive a 20-pixel NeoPixel strip attached to GPIO18.
pixels = neopixel.NeoPixel(board.D18, 20)

# Bug fix: `For` is not a Python keyword -- the capitalized loops were a
# SyntaxError; lowercased to `for`.
# First four pixels: bright red.
for i in range(4):
    pixels[i] = (255, 0, 0)
# Pixels 5..19: dim blue.
# NOTE(review): pixel index 4 is skipped (range(4) then range(5, 20)) --
# confirm whether that gap is intentional.
for i in range(5, 20):
    pixels[i] = (0, 0, 10)
# @see https://adventofcode.com/2015/day/12
import json
# Puzzle input: a single line of JSON.
with open('day12_input.txt', 'r') as f:
    doc = json.loads(f.readline())
def calc_balance(d, acc: int = 0):
    """Recursively sum every integer in a parsed-JSON structure.

    Args:
        d: Any JSON value (int, str, list, or dict). Strings are ignored.
        acc: Running total carried through the recursion.

    Returns:
        acc plus the sum of all ints found anywhere inside ``d``.
    """
    # bool is a subclass of int; exclude it so JSON true/false add nothing
    # (this matches the original type(d) == int behavior).
    if isinstance(d, bool):
        pass
    elif isinstance(d, int):
        acc += d
    elif isinstance(d, str):
        pass
    elif isinstance(d, list):
        for v in d:
            acc = calc_balance(v, acc)
    elif isinstance(d, dict):
        for v in d.values():
            acc = calc_balance(v, acc)
    return acc
def calc_balance_sans_red(d, acc: int = 0):
    """Recursively sum integers, skipping any dict with a value of 'red'.

    Same as calc_balance, except a dict containing the value "red" (and
    everything nested inside it) contributes nothing. Lists containing
    "red" are NOT skipped.

    Args:
        d: Any JSON value (int, str, list, or dict).
        acc: Running total carried through the recursion.

    Returns:
        acc plus the sum of all qualifying ints inside ``d``.
    """
    # bool is a subclass of int; exclude it so JSON true/false add nothing.
    if isinstance(d, bool):
        pass
    elif isinstance(d, int):
        acc += d
    elif isinstance(d, str):
        pass
    elif isinstance(d, list):
        for v in d:
            acc = calc_balance_sans_red(v, acc)
    elif isinstance(d, dict) and 'red' not in d.values():
        for v in d.values():
            acc = calc_balance_sans_red(v, acc)
    return acc
# Part 1: sum of every number in the document.
print('------------ PART 01 -------------')
print('Balance:', calc_balance(doc))

# Part 2: same, but ignore any object containing the value "red".
print('\n------------ PART 02 -------------')
print('Balance:', calc_balance_sans_red(doc))
# Supported target platform identifiers in <os>-<arch> form.
PLATFORM_LIST = ['linux-x64', 'darwin-x64']
# Santosh Khadka
'''
Python Set
- Won't take any duplicate items
'''

s1 = set()
s1.add(4) # Takes only one argument
s1.add(5)
s1.add(4)
# print(s1) # {4, 5} ; Did not add the duplicate 4
s1.add(1)
s1.add(3)
s1.add(10)
# print(s1) # {1, 3, 4, 5, 10} ; small ints happen to display in order, but sets are unordered

''' Clear '''
s1.clear() # Makes empty set
# print(s1)

''' Copy '''
s1 = {1, 4, 5, 6, 7, 0}
# print(s1) # {0, 1, 4, 5, 6, 7}
s2 = s1.copy()
# print(s2) # {0, 1, 4, 5, 6, 7}

''' Difference '''
s2 = {12, 4, 5, 7, 12, 11}
# print(s1.difference(s2)) # {0, 1, 6} ; Prints the elements s1 has that s2 doesn't
# print(s2.difference(s1)) # {11, 12}

s1 = {1, 2, 3}
s2 = {1, 4, 5}
s1.difference_update(s2) # No return. Done in place.
# print(s1) # {2, 3} ; Removed from s1 every element that also appears in s2

''' Discard '''
s1 = {1, 2, 3, 4}
s1.discard(3) # No error if value was not in the set.
# print(s1) # {1, 2, 4}  (was wrongly documented as {1, 3, 4})

''' Intersection : Elements that are common to all the sets '''
s1 = {1, 2, 3}
s2 = {1, 2, 4}
print(s1.intersection(s2)) # {1, 2}
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Module for managing data packets. Useful for sending data to a
microcontroller over a serial connection. This module adds control characters
and a checksum to a list of integers. It returns the new list.
Packet structure: Each packet consists of a start and end char, data
ints, and a checksum int. For example:
Start,Data1,Data2,Data3,Checksum,End
@author:Kristian Charboneau
"""
class Packet:
    """Frames lists of ints as simple packets for serial transmission.

    Packet layout: start marker '<', the data ints, an XOR checksum int,
    and an end marker '>'. All methods operate on (and mutate) the list
    passed in.
    """

    def __init__(self):
        pass

    def gen_checksum(self, packet):
        """Return the XOR checksum of a list of ints."""
        checksum = 0
        for i in packet:
            checksum = checksum ^ i
        return checksum

    def to_packet(self, values):
        """Frame *values* in place: prepend '<', append checksum and '>'.

        The checksum is computed over the data ints only. Returns the
        same (mutated) list.
        """
        # Consistency fix: reuse gen_checksum instead of duplicating the loop.
        checksum = self.gen_checksum(values)
        values.insert(0, '<')
        values.append(checksum)
        values.append('>')
        return values

    def to_list(self, values):
        """Strip the control characters and checksum from a framed packet.

        Mutates and returns the same list.
        """
        values.remove('>')
        values.remove('<')
        values.pop()  # checksum is now the last element
        return values

    def validate(self, values):
        """Verify a framed packet's checksum.

        Returns True on success, False on failure (docstring previously
        claimed 1/0). Mutates the input: markers and checksum are removed.
        """
        values.remove('>')
        values.remove('<')
        packet_checksum = values.pop()
        # Consistency fix: reuse gen_checksum instead of duplicating the loop.
        if self.gen_checksum(values) == packet_checksum:
            return True
        else:
            return False
if __name__ == '__main__':
    # Quick demo: frame a sample payload and print the resulting packet.
    p = Packet()
    l = [1, 2, 0, 65, 66, 254]
    print("Packet:%s" % p.to_packet(l))
|
import socket
import threading
from enum import Enum
from datetime import datetime
class UserInfo:
    """Connection state the server keeps for one logged-in client."""

    def __init__(self, name, address, session_id):
        # Identity.
        self.name = name
        self.session_id = session_id
        # Networking / liveness.
        self.address = address
        self.last_ping = datetime.utcnow()
        # Reliable-delivery bookkeeping. The queue head is the server
        # request currently awaiting an ack from the client.
        self.message_queue = []
        self.from_packet_num = 1
        self.to_packet_num = 0
        self.sending_packet_num = 0
def to_bytes(from_string):
    """Encode a str as ASCII bytes (the wire protocol is ASCII-only)."""
    return bytes(from_string, 'ascii')
def to_string(from_bytes):
    """Decode ASCII bytes back into a str."""
    return str(from_bytes, 'ascii')
# Message framing: first byte distinguishes requests from acknowledgements.
REQUEST_HEADER = 0x63
ACK_HEADER = 0x95

# from server to client
S_OTHER_LOGIN = 0x00
S_OTHER_LOGOUT = 0x01
S_REQUEST_CONNECT = 0x03
S_CANCEL_REQUEST = 0x04
S_START_CONNECT = 0x05
S_REJECT_CONNECT = 0x06
S_INVALID_FORMAT = 0xf0
S_NO_AUTH = 0xf1
S_ERROR = 0xf2

# from client to server
R_LOGIN = 0x00
R_LOGOUT = 0x01
R_PING = 0x02
R_REQUEST_CONNECT = 0x03
R_CANCEL_REQUEST = 0x04
R_TEST = 0xe0

# Pre-built error payloads (type byte + ASCII text).
INVALID_PACKET_NUM_MESSAGE = bytes([S_INVALID_FORMAT]) + to_bytes("invalid packet number")
NO_AUTH_MESSAGE = bytes([S_NO_AUTH]) + to_bytes("unauthorized")
DUPLICATE_NAME_MESSAGE = bytes([S_ERROR]) + to_bytes("name already in use")
USER_NOT_FOUND_MESSAGE = bytes([S_ERROR]) + to_bytes("user not found")

# Seconds without a ping before a user is dropped as inactive.
INACTIVE_TIME_SECOND = 1000
class Server:
    """UDP chat server with a small reliable-request protocol.

    Every client request carries a 4-byte session id and a sequence number;
    the server acks each request and keeps a queue of its own outgoing
    requests, retransmitting the head until the client acks it.
    """

    def __init__(self, server_socket):
        self.logged_in_users = {}  # session id (bytes) -> UserInfo
        self.name_lookup = {}      # user name -> session id
        self.session_id_seed = 0
        self.sock = server_socket

    def generate_session_id(self):
        """Return a fresh 4-byte session id; last byte is a counter.

        NOTE(review): the counter byte overflows after 255 logins.
        """
        self.session_id_seed += 1
        return bytes([13, 85, 1, self.session_id_seed])

    def send_request(self, user, request_type, content):
        """Queue a server->client request; send immediately if nothing is pending."""
        message = bytes([REQUEST_HEADER]) + bytes([request_type]) + user.session_id + \
            int.to_bytes(user.to_packet_num, 4, 'big') + content
        user.to_packet_num += 1
        user.message_queue.append(message)
        # Bug fix: was bare `message_queue` (NameError) -- the queue lives on user.
        if len(user.message_queue) == 1:  # no message currently awaiting ack
            self.sock.sendto(message, user.address)

    def send_ack(self, user, request_type, content):
        """Ack the client's latest request; remember the ack for duplicate replays."""
        message = bytes([ACK_HEADER]) + bytes([request_type]) + user.session_id + \
            int.to_bytes(user.from_packet_num - 1, 4, 'big') + content
        self.sock.sendto(message, user.address)
        user.last_ack = message

    def handle_message(self, received_data, address):
        """Dispatch an incoming datagram by its header byte."""
        msg_type = received_data[0]
        if msg_type == REQUEST_HEADER:
            request_type = received_data[1]
            if request_type == R_LOGIN:
                self.handle_login(received_data[2:], address)
            else:
                self.handle_request_header(received_data[2:], request_type, address)
        elif msg_type == ACK_HEADER:
            self.handle_ack(received_data[1:])
        else:
            # Unknown header: echo a debug reply.
            sendmsg = "got it"
            self.sock.sendto(to_bytes(sendmsg), address)
            print(to_string(received_data))
            print("received message")

    def handle_request_header(self, data, request_type, address):
        """Validate session and sequence number, then dispatch a non-login request."""
        if (len(data) < 8):
            self.sock.sendto(NO_AUTH_MESSAGE, address)
            print("no session id / packet number")
            return
        if (data[:4] not in self.logged_in_users):
            # Bug fix: was `selt.sock` (NameError).
            self.sock.sendto(NO_AUTH_MESSAGE, address)
            print("invalid session id")
            return
        user = self.logged_in_users[data[:4]]
        packet_num = int.from_bytes(data[4:8], 'big')
        user.address = address
        if packet_num == user.from_packet_num:  # new request
            user.from_packet_num += 1
            if request_type == R_LOGOUT:
                self.handle_logout(user.session_id)
            elif request_type == R_PING:
                self.handle_ping(user.session_id)
            elif request_type == R_REQUEST_CONNECT:
                # Bug fix: these handlers are defined with an `address`
                # parameter that the calls were not passing.
                self.handle_request_connect(user.session_id, data[8:], address)
            elif request_type == R_CANCEL_REQUEST:
                self.handle_cancel_request(user.session_id, data[8:], address)
        elif packet_num == user.from_packet_num - 1:
            # Already processed but the client missed the ack: replay it.
            self.sock.sendto(user.last_ack, address)
        else:
            # Bug fix: was `selt.sock` (NameError).
            self.sock.sendto(INVALID_PACKET_NUM_MESSAGE, address)

    def handle_ack(self, data):
        """Drop the acked queued message and transmit the next one, if any."""
        if (len(data) < 8 or data[:4] not in self.logged_in_users):
            print("invalid ack")
            return
        user = self.logged_in_users[data[:4]]
        packet_num = int.from_bytes(data[4:8], 'big')
        if packet_num == user.sending_packet_num and user.message_queue:
            user.sending_packet_num += 1
            user.message_queue.pop(0)
            # Bug fix: was bare `message_queue` (NameError) in both lines below.
            if user.message_queue:
                self.sock.sendto(user.message_queue[0], user.address)
        else:
            print("unrequired ack number")

    def handle_login(self, content, address):
        """Register a new user and reply with the current user list.

        The ack payload is the new user's name followed by every logged-in
        name, NUL-separated; each existing user is also notified.
        """
        username = to_string(content)
        print("login from " + address[0] + ":" + str(address[1]) + " as " + username)
        if username in self.name_lookup:
            user = self.logged_in_users[self.name_lookup[username]]
            if user.address == address and user.from_packet_num == 1:
                # Same client retransmitting its login: replay the original ack.
                self.sock.sendto(user.last_ack, address)
            else:
                self.sock.sendto(DUPLICATE_NAME_MESSAGE, address)
            return
        response_str = username
        for user in self.logged_in_users.values():
            response_str += '\0'
            response_str += user.name
            self.send_request(user, S_OTHER_LOGIN, to_bytes(username))
        session_id = self.generate_session_id()
        new_user = UserInfo(username, address, session_id)
        self.logged_in_users[session_id] = new_user
        self.name_lookup[username] = session_id
        self.send_ack(new_user, R_LOGIN, to_bytes(response_str))

    def handle_logout(self, user_id):
        """Remove the user and notify everyone else of the logout."""
        if (user_id in self.logged_in_users):
            username = self.logged_in_users[user_id].name
            print(username + " logged out")
            del self.logged_in_users[user_id]
            del self.name_lookup[username]
            for user in self.logged_in_users.values():
                self.send_request(user, S_OTHER_LOGOUT, to_bytes(username))

    def handle_ping(self, user_id):
        """Refresh the user's liveness timestamp and ack the ping."""
        user = self.logged_in_users[user_id]
        user.last_ping = datetime.utcnow()
        self.send_ack(user, R_PING, bytes())

    def handle_request_connect(self, user_id, content, address):
        """Begin handling a connect request (incomplete feature).

        Bug fix: the existence check now precedes the lookup (the lookup
        used to raise KeyError first), and USER_NOT_FOUND_MESSAGE is sent
        as-is (it is already bytes; `bytes[...]` was a TypeError).
        """
        to_user = to_string(content)
        if to_user not in self.name_lookup:
            self.sock.sendto(USER_NOT_FOUND_MESSAGE, address)
            return
        to_id = self.name_lookup[to_user]
        # TODO: forward S_REQUEST_CONNECT to the target user (to_id).

    def handle_cancel_request(self, user_id, content, address):
        """TODO: cancel a pending connect request (not implemented)."""
        to_user = to_string(content)

    def handle_accept_connect(self, user_id, content, address):
        """TODO: not implemented."""
        pass

    def handle_reject_connect(self, user_id, content, address):
        """TODO: not implemented."""
        pass

    def remove_inactive_users(self):
        """Drop users whose last ping is too old; reschedules itself every 5s.

        NOTE(review): the Timer threads mutate the user dicts concurrently
        with the main receive loop without any locking.
        """
        threading.Timer(5.0, self.remove_inactive_users).start()
        now = datetime.utcnow()
        for user_id in list(self.logged_in_users.keys()):
            if (now - self.logged_in_users[user_id].last_ping).total_seconds() > INACTIVE_TIME_SECOND:
                name = self.logged_in_users[user_id].name
                del self.logged_in_users[user_id]
                del self.name_lookup[name]
                print("removed " + name)

    def start(self):
        """Run the receive loop until interrupted; always close the socket."""
        self.remove_inactive_users()  # kick off the periodic cleanup timer
        try:
            while True:
                received_data, addr = self.sock.recvfrom(65000)
                self.handle_message(received_data, addr)
        except KeyboardInterrupt:
            print("Interrupted!")
        finally:
            print("cleaning up")
            self.sock.close()
if __name__ == '__main__':
    # Bind a UDP socket on localhost and serve until interrupted.
    UDP_IP = "127.0.0.1"
    UDP_PORT = 9020
    server_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    server_socket.bind((UDP_IP, UDP_PORT))
    server = Server(server_socket)
    server.start()
from src.Utils import Pickler
from src.Utils.logger import logger
class SkuSingleton:
    """Lazy, process-wide holder for the unpickled SKU-matching data.

    The pickle is loaded at most once; every instance shares the same
    class-level object.
    """
    # Shared cache (name-mangled to _SkuSingleton__object).
    __object = None
    def __init__(self):
        if SkuSingleton.__object is None:
            logger.info('UNPICKLING SKU MATCHING NOTEBOOK')
            # NOTE(review): path is relative to the current working directory,
            # not to this module -- confirm the expected launch directory.
            SkuSingleton.__object = Pickler.unpickle_data('./src/sku_matchbook.pickle')
    def get_obj(self):
        """Return the shared object (None until the first instantiation)."""
        return SkuSingleton.__object
from __future__ import print_function
import sys
import os
import logging
import json
import copy
from os.path import dirname
from jsonschema import validate
import importlib
import pkgutil
import tempfile
import uuid
from halocli.exception import HaloPluginException
from halocli.util import Util
logger = logging.getLogger(__name__)
# NOTE(review): raising the *root* logger level at import time affects the
# whole application; usually this is left to the entry point.
logging.root.setLevel(logging.INFO)
"""
the bian lite plugin
---------------
1. rename bian swagger files to _lite suffix :
2. add "LITE" marker to swagger info block: "revision":"lite"
3. detect service domain name from info/title block (sd_name) : CurrentAccount
4. detect functional pattern name from config file or command line or from swagger file (fp_name) : FulfillmentArrangement
GENERAL:
5. remove all sd session methods with end with : activation,configuration,feedback
6. remove from all url the section containing sd-reference-id and the sd_name+fp_name -> "{sd-reference-id}/current-account-fulfillment-arrangement/":
this -> "/current-account/{sd-reference-id}/current-account-fulfillment-arrangement/{cr-reference-id}/issueddevice/{bq-reference-id}/update"
becomes this -> "/current-account/{cr-ref-id}/issueddevice/{bq-ref-id}/update"
the url is build with "/"+sd_name+"/{cr-ref-id}/"+bq_name+"/{bq-ref-id}/"+action_name
7. remove field sd_name+ServicingSessionReference : "currentAccountServicingSessionReference" from all return blocks
8. remove sd_name+fp_name : currentAccountFulfillmentArrangement in all response blocks field names and also in the definition models if relevant
9. remove all definitions which are not referenced in the swagger
RETRIEVE:
10. remove reporting block from each retrieve method return block - (sd+fp)InstanceReportRecord or (sd+fp)InstanceReport : currentAccountFulfillmentArrangementInstanceReportRecord
11. remove analysis block from each retrieve method return block - (sd+fp)InstanceAnalysis : currentAccountFulfillmentArrangementInstanceAnalysis
12. remove analysis block from each retrieve method return block - (sd+fp)RetrieveActionResponse : currentAccountFulfillmentArrangementRetrieveActionResponse
"""
class Plugin():
    """halocli plugin that generates a "lite" version of a BIAN swagger file.

    The transformations applied (session-endpoint removal, sd-reference
    stripping, property renaming, definition dropping) are listed in the
    module docstring above.
    """

    def __init__(self, halo):
        """Store the cli context and declare the plugin's commands and hooks."""
        self.halo = halo
        self.name = 'lite'
        self.desc = 'lite version of bian swagger file'
        # Command metadata consumed by the halocli framework.
        self.commands = {
            'create': {
                'usage': "Create a lite bian swagger file",
                'lifecycleEvents': ['generate', 'write'],
                'options': {
                    'destination': {
                        'usage': 'Path of the destination dir',
                        'shortcut': 'd',
                        'required': True
                    },
                    'path': {
                        'usage': 'Path of the source swagger file dir',
                        'shortcut': 'p',
                        'required': True
                    },
                    'file': {
                        'usage': 'add swagger file',
                        'shortcut': 'f'
                    },
                    'all': {
                        'usage': 'run all options',
                        'shortcut': 'a'
                    }
                }
            }
        }
        # Lifecycle hooks: three generate phases, then the write phase.
        self.hooks = {
            'before:create:generate': self.before_swagger_generate,
            'create:generate': self.swagger_generate,
            'after:create:generate': self.after_swagger_generate,
            'create:write': self.swagger_write
        }

    def run_plugin(self, options):
        """Entry point called by the framework with the parsed CLI options."""
        self.options = options

    def fix_props(self, props, sdfp):
        """Strip lite-excluded fields from a swagger properties dict, in place.

        Removes *ServicingSessionReference, *InstanceReportRecord /
        *InstanceReport, *InstanceAnalysis and *RetrieveActionResponse
        entries, and drops the sd+fp prefix (e.g.
        'currentAccountFulfillmentArrangement') from remaining key names.
        """
        propsx = copy.deepcopy(props)  # iterate a copy while mutating the original
        for name in propsx:
            if name.endswith("ServicingSessionReference"):
                del props[name]
                continue
            if name.endswith("InstanceReportRecord") or name.endswith("InstanceReport"):
                del props[name]
                continue
            if name.endswith("InstanceAnalysis"):
                del props[name]
                continue
            if name.endswith("RetrieveActionResponse"):
                del props[name]
                continue
            if name.startswith(sdfp):
                props[name.replace(sdfp, "")] = propsx[name]
                del props[name]
                continue

    def get_sdfph(self, data):
        """Return the hyphenated sd+fp path segment.

        Locates a path shaped like
        /<sd>/{sd-reference-id}<segment>/{cr-reference-id} and returns
        <segment>, e.g. '/current-account-fulfillment-arrangement'.
        Returns None when no such path exists.
        """
        for d in data['paths']:
            if d.endswith("/{cr-reference-id}"):
                j = d.index("/{cr-reference-id}")
                i = d.index("/{sd-reference-id}")
                return d[i + 18:j]  # 18 == len("/{sd-reference-id}")

    def get_sdfp(self, data):
        """Return the camelCase sd+fp name, e.g. 'currentAccountFulfillmentArrangement'."""
        s = self.get_sdfph(data)
        # Hyphenated -> camelCase: uppercase the letter following each '-'.
        while "-" in s:
            i = s.index("-")
            s = s[:i] + s[i + 1].swapcase() + s[i + 2:]
        return s.replace("/", "")

    def before_swagger_generate(self):
        """Parse the CLI options and load + validate the source swagger file."""
        # Bug fix: default every option attribute first. The optional options
        # ('file', 'all') previously left self.swagger_source / self.all unset,
        # causing AttributeError later in this method and in swagger_generate.
        self.destination = None
        self.path = None
        self.all = False
        self.swagger_source = None
        for o in self.options:
            if 'destination' in o:
                self.destination = o['destination']
            if 'path' in o:
                self.path = o['path']
            if 'all' in o:
                self.all = o['all']
            if 'file' in o:
                self.swagger_source = o['file']
        if not self.destination:
            raise Exception("no destination found")
        if self.swagger_source:
            urls = os.path.join(self.path, self.swagger_source)
        else:
            # NOTE(review): with no 'file' option this joins '.' with None and
            # fails; the commented lookup suggests the source should come from
            # halo settings -- confirm intended behavior.
            urls = os.path.join('.', self.swagger_source)  # self.halo.settings['mservices'][self.service]['record']['path']
        try:
            self.data = Util.analyze_swagger(urls)
        except Exception as e:
            self.halo.cli.error("error in source swagger file validation:" + self.swagger_source + "->" + str(e))
            raise e

    def swagger_generate(self):
        """Apply the lite transformations (see module docstring) to self.data."""
        data = self.data
        sdfph = self.get_sdfph(data)  # e.g. "/current-account-fulfillment-arrangement"
        self.halo.cli.log("sdfph:" + sdfph)
        sdfp = self.get_sdfp(data)  # e.g. "currentAccountFulfillmentArrangement"
        self.halo.cli.log("sdfp:" + sdfp)
        # Snapshot each path item so paths can be renamed while iterating.
        tmp = {}
        data["info"]["title"] = data["info"]["title"] + "(Lite)"
        for d in data['paths']:
            m = data['paths'][d]
            new_m = copy.deepcopy(m)
            tmp[d] = new_m
        if self.all:
            for k in tmp:
                new_m = tmp[k]
                path = k
                # Rule 5: drop SD session endpoints entirely.
                if path.endswith("/activation") or path.endswith("/configuration") or path.endswith("/feedback"):
                    del data['paths'][k]
                    continue
                # Rule 6: remove the {sd-reference-id} segment; shorten ids.
                if path.find("/{sd-reference-id}") >= 0:
                    del data['paths'][k]
                    path = path.replace("/{sd-reference-id}", "").replace("-reference-", "-ref-")
                # Rule 6 (cont.): remove the last occurrence of the sd+fp segment.
                if path.find(sdfph) >= 0:
                    if k in data['paths']:
                        del data['paths'][k]
                    occr = path.rfind(sdfph)
                    if occr > 0:
                        path = path[:occr] + path[occr:].replace(sdfph, "")
                for o in new_m:  # get,put,post,delete
                    self.halo.cli.log("path:" + path + " op:" + o)
                    rem_p = None
                    for p in new_m[o]['parameters']:
                        self.halo.cli.log(path + ":" + p['name'])
                        if p['name'].find("sd-reference-id") >= 0:
                            rem_p = p  # remember for removal after the loop
                            continue
                        if p['name'].find("-reference-") >= 0:
                            p['name'] = p['name'].replace("-reference-", "-ref-")
                        if p['name'].find("body") >= 0:
                            props = p['schema']['properties']
                            self.fix_props(props, sdfp)
                    if rem_p:
                        new_m[o]['parameters'].remove(rem_p)
                    # Rules 7-8: clean the success-response schema (200 or 201).
                    if '200' in new_m[o]['responses']:
                        if 'items' in new_m[o]['responses']['200']['schema']:
                            if 'properties' in new_m[o]['responses']['200']['schema']['items']:
                                props = new_m[o]['responses']['200']['schema']['items']['properties']
                            else:
                                props = new_m[o]['responses']['200']['schema']['items']
                        else:
                            props = new_m[o]['responses']['200']['schema']['properties']
                    else:
                        if 'items' in new_m[o]['responses']['201']['schema']:
                            props = new_m[o]['responses']['201']['schema']['items']['properties']
                        else:
                            props = new_m[o]['responses']['201']['schema']['properties']
                    self.fix_props(props, sdfp)
                data['paths'][path] = new_m
        # Rule 9: definitions are dropped entirely.
        del data['definitions']
        data['definitions'] = {}
        self.halo.cli.log("finished extend successfully")

    def after_swagger_generate(self):
        """Validate the transformed swagger before it is written out."""
        data = self.data
        try:
            Util.validate_swagger(data)
        except Exception as e:
            self.halo.cli.error("error in generated swagger file validation:" + self.swagger_source + "->" + str(e))
            raise e

    def swagger_write(self):
        """Lifecycle hook for the write phase."""
        self.file_write()

    def file_write(self):
        """Write self.data as <source>_lite.json in the destination dir.

        Falls back to a uuid-named file in a temp dir when no destination
        is set. Raises HaloPluginException on any failure.
        """
        try:
            path = self.destination
            if path:
                file_path = os.path.join(path, str(self.swagger_source.replace(".json", "_lite.json")))
            else:
                dir_tmp = tempfile.TemporaryDirectory()
                file_path = os.path.join(dir_tmp.name, str(uuid.uuid4()) + "_lite.json")
            logger.debug(file_path)
            # Create/truncate the file, then dump the swagger dict into it.
            f = open(file_path, "w")
            f.write("")
            f.close()
            Util.dump_file(file_path, self.data)
            logging.debug("Swagger file generated:" + file_path)
        except Exception as e:
            raise HaloPluginException(str(e))
|
__author__ = 'hassaankhan'
import os
import shapefile
# Directory containing this module; shapefile paths resolve relative to it.
# (Removed the `global` statements: `global` at module scope is a no-op and
# only misleads readers -- names assigned at module level are already global.)
basepath = os.path.split(__file__)[0]

# Sub-folder (relative to basepath) where the shapefile data lives.
shapefile_folder = 'data/shapefiles'
def get_shapefile(filename):
    """Read *filename* from the shapefile data folder and return its shape records."""
    reader = shapefile.Reader(os.path.join(basepath, shapefile_folder, filename))
    return reader.shapeRecords()
|
t = int(input())  # NOTE(review): read but never used -- looks like a leftover test-case count
n, q = map(int, input().split())
s = input()

# Collect every distinct substring of s (O(n^2) substrings).
result = set()
for i in range (len (s)):
    temp = ""
    for j in range (i, len(s)):
        temp += s[j];
        result.add (temp)
result = sorted (result)
print (result)

# Answer q queries: the k-th substring in sorted order (1-based).
for i in range (q):
    k = int(input())
    if k <= len(result):
        # NOTE(review): this prints the count of distinct characters in the
        # k-th substring -- confirm that matches the intended problem output.
        print (len(set(result[k-1])))
    else:
        print (-1)
|
# Tuples are immutable sequences; indexing works like lists.
coordinates = (4, 5)
#coordinates[1] = 10 #tuples cannot be edited (would raise TypeError)
print(coordinates[1])  # prints 5
|
# Read five name -> salary entries from the user.
employees = dict()
for _ in range(5):
    name = input("Enter name:")
    salary = int(input("Enter salary:"))
    employees[name] = salary

# The three highest salary values (duplicates possible).
best_three_salaries = sorted(employees.values())[-3:]

# Print every employee whose salary ranks in the top three.
for name in employees.keys():
    salary = employees[name]
    if salary in best_three_salaries:
        # Bug fix: was print(sorted(name)), which printed the *letters* of the
        # name sorted alphabetically instead of the name itself.
        print(name)
|
"""
Author : Lily
Date : 2018-09-21
QQ : 339600718
酷动数码 Coodoo Coodoo-s
抓取思路:数据在页面上,需要翻页,但页面上最大页数,只能从下一页中拿到下一页的页数,再获取下一页的数据
当没有下一页这个标签时,停止抓取。
URL :http://www.coodoo.com.cn/Stores
"""
import re
import datetime
import requests
from lxml import etree
filename = "Coodoo-s" + re.sub('[^0-9]', '', str(datetime.datetime.now())) + ".csv"
f = open(filename, 'w', encoding='utf-8')
f.write('name,address,phone,\n')
n = 1
url = "http://www.coodoo.com.cn/Stores?page="
if n is not None:
html = requests.get(url+str(n)).text
html_lxml = etree.HTML(html)
stores = html_lxml.xpath('//*[@id="main"]/div')
for store in stores:
name = store.xpath('./dl/dd/h1/text()')[0]
address = store.xpath('./dl/dd/text()[1]')[0]
phone = store.xpath('./dl/dd/text()[2]')[0]
f.write(name)
|
from django.contrib import admin
from django.urls import path, include
from rest_framework import routers
from api import views
from rest_framework_simplejwt.views import TokenObtainPairView, TokenRefreshView
# DRF router: generates the standard list/detail routes for each viewset.
router = routers.DefaultRouter()
router.register('users', views.UserViewSet)
router.register('books', views.BookViewSet)
router.register('genre', views.LiteraryGenreViewSet)
router.register('editor', views.EditorViewSet)

urlpatterns = [
    path('', include(router.urls)),                    # router-generated API endpoints
    path('admin/', admin.site.urls),                   # Django admin
    path('api/token/', TokenObtainPairView.as_view()),         # JWT: obtain token pair
    path('api/token/refresh/', TokenRefreshView.as_view()),    # JWT: refresh access token
    path('genre/books', views.BooksPerGenre.as_view()),        # presumably books grouped by genre
    path('books/count', views.BooksInLibrary.as_view()),       # presumably total book count
    path('user/books', views.BooksPerUser.as_view())           # presumably books per user
]
|
from Base import *
from Object import *
'''
Esta funcao cria um objeto do tipo Chessboard e o retorna
@PARAMETROS
id_tex_livre - primeiro id de textura nao utilizado - passado como lista de tamanho 1
vertices_list - lista de coordenadas de vertices
textures_coord_list - lista de coordenadas de textura
normals_list - lista de normais de vertices
@RETORNO
object - o objeto Chessboard criado
'''
def cria_chessboard(id_tex_livre, vertices_list, textures_coord_list, normals_list):
    """Create and return a Chessboard object.

    Args:
        id_tex_livre: first unused texture id, passed as a single-element list.
        vertices_list: list of vertex coordinates.
        textures_coord_list: list of texture coordinates.
        normals_list: list of vertex normals.

    Returns:
        The created Chessboard Object.
    """
    # Collect the texture file names used by the model.
    textures_names = []
    textures_names.append("Chessboard/10586_Chess Board_v1_diffuse.JPG")
    filename = "Chessboard/chessboard.obj"
    mtl_filename = "Chessboard/chessboard.mtl"
    # Build the object.
    # NOTE(review): the positional args (50, 968, 112, 0, -pi/2, 0, 0.03)
    # presumably encode position, rotation and scale -- confirm against the
    # Object constructor's signature.
    chessboard = Object(filename, mtl_filename, textures_names, 50, 968, 112, 0, -math.pi/2, 0, 0.03, id_tex_livre, vertices_list, textures_coord_list, normals_list)
    return chessboard
#coding = utf-8
import socket
import threading
import time
# NOTE(review): `global` at module level is a no-op and can be removed.
global UID

HOST = '127.0.0.1'   # chat server address
PORT = 38557         # chat server port
UID = ''             # this client's id, set after successful registration
SUCCESS = 'succeed'  # server's literal reply on successful registration
class Receive(threading.Thread):
    """Background thread printing messages pushed by the server (Python 2)."""
    # NOTE(review): `global UID` in a class body has no effect.
    global UID
    def __init__(self, conn):
        self.conn = conn
        # Flag kept for potential external shutdown; the loop otherwise exits
        # only on an empty read (connection closed) or a socket error.
        self.is_receiving = True
        threading.Thread.__init__(self)
    def run(self):
        # Blocking receive loop.
        while self.is_receiving:
            try:
                server_msg = self.conn.recv(65535)
                if not len(server_msg):
                    break  # empty read -> server closed the connection
                print ''
                print server_msg
            except Exception, error:
                print error
                break
def regist_uid(client, uid):
    """Register *uid* with the server; return True on success (Python 2).

    Protocol: send '/<uid>' and expect the literal reply 'succeed'.
    Any socket error is printed and treated as failure.
    """
    try:
        regist_data = '/%s' % uid
        client.send(regist_data)
        recv_data = client.recv(65535)
        print recv_data
        if recv_data == SUCCESS:
            return True
        else:
            return False
    except Exception, error:
        print error
        return False
if __name__ == '__main__':
    # Keep prompting for a username until registration succeeds.
    while True:
        uid = raw_input('please enter your username:')
        try:
            my_client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            my_client.connect((HOST, PORT))
            if regist_uid(my_client, uid):
                UID = uid
                break
        except Exception, error:
            print error
            print 'regist failed, try another username'
        else:
            # NOTE(review): this `else` binds to the try statement; leaving the
            # try via `break` (the success path above) SKIPS it, so the chat
            # loop below appears unreachable on success -- confirm the intended
            # control flow against the original file's indentation.
            print 'login succeed, you can start chatting now'
            print 'format: DEST_ID, content\n'
            receive = Receive(my_client)
            receive.start()
            # Chat loop: 'exit' quits; messages are sent as 'UID,DEST_ID,content'.
            while True:
                try:
                    data = raw_input('[%s]' %UID)
                    if data.strip() == '':
                        continue
                    elif data == 'exit':
                        break
                    send_data = ','.join([UID, data])
                    my_client.send(send_data)
                    print data
                except Exception, error:
                    print error
    # Half-close (no more sends), then release the socket.
    my_client.shutdown(socket.SHUT_WR)
    my_client.close()
#!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
rayleigh.py
=============================================
Without selection scatter distrib plots from
arrays created by:
* optixrap/tests/ORayleighTest.cc
* cfg4/tests/OpRayleighTest.cc
"""
import os, sys, logging, numpy as np
log = logging.getLogger(__name__)
import matplotlib.pyplot as plt
from opticks.ana.nbase import vnorm, costheta_
# Row layout of each photon record: pre/post-scatter momentum and polarization.
OLDMOM,OLDPOL,NEWMOM,NEWPOL = 0,1,2,3
# Cartesian component indices.
X,Y,Z=0,1,2
def dotmom_(a):
    """Return cos(theta) between pre- and post-scatter momentum, per photon."""
    return costheta_(a[:, OLDMOM, :3], a[:, NEWMOM, :3])
def dotpol_(a):
    """Return cos(theta) between pre- and post-scatter polarization, per photon."""
    return costheta_(a[:, OLDPOL, :3], a[:, NEWPOL, :3])
if __name__ == '__main__':
    # Load scatter samples from the two implementations being compared:
    # ok.npy (ORayleighTest) and cfg4.npy (OpRayleighTest) -- see docstring.
    aa = np.load(os.path.expandvars("$TMP/RayleighTest/ok.npy"))
    bb = np.load(os.path.expandvars("$TMP/RayleighTest/cfg4.npy"))

    bins = 100
    nx = 4   # subplot grid: 4 columns
    ny = 2   # subplot grid: 2 rows

    # (subplot index, values from aa, values from bb, label) per quantity.
    qwns = [
        (1,aa[:,NEWMOM,X],bb[:,NEWMOM,X],"momx"),
        (2,aa[:,NEWMOM,Y],bb[:,NEWMOM,Y],"momy"),
        (3,aa[:,NEWMOM,Z],bb[:,NEWMOM,Z],"momz"),
        (4,dotmom_(aa)   ,dotmom_(bb)   ,"dotmom"),
        (5,aa[:,NEWPOL,X],bb[:,NEWPOL,X],"polx"),
        (6,aa[:,NEWPOL,Y],bb[:,NEWPOL,Y],"poly"),
        (7,aa[:,NEWPOL,Z],bb[:,NEWPOL,Z],"polz"),
        (8,dotpol_(aa)   ,dotpol_(bb)   ,"dotpol"),
    ]

    # Overlay the two distributions for each quantity, one subplot each.
    for i,a,b,label in qwns:
        plt.subplot(ny, nx, i)
        plt.hist(a, bins=bins, histtype="step", label=label)
        plt.hist(b, bins=bins, histtype="step", label=label)
        pass
    plt.show()
|
from pyramid.registry import Registry
from kotti.testing import DummyRequest
from kotti.testing import UnitTestBase
class TestEvents(UnitTestBase):
    """Tests for kotti.events: owner assignment and SQLAlchemy object events."""

    def setUp(self):
        # We're jumping through some hoops to allow the event handlers
        # to be able to do 'pyramid.threadlocal.get_current_request'
        # and 'authenticated_userid'.
        registry = Registry('testing')
        request = DummyRequest()
        request.registry = registry
        super(TestEvents, self).setUp(registry=registry, request=request)
        self.config.include('kotti.events')

    def test_owner(self):
        """A newly added node gets the authenticated user as owner and
        'role:owner'; subitems inherit the owner but not a fresh role row."""
        from kotti import DBSession
        from kotti.resources import get_root
        from kotti.resources import Content
        from kotti.security import list_groups
        from kotti.security import list_groups_raw
        from kotti.util import clear_cache

        session = DBSession()
        self.config.testing_securitypolicy(userid='bob')
        root = get_root()
        child = root[u'child'] = Content()
        session.flush()
        self.assertEqual(child.owner, u'bob')
        self.assertEqual(list_groups(u'bob', child), [u'role:owner'])

        clear_cache()

        # The event listener does not set the role again for subitems:
        grandchild = child[u'grandchild'] = Content()
        session.flush()
        self.assertEqual(grandchild.owner, u'bob')
        self.assertEqual(list_groups(u'bob', grandchild), [u'role:owner'])
        self.assertEqual(len(list_groups_raw(u'bob', grandchild)), 0)

    def test_sqlalchemy_events(self):
        """Insert/update/delete each fire exactly one object event carrying
        the affected object."""
        from kotti import events
        from kotti import DBSession
        from kotti.resources import get_root
        from kotti.resources import Content

        # Collect fired events per kind.
        insert_events = []
        def insert(event):
            insert_events.append(event)
        update_events = []
        def update(event):
            update_events.append(event)
        delete_events = []
        def delete(event):
            delete_events.append(event)

        # Register listeners for all object types (None = any).
        lis = events.objectevent_listeners
        lis[(events.ObjectInsert, None)].append(insert)
        lis[(events.ObjectUpdate, None)].append(update)
        lis[(events.ObjectDelete, None)].append(delete)

        # Insert fires exactly one ObjectInsert.
        root = get_root()
        child = root[u'child'] = Content()
        DBSession.flush()
        self.assertEqual(
            (len(insert_events), len(update_events), len(delete_events)),
            (1, 0, 0))
        self.assertEqual(insert_events[0].object, child)

        # Update fires exactly one ObjectUpdate.
        child.title = u"Bar"
        DBSession.flush()
        self.assertEqual(
            (len(insert_events), len(update_events), len(delete_events)),
            (1, 1, 0))
        self.assertEqual(update_events[0].object, child)

        # Delete fires exactly one ObjectDelete.
        DBSession.delete(child)
        DBSession.flush()
        self.assertEqual(
            (len(insert_events), len(update_events), len(delete_events)),
            (1, 1, 1))
        self.assertEqual(delete_events[0].object, child)
|
# -*- coding: utf-8 -*-
# flake8: noqa
from __future__ import unicode_literals
from django.db import models, migrations
import webplatformcompat.validators
import webplatformcompat.fields
import django_extensions.db.fields
import django_extensions.db.fields.json
import mptt.fields
import sortedm2m.fields
import django.utils.timezone
from django.conf import settings
class Migration(migrations.Migration):
    """Initial schema for the webplatformcompat app.

    Auto-generated Django migration. Creates the core resources (Browser,
    Feature, Maturity, Section, Specification, Support, Version), a
    Changeset audit model, and one Historical* shadow table per resource.
    The Historical* models carry the history_id/history_date/history_type/
    history_user columns — presumably django-simple-history style tables,
    extended with a history_changeset FK; confirm against the app's models.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # --- Core public resources --------------------------------------
        migrations.CreateModel(
            name='Browser',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('slug', models.SlugField(help_text='Unique, human-friendly slug.', unique=True)),
                ('name', webplatformcompat.fields.TranslatedField(help_text='Branding name of browser, client, or platform.', validators=[webplatformcompat.validators.LanguageDictValidator(False)])),
                ('note', webplatformcompat.fields.TranslatedField(help_text='Extended information about browser, client, or platform.', null=True, blank=True, validators=[webplatformcompat.validators.LanguageDictValidator(False)])),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # Changeset groups edits and is referenced by every Historical* row.
        migrations.CreateModel(
            name='Changeset',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created', django_extensions.db.fields.CreationDateTimeField(default=django.utils.timezone.now, editable=False, blank=True)),
                ('modified', django_extensions.db.fields.ModificationDateTimeField(default=django.utils.timezone.now, editable=False, blank=True)),
                ('closed', models.BooleanField(default=False, help_text='Is the changeset closed to new changes?')),
                ('target_resource_type', models.CharField(blank=True, help_text='Type of target resource', max_length=12, choices=[('browsers', 'browsers'), ('features', 'features'), ('maturities', 'maturities'), ('sections', 'sections'), ('specifications', 'specifications'), ('supports', 'supports'), ('versions', 'versions')])),
                ('target_resource_id', models.PositiveIntegerField(default=0, help_text='ID of target resource')),
                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # Feature is an MPTT tree (lft/rght/tree_id/level are managed by mptt).
        migrations.CreateModel(
            name='Feature',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('slug', models.SlugField(help_text='Unique, human-friendly slug.', unique=True)),
                ('mdn_path', models.CharField(help_text='The path to the page on MDN that this feature was first scraped from. May be used in UX or for debugging import scripts.', max_length=255, blank=True)),
                ('experimental', models.BooleanField(default=False, help_text='True if a feature is considered experimental, such as being non-standard or part of an non-ratified spec.')),
                ('standardized', models.BooleanField(default=True, help_text='True if a feature is described in a standards-track spec, regardless of the spec\u2019s maturity.')),
                ('stable', models.BooleanField(default=True, help_text='True if a feature is considered suitable for production websites.')),
                ('obsolete', models.BooleanField(default=False, help_text='True if a feature should not be used in new development.')),
                ('name', webplatformcompat.fields.TranslatedField(help_text='Feature name, in canonical or localized form.', validators=[webplatformcompat.validators.LanguageDictValidator(True)])),
                ('lft', models.PositiveIntegerField(editable=False, db_index=True)),
                ('rght', models.PositiveIntegerField(editable=False, db_index=True)),
                ('tree_id', models.PositiveIntegerField(editable=False, db_index=True)),
                ('level', models.PositiveIntegerField(editable=False, db_index=True)),
                ('parent', mptt.fields.TreeForeignKey(related_name='children', blank=True, to='webplatformcompat.Feature', help_text='Feature set that contains this feature', null=True)),
            ],
            options={
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        # --- Historical shadow tables (one per resource) -----------------
        migrations.CreateModel(
            name='HistoricalBrowser',
            fields=[
                ('id', models.IntegerField(verbose_name='ID', db_index=True, auto_created=True, blank=True)),
                ('slug', models.SlugField(help_text='Unique, human-friendly slug.')),
                ('name', webplatformcompat.fields.TranslatedField(help_text='Branding name of browser, client, or platform.', validators=[webplatformcompat.validators.LanguageDictValidator(False)])),
                ('note', webplatformcompat.fields.TranslatedField(help_text='Extended information about browser, client, or platform.', null=True, blank=True, validators=[webplatformcompat.validators.LanguageDictValidator(False)])),
                ('history_id', models.AutoField(serialize=False, primary_key=True)),
                ('history_date', models.DateTimeField()),
                ('history_type', models.CharField(max_length=1, choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')])),
                ('history_changeset', models.ForeignKey(related_name='historical_browsers', to='webplatformcompat.Changeset')),
                ('history_user', models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True)),
            ],
            options={
                'ordering': ('-history_date', '-history_id'),
                'verbose_name': 'historical browser',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='HistoricalFeature',
            fields=[
                ('id', models.IntegerField(verbose_name='ID', db_index=True, auto_created=True, blank=True)),
                ('slug', models.SlugField(help_text='Unique, human-friendly slug.')),
                ('mdn_path', models.CharField(help_text='The path to the page on MDN that this feature was first scraped from. May be used in UX or for debugging import scripts.', max_length=255, blank=True)),
                ('experimental', models.BooleanField(default=False, help_text='True if a feature is considered experimental, such as being non-standard or part of an non-ratified spec.')),
                ('standardized', models.BooleanField(default=True, help_text='True if a feature is described in a standards-track spec, regardless of the spec\u2019s maturity.')),
                ('stable', models.BooleanField(default=True, help_text='True if a feature is considered suitable for production websites.')),
                ('obsolete', models.BooleanField(default=False, help_text='True if a feature should not be used in new development.')),
                ('name', webplatformcompat.fields.TranslatedField(help_text='Feature name, in canonical or localized form.', validators=[webplatformcompat.validators.LanguageDictValidator(True)])),
                ('parent_id', models.IntegerField(help_text='Feature set that contains this feature', null=True, db_index=True, blank=True)),
                ('lft', models.PositiveIntegerField(editable=False, db_index=True)),
                ('rght', models.PositiveIntegerField(editable=False, db_index=True)),
                ('tree_id', models.PositiveIntegerField(editable=False, db_index=True)),
                ('level', models.PositiveIntegerField(editable=False, db_index=True)),
                ('sections', django_extensions.db.fields.json.JSONField(default='[]')),
                ('history_id', models.AutoField(serialize=False, primary_key=True)),
                ('history_date', models.DateTimeField()),
                ('history_type', models.CharField(max_length=1, choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')])),
                ('history_changeset', models.ForeignKey(related_name='historical_features', to='webplatformcompat.Changeset')),
                ('history_user', models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True)),
            ],
            options={
                'ordering': ('-history_date', '-history_id'),
                'verbose_name': 'historical feature',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='HistoricalMaturity',
            fields=[
                ('id', models.IntegerField(verbose_name='ID', db_index=True, auto_created=True, blank=True)),
                ('slug', models.SlugField(help_text='Unique, human-friendly slug, sourced from the KumaScript macro Spec2')),
                ('name', webplatformcompat.fields.TranslatedField(help_text='Name of maturity', validators=[webplatformcompat.validators.LanguageDictValidator(False)])),
                ('history_id', models.AutoField(serialize=False, primary_key=True)),
                ('history_date', models.DateTimeField()),
                ('history_type', models.CharField(max_length=1, choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')])),
                ('history_changeset', models.ForeignKey(related_name='historical_maturities', to='webplatformcompat.Changeset')),
                ('history_user', models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True)),
            ],
            options={
                'ordering': ('-history_date', '-history_id'),
                'verbose_name': 'historical maturity',
                'verbose_name_plural': 'historical_maturities',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='HistoricalSection',
            fields=[
                ('id', models.IntegerField(verbose_name='ID', db_index=True, auto_created=True, blank=True)),
                ('specification_id', models.IntegerField(db_index=True, null=True, blank=True)),
                ('number', webplatformcompat.fields.TranslatedField(help_text='Section number', blank=True, validators=[webplatformcompat.validators.LanguageDictValidator(False)])),
                ('name', webplatformcompat.fields.TranslatedField(help_text='Name of section, without section number', validators=[webplatformcompat.validators.LanguageDictValidator(False)])),
                ('subpath', webplatformcompat.fields.TranslatedField(help_text='A subpage (possible with an #anchor) to get to the subsection in the specification.', blank=True, validators=[webplatformcompat.validators.LanguageDictValidator(False)])),
                ('note', webplatformcompat.fields.TranslatedField(help_text='Notes for this section', blank=True, validators=[webplatformcompat.validators.LanguageDictValidator(False)])),
                ('_order', models.IntegerField(editable=False)),
                ('history_id', models.AutoField(serialize=False, primary_key=True)),
                ('history_date', models.DateTimeField()),
                ('history_type', models.CharField(max_length=1, choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')])),
                ('history_changeset', models.ForeignKey(related_name='historical_sections', to='webplatformcompat.Changeset')),
                ('history_user', models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True)),
            ],
            options={
                'ordering': ('-history_date', '-history_id'),
                'verbose_name': 'historical section',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='HistoricalSpecification',
            fields=[
                ('id', models.IntegerField(verbose_name='ID', db_index=True, auto_created=True, blank=True)),
                ('maturity_id', models.IntegerField(db_index=True, null=True, blank=True)),
                ('slug', models.SlugField(help_text='Unique, human-friendly slug')),
                ('mdn_key', models.CharField(help_text='Key used in the KumaScript macro SpecName', max_length=30, blank=True)),
                ('name', webplatformcompat.fields.TranslatedField(help_text='Name of specification', validators=[webplatformcompat.validators.LanguageDictValidator(False)])),
                ('uri', webplatformcompat.fields.TranslatedField(help_text='Specification URI, without subpath and anchor', validators=[webplatformcompat.validators.LanguageDictValidator(False)])),
                ('history_id', models.AutoField(serialize=False, primary_key=True)),
                ('history_date', models.DateTimeField()),
                ('history_type', models.CharField(max_length=1, choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')])),
                ('history_changeset', models.ForeignKey(related_name='historical_specifications', to='webplatformcompat.Changeset')),
                ('history_user', models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True)),
            ],
            options={
                'ordering': ('-history_date', '-history_id'),
                'verbose_name': 'historical specification',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='HistoricalSupport',
            fields=[
                ('id', models.IntegerField(verbose_name='ID', db_index=True, auto_created=True, blank=True)),
                ('version_id', models.IntegerField(db_index=True, null=True, blank=True)),
                ('feature_id', models.IntegerField(db_index=True, null=True, blank=True)),
                ('support', models.CharField(default='yes', help_text='Does the browser version support this feature?', max_length=10, choices=[('yes', 'yes'), ('no', 'no'), ('partial', 'partial'), ('unknown', 'unknown'), ('never', 'never')])),
                ('prefix', models.CharField(help_text='Prefix to apply to the feature name.', max_length=20, blank=True)),
                ('prefix_mandatory', models.BooleanField(default=False, help_text='Is the prefix required?')),
                ('alternate_name', models.CharField(help_text='Alternate name for this feature.', max_length=50, blank=True)),
                ('alternate_mandatory', models.BooleanField(default=False, help_text='Is the alternate name required?')),
                ('requires_config', models.CharField(help_text='A configuration string to enable the feature.', max_length=100, blank=True)),
                ('default_config', models.CharField(help_text='The configuration string in the shipping browser.', max_length=100, blank=True)),
                ('protected', models.BooleanField(default=False, help_text="True if feature requires additional steps to enable in order to protect the user's security or privacy.")),
                ('note', webplatformcompat.fields.TranslatedField(help_text='Short note on support, designed for inline display.', null=True, blank=True, validators=[webplatformcompat.validators.LanguageDictValidator(False)])),
                ('footnote', webplatformcompat.fields.TranslatedField(help_text='Long note on support, designed for display after a compatiblity table, in MDN wiki format.', null=True, blank=True, validators=[webplatformcompat.validators.LanguageDictValidator(False)])),
                ('history_id', models.AutoField(serialize=False, primary_key=True)),
                ('history_date', models.DateTimeField()),
                ('history_type', models.CharField(max_length=1, choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')])),
                ('history_changeset', models.ForeignKey(related_name='historical_supports', to='webplatformcompat.Changeset')),
                ('history_user', models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True)),
            ],
            options={
                'ordering': ('-history_date', '-history_id'),
                'verbose_name': 'historical support',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='HistoricalVersion',
            fields=[
                ('id', models.IntegerField(verbose_name='ID', db_index=True, auto_created=True, blank=True)),
                ('browser_id', models.IntegerField(db_index=True, null=True, blank=True)),
                ('version', models.CharField(help_text='Version string.', max_length=20, blank=True)),
                ('release_day', models.DateField(help_text='Day of release to public, ISO 8601 format.', null=True, blank=True)),
                ('retirement_day', models.DateField(help_text='Day this version stopped being supported, ISO 8601 format.', null=True, blank=True)),
                ('status', models.CharField(default='unknown', max_length=15, choices=[('unknown', 'unknown'), ('current', 'current'), ('future', 'future'), ('retired', 'retired'), ('beta', 'beta'), ('retired beta', 'retired beta')])),
                ('release_notes_uri', webplatformcompat.fields.TranslatedField(help_text='URI of release notes.', null=True, blank=True, validators=[webplatformcompat.validators.LanguageDictValidator(False)])),
                ('note', webplatformcompat.fields.TranslatedField(help_text='Notes about this version.', null=True, blank=True, validators=[webplatformcompat.validators.LanguageDictValidator(False)])),
                ('_order', models.IntegerField(editable=False)),
                ('history_id', models.AutoField(serialize=False, primary_key=True)),
                ('history_date', models.DateTimeField()),
                ('history_type', models.CharField(max_length=1, choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')])),
                ('history_changeset', models.ForeignKey(related_name='historical_versions', to='webplatformcompat.Changeset')),
                ('history_user', models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True)),
            ],
            options={
                'ordering': ('-history_date', '-history_id'),
                'verbose_name': 'historical version',
            },
            bases=(models.Model,),
        ),
        # --- Remaining core resources ------------------------------------
        migrations.CreateModel(
            name='Maturity',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('slug', models.SlugField(help_text='Unique, human-friendly slug, sourced from the KumaScript macro Spec2', unique=True)),
                ('name', webplatformcompat.fields.TranslatedField(help_text='Name of maturity', validators=[webplatformcompat.validators.LanguageDictValidator(False)])),
            ],
            options={
                'verbose_name_plural': 'maturities',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Section',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('number', webplatformcompat.fields.TranslatedField(help_text='Section number', blank=True, validators=[webplatformcompat.validators.LanguageDictValidator(False)])),
                ('name', webplatformcompat.fields.TranslatedField(help_text='Name of section, without section number', validators=[webplatformcompat.validators.LanguageDictValidator(False)])),
                ('subpath', webplatformcompat.fields.TranslatedField(help_text='A subpage (possible with an #anchor) to get to the subsection in the specification.', blank=True, validators=[webplatformcompat.validators.LanguageDictValidator(False)])),
                ('note', webplatformcompat.fields.TranslatedField(help_text='Notes for this section', blank=True, validators=[webplatformcompat.validators.LanguageDictValidator(False)])),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Specification',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('slug', models.SlugField(help_text='Unique, human-friendly slug', unique=True)),
                ('mdn_key', models.CharField(help_text='Key used in the KumaScript macro SpecName', max_length=30, blank=True)),
                ('name', webplatformcompat.fields.TranslatedField(help_text='Name of specification', validators=[webplatformcompat.validators.LanguageDictValidator(False)])),
                ('uri', webplatformcompat.fields.TranslatedField(help_text='Specification URI, without subpath and anchor', validators=[webplatformcompat.validators.LanguageDictValidator(False)])),
                ('maturity', models.ForeignKey(related_name='specifications', to='webplatformcompat.Maturity')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Support',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('support', models.CharField(default='yes', help_text='Does the browser version support this feature?', max_length=10, choices=[('yes', 'yes'), ('no', 'no'), ('partial', 'partial'), ('unknown', 'unknown'), ('never', 'never')])),
                ('prefix', models.CharField(help_text='Prefix to apply to the feature name.', max_length=20, blank=True)),
                ('prefix_mandatory', models.BooleanField(default=False, help_text='Is the prefix required?')),
                ('alternate_name', models.CharField(help_text='Alternate name for this feature.', max_length=50, blank=True)),
                ('alternate_mandatory', models.BooleanField(default=False, help_text='Is the alternate name required?')),
                ('requires_config', models.CharField(help_text='A configuration string to enable the feature.', max_length=100, blank=True)),
                ('default_config', models.CharField(help_text='The configuration string in the shipping browser.', max_length=100, blank=True)),
                ('protected', models.BooleanField(default=False, help_text="True if feature requires additional steps to enable in order to protect the user's security or privacy.")),
                ('note', webplatformcompat.fields.TranslatedField(help_text='Short note on support, designed for inline display.', null=True, blank=True, validators=[webplatformcompat.validators.LanguageDictValidator(False)])),
                ('footnote', webplatformcompat.fields.TranslatedField(help_text='Long note on support, designed for display after a compatiblity table, in MDN wiki format.', null=True, blank=True, validators=[webplatformcompat.validators.LanguageDictValidator(False)])),
                ('feature', models.ForeignKey(related_name='supports', to='webplatformcompat.Feature')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Version',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('version', models.CharField(help_text='Version string.', max_length=20, blank=True)),
                ('release_day', models.DateField(help_text='Day of release to public, ISO 8601 format.', null=True, blank=True)),
                ('retirement_day', models.DateField(help_text='Day this version stopped being supported, ISO 8601 format.', null=True, blank=True)),
                ('status', models.CharField(default='unknown', max_length=15, choices=[('unknown', 'unknown'), ('current', 'current'), ('future', 'future'), ('retired', 'retired'), ('beta', 'beta'), ('retired beta', 'retired beta')])),
                ('release_notes_uri', webplatformcompat.fields.TranslatedField(help_text='URI of release notes.', null=True, blank=True, validators=[webplatformcompat.validators.LanguageDictValidator(False)])),
                ('note', webplatformcompat.fields.TranslatedField(help_text='Notes about this version.', null=True, blank=True, validators=[webplatformcompat.validators.LanguageDictValidator(False)])),
                ('browser', models.ForeignKey(related_name='versions', to='webplatformcompat.Browser')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # --- Ordering, remaining FKs and uniqueness constraints ----------
        migrations.AlterOrderWithRespectTo(
            name='version',
            order_with_respect_to='browser',
        ),
        migrations.AddField(
            model_name='support',
            name='version',
            field=models.ForeignKey(related_name='supports', to='webplatformcompat.Version'),
            preserve_default=True,
        ),
        migrations.AlterUniqueTogether(
            name='support',
            unique_together=set([('version', 'feature')]),
        ),
        migrations.AddField(
            model_name='section',
            name='specification',
            field=models.ForeignKey(related_name='sections', to='webplatformcompat.Specification'),
            preserve_default=True,
        ),
        migrations.AlterOrderWithRespectTo(
            name='section',
            order_with_respect_to='specification',
        ),
        migrations.AddField(
            model_name='feature',
            name='sections',
            field=sortedm2m.fields.SortedManyToManyField(help_text=None, related_name='features', to='webplatformcompat.Section'),
            preserve_default=True,
        ),
    ]
|
def fibonacci(n):
    """Return the n-th Fibonacci number, 1-indexed (fibonacci(1) == 0,
    fibonacci(2) == 1, fibonacci(10) == 34).

    Bug fix: the original only assigned ``n3`` inside the loop, so calls
    with n < 3 raised UnboundLocalError. Small n is now handled explicitly.
    """
    n1 = 0  # fib(1)
    n2 = 1  # fib(2)
    if n <= 1:
        return n1
    if n == 2:
        return n2
    # Iterate from the 3rd term up to the n-th, keeping the last two values.
    for _ in range(3, n + 1):
        n1, n2 = n2, n1 + n2
    return n2
# Demo output for exercise0807.py (Kaiching Chang, July 2014):
# print the 10th, 11th and 12th Fibonacci numbers.
for sample_n in (10, 11, 12):
    print(fibonacci(sample_n))
|
#!/usr/bin/env python
from game.base.state import State
from game.entities.camera import Camera
from game.entities.terminal import Terminal
from game.entities.ground import Ground
from game.constants import GROUND_HEIGHT, CAMERA_OFFSET, SCRIPTS_DIR
from game.scene import Scene
from game.util import pg_color, random_rgb, random_char, ncolor
import pygame
import glm
import random
import math
from glm import vec3, vec4, ivec2
class Intro(State):
    """Intro/title-screen state: builds the scene, layers translucent
    backdrop bands, and runs a scripted logo animation before handing
    control to the "game" state."""

    def __init__(self, app, state=None):
        """Create the intro scene with two terminals (normal + large font)
        and a camera, then register backdrop render bands.

        :param app: the application object (provides size, screen, state)
        :param state: optional previous state passed through to State
        """
        super().__init__(app, state, self)

        self.scene = Scene(self.app, self)
        self.terminal = self.scene.add(Terminal(self.app, self.scene))
        # Second terminal with a larger font size (32) for the big logo text.
        self.bigterm = self.scene.add(Terminal(self.app, self.scene, 32))
        self.camera = self.scene.add(Camera(app, self.scene, self.app.size))
        self.scene.ground_color = "darkgreen"
        self.time = 0

        # Top backdrop: 8 horizontal bands fading from white to transparent
        # (alpha and brightness both scale with 1 - i/rows).
        rows = 8
        backdrop_h = 150
        for i in range(rows):
            h = int(backdrop_h) // rows
            y = h * i
            backdrop = pygame.Surface((self.app.size.x, h))
            interp = i / rows
            interp_inv = 1 - i / rows
            backdrop.set_alpha(255 * interp_inv * 0.2)
            backdrop.fill(pg_color(ncolor("white") * interp_inv))
            # y/backdrop are bound as defaults so each lambda keeps its own band.
            self.scene.on_render += lambda _, y=y, backdrop=backdrop: self.app.screen.blit(
                backdrop, (0, y)
            )

        # Second, fainter fade layered over the first 100 pixels.
        rows = 8
        backdrop_h = 100
        for i in range(rows):
            h = int(backdrop_h) // rows
            y = h * i
            backdrop = pygame.Surface((self.app.size.x, h))
            interp = i / rows
            interp_inv = 1 - i / rows
            backdrop.set_alpha(255 * interp_inv * 0.1)
            backdrop.fill(pg_color(ncolor("white") * interp_inv))
            self.scene.on_render += lambda _, y=y, backdrop=backdrop: self.app.screen.blit(
                backdrop, (0, y)
            )

        # Bottom edge: dark bands drawn upward from the screen bottom.
        backdrop_h = int(24)
        rows = 4
        for i in range(rows, 0, -1):
            h = int(backdrop_h) // rows
            y = h * i
            backdrop = pygame.Surface((self.app.size.x, h))
            interp = i / rows
            interp_inv = 1 - i / rows
            backdrop.set_alpha(200 * interp_inv)
            backdrop.fill((0))
            # backdrop.fill(pg_color(ncolor('black')*interp_inv))
            self.scene.on_render += lambda _, y=y, backdrop=backdrop: self.app.screen.blit(
                backdrop, (0, self.app.size.y - y)
            )

    def pend(self):
        """Request a redraw from the app."""
        self.app.pend()  # tell app we need to update

    def update(self, dt):
        """
        Called every frame by App as long as Game is the current app.state
        :param dt: time since last frame in seconds
        """
        super().update(dt)  # needed for script
        self.scene.update(dt)
        self.time += dt

    def render(self):
        """Draw the scene through the intro camera."""
        self.scene.render(self.camera)

    def change_logo_color(self, script):
        """Script coroutine: waits for a ground color, then animates the
        "BUTTERFLY DESTROYERS" logo text on the big terminal with
        pulsing per-letter colors and explosion sounds.

        :param script: the running script object (provides dt, sleep)
        """
        yield
        bigterm = self.bigterm
        # Wait until the scene has a ground color to derive colors from.
        while True:
            if self.scene.ground_color:
                break
            yield
        c = glm.mix(
            self.scene.ground_color,
            glm.mix(ncolor("white"), random_rgb(), random.random()),
            0.2,
        )
        r = 0
        # rc = vec4()
        self.scene.play_sound("explosion.wav")
        while True:
            # Refresh the random accent color every 30 ticks.
            if r % 30 == 0:
                rc = random_rgb()
            s = "BUTTERFLY "
            for i in range(len(s)):
                # c = ncolor('purple') * i/len(s) + math.sin(r / 200 + i+r) ** 2 + .6
                c = (
                    ncolor("purple") * i / len(s)
                    + ((math.sin(i + r) + 0.4) * script.dt)
                    + 0.3
                )
                bigterm.write(s[i], (i - len(s) - 8, 1), c)
            # Second word appears (with a sound) after 15 ticks.
            if r > 15:
                s = "DESTROYERS "
                for i in range(len(s)):
                    c = (
                        self.scene.ground_color * i / len(s)
                        + ((math.sin(i + r) + 4) * script.dt)
                        + 0.3
                    )
                    bigterm.write(s[i], (i - len(s) - 3, 2), c)
            if r == 15:
                self.scene.play_sound("explosion.wav")
            yield script.sleep(0.1)
            r += 1

    def __call__(self, script):
        """Main intro script: fades sky/ground colors, types out the story
        text with a scramble effect, then switches the app to "game".

        :param script: the running script object (provides when, sleep, dt)
        """
        yield
        self.scene.scripts += self.change_logo_color
        when = script.when
        scene = self.scene
        terminal = self.terminal
        self.scene.music = "butterfly2.ogg"
        # self.scene.sky_color = "#4c0b6b"
        # self.scene.ground_color = "#e08041"
        # self.scene.stars()
        self.scene.cloudy()
        textdelay = 0.03
        # 10-second color fades; the ground fade re-registers an updater
        # on completion so the ground color keeps being applied.
        fades = [
            when.fade(
                10,
                (0, 1),
                lambda t: scene.set_sky_color_opt(
                    glm.mix(ncolor("#4c0b6b"), ncolor("#e08041"), t)
                ),
            ),
            when.fade(
                10,
                (0, 1),
                lambda t: scene.set_ground_color_opt(
                    glm.mix(ncolor("darkgreen"), ncolor("yellow"), t)
                ),
                lambda: fades.append(
                    when.every(
                        0, lambda: scene.set_ground_color_opt(scene.ground_color)
                    )
                ),
            ),
        ]
        yield
        # self.scene.set_ground_color = "#e08041"
        # scene.sky_color = "black"
        self.scene.music = "butterfly2.ogg"
        # for i in range(len(msg)):
        #     terminal.write(msg[i], (len(msg) / 2 - 1 + i, 1), self.scene.ground_color)
        #     # scene.ensure_sound("type.wav")
        #     yield script.sleep(0.002)
        # script.push(self.logo_color)
        # yield from self.change_logo_color(script)
        yield script.sleep(3)
        msg = [
            "In the year 20XX, the butterfly",
            "overpopulation problem has",
            "obviously reached critical mass.",
            "The military has decided to intervene.",
            "Your mission is simple: defeat all the",
            "butterflies before the world ends.",
            "But look out for Big Butta, king of",
            "the butterflies.",
        ]
        # Type the story: a random colored glyph at the cursor, then the
        # real character one cell to the left.
        for y, line in enumerate(msg):
            ty = y * 2 + 5
            for x, m in enumerate(line):
                terminal.write(random_char(), (x + 2, ty), random_rgb())
                cursor = (x + 2, ty)
                terminal.write(m, (x + 1, ty), "white")
                # scene.ensure_sound("type.wav")
                self.change_logo_color(script)
                # if not script.keys_down:
                #     yield
                # else:
                yield script.sleep(textdelay)
            terminal.clear(cursor)
        when = script.when
        scene = self.scene
        terminal = self.terminal
        yield script.sleep(3)
        # while True:
        #     terminal.write_center("Press any key to continue", 20, "green")
        #     self.change_logo_color(script)
        #     yield script.sleep(0.1)
        #     if script.keys_down:
        #         break
        #     terminal.clear(20)
        #     self.change_logo_color(script)
        #     yield script.sleep(0.1)
        #     if script.keys_down:
        #         break
        terminal.clear()
        terminal.write_center("Loading...", 10)
        self.app.state = "game"
|
# Sending mail using SMTP (Gmail).
import smtplib
import getpass

# Connect to Gmail's submission port and upgrade to TLS before logging in.
session = smtplib.SMTP('smtp.gmail.com', 587)
session.starttls()

print('Gmail Login.')
senderEmailId = input('Enter Gmail Id: ')
password = getpass.getpass('Enter Password: ')

try:
    session.login(senderEmailId, password)
    # Fixed prompt: this address is the *recipient*, not the sender.
    recipientEmailId = input('Enter recipient Email Id: ')
    message = input('Enter the message: ')
    session.sendmail(senderEmailId, recipientEmailId, message)
    session.quit()
    # Fixed typo: "seccessfully" -> "successfully".
    print('Mail sent successfully.')
except smtplib.SMTPException:
    # Catch only SMTP errors; the original bare ``except:`` also swallowed
    # KeyboardInterrupt and unrelated bugs.
    print('Invalid Email or Password!')
|
import time

inicio = time.perf_counter()

def aDormir():
    """Sleep for one second, printing a message before and after."""
    print("Iniciando función, voy a dormir 1 s")
    time.sleep(1)
    print("Paso un segundo, he despertado")

# Now compare what happens when we run the function 10 times.
for _ in range(10):
    aDormir()

final = time.perf_counter()
# Bug fix: the original f-string interpolated the tuple ``(final - inicio, 2)``
# instead of rounding; use a format spec to print two decimal places.
print(f"Código ejecutado en {final - inicio:.2f} segundos")
from datetime import datetime
from xml_get import get_nodes, remove_non_ascii, get_node_text_value
def get_time_from_short_path(itinerary, short_path):
    """Combine the NS1:Datum and NS1:Zeit children found under *short_path*
    into a datetime; return None when either part is missing.

    :param itinerary: XML node to search under
    :param short_path: list of tag names leading to the date/time pair
    :return: datetime or None
    """
    # TODO : Fix/Add Timezones!
    # TODO ensure it doesn't break comparison for visit overlap (uses str...)
    tz = 'Europe/Zurich'  # currently unused — kept for the timezone TODO
    dates = get_nodes(itinerary, short_path + ['NS1:Datum'])
    times = get_nodes(itinerary, short_path + ['NS1:Zeit'])
    if not (dates and times):
        return None
    combined = '{d} {t}'.format(d=dates[0].text, t=times[0].text)
    return datetime.strptime(combined, "%Y-%m-%d %H:%M:%S")
# ROOT :: get_...(root) methods
def get_itinerary_nodes(root):
    """Return every NS1:Verbindung (itinerary) node of the SOAP response."""
    return get_nodes(root, ['soapenv:Body',
                            'NS1:FindVerbindungenResponse',
                            'NS1:Verbindungen',
                            'NS1:Verbindung'])
# ITINERARY :: get_...(itinerary) methods
def get_leg_nodes(itinerary):
    """Return every NS1:Verbindungsabschnitt (leg) node of *itinerary*."""
    return get_nodes(itinerary,
                     ['NS1:Verbindungsabschnitte', 'NS1:Verbindungsabschnitt'])
def get_itin_start_datetime(itinerary):
    """Actual departure datetime from the itinerary summary, or None."""
    return get_time_from_short_path(
        itinerary,
        ['NS1:Zusammenfassung', 'NS1:Abfahrt', 'NS1:DatumZeit', 'NS1:Aktuell'])
def get_itin_end_datetime(itinerary):
    """Actual arrival datetime from the itinerary summary, or None."""
    return get_time_from_short_path(
        itinerary,
        ['NS1:Zusammenfassung', 'NS1:Ankunft', 'NS1:DatumZeit', 'NS1:Aktuell'])
def get_itin_context_reconstruction(itinerary):
    """Text of the single NS1:ContextReconstruction child of *itinerary*.

    Raises ValueError if there is not exactly one such node (unpacking).
    """
    [node] = get_nodes(itinerary, ['NS1:ContextReconstruction'])
    return node.text
# LEG :: get_...(leg) methods
def get_segment_nodes(leg):
    """Return every NS1:Haltepunkt (stop/segment) node of *leg*."""
    return get_nodes(leg, ['NS1:Haltepunkte', 'NS1:Haltepunkt'])
def get_leg_type(leg):
    """Transport-means type (NS1:Verkehrsmittel/NS1:Typ) of *leg*."""
    return get_node_text_value(leg, ['NS1:Verkehrsmittel', 'NS1:Typ'])
def get_leg_route_full_name(leg):
    """Full route name of the leg's transport means."""
    return get_node_text_value(
        leg, ['NS1:Verkehrsmittel', 'NS1:Informationen', 'NS1:Name'])
def get_leg_route_category(leg):
    """Route category abbreviation (NS1:Kategorie/NS1:Abkuerzung)."""
    return get_node_text_value(
        leg, ['NS1:Verkehrsmittel', 'NS1:Informationen',
              'NS1:Kategorie', 'NS1:Abkuerzung'])
def get_leg_route_line(leg):
    """Route line identifier (NS1:Linie) of the leg."""
    return get_node_text_value(
        leg, ['NS1:Verkehrsmittel', 'NS1:Informationen', 'NS1:Linie'])
def get_leg_route_number(leg):
    """Route number (NS1:Nummer) of the leg.

    NS1:Nummer seems to be identical to 'NS1:ExterneNummer'.
    """
    return get_node_text_value(
        leg, ['NS1:Verkehrsmittel', 'NS1:Informationen', 'NS1:Nummer'])
def get_leg_agency_id(leg):
    """Operating agency code (NS1:TransportUnternehmungCode) of the leg."""
    return get_node_text_value(
        leg, ['NS1:Verkehrsmittel', 'NS1:Informationen',
              'NS1:TransportUnternehmungCode'])
def get_leg_time_start(leg):
    """Actual departure datetime of the leg, or None."""
    return get_time_from_short_path(
        leg, ['NS1:Abfahrt', 'NS1:DatumZeit', 'NS1:Aktuell'])
def get_leg_time_end(leg):
    """Actual arrival datetime of the leg, or None."""
    return get_time_from_short_path(
        leg, ['NS1:Ankunft', 'NS1:DatumZeit', 'NS1:Aktuell'])
def get_leg_planned_time_start(leg):
    """Planned (timetable) departure datetime of the leg, or None."""
    return get_time_from_short_path(
        leg, ['NS1:Abfahrt', 'NS1:DatumZeit', 'NS1:Geplant'])
def get_leg_planned_time_end(leg):
    """Planned (timetable) arrival datetime of the leg, or None."""
    return get_time_from_short_path(
        leg, ['NS1:Ankunft', 'NS1:DatumZeit', 'NS1:Geplant'])
def get_leg_stop_id_start(leg):
    """External station id of the departure stop, leading zeros stripped."""
    raw_id = get_node_text_value(
        leg, ['NS1:Abfahrt', 'NS1:Haltestelle', 'NS1:Standort',
              'NS1:Id', 'NS1:ExterneStationId'])
    return raw_id.lstrip('0')
def get_leg_station_name_start(leg):
    """Name of the departure station of the leg.

    Returned as-is; a remove_non_ascii() pass was considered but dropped.
    """
    return get_node_text_value(
        leg, ['NS1:Abfahrt', 'NS1:Haltestelle', 'NS1:Standort', 'NS1:Name'])
def get_leg_platform_start(leg):
    """Actual departure platform (Gleis) of the leg."""
    return get_node_text_value(
        leg, ['NS1:Abfahrt', 'NS1:Haltestelle', 'NS1:Gleis', 'NS1:Aktuell'])
def get_leg_stop_id_end(leg):
    """External station id of the arrival stop, leading zeros stripped."""
    raw_id = get_node_text_value(
        leg, ['NS1:Ankunft', 'NS1:Haltestelle', 'NS1:Standort',
              'NS1:Id', 'NS1:ExterneStationId'])
    return raw_id.lstrip('0')
def get_leg_station_name_end(leg):
    """Name of the arrival station of the leg.

    Returned as-is; a remove_non_ascii() pass was considered but dropped.
    """
    return get_node_text_value(
        leg, ['NS1:Ankunft', 'NS1:Haltestelle', 'NS1:Standort', 'NS1:Name'])
def get_leg_platform_end(leg):
    """Actual arrival platform (Gleis) of the leg."""
    return get_node_text_value(
        leg, ['NS1:Ankunft', 'NS1:Haltestelle', 'NS1:Gleis', 'NS1:Aktuell'])
# SEGMENT :: get_...(segment) methods
def get_seg_stop_id(segment):
    """External station id of the segment's stop, leading zeros stripped."""
    raw_id = get_node_text_value(
        segment, ['NS1:Haltestelle', 'NS1:Standort',
                  'NS1:Id', 'NS1:ExterneStationId'])
    return raw_id.lstrip('0')
def get_seg_time_departure(segment):
    """Actual departure time of the segment (AbfahrtsZeitpunkt/Aktuell)."""
    return get_time_from_short_path(
        segment, ['NS1:AbfahrtsZeitpunkt', 'NS1:Aktuell'])
def get_seg_time_arrival(segment):
    """Actual arrival time of the segment (AnkunftsZeitpunkt/Aktuell)."""
    return get_time_from_short_path(
        segment, ['NS1:AnkunftsZeitpunkt', 'NS1:Aktuell'])
def get_seg_type(segment):
    """Stop/location type of the segment (NS1:Haltestelle/Standort/Typ)."""
    return get_node_text_value(
        segment, ['NS1:Haltestelle', 'NS1:Standort', 'NS1:Typ'])
|
import pandas as pd
from data_paths import paths
from glob import glob
import matplotlib.pyplot as plt
data_paths = glob(paths["salary"] + "/*")
# Paths to training files
training_features = pd.read_csv(data_paths[1])
training_target = pd.read_csv(data_paths[-1])
# Merge to form a single dataframe
print "Dimensions prior to merge"
print "Feautures:", training_features.shape
print "Target:", training_target.shape
merge_df = pd.merge(left=training_features, right=training_target, how='inner')
print "Dimensions after merge:" merge_df.shape
# Check for na values across the whole frame
print merge_df.isnull().sum(axis = 0) # Not seeing any...
# Check for odd values in the target
merge_df["salary"].describe() # I would seem that we have some zero values
# Let's start with a basic count of how many we have
merge_df[merge_df["salary"] == 0]
plt.hist(merge_df["salary"], bins = 50)
plt.show()
# I think we could drop these, but I also suspect that there are going to
# be gradations to how "wrong" things are. Let's plot things up to get a
# sense of the distribution
# The goal here is to remove overt errors from the data,
# missing values
# Atypical values
# outliers
# This might be a sepearate section, but we don't just want to remove
# bad values, we might want to ADD values that improve our ability to
# use our features to predict an output -- feature engineering.
# I think this is it's own jam -- as is dimensionality reduction.
# There seem to be some built in methods that we and use to identify ourliers
# http://scikit-learn.org/stable/modules/outlier_detection.html
|
import thread
import time
import random
def run_often(thread_nome, sleep_time):
while True:
time.sleep(sleep_time)
print '%s' % thread_nome
def run_less_often(thread_nome, sleep_time):
while True:
time.sleep(sleep_time)
print '%s' % thread_nome
def run_randomly(thread_nome, sleep_time):
while True:
time.sleep(sleep_time)
print '%s' % thread_nome
thread.start_new_thread(run_often, ('run often', 2))
thread.start_new_thread(run_less_often, ('run less often', 5))
thread.start_new_thread(run_randomly, ('run randomly', random.choice(range(1,6))))
print input() |
"""
Django settings for thm project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
PROJECT_PATH = os.path.dirname(os.path.abspath(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ['LOCAL_SECRET_KEY']
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.gis',
'apps.users',
'apps.jobs',
'libs',
'apps.faq',
'south',
'rest_framework',
'rest_framework.authtoken',
'rest_framework_swagger',
'djrill',
'floppyforms',
'apps.job_gallery',
'apps.search',
'apps.pricing',
'apps.commcalc',
'pipeline',
'apps.subscription',
'apps.inventory',
'apps.metrics',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# AUTH BACKEND DEFINITIONS
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
)
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.IsAuthenticated'
],
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework.authentication.TokenAuthentication',
]
}
# TEMPLATE CONTEXT DEFINITIONS
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.core.context_processors.request',
    'django.core.context_processors.media',
)
# TEMPLATE PATH CONFIGURATION
TEMPLATE_PATH = os.path.join(PROJECT_PATH, 'templates')
# BUG FIX: `(TEMPLATE_PATH)` is just a parenthesised string, not a tuple;
# Django iterates TEMPLATE_DIRS, so a bare string would be walked character
# by character. The trailing comma makes it a real one-element tuple.
TEMPLATE_DIRS = (TEMPLATE_PATH,)
## MISCELLANEOUS SETTINGS
ROOT_URLCONF = 'thm.urls'
WSGI_APPLICATION = 'thm.wsgi.application'
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kathmandu'
USE_I18N = True
USE_L10N = True
USE_TZ = True
APPEND_SLASH = True
# TURN DEBUG OFF
DEBUG = False
TEMPLATE_DEBUG = False
# NOTE(review): '*' disables Django's Host-header validation; acceptable only
# behind a proxy that enforces the host -- TODO confirm deployment setup.
ALLOWED_HOSTS = ['*']
# DATABASE ENGINE CONFIGURATIONS
# dj_database_url reads DATABASE_URL from the environment.
import dj_database_url
DATABASES = {
    "default": dj_database_url.config()
}
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
# Static asset configuration
# STATIC_URL = 'http://s3.amazonaws.com/%s/' % AWS_STATIC_BUCKET
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
    os.path.join(PROJECT_PATH, 'static'),
)
# Use local storage
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
MEDIA_ROOT = os.path.join(PROJECT_PATH, 'media')
MEDIA_URL = '/media/'
# CONFIGURING USERPROFILE AS THE AUTH BACKEND
AUTH_USER_MODEL = 'users.UserProfile'
# LOGIN URL DEFINITIONS
LOGIN_URL = '/signin/'
LOGIN_REDIRECT_URL = '/home/'
URL='https://www.thehomerepairapp.com'
## LOGGING DEFINITION AND CONFIGURATION
# Everything at WARN and above goes to the console; Django/app loggers also
# e-mail ADMINS when DEBUG is False (require_debug_false filter).
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'verbose': {
            'format' : "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
            'datefmt' : "%d/%b/%Y %H:%M:%S"
        },
        'simple': {
            'format': '%(levelname)s %(message)s'
        },
    },
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler',
            'include_html': True
        },
        'null': {
            'level': 'WARN',
            'class': 'logging.NullHandler',
        },
        'console': {
            'level': 'WARN',
            'class': 'logging.StreamHandler',
            'formatter': 'verbose'
        },
    },
    'loggers': {
        'django': {
            'handlers': ['console', 'mail_admins'],
            'propagate': True,
            'level': 'WARN',
        },
        'api': {
            'handlers': ['console', 'mail_admins'],
            'propagate': True,
            'level': 'WARN',
        },
        'faq': {
            'handlers': ['console', 'mail_admins'],
            'propagate': True,
            'level': 'WARN',
        },
        'job_gallery': {
            'handlers': ['console', 'mail_admins'],
            'propagate': True,
            'level': 'WARN',
        },
        'apps.jobs': {
            'handlers': ['console', 'mail_admins'],
            'propagate': True,
            'level': 'WARN',
        },
        'libs': {
            'handlers': ['console', 'mail_admins'],
            'propagate': True,
            'level': 'WARN',
        },
        'apps.pricing': {
            'handlers': ['console'],
            'propagate': True,
            'level': 'WARN',
        },
        'apps.search': {
            'handlers': ['console', 'mail_admins'],
            'propagate': True,
            'level': 'WARN',
        },
        'apps.users': {
            'handlers': ['console', 'mail_admins'],
            'propagate': True,
            'level': 'WARN',
        },
        'apps.commcalc': {
            'handlers': ['console', 'mail_admins'],
            'propagate': True,
            'level': 'WARN',
        },
        'apps.subscription': {
            'handlers': ['console', 'mail_admins'],
            'propagate': True,
            'level': 'WARN',
        },
        'apps.inventory': {
            'handlers': ['console', 'mail_admins'],
            'propagate': True,
            'level': 'WARN',
        },
        'apps.metrics': {
            'handlers': ['console', 'mail_admins'],
            'propagate': True,
            'level': 'WARN',
        },
    }
}
# ALL OTHER SETTINGS
# Mandrill API KEY
MANDRILL_API_KEY = os.environ['MANDRILL_API_KEY']
EMAIL_BACKEND = "djrill.mail.backends.djrill.DjrillBackend"
ADMIN_EMAIL = os.environ['ADMIN_EMAIL']
# ERROR REPORTING
DEFAULT_FROM_EMAIL = 'server@thehomerepairapp.com'
SERVER_EMAIL = 'server@thehomerepairapp.com'
EMAIL_HOST = 'localhost'
EMAIL_PORT = 25
ADMINS = (
    ('Gaurav Ghimire', ADMIN_EMAIL),
)
MANAGERS = ADMINS
#GOOGlE RELATED CONFIGURATIONS
# NOTE(review): "GOOOGLE" (three O's) is a typo, but the name may be
# referenced elsewhere in the project, so it is not safe to rename here.
GOOOGLE_API_KEY = os.environ['GOOGLE_API_KEY']
#User Token Expiry in days
USER_TOKEN_EXPIRY = int(os.environ['USER_TOKEN_EXPIRY'])
# SWAGGER_SETTINGS = {
#     "exclude_namespaces": [],
#     "api_version": '1',
#     "api_path": "/",
#     "enabled_methods": [
#         'get',
#         'post',
#     ],
#     "api_key": '',
#     "is_authenticated": True,
#     "is_superuser": True,
#     "permission_denied_handler": None,
#     "info": {
#         'contact': 'dev@thehomerepairapp.com',
#         'description': 'This is a API documentation server. '
#                        'To use the API please use your token auth.',
#         'license': 'Copyright The Handyman App 2014',
#         'licenseUrl': '',
#         'termsOfServiceUrl': '',
#         'title': 'The Homerepair App',
#     },
# }
# Currency Setting
CURRENCIES = ('NPR',)
# Phone number setting
PHONENUMBER_DEFAULT_REGION = 'NP'
# LOCAL CONFIG IMPORT, IMPORTS ALL CONFIG FROM local_setting.py,
# required only for a dev env
try:
    from local_setting import *
except ImportError:
    pass
# # Use amazon S3 storage only on production
# if not DEBUG:
#     ##This for media, user uploaded files
#     DEFAULT_FILE_STORAGE = 'libs.s3utils.MediaRootS3BotoStorage'
#     ##This for CSS,
#     STATICFILES_STORAGE = 'libs.s3utils.StaticRootS3BotoStorage'
#     MEDIA_ROOT = '/%s/' % DEFAULT_FILE_STORAGE
#     MEDIA_URL = '//s3.amazonaws.com/%s/' % AWS_MEDIA_BUCKET
# for static file management
STATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage'
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    'pipeline.finders.PipelineFinder',
)
# django-pipeline: compile SASS with the sassc binary.
PIPELINE_SASS_BINARY = "sassc"
PIPELINE_COMPILERS = (
    'pipeline.compilers.sass.SASSCompiler',
)
PIPELINE_CSS = {
    'web_yellow': {
        'source_filenames': (
            'css/web_yellow.sass',
            'css/popup.css'
        ),
        'output_filename': 'css/web_yellow.css',
    },
    'admin': {
        'source_filenames': (
            'css/bootstrap.css',
            'css/admin.sass',
        ),
        'output_filename': 'css/admin.css',
    },
}
|
#
# cogs/info/info.py
#
# mawabot - Maware's selfbot
# Copyright (c) 2017 Ma-wa-re, Ammon Smith
#
# mawabot is available free of charge under the terms of the MIT
# License. You are free to redistribute and/or modify it under those
# terms. It is distributed in the hopes that it will be useful, but
# WITHOUT ANY WARRANTY. See the LICENSE file for more details.
#
''' Contains in-depth commands that get information '''
import asyncio
import re
import unicodedata
from pprint import pformat
import discord
from discord.ext import commands
from mawabot.utils import normalize_caseless
# Public API of this module.
__all__ = [
    'Information',
]
# Discord markup patterns: channel mentions, user/nick mentions, custom emoji.
CHANNEL_REGEX = re.compile(r'<#([0-9]+)>')
MENTION_REGEX = re.compile(r'<@!?([0-9]+)>')
EMOJI_REGEX = re.compile(r'<:([A-Za-z~\-0-9]+):([0-9]+)>')
class Information:
    ''' Selfbot cog with in-depth information commands (users, channels,
    snowflakes, pins, audit logs, emoji). '''

    __slots__ = (
        'bot',
    )

    def __init__(self, bot):
        self.bot = bot

    async def _get_profile(self, user_or_id):
        ''' Resolve a User object or a raw ID to a (profile, user) pair.
        Either element may be None when lookup fails. '''
        if isinstance(user_or_id, discord.User):
            try:
                profile = await user_or_id.profile()
            except discord.NotFound:
                profile = None
            return profile, user_or_id
        else:
            try:
                profile = await self.bot.get_user_profile(user_or_id)
                return profile, profile.user
            except discord.NotFound:
                user = discord.utils.get(self.bot.users, id=user_or_id)
                return None, user

    async def _get_profiles(self, names):
        ''' Map a list of names/mentions/IDs (or 'me') to (profile, user)
        pairs, dropping entries whose user could not be resolved. '''
        if not names:
            names = ['me']

        uids = []
        for name in names:
            if name == 'me' or name == 'myself':
                uids.append(self.bot.user.id)
                continue

            match = MENTION_REGEX.match(name)
            if match is not None:
                uid = int(match[1])
            elif name.isdigit():
                uid = int(name)
            else:
                # Falls back to a case-insensitive username search; note that
                # discord.utils.find returns the User object itself here (not
                # an ID) -- _get_profile accepts both forms.
                nname = normalize_caseless(name)
                uid = discord.utils.find(lambda u, n=nname: normalize_caseless(u.name) == n, self.bot.users)
            uids.append(uid)

        profiles = await asyncio.gather(*[self._get_profile(uid) for uid in uids])
        return list(filter(lambda t: t[1] is not None, profiles))

    @staticmethod
    def _connected_accounts(connected_accounts):
        ''' Render a profile's connected-accounts list (dicts with id/name/
        type/verified keys) as newline-joined markdown, linking the services
        that have a well-known URL scheme. '''
        accounts = []
        for account in connected_accounts:
            id = account['id']
            name = account['name']
            type = account['type']
            verified = '`\N{WHITE HEAVY CHECK MARK}`' if account['verified'] else ''
            if type == 'battlenet':
                accounts.append(f'battle.net: {name} {verified}')
            elif type == 'facebook':
                url = f'https://www.facebook.com/{id}'
                accounts.append(f'[Facebook]({url}) {verified}')
            elif type == 'leagueoflegends':
                if '_' in id:
                    region, id = id.split('_')
                    url = f'http://lolking.net/summoner/{region}/{id}'
                    accounts.append(f'[League of Legends {verified}]({url})')
                else:
                    accounts.append(f'League of Legends: {name} {verified}')
            elif type == 'reddit':
                url = f'https://www.reddit.com/user/{name}'
                accounts.append(f'[Reddit {verified}]({url})')
            elif type == 'skype':
                accounts.append(f'Skype: {name} {verified}')
            elif type == 'spotify':
                url = f'https://open.spotify.com/user/{id}'
                accounts.append(f'[Spotify {verified}]({url})')
            elif type == 'steam':
                url = f'https://steamcommunity.com/profiles/{id}'
                accounts.append(f'[Steam {verified}]({url})')
            elif type == 'twitch':
                url = f'https://www.twitch.tv/{name}'
                accounts.append(f'[Twitch {verified}]({url})')
            elif type == 'twitter':
                url = f'https://twitter.com/{name}'
                accounts.append(f'[Twitter {verified}]({url})')
            elif type == 'youtube':
                url = f'https://www.youtube.com/channel/{id}'
                accounts.append(f'[YouTube {verified}]({url})')
            else:
                # Unknown service: dump the raw fields.
                accounts.append(f'{type}: {name} `{id}` {verified}')
        return '\n'.join(accounts)

    @commands.command(aliases=['uinfo'])
    async def user_info(self, ctx, *names: str):
        ''' Gets information about the given user(s) '''
        profiles = await self._get_profiles(names)
        if not profiles:
            embed = discord.Embed(type='rich', description='No user profiles found.')
            await ctx.send(embed=embed)
            return

        for profile, user in profiles:
            lines = [user.mention]

            if profile is not None:
                # Nitro
                if profile.premium:
                    since = profile.premium_since.strftime('%x @ %X')
                    lines.append(f'- Nitro user since `{since}`')

                # Other markers
                if profile.staff:
                    lines.append('- Discord Staff')
                if profile.partner:
                    lines.append('- Discord Partner')
                if profile.hypesquad:
                    lines.append('- Hypesquad')

            # Guild-specific details are only available on Member objects.
            if isinstance(user, discord.Member):
                if user.game:
                    if user.game.type == 1:
                        lines.append(f'Streaming [{user.game.name}]({user.game.url})')
                    else:
                        lines.append(f'Playing `{user.game.name}`')

                if user.voice:
                    mute = user.voice.mute or user.voice.self_mute
                    deaf = user.voice.deaf or user.voice.self_deaf
                    states = []
                    if mute:
                        states.append('muted')
                    if deaf:
                        states.append('deafened')
                    if states:
                        state = ', '.join(states)
                    else:
                        state = 'active'
                    lines.append(f'Voice: {state}')

                if user.nick:
                    lines.append(f'Nickname: {user.nick}')

                # Skip roles[0], the implicit @everyone role.
                roles = ' '.join(map(lambda r: r.mention, user.roles[1:]))
                if roles:
                    lines.append(f'Roles: {roles}')

            embed = discord.Embed(type='rich', description='\n'.join(lines))
            embed.timestamp = user.created_at
            if hasattr(user, 'color'):
                embed.color = user.color

            name = f'{user.name}#{user.discriminator}'
            embed.set_author(name=name)
            embed.set_thumbnail(url=user.avatar_url)

            if isinstance(user, discord.Member):
                embed.add_field(name='Status:', value=f'`{user.status}`')
            embed.add_field(name='ID:', value=f'`{user.id}`')

            if profile is not None:
                # Mutual guilds
                if profile.mutual_guilds:
                    guild_names = ', '.join(map(lambda g: g.name, profile.mutual_guilds))
                    embed.add_field(name=f'Mutual Guilds: ({len(profile.mutual_guilds)})', value=guild_names)

                # Get connected accounts
                if profile.connected_accounts:
                    accounts = self._connected_accounts(profile.connected_accounts)
                    if accounts:
                        embed.add_field(name='Connected Accounts:', value=accounts)

            await ctx.send(embed=embed)

    def _get_channel(self, ctx, name):
        ''' Resolve a channel mention, ID, or name to a channel object.
        Returns None when nothing matches; None name means "this channel". '''
        if name is None:
            return ctx.channel
        else:
            match = CHANNEL_REGEX.match(name)
            if match:
                cid = int(match[1])
            elif name.isdigit():
                cid = int(name)
            elif ctx.guild is not None:
                return discord.utils.get(ctx.guild.channels, name=name)
            else:
                # BUG FIX: previously fell through to the final return with
                # `cid` unbound (UnboundLocalError) when the name was
                # non-numeric and there was no guild context.
                return None
            return self.bot.get_channel(cid)

    def _cinfo(self, ctx, name):
        ''' Build an information embed for one channel name (or an error
        embed when the channel cannot be found). '''
        channel = self._get_channel(ctx, name)

        # Couldn't find it
        if channel is None:
            embed = discord.Embed(description=f'No channel found that matched {name}', color=discord.Color.red())
            embed.set_author(name='Error')
            return embed
        else:
            embed = discord.Embed()
            embed.timestamp = channel.created_at
            desc = [f'ID: `{channel.id}`']

            # Check if it is a guild channel
            if isinstance(channel, discord.abc.GuildChannel):
                embed.set_author(name=channel.name)
                desc.append(f'Guild: `{channel.guild.name}`')

                if isinstance(channel, discord.TextChannel):
                    desc.append('Type: `Text`')
                    desc.append(f'Mention: {channel.mention}')
                    desc.append(f'NSFW: `{channel.is_nsfw()}`')
                    desc.append(f'Members: `{len(channel.members)}`')

                    if channel.topic is not None:
                        embed.add_field(name='Topic:', value=channel.topic)
                else:
                    desc.append('Type: `Voice`')
                    desc.append(f'Bitrate: `{channel.bitrate}`')
                    connected = len(channel.members)
                    limit = channel.user_limit
                    if limit == 0:
                        # 0 means "no user limit" for voice channels
                        connstr = f'{connected}'
                    else:
                        connstr = f'{connected}/{limit}'
                    desc.append(f'Connected: `{connstr}`')
            else:
                # Must be a DM otherwise
                if isinstance(channel, discord.DMChannel):
                    desc.append('Type: `DM`')
                    embed.set_author(name=channel.recipient.name)
                else:
                    desc.append('Type: `DM Group`')
                    embed.set_author(name=channel.name)
                    desc.append(f'Owner: `{channel.owner.name}`')

            embed.description = '\n'.join(desc)
            return embed

    @commands.command(aliases=['cinfo', 'vcinfo'])
    async def channel_info(self, ctx, *names: str):
        ''' Gets information about a given channel '''
        if names:
            embeds = (self._cinfo(ctx, name) for name in names)
        else:
            embeds = (self._cinfo(ctx, None),)

        await asyncio.gather(*[ctx.send(embed=embed) for embed in embeds])

    @commands.command(aliases=['id'])
    async def snowflake(self, ctx, *ids: int):
        ''' Gets information about the given snowflake(s) '''
        tasks = []
        for id in ids:
            embed = discord.Embed(type='rich')
            embed.set_author(name=f'Snowflake {id}')
            embed.timestamp = discord.utils.snowflake_time(id)

            guild = self.bot.get_guild(id)
            if guild:
                embed.add_field(name='Guild:', value=guild.name)
                embed.set_thumbnail(url=guild.icon_url)

            channel = self.bot.get_channel(id)
            if channel:
                text = channel.mention
                if channel.guild != guild:
                    text += f' from "{channel.guild.name}"'
                embed.add_field(name='Channel:', value=text)

            user = self.bot.get_user(id)
            if user:
                embed.add_field(name='User:', value=user.mention)

            emoji = self.bot.get_emoji(id)
            if emoji:
                # BUG FIX: this line previously read channel.guild.name, but
                # `channel` can be None here; the emoji knows its own guild.
                text = f'{emoji} ({emoji.name}) from "{emoji.guild.name}"'
                embed.add_field(name='Emoji:', value=text)

            # Can't do get_message() since we're not a true bot
            tasks.append(ctx.send(embed=embed))

        await asyncio.gather(*tasks)

    @commands.command()
    async def pins(self, ctx, name: str = None):
        ''' Gets all the pins in the given channel '''
        channel = self._get_channel(ctx, name)
        if channel is not None:
            pins = await channel.pins()
            count = str(len(pins)) if pins else 'No'
            plural = '' if len(pins) == 1 else 's'
            embed = discord.Embed(type='rich', description=f'{count} pin{plural} in {channel.mention}')
            await asyncio.gather(
                ctx.message.delete(),
                self.bot.output_send(embed=embed),
            )

            for i, message in enumerate(pins):
                embed = discord.Embed(type='rich', description=message.content)
                embed.set_author(name=message.author.display_name, icon_url=message.author.avatar_url)
                embed.set_footer(text=f'Pin #{i+1}')
                embed.timestamp = message.edited_at or message.created_at
                await self.bot.output_send(embed=embed)

    @commands.command(aliases=['audit', 'alog'])
    async def audit_logs(self, ctx, limit: int = 20):
        ''' Retrieve the last 20 (or specified) entries in the audit log '''
        await ctx.message.delete()
        async for entry in ctx.guild.audit_logs(limit=limit):
            embed = discord.Embed(type='rich')
            embed.timestamp = entry.created_at
            embed.set_author(name=entry.user.display_name, icon_url=entry.user.avatar_url)
            embed.add_field(name='Type:', value=f'`{entry.action.name}`')
            embed.add_field(name='Target:', value=f'`{entry.target!r}`')
            embed.description = '\n'.join((
                '**Before:**',
                '```json',
                pformat(dict(entry.before)),
                '```\n',
                '**After:**',
                '```json',
                pformat(dict(entry.after)),
                '```',
            ))

            if entry.reason is not None:
                embed.add_field(name='Reason:', value=entry.reason)
            if entry.category is not None:
                embed.add_field(name='Category:', value=f'`{entry.category.name}`')
            if entry.extra is not None:
                embed.add_field(name='Extra:', value=f'`{entry.extra!r}`')
            await self.bot.output_send(embed=embed)

    @commands.command()
    async def emoji(self, ctx, *emojis: str):
        ''' Gets information about the given emoji(s) '''
        for emoji in emojis:
            match = EMOJI_REGEX.match(emoji)
            lines = [emoji]
            if match:
                # Custom (guild) emoji: <:name:id>
                lines.append(f'Emoji: `{match[1]}`')
                lines.append(f'ID: `{match[2]}`')
            else:
                try:
                    name = unicodedata.name(emoji)
                    lines.append(f'Unicode name: `{name}`')
                    try:
                        # BUG FIX: this was ord(name), which always raised
                        # TypeError (the Unicode *name* is a multi-character
                        # string), so the code point was never shown.
                        lines.append(f'Ord: `{ord(emoji)}`')
                    except TypeError:
                        # emoji may be a multi-codepoint sequence
                        pass
                except (TypeError, ValueError):
                    # TypeError: not a single character;
                    # ValueError: character has no Unicode name.
                    lines.append('Not an emoji')

            await ctx.send(content='\n'.join(lines))
|
Max = "Hello"
print Max
|
# Generated by Django 3.2.3 on 2021-05-17 21:53
# NOTE: auto-generated migration; the datetime defaults below are frozen at
# generation time (a makemigrations artifact of a default=timezone.now-style
# expression being evaluated eagerly).
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    # Applies on top of the initial frontoffice schema.
    dependencies = [
        ('frontoffice', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='materiel',
            name='dateAchat',
            field=models.DateTimeField(default=datetime.datetime(2021, 5, 17, 21, 53, 59, 506708, tzinfo=utc), verbose_name="date d'ajoute"),
        ),
        migrations.AlterField(
            model_name='materiel',
            name='dateMaintenance',
            field=models.DateTimeField(default=datetime.datetime(2021, 5, 17, 21, 53, 59, 506708, tzinfo=utc), verbose_name='date de derniere maintenance'),
        ),
    ]
|
class punto():
    """Binary-tree node holding a string value and two optional children."""
    def __init__(self, valor, izq = None, der = None):
        # Original short attribute names kept: v (value), izq (left), der (right).
        self.v = valor
        self.der = der
        self.izq = izq
def preorden(arbol):
    """Root-left-right concatenation of node values; '' for an empty tree."""
    if arbol is None:
        return ""
    return arbol.v + preorden(arbol.izq) + preorden(arbol.der)
def inorden(arbol):
    """Left-root-right concatenation of node values; '' for an empty tree."""
    if arbol is None:
        return ""
    return inorden(arbol.izq) + arbol.v + inorden(arbol.der)
def posorden(arbol):
    """Left-right-root concatenation of node values; '' for an empty tree."""
    if arbol is None:
        return ""
    return posorden(arbol.izq) + posorden(arbol.der) + arbol.v
# Build a small sample tree and print its three traversals.
arbol = punto('5 ',punto('10 '),punto('15 ',punto('20 '),punto('25 ')))
print("pre: "+preorden(arbol))
print("in: "+inorden(arbol))
print("pos: "+posorden(arbol))
|
# Contest-style driver: for each test case, read n and a word, then try to
# split the word into two parts (first letter / rest). A length-2 word only
# works when its letters are strictly increasing.
test_count = int(input())
for _ in range(test_count):
    input()  # n is re-derivable from the word itself, so the value is unused
    word = input()
    if len(word) == 2:
        if word[0] < word[1]:
            print('YES')
            print(2)
            print(word[0], word[1])
        else:
            print('NO')
    else:
        print('YES')
        print(2)
        print(word[0], word[1:])
# -*- coding:UTF-8 -*-
import cookielib
import urllib
import urllib2
import commentURL
#--
'''
'''
def login(userName, password):
    """Log in to the site and install a cookie-aware opener globally.

    Returns True when the response page contains the logout link (i.e. the
    session is authenticated), False otherwise.
    """
    LOGIN_SUCCESS_FLAG = 'logout.php'
    cj = cookielib.LWPCookieJar()
    # Install the cookie-carrying opener process-wide so later urlopen()
    # calls reuse the authenticated session.
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
    urllib2.install_opener(opener)
    parameter = {
        "user": userName,
        "pass": password,
        "ret": commentURL.URL_BASE
    }
    req = urllib2.Request(
        url=commentURL.URL_LOGIN,
        data=urllib.urlencode(parameter)
    )
    jump = urllib2.urlopen(req)
    rs_str = jump.read()
    # BUG FIX: str.index() raises ValueError when the flag is absent, so a
    # failed login crashed instead of returning False; find() returns -1.
    return rs_str.find(LOGIN_SUCCESS_FLAG) > -1
def logout():
    """Hit the logout URL; True on success, False on a network failure."""
    try:
        urllib2.urlopen(urllib2.Request(commentURL.URL_LOGOUT))
    except IOError:
        return False
    return True
|
class Solution:
    def minTimeToVisitAllPoints(self, points: List[List[int]]) -> int:
        """
        https://leetcode.com/problems/minimum-time-visiting-all-points/
        With diagonal moves allowed, the time between two points is their
        Chebyshev distance; sum it over consecutive point pairs.
        """
        total = 0
        for (x1, y1), (x2, y2) in zip(points, points[1:]):
            total += max(abs(x2 - x1), abs(y2 - y1))
        return total
import string
import requests
from bs4 import BeautifulSoup
import re
import matplotlib.pyplot as plt
def findText(link):
    """Download *link* and return the response body as text."""
    return requests.get(link).text
def getTop100BookLinks():
    """Scrape Project Gutenberg's top-100 page and return plain-text book URLs."""
    page = requests.get('https://www.gutenberg.org/browse/scores/top').text
    soup = BeautifulSoup(page, 'html5lib')
    bookLinks = []
    for anchor in soup.find_all('a'):
        href = anchor.get('href')
        # Book anchors look like /ebooks/<id>; map them to the -0.txt file.
        if href.startswith('/ebooks/') and len(href) > 8:
            book_id = href[8:]
            bookLinks.append('https://www.gutenberg.org/files/' + book_id + '/' + book_id + '-0.txt')
    return bookLinks
def stringSplitter(string1):
    """Return the list of words between a Gutenberg book's START and END
    markers, after stripping all punctuation from the raw text.

    Lines before the START marker (and the marker lines themselves) are
    skipped; everything from the END marker on is ignored.
    """
    # Remove punctuation first so marker detection and word splitting see
    # clean text. (Cleaned-up version: the original also carried an unused
    # accumulator and a no-op `i += 1` inside the for loop.)
    cleaned = ''.join(c for c in string1 if c not in string.punctuation)
    finalArr = []
    hasStarted = False
    for line in cleaned.splitlines():
        if "END OF THIS PROJECT GUTENBERG" in line:
            break
        elif hasStarted:
            finalArr.extend(word for word in line.split(" ") if word != "")
        elif "START OF THIS PROJECT GUTENBERG" in line:
            hasStarted = True
    return finalArr
def zipfsLaw1(arr):
    """Relative frequency of each word's first character (lowercased).

    Returns up to 36 [char, fraction] pairs, most frequent first.
    """
    from collections import Counter
    # Counter.most_common sorts by count (stable), matching the original
    # sorted(..., key=count, reverse=True); avoids shadowing builtin `sum`.
    ranked = Counter(word[0].lower() for word in arr).most_common()
    total = sum(count for _, count in ranked)
    return [[char, count / total] for char, count in ranked[:36]]
def zipfsLaw2(arr):
    """Relative frequency of each distinct word (lowercased).

    Returns up to 50 [word, fraction] pairs, most frequent first.
    """
    from collections import Counter
    # Same rank-by-count logic as zipfsLaw1, over whole words.
    ranked = Counter(word.lower() for word in arr).most_common()
    total = sum(count for _, count in ranked)
    return [[word, count / total] for word, count in ranked[:50]]
def zipfsLaw3(arr):
    """Relative frequency of two-letter word prefixes (words of length > 1).

    Prints and returns up to 50 [prefix, fraction] pairs, most frequent
    first. (Returning the result is new; the original only printed.)
    """
    from collections import Counter
    counts = Counter()
    for word in arr:
        w = word.lower()
        # BUG FIX: the original tested `word in d` -- i.e. the *full* word
        # against the 2-char prefix keys -- so for words longer than two
        # characters the count was reset to 1 on every occurrence instead
        # of incremented.
        if len(w) > 1:
            counts[w[:2]] += 1
    ranked = counts.most_common()
    total = sum(count for _, count in ranked)
    finalResult = [[prefix, count / total] for prefix, count in ranked[:50]]
    print(finalResult)
    return finalResult
def bigAssEnglishWordArray():
    """Concatenated word lists of the current Gutenberg top-100 books."""
    words = []
    for link in getTop100BookLinks():
        words += stringSplitter(findText(link))
    return words
def bigAssGermanWordArray():
    """Concatenated word lists of a fixed set of German Gutenberg books."""
    links = ["https://www.gutenberg.org/files/19755/19755-0.txt", "http://www.gutenberg.org/cache/epub/44051/pg44051.txt", "https://www.gutenberg.org/files/15734/15734-0.txt", "https://www.gutenberg.org/files/14225/14225-0.txt", "https://www.gutenberg.org/files/13953/13953-0.txt", "https://www.gutenberg.org/files/14105/14105-0.txt", "https://www.gutenberg.org/files/19163/19163-0.txt", "https://www.gutenberg.org/files/30883/30883-0.txt", "https://www.gutenberg.org/files/52556/52556-0.txt"]
    words = []
    for link in links:
        words += stringSplitter(findText(link))
    return words
def bigAssFrenchWordArray():
    """Concatenated word lists of a fixed set of French Gutenberg books."""
    links = ["http://www.gutenberg.org/cache/epub/39331/pg39331.txt", "http://www.gutenberg.org/cache/epub/44054/pg44054.txt", "https://www.gutenberg.org/files/33378/33378-0.txt", "https://www.gutenberg.org/files/27566/27566-0.txt", "https://www.gutenberg.org/files/36460/36460-0.txt", "https://www.gutenberg.org/files/49619/49619-0.txt", "http://www.gutenberg.org/cache/epub/5781/pg5781.txt", "http://www.gutenberg.org/cache/epub/23444/pg23444.txt", "https://www.gutenberg.org/files/26376/26376-0.txt"]
    words = []
    for link in links:
        words += stringSplitter(findText(link))
    return words
def bigAssSpanishWordArray():
    """Concatenated word lists of a fixed set of Spanish Gutenberg books."""
    links = ["http://www.gutenberg.org/cache/epub/39947/pg39947.txt", "http://www.gutenberg.org/cache/epub/46279/pg46279.txt", "http://www.gutenberg.org/cache/epub/16109/pg16109.txt", "http://www.gutenberg.org/cache/epub/13458/pg13458.txt", "https://www.gutenberg.org/files/41842/41842-0.txt", "http://www.gutenberg.org/cache/epub/29731/pg29731.txt", "http://www.gutenberg.org/cache/epub/28978/pg28978.txt", "http://www.gutenberg.org/cache/epub/44584/pg44584.txt", "http://www.gutenberg.org/cache/epub/26508/pg26508.txt"]
    words = []
    for link in links:
        words += stringSplitter(findText(link))
    return words
def bigAssShakespeareArray():
    """Concatenated word lists of a fixed set of Shakespeare volumes."""
    links = ["http://www.gutenberg.org/cache/epub/5137/pg5137.txt", "https://www.gutenberg.org/files/3875/3875-0.txt", "https://www.gutenberg.org/files/46440/46440-0.txt"]
    words = []
    for link in links:
        words += stringSplitter(findText(link))
    return words
def bigAssDickensArray():
    """Concatenated word lists of a fixed set of Dickens novels."""
    links = ["http://www.gutenberg.org/cache/epub/1023/pg1023.txt", "http://www.gutenberg.org/cache/epub/19337/pg19337.txt", "https://www.gutenberg.org/files/766/766-0.txt", "http://www.gutenberg.org/cache/epub/730/pg730.txt", "https://www.gutenberg.org/files/1400/1400-0.txt"]
    words = []
    for link in links:
        words += stringSplitter(findText(link))
    return words
def bigAssFitzgeraldArray():
    """Concatenated word lists of a fixed set of Fitzgerald novels."""
    links = ["http://www.gutenberg.org/cache/epub/9830/pg9830.txt", "http://www.gutenberg.org/cache/epub/4368/pg4368.txt", "http://www.gutenberg.org/cache/epub/6695/pg6695.txt", "https://www.gutenberg.org/files/805/805-0.txt"]
    words = []
    for link in links:
        words += stringSplitter(findText(link))
    return words
def plotZipf(arr):
    """Plot frequency against rank for [label, fraction] pairs; returns 0."""
    labels = [pair[0] for pair in arr]
    freqs = [pair[1] for pair in arr]
    ranks = [rank for rank in range(len(arr))]
    plt.plot(ranks, freqs)
    plt.xticks(ranks, labels)
    plt.show()
    return 0
#plotZipf(zipfsLaw1(bigAssEnglishWordArray()))
# Entry point: print two-letter-prefix frequencies for the English corpus.
# Network-heavy -- downloads the full Gutenberg top-100 list.
zipfsLaw3(bigAssEnglishWordArray())
|
import csv
import matplotlib.pyplot as plt
def get_legend_from_file_path(file_path):
    """Split *file_path* on single spaces (crude legend-label source)."""
    parts = file_path.split(" ")
    return parts
def graph_x_and_y(x, y, legend):
    # Thin wrapper: add one labelled line series to the current pyplot figure.
    plt.plot(x, y, label=legend)
def plotgraph():
    """Decorate and display the case-counts figure built by prior plot calls."""
    plt.title("Covid Cases in New Jersey")
    plt.xlabel("Date")
    plt.ylabel("Cases")
    plt.xticks(rotation=90)
    plt.grid()
    plt.legend(loc="upper right")
    plt.show()
def plotgraph2():
    """Decorate and display the hospital-impact figure built by prior plot calls."""
    plt.title("Effect of COVID in New Jersey Hospitals")
    plt.xlabel("Date")
    plt.ylabel("Cases")
    plt.xticks(rotation=90)
    plt.grid()
    plt.legend(loc="upper right")
    plt.show()
def main(csv_file_path="C:/Users/nitin/Desktop/cs110/new-jersey-history.csv"):
    """Load the New Jersey covid-history CSV and render two summary figures.

    csv_file_path: path to the covidtracking.com "new-jersey-history" CSV;
    defaults to the original hard-coded location for backward compatibility.
    """
    # (The original computed an unused `legend` from the path; removed.)
    date = []
    death = []
    positive = []
    recovered = []
    negative = []  # collected but not plotted, kept for parity with original
    hospitalizedatm = []
    icuatm = []
    ventilatoratm = []
    with open(csv_file_path) as csv_file:
        row_list = csv.reader(csv_file)
        for row_index, row in enumerate(row_list):
            if row_index != 0:  # skip the header row
                # Column indices follow the covidtracking.com CSV layout --
                # NOTE(review): verify against the file's header if it changes.
                date.append(row[0])
                death.append(float(row[3]))
                positive.append(float(row[20]))
                recovered.append(float(row[29]))
                negative.append(float(row[13]))
                ventilatoratm.append(float(row[19]))
                hospitalizedatm.append(float(row[9]))
                icuatm.append(float(row[12]))
    # Active cases = positives minus resolved outcomes (deaths + recoveries).
    active = [(positive[i] - death[i] - recovered[i]) for i in range(len(date))]
    print(" A graph will pop up. After viewing please close it to view the next graph")
    graph_x_and_y(date, recovered, "recovered cases")
    graph_x_and_y(date, death, "death cases")
    graph_x_and_y(date, active, "active cases")
    plotgraph()
    graph_x_and_y(date, hospitalizedatm, "corona patients in hospital")
    graph_x_and_y(date, icuatm, "corona patients in ICU")
    graph_x_and_y(date, ventilatoratm, "corona patients using ventilators")
    plotgraph2()
# Script entry point (runs on import; the original has no __main__ guard).
main()
#!/usr/bin/env python
#Bao Dang
#Assignment 2
#Convert preorder to postorder
#Convert preorder to postorder
def preorder_postorder(String):
    """Convert a space-separated preorder expression to postorder notation.

    Generalized: tokens are obtained with str.split(), so multi-character
    operands (e.g. "10") now work; single-character tokens behave exactly as
    before (the original walked characters at a fixed stride of 2).
    """
    s = []
    Operators = ['+', '-', '*', '/']
    # Scan right-to-left: operands are pushed; an operator combines the two
    # most recent sub-expressions into "left right op".
    for token in reversed(String.split()):
        if token in Operators:
            op1 = s.pop()
            op2 = s.pop()
            s.append(op1 + " " + op2 + " " + token)
        else:
            s.append(token)
    return s[-1]
#Convert postorder to preorder
def postorder_preorder(String):
    """Convert a space-separated postorder expression to preorder notation.

    Generalized: tokens are obtained with str.split(), so multi-character
    operands now work; single-character tokens behave exactly as before.
    """
    s = []
    Operators = ['+', '-', '*', '/']
    # Scan left-to-right: operands are pushed; an operator combines the two
    # most recent sub-expressions into "op left right".
    for token in String.split():
        if token in Operators:
            op1 = s.pop()
            op2 = s.pop()
            s.append(token + " " + op2 + " " + op1)
        else:
            s.append(token)
    return s[-1]
#Convert preorder to inorder
def preorder_inorder(String):
    """Convert a space-separated preorder expression to (unparenthesised)
    inorder notation.

    Generalized: tokens are obtained with str.split(), so multi-character
    operands now work; single-character tokens behave exactly as before.
    """
    s = []
    Operators = ['+', '-', '*', '/']
    # Scan right-to-left: operands are pushed; an operator combines the two
    # most recent sub-expressions into "left op right".
    for token in reversed(String.split()):
        if token in Operators:
            op1 = s.pop()
            op2 = s.pop()
            s.append(op1 + " " + token + " " + op2)
        else:
            s.append(token)
    return s[-1]
if __name__ == "__main__":
print "Testing expression(in order): 6 / 2 - 4 - 3 * 1 + 2 * 2 * 1"
print "Testing Preorder Listing into Postorder Listing"
test = "* - / 6 2 - 4 3 * + 1 2 * 2 1"
print preorder_postorder(test)
print "Testing Postorder Listing into Preorder Listing"
test = "6 2 / 4 3 - - 1 2 + 2 1 * * *"
print postorder_preorder(test)
print "Testing Preorder Listing into Inorder Listing"
test = "* - / 6 2 - 4 3 * + 1 2 * 2 1"
print preorder_inorder(test)
|
import json

# Load the city record from the JSON data file. The `with` block
# guarantees the handle is closed even if reading or parsing raises;
# json.load reads and parses in one step (the JSON text becomes a dict).
with open("app_data.json", "r", encoding="utf-8") as file_handle:
    city = json.load(file_handle)

print(city)
print(city["name"])
print(city["population"])
print(city["county"])
|
import boto3
from fabric import task
@task
def deploy(cli):
    """Provision and (re)deploy the simple2do stack on a remote host.

    Prompts locally for AWS credentials and registry details, pushes the
    env file and nginx config to the host, then restarts the web service
    via docker-compose. `cli` is the fabric connection for the host.
    """
    key_id = input('AWS access key id? ')
    key = input('AWS secret access key? ')
    region = input('AWS default region? ')
    registry = input('ECR registry (without your repo name)? ')
    scm_id = input('SCM secret_id for db? ')
    cli.run('mkdir -p ~/.simple2do/nginx/conf.d')
    cli.run('mkdir -p ~/.simple2do/nginx/data/certbot')
    cli.put('env_file', '.simple2do')
    cli.run(f'echo AWS_ACCESS_KEY_ID={key_id} >> .simple2do/env_file')
    cli.run(f'echo AWS_SECRET_ACCESS_KEY={key} >> .simple2do/env_file')
    cli.run(f'echo AWS_DEFAULT_REGION={region} >> .simple2do/env_file')
    cli.run(f'echo TODO_SCM_SECRET_ID={scm_id} >> .simple2do/env_file')
    cli.put('nginx/conf.d/default.conf', '.simple2do/nginx/conf.d')
    cli.put('docker-compose.yml', '.')
    # BUG FIX: each cli.run() spawns a fresh shell, so a standalone
    # `export DOCKER_REGISTRY=...` was lost before docker-compose ran.
    # Export and compose must execute in the same shell invocation.
    cli.run(f'export DOCKER_REGISTRY={registry} && '
            'docker-compose stop web && docker-compose up -d')
    cli.run('rm ./docker-compose.yml ~/.simple2do/env_file')
|
from __future__ import annotations
import zipfile
import tarfile
import typing as T
from pathlib import Path
import tempfile
try:
import zstandard
except ImportError:
zstandard = None # type: ignore
Pathlike = T.Union[str, Path]
def extract_zst(archive: Pathlike, out_path: Pathlike):
    """Decompress a .zst-compressed tar archive into a directory.

    Portable across Windows/Linux/macOS: the zstd stream is first
    decompressed into a temporary .tar file, which tarfile then unpacks.

    Parameters
    ----------
    archive: pathlib.Path or str
        .zst file to extract
    out_path: pathlib.Path or str
        directory to extract files and directories to
    """
    if zstandard is None:
        raise ImportError("pip install zstandard")

    # .resolve() in case an intermediate relative dir doesn't exist
    src = Path(archive).expanduser().resolve()
    dest = Path(out_path).expanduser().resolve()

    decompressor = zstandard.ZstdDecompressor()
    with tempfile.TemporaryFile(suffix=".tar") as tmp_tar:
        with src.open("rb") as compressed:
            decompressor.copy_stream(compressed, tmp_tar)
        tmp_tar.seek(0)
        with tarfile.open(fileobj=tmp_tar) as tar:
            tar.extractall(dest)
def extract_zip(archive: Pathlike, outpath: Pathlike):
    """Unpack a .zip archive into *outpath*.

    Both arguments may be str or Path; `~` is expanded and paths are
    resolved (in case an intermediate relative dir doesn't exist).
    """
    src = Path(archive).expanduser().resolve()
    dest = Path(outpath).expanduser().resolve()
    with zipfile.ZipFile(src) as zf:
        zf.extractall(dest)
def extract_tar(archive: Pathlike, outpath: Pathlike):
    """Unpack a tar archive (any compression tarfile supports) into *outpath*.

    Raises FileNotFoundError for a missing archive and RuntimeError when
    tarfile cannot read it.
    """
    dest = Path(outpath).expanduser().resolve()
    # .resolve() in case an intermediate relative dir doesn't exist
    src = Path(archive).expanduser().resolve()
    if not src.is_file():
        # tarfile gives a confusing error on a missing file, so fail early
        raise FileNotFoundError(src)
    try:
        with tarfile.open(src) as tar:
            tar.extractall(dest)
    except tarfile.TarError as e:
        raise RuntimeError(
            f"""failed to extract {src} with error {e}.
This file may be corrupt or system libz may be broken.
Try deleting {src} or manually extracting it."""
        )
|
# generated from genmsg/cmake/pkg-genmsg.context.in
# NOTE: machine-generated ROS build context for message generation --
# regenerate via catkin/cmake instead of editing by hand.
messages_str = "/home/pi/tanis/CodeBase/ros/src/angela/msg/motormsg.msg"  # .msg files to process
services_str = ""  # no .srv files in this package
pkg_name = "angela"
dependencies_str = "std_msgs"
langs = "gencpp;geneus;genlisp;gennodejs;genpy"  # generator backends to run
dep_include_paths_str = "angela;/home/pi/tanis/CodeBase/ros/src/angela/msg;std_msgs;/opt/ros/lunar/share/std_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python"
package_has_static_sources = '' == 'TRUE'  # evaluates to False for this package
genmsg_check_deps_script = "/opt/ros/lunar/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
|
from django.db import models
from django.contrib.auth.models import User
from django.dispatch import receiver
from django.db.models.signals import post_save
# Create your models here.
class Profile (models.Model):
    """Per-user profile data, linked one-to-one to Django's built-in User."""
    # deleting the User row cascades to (deletes) this Profile
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    # free-form text about the user, capped at 400 characters
    about = models.TextField(max_length=400)
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
    """Auto-create a Profile the first time a User row is saved."""
    if not created:
        return  # only act on initial creation, not subsequent saves
    Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
    """Persist the related Profile every time its User is saved."""
    instance.profile.save()
"""
Point items.
@author: Jason Cohen
@author: Shaun Hamelin-Owens
@author: Sasithra Thanabalan
@author: Andrew Walker
"""
# Imports
from GameItem import GameItem
from display import DrawingGenerics
class PointItem(GameItem):
    """
    PointItem class.

    This class contains the methods used in the creation of any point item.
    It is of the tkinter library and inherits an instance of GameItem.
    """
    def __init__(self, gameCanvas, specs, points):
        """
        Initialization.

        Creates a new Point item with xCenter, yCenter and radius attributes,
        and initializes it.
        @param gameCanvas: The game tk canvas object (on which the game is drawn)
        @param specs: Specifies the coordinates, radius, color, tag and points associated
        with this dot.
        @param points: Amount of points assigned to this instance.
        """
        # Declare the attributes of the point.
        self.points = points
        self.xCenter = specs['xCenter']
        self.yCenter = specs['yCenter']
        radius = specs['radius']
        # Initialize the inherited GameItem with the point's bounding box.
        super(PointItem, self).__init__(gameCanvas,
                                        self.xCenter - radius,
                                        self.yCenter - radius,
                                        self.xCenter + radius,
                                        self.yCenter + radius,
                                        specs['color'],
                                        specs['tag'])
    def setPoints(self, points):
        """
        Set Points.

        Takes a points value and assigns the points attribute of this instance to it.
        @param points: Int value of the points to be assigned to this point item.
        """
        self.points = points  # fix: removed stray trailing semicolon
    def getPoints(self):
        """
        Get Points.

        Returns the points attributed to this particular instance of a point item.
        @return: points assigned to this item.
        """
        return self.points  # fix: removed stray trailing semicolon
    def draw(self):
        """
        Draw.

        Draws the point on the tkinter Canvas of the game.
        """
        self.canvasID = self.gameCanvas.create_oval(self.xLeft, self.yTop,
            self.xRight, self.yBottom, fill = self.color, tags = self.tagType)
    def eat(self):
        """
        Eat.

        Deletes the drawing of this particular point item from the tkinter Canvas and
        returns the most recent points value.
        @return: points assigned to this item.
        """
        self.deleteDrawing()
        return self.points
    def inTile(self):
        """
        In Tile.

        Finds the tile containing the item's center.
        @return: (xTile, yTile) grid indices of the tile containing this
        item's center (fix: was wrongly documented as returning points).
        """
        xTile = int(self.xCenter / DrawingGenerics.TILE_SIZE)
        yTile = int(self.yCenter / DrawingGenerics.TILE_SIZE)
        return xTile, yTile
|
# Read a string and report how many characters it contains.
a = input("請輸入字串:")  # input() already returns str; the old str() wrap was redundant
b = len(a)
print(f"There are {b} characters")
"""
-------------------------------------------------------------------------------
| Copyright 2016 Esri
|
| Licensed under the Apache License, Version 2.0 (the "License");
| you may not use this file except in compliance with the License.
| You may obtain a copy of the License at
|
| http://www.apache.org/licenses/LICENSE-2.0
|
| Unless required by applicable law or agreed to in writing, software
| distributed under the License is distributed on an "AS IS" BASIS,
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
| See the License for the specific language governing permissions and
| limitations under the License.
------------------------------------------------------------------------------
"""
# dlaPublish.py - Publish one source to a target
# ----------------------------------------------------------------------------------------------------------------------
'''
This script is called by both the Append Data and Replace Data tools. It has several options for running
the tool, either as a Geoprocessing script directly or by calling dlaPublish.publish from another script.
Note in the GP script approach a source and target dataset can be provided as parameters to override the settings
in the Xml Config file. In this case just a single xml file should be passed with the datasets as the 2nd and 3rd
parameters. By default this will use the Append approach; to use the Replace approach instead, set the
useReplaceSettings variable to change the behavior (see example at the end of this script).
'''
import arcpy,dlaExtractLayerToGDB,dlaFieldCalculator,dlaService,dla,dlaService,xml.dom.minidom,os
arcpy.AddMessage("Data Assistant")
xmlFileNames = arcpy.GetParameterAsText(0) # xml file name as a parameter, multiple values separated by ;
_outParam = 1
_useReplaceSettings = False # change this from a calling script to make this script replace data.
_chunkSize = 100
def main(argv = None):
    """GP-script entry point: publish every xml file passed as tool parameter 0."""
    # this approach makes it easier to call publish from other python scripts with using GP tool method
    publish(xmlFileNames)
def publish(xmlFileNames):
    """Run the append/replace publish process for each xml config file.

    @param xmlFileNames: ';'-separated list of Data Assistant xml config
    files. For each file the source/target datasets are read from the
    xml, data is staged to an intermediate workspace if not already
    staged, and rows are appended to (or replaced in) the target.
    Sets the tool's output parameter to the list of updated targets.
    Returns False on validation failure, None on some error paths.
    """
    # function called from main or from another script, performs the data update processing
    global _useReplaceSettings
    dla._errCount = 0
    arcpy.SetProgressor("default","Data Assistant")
    arcpy.SetProgressorLabel("Data Assistant")
    xmlFiles = xmlFileNames.split(";")
    layers = []
    for xmlFile in xmlFiles: # multi value parameter, loop for each file
        xmlFile = dla.getXmlDocName(xmlFile)
        dla.addMessage("Configuration file: " + xmlFile)
        xmlDoc = dla.getXmlDoc(xmlFile) # parse the xml document
        if xmlDoc == None:
            return
        prj = dla.setProject(xmlFile,dla.getNodeValue(xmlDoc,"Project"))
        if prj == None:
            dla.addError("Unable to open your project, please ensure it is in the same folder as your current project or your Config file")
            return False
        source = dla.getDatasetPath(xmlDoc,"Source")
        target = dla.getDatasetPath(xmlDoc,"Target")
        targetName = dla.getDatasetName(target)
        dla.addMessage(source)
        dla.addMessage(target)
        # extra validation applies only when either end is a hosted service
        if dlaService.checkLayerIsService(source) or dlaService.checkLayerIsService(target):
            token = dlaService.getSigninToken() # when signed in get the token and use this. Will be requested many times during the publish
            # exit here before doing other things if not signed in
            if token == None:
                dla.addError("User must be signed in for this tool to work with services")
                return False
            expr = getWhereClause(xmlDoc)
            if _useReplaceSettings == True and (expr == '' or expr == None):
                dla.addError("There must be an expression for replacing by field value, current value = " + str(expr))
                return False
            errs = False
            if dlaService.validateSourceUrl(source) == False:
                dla.addError("Source path does not appear to be a valid feature layer")
                errs = True
            if _useReplaceSettings == True:
                if dlaService.validateTargetReplace(target) == False:
                    dla.addError("Target path does not have correct privileges")
                    errs = True
            elif _useReplaceSettings == False:
                if dlaService.validateTargetAppend(target) == False:
                    dla.addError("Target path does not have correct privileges")
                    errs = True
            if errs:
                return False
        dla.setWorkspace()
        if dla.isTable(source) or dla.isTable(target):
            datasetType = 'Table'
        else:
            datasetType = 'FeatureClass'
        # stage the source rows to the intermediate workspace unless a
        # previous run already left staged data behind
        if not dla.isStaged(xmlDoc):
            res = dlaExtractLayerToGDB.extract(xmlFile,None,dla.workspace,source,target,datasetType)
            if res != True:
                table = dla.getTempTable(targetName)
                msg = "Unable to export data, there is a lock on existing datasets or another unknown error"
                if arcpy.TestSchemaLock(table) != True and arcpy.Exists(table) == True:
                    msg = "Unable to export data, there is a lock on the intermediate feature class: " + table
                dla.addError(msg)
                print(msg)
                return
            else:
                res = dlaFieldCalculator.calculate(xmlFile,dla.workspace,targetName,False)
                if res == True:
                    dlaTable = dla.getTempTable(targetName)
                    res = doPublish(xmlDoc,dlaTable,target,_useReplaceSettings)
        else:
            dla.addMessage('Data previously staged, will proceed using intermediate dataset')
            dlaTable = dla.workspace + os.sep + dla.getStagingName(source,target)
            res = doPublish(xmlDoc,dlaTable,target,_useReplaceSettings)
            if res == True:
                # staged data was consumed: clear the flag in the config file
                dla.removeStagingElement(xmlDoc)
                xmlDoc.writexml(open(xmlFile, 'wt', encoding='utf-8'))
                dla.addMessage('Staging element removed from config file')
        arcpy.ResetProgressor()
        if res == False:
            err = "Data Assistant Update Failed, see messages for details"
            dla.addError(err)
            print(err)
        else:
            layers.append(target)
    arcpy.SetParameter(_outParam,';'.join(layers))
def doPublish(xmlDoc,dlaTable,target,useReplaceSettings):
    """Write staged rows to the target dataset or service.

    @param useReplaceSettings: True -> delete rows matching the ReplaceBy
    clause first, then append; False -> plain append.
    @return: True on success, False otherwise.
    """
    # either truncate and replace or replace by field value
    # run locally or update agol
    success = False
    expr = ''
    dlaTable = handleGeometryChanges(dlaTable,target)
    if useReplaceSettings == True:
        expr = getWhereClause(xmlDoc)
    if useReplaceSettings == True and (expr == '' or expr == None):
        dla.addError("There must be an expression for replacing by field value, current value = '" + str(expr) + "'")
        return False
    # preserve GlobalIDs only when both datasets support it; the previous
    # environment setting is restored before returning
    currGlobalIDs = arcpy.env.preserveGlobalIds
    if dla.processGlobalIds(xmlDoc) and currGlobalIDs == False: # both datasets have globalids in the correct workspace types
        arcpy.env.preserveGlobalIds = True
    target = dla.getLayerPath(target)
    if target.startswith("http") == True:
        success = dlaService.doPublishHttp(dlaTable,target,expr,useReplaceSettings)
    else:
        # logic change - if not replace field settings then only append
        if expr != '' and useReplaceSettings == True:
            if dla.deleteRows(target,expr) == True:
                success = dla.appendRows(dlaTable,target,expr)
            else:
                success = False
        else:
            success = dla.appendRows(dlaTable,target,'')
    if currGlobalIDs != arcpy.env.preserveGlobalIds:
        arcpy.env.preserveGlobalIds = currGlobalIDs
    return success
def getWhereClause(xmlDoc):
    """Build the replace where-clause from the ReplaceBy element, or ''."""
    replace_node = xmlDoc.getElementsByTagName("ReplaceBy")[0]
    fieldName = dla.getNodeValue(replace_node, "FieldName")
    operator = dla.getNodeValue(replace_node, "Operator")
    value = dla.getNodeValue(replace_node, "Value")

    if operator == 'Where':
        # free-form clause: the Value node already holds the expression
        return value
    if fieldName == '' or fieldName == '(None)':
        return ''  # nothing configured -> empty clause
    # quote the comparison value when the target field is a string
    if getTargetType(xmlDoc, fieldName) == 'String':
        value = "'" + value + "'"
    return fieldName + " " + operator + " " + value
def getTargetType(xmlDoc,fname):
    """Return the Type attribute of the TargetField named *fname* (None if absent)."""
    for field_node in xmlDoc.getElementsByTagName('TargetField'):
        if field_node.getAttribute("Name") == fname:
            return field_node.getAttribute("Type")
def handleGeometryChanges(sourceDataset,target):
    """Return a dataset safe to publish to the target.

    Polygons destined for a hosted service (http/https target) are run
    through simplifyPolygons first; tables pass through untouched.
    """
    # simplfiy polygons
    if dla.isTable(sourceDataset):
        return sourceDataset
    desc = arcpy.Describe(sourceDataset) # assuming local file gdb
    dataset = sourceDataset
    if desc.ShapeType == "Polygon" and (target.lower().startswith("http://") == True or target.lower().startswith("https://") == True):
        dataset = simplifyPolygons(sourceDataset)
    else:
        dataset = sourceDataset
    return dataset
def simplifyPolygons(sourceDataset):
    """Densify then simplify polygons; returns the new '<name>_simplified' dataset."""
    # simplify polygons using approach developed by Chris Bus.
    dla.addMessage("Simplifying (densifying) Geometry")
    arcpy.Densify_edit(sourceDataset)
    simplify = sourceDataset + '_simplified'
    # remove leftovers from a previous run (SimplifyPolygon also writes a _Pnt layer)
    if arcpy.Exists(simplify):
        arcpy.Delete_management(simplify)
    if arcpy.Exists(simplify + '_Pnt'):
        arcpy.Delete_management(simplify + '_Pnt')
    arcpy.SimplifyPolygon_cartography(sourceDataset, simplify, "POINT_REMOVE", "1 Meters")
    return simplify
if __name__ == "__main__":
    # run as a GP script tool; tool parameters were read at module level
    main()
|
import pyglet
from pyglet import clock
'''
Ok, so the Tween stuff has been fixed.
What I need to figure out now is how to make Frank moving around look good.
I feel like I need to read the chapter again
Why does Frank move around in a jerky fashion? I want a smooth move between points
'''
def ease_in_out_quad(t, b, c, d):
    """Quadratic ease-in/out: accelerate to the midpoint, then decelerate.

    t: current time, b: start value, c: total change, d: duration.
    """
    half = t / (d / 2)
    if half < 1:
        # first half: plain quadratic ease-in
        return c / 2 * half * half + b
    half -= 1
    # second half: mirrored quadratic ease-out
    return -c / 2 * (half * (half - 2) - 1) + b
def ease_in_quad(t, b, c, d):
    """Quadratic ease-in: start slow, accelerate (t=time, b=begin, c=change, d=duration)."""
    fraction = t / d
    return c * fraction * fraction + b
def ease_none(t, b, c, d):
    """Linear interpolation (no easing): b plus the proportional change c*t/d."""
    return b + c * t / d
class Motion(pyglet.event.EventDispatcher):
def __init__(self, obj, prop, begin, duration, use_seconds, looping, name):
self.obj = obj
self.prop = prop
self.begin = begin
self.position = begin
self.duration = duration
self.use_seconds = use_seconds
self.name = name
self.debug_time = debug_time
self.time = 1
self.prev_position = None
self.prev_time = None
self.looping = False
self.clock = clock.Clock()
self.register_events()
def register_events(self):
self.register_event_type('on_motion_started')
self.register_event_type('on_motion_stopped')
self.register_event_type('on_motion_resumed')
self.register_event_type('on_motion_looped')
self.register_event_type('on_motion_finished')
self.register_event_type('on_motion_changed')
def on_motion_started(self, obj):
print "Got on_motion_started event for ", obj.name
def on_motion_stopped(self, obj):
print "Got on_motion_stopped event for ", obj.name
def on_motion_resumed(self, obj):
print "Got on_motion_resumed event for ", obj.name
def next_frame(self, dt):
if self.use_seconds:
#self.set_time
self.set_time(self.time + dt)
else:
#Not sure what I want to do for frames
pass
def prev_frame(self):
pass
def update(self):
self.set_position(self.get_position(self.time))
def set_time(self, t):
self.prev_time = self.time
if (t > self.duration):
if(self.looping):
self.rewind(t - self.duration)
self.dispatch_event('on_motion_looped', self)
else:
self.stop()
self.dispatch_event('on_motion_finished', self)
elif(t < 0):
self.rewind()
else:
self.time = t
self.update()
#Probably want to change this to self.set_time
def on_update(self, dt):
if self.time < self.duration:
print "Tick for object", self.name, "current time ", dt
else:
print "Finished for", self.name
self.clock.unschedule(self.on_update)
self.time += dt
def start(self):
#For now just assume that we are using seconds.
self.rewind()
pyglet.clock.schedule_interval(self.clock.tick, 1.0/60)
#self.clock.schedule_interval(self.on_update, 1.0)
self.clock.schedule_interval(self.next_frame, 1.0/60)
self.dispatch_event('on_motion_started', self)
def rewind(self, t=1):
self.time = t
self.fix_time()
def fix_time(self):
pass
def stop(self):
#Perhaps changes this to set_time
self.clock.unschedule(self.next_frame)
self.dispatch_event('on_motion_stopped', self)
def resume(self):
self.fix_time()
self.clock.schedule_interval(self.next_frame, 1.0)
self.dispatch_event('on_motion_resumed', self)
def fforward(self):
pass
def get_time(self):
return self.time
def to_string(self):
return "[motion prop= ", self.prop, " t= ", self.time, " pos= ", self.position, " ]"
def get_position(self, t):
pass
def set_position(self, p):
self.prev_position = self.position
self.position = p
setattr(self.obj, self.prop, self.position)
self.dispatch_event('on_motion_changed', self)
def get_prev_pos(self):
pass
def set_begin(self, b):
pass
def get_begin(self):
return self.begin
def set_duration(self, d):
if d is None or d <= 0:
self.duration = -1
else:
self.duration = d
def set_looping(self, b):
pass
def get_looping(self):
pass
def set_obj(self, obj):
self.obj = obj
def get_obj(self):
return self.obj
def set_prop(self, p):
self.prop = p
def get_prop(self):
return self.prop
def set_use_seconds(self, use_secs):
self.use_seconds = use_secs
def get_use_seconds(self):
return self.use_seconds
#def __init__(self, obj, prop, begin, duration, use_seconds, looping, name):
class Tween(Motion):
    """Motion whose value is produced by an easing function interpolating
    from begin to finish over duration."""
    def __init__(self, obj, prop, func, begin, finish, duration, use_seconds, looping=False, name=None):
        """
        func -- easing function f(t, begin, change, duration) -> value
        finish -- final value; change is stored as finish - begin
        (remaining parameters as documented on Motion)
        """
        # NOTE(review): duplicates Motion.__init__ instead of calling it --
        # kept as-is to avoid altering initialization order/behavior.
        #super(Motion, self).__init__(*args, **kwargs)
        self.obj = obj
        self.func = func
        self.prop = prop
        self.begin = begin
        self.finish = finish
        self.duration = duration
        self.position = begin
        self.prev_position = None
        self.change = None
        self.use_seconds = use_seconds
        self.name = name
        self.start_time = None
        self.time = None # May remove this later
        self.looping = looping #Might need to change this
        self.clock = clock.Clock()
        self.register_events()
        self.set_func(func)
        self.set_finish(finish)
    def get_position(self, t=None):
        """Value of the easing function at time t (defaults to current time)."""
        if t is None:  # fix: identity check instead of `== None`
            t = self.time
        position = self.func(t, self.begin, self.change, self.duration)
        return position
    def set_func(self, f):
        """Replace the easing function."""
        self.func = f
    def get_func(self):
        return self.func
    def set_change(self, c):
        self.change = c
    def get_change(self):
        return self.change
    def set_finish(self, f):
        """Set the end value by storing its delta from begin."""
        self.change = f - self.begin
    def get_finish(self):
        return self.begin + self.change
#window = pyglet.window.Window(width=800,height=600, resizable=True, visible=False)
#window.clear()
#window.set_visible(True)
#m = Motion(window, "something", 0, 12, 3, True, "Test1")
#m1 = Motion(window, "something", 0, 6, 0.5, False, "Test4")
#game_data = game.init()
#pyglet.sprite.Sprite(self.game_data['data']['map']['elements']['House01']['Wall6.png'], 0, 0, batch=self.object_batch)
#sprite = pyglet.sprite.Sprite(game_data['data']['agents']['Monster01']['animations']['Monster_Up1.png'], 0, 0)
#def on_draw(self):
# print "Draw"
#clock.schedule_interval(printPoo, 1.0)
#pyglet.app.run()
|
import dash_bootstrap_components as dbc
from dash import html
# Labelled numeric input demo: dbc.Input constrains entry to integers in
# 0-10; the wrapping Div carries the id used for styling/callbacks.
number_input = html.Div(
    [
        html.P("Type a number outside the range 0-10"),
        dbc.Input(type="number", min=0, max=10, step=1),
    ],
    id="styled-numeric-input",
)
|
class Graphs:
    """Simple undirected graph backed by an adjacency-list dict."""
    def __init__(self):
        # vertex -> list of adjacent vertices (kept symmetric)
        self.adjancy_list = {}
    def addVertex(self, vertex):
        """Add an isolated vertex; duplicates are silently ignored."""
        if(self.adjancy_list.get(vertex) is not None):
            return
        self.adjancy_list[vertex] = []
    def addEdge(self, first_vertex, second_vertex):
        """Connect two existing, distinct vertices; True on success."""
        if(self.adjancy_list.get(first_vertex) is None or self.adjancy_list.get(second_vertex) is None):
            return False
        # reject duplicate edges (membership test replaces count() > 0)
        if(second_vertex in self.adjancy_list[first_vertex]):
            return False
        # BUG FIX: compare by value (==), not identity (is) -- `is` only
        # happened to work for small interned strings
        if(first_vertex == second_vertex):
            return False
        self.adjancy_list[first_vertex].append(second_vertex)
        self.adjancy_list[second_vertex].append(first_vertex)
        return True
    def printGraph(self):
        """Dump each vertex and its neighbour list to stdout."""
        for item in self.adjancy_list:
            print(f"{item}:{self.adjancy_list[item]}")
    def returnGraph(self):
        """Expose the underlying adjacency dict (not a copy)."""
        return self.adjancy_list
    def removeEdge(self, first_vertex, second_vertex):
        """Disconnect two vertices; True only if the edge existed."""
        if(self.adjancy_list.get(first_vertex) is None or self.adjancy_list.get(second_vertex) is None):
            return False
        if(first_vertex == second_vertex):  # BUG FIX: value equality, not `is`
            return False
        # BUG FIX: removing a non-existent edge used to raise ValueError
        if(second_vertex not in self.adjancy_list[first_vertex]):
            return False
        self.adjancy_list[first_vertex].remove(second_vertex)
        self.adjancy_list[second_vertex].remove(first_vertex)
        return True
    def removeVertex(self, vertex):
        """Remove a vertex together with every edge incident to it."""
        if (self.adjancy_list.get(vertex) is None):
            return
        # BUG FIX: iterating every vertex and calling removeEdge crashed
        # (ValueError) for any vertex not adjacent to this one; walk the
        # actual neighbour list instead (copied, since removal mutates it)
        for neighbour in list(self.adjancy_list[vertex]):
            self.removeEdge(neighbour, vertex)
        self.adjancy_list.pop(vertex)
# graph = Graphs()
# graph.addVertex("A")
# graph.addVertex("B")
# graph.addVertex("C")
# graph.addEdge("A","B")
# graph.addEdge("A","C")
# graph.printGraph()
# print("Removing vertex")
# graph.removeVertex("A")
# graph.printGraph()
|
# coding: utf-8
# In[9]:
import lightgbm as lgb
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
get_ipython().run_line_magic('matplotlib', 'inline')  # notebook export: inline plots
# In[23]:
# Training data: first column is the index, 'target' is the label
train = pd.read_csv('data/train.csv', index_col=0)
X = train.drop('target', axis=1)
y = train.target
# In[55]:
from sklearn.decomposition import PCA
pca = PCA(
    copy=True, iterated_power=7, n_components=100,
    random_state=None, svd_solver='auto', tol=0.0, whiten=False
)
X_pca = pca.fit_transform(X)
# In[60]:
from sklearn.preprocessing import MinMaxScaler
minmax = MinMaxScaler()
# NOTE(review): this scales the RAW X and overwrites the PCA output above --
# if the PCA features were intended, this should transform X_pca instead
X_pca = minmax.fit_transform(X)
y_log = np.log1p(y)  # log-transform the target; metric undoes this
# In[61]:
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
    X_pca, y_log, test_size=0.2, random_state=42
)
# In[62]:
def rmsle_metric(y_test, y_pred):
    """LightGBM eval metric: RMSLE for targets trained in log1p space.

    Both inputs are in log space; they are mapped back to the original
    scale and then re-logged, reproducing the original computation
    exactly. Returns the (name, value, is_higher_better) triple that
    LightGBM expects from a custom feval.
    """
    assert len(y_test) == len(y_pred)
    actual = np.exp(y_test) - 1
    predicted = np.exp(y_pred) - 1
    squared_log_diff = (np.log(1 + predicted) - np.log(1 + actual)) ** 2
    score = np.sqrt(np.mean(squared_log_diff))
    return ('RMSLE', score, False)
# In[63]:
# Gradient-boosted regressor on the log1p targets with early stopping
# driven by the custom RMSLE metric
gbm = lgb.LGBMRegressor(
    objective='regression',
    num_leaves=31,
    learning_rate=0.01,
    n_estimators=1000
)
gbm.fit(X_train, y_train,
    eval_set=[(X_test, y_test)],
    eval_metric=rmsle_metric,
    early_stopping_rounds=100
)
# In[64]:
y_pred = gbm.predict(X_test)
print(rmsle_metric(y_test, y_pred))
# In[66]:
# NOTE(review): sklearn.externals.joblib was removed in newer scikit-learn
# releases -- presumably plain `import joblib` is the replacement; verify
# against the pinned environment.
from sklearn.externals import joblib
joblib.dump(gbm, 'LightGBM_log_y_1_427.pkl')
|
'''
1. check the list of infra habitations
2. check missing hh habitations if any
3. check hh numbers for all habs
'''
'''
input file: macro
field map:
'''
from work.models import ProgressQty, Site
import pandas as pd
from work.controller import getSite, getHabID
from work.controller import getSiteProgressdf
from consumers.models import Consumer
from django.db.models import Count, F, Q
from pprint import pprint
skip_rows = 5  # header rows above the data in the 'Proposed' sheet
# Infra type labels exactly as they appear in the macro workbook
HT = 'Weasel Conductor on Steel Tubular Poles'
LT1P = '1X35+1X25'
LT3P = '3X50+1X35'
NONE = 'None'
DTR25 = '25 KVA (3 Ph)'
DTR63 = '63 KVA (3 Ph)'
DTR100 = '100 KVA (3 Ph)'
# 0-based column positions in the OLD macro format (label -> column index);
# labels ending in 'v' are quantity columns, 't' are infra-type columns
columns = dict(sn=0,
               block=1,
               village=2,
               census=3,
               habitation=4,
               bplv=6,
               free_aplv=7,
               aplv=8,
               hhv=9,
               offgrid=10,
               htst=11,
               htsv=12,
               htwt=13,
               htwv=14,
               htrt=15,
               htrv=16,
               ltt1=17,
               lt1v=18,
               ltt2=19,
               lt2v=20,
               kvat1=21,
               kva1v=22,
               kvat2=23,
               kva2v=24)
# Column positions in the NEW macro format (adds HT cable/DTR columns)
columnsnew = dict(sn=0,
                  block=1,
                  village=2,
                  census=3,
                  habitation=4,
                  bplv=6,
                  free_aplv=7,
                  aplv=8,
                  hhv=9,
                  offgrid=10,
                  htst=11,
                  htsv=12,
                  htwt=13,
                  htwv=14,
                  htrt=15,
                  htrv=16,
                  htrct=17,
                  htrcv=18,
                  htdt=19,
                  htdv=20,
                  ltt1=21,
                  lt1v=22,
                  ltt2=23,
                  lt2v=24,
                  kvat1=25,
                  kva1v=26,
                  kvat2=27,
                  kva2v=28,
                  # kvat3=29,
                  # kva3v=30,
                  # kvat4=31,
                  # kva4v=32,
                  )
ncol1 = 81 # new
ncol2 = 73 # new
# print(columns.values())
is_newformat = False
fd = 'closure_review'  # output folder for all review spreadsheets
f = input('Macro file: ')
# f = '/Users/ronsair/mspdcl/Closure submissions/submissionofclosuretemplatedatafortheexecutedquantit/Bishnupur_275_0.xlsm'
dfMacro = pd.read_excel(f, sheet_name='Proposed', engine='openpyxl')
district = str.upper(dfMacro.iloc[0, 1])  # district name sits in the sheet header
# detect the new workbook layout by its column count
if(len(dfMacro[skip_rows:].columns) == ncol1):
    is_newformat = True
    columns = columnsnew
dfMacroSites = dfMacro[skip_rows:].iloc[:, list(columns.values())].fillna(0)
# print(df1)
dfMacroSites.columns = list(columns.keys())
# delete empty hab rows
dfNilHab = (dfMacroSites['habitation'] == 0) & (dfMacroSites['census'] == 0)
dfMacroSites = dfMacroSites.loc[~dfNilHab]
# check infra types
df = dfMacroSites
wrongHtt = [x for x in df['htst'] if x not in [NONE, 0]]
if(len(wrongHtt) > 0):
    print(f'ERROR: HT Infra type out of scope')
# NOTE(review): prints the same message as the htst check above -- the two
# errors cannot be told apart in the console output
wrongHtt = [x for x in df['htrt'] if x not in [NONE, 0]]
if(len(wrongHtt) > 0):
    print(f'ERROR: HT Infra type out of scope')
wrongHtt = [x for x in df['htwt'] if x not in [HT, NONE, 0]]
if(len(wrongHtt) > 0):
    print(f'ERROR: HT Infra Type not in {[HT, NONE]}')
wrongLTC1 = [x for x in df['ltt1'] if x not in [LT1P, LT3P, NONE, 0]]
if(len(wrongLTC1) > 0):
    print(f'ERROR: LTC1 Infra Type', wrongLTC1)
wrongLTC2 = [x for x in df['ltt2'] if x not in [LT1P, LT3P, NONE, 0]]
if(len(wrongLTC2) > 0):
    print(f'ERROR: LTC2 Infra Type')
# per-row sanity check: the two LT / two DTR slots must not repeat a type
for i, row in df.iterrows():
    ht = row['htwv']
    ltt1 = row['ltt1']
    ltt2 = row['ltt2']
    if((ltt1 == ltt2) and ltt1 not in [0, NONE]):
        print('ERROR: duplicate LT Infra')
    dtr1 = row['kvat1']
    dtr2 = row['kvat2']
    # dtr3 = row['kvat3']
    # dtr4 = row['kvat4']
    if((dtr1 == dtr2) and dtr1 not in [0, NONE]):
        print('ERROR: duplicate DTR Infra')
# resolve each macro row to a known Site; unmatched habs get a synthetic
# '<n>-<hab_id>' key and a '-' division so they remain visible downstream
macrohab_ids = []
divisions = []
flag = 1
for i, row in dfMacroSites.iterrows():
    site, extra = getSite(census=row['census'], habitation=row['habitation'])
    if(site):
        macrohab_ids.append(site.hab_id)
        divisions.append(site.division)
    else:
        hab_id = getHabID(census=row['census'], habitation=row['habitation'])
        macrohab_ids.append(f'{flag}-{hab_id}')
        divisions.append('-')
        flag += 1
dfMacroSites['hab_id'] = macrohab_ids
dfMacroSites['division'] = divisions
# columns whose label ends in 'v' hold quantities; [4:] skips the four
# household columns so infraSum covers physical infra quantities only
vcols = [lbl for lbl in dfMacroSites.columns if lbl[-1] == 'v']
dfMacroSites['infraSum'] = dfMacroSites[vcols[4:]].sum(axis=1)
# dfMacroSites['qtySum'] = dfMacroSites[vcols].sum(axis=1)
dfProgressSites = getSiteProgressdf(district)
# print(type(dfProgressSites['census']))
# keep census codes below 800000; all outputs here are labelled 'rural'
dfProgressSites = dfProgressSites[dfProgressSites['census'].astype(
    int) < 800000]
dfProgressSites.to_excel(f'{fd}/{district}_rural_prog_sites.xlsx')
certs = dfProgressSites.set_index('hab_id')['progressqty__cert']
dfMacroSites = dfMacroSites.join(certs, on='hab_id')
dfMacroSites.to_excel(f'{fd}/{district}_rural_macro.xlsx')
dfMacroWithInfra = dfMacroSites[dfMacroSites['infraSum'] > 0]
# dfMacroWithInfra = dfMacroWithInfra.join(certs, on='hab_id')
dfMacroWithInfra.to_excel(f'{fd}/{district}_rural_macroHabIdWithInfra.xlsx')
# flag macro rows that resolved to the same hab_id
duplicateMacroSites = dfMacroSites.duplicated(subset=['hab_id'],keep=False)
dfduplicateMacroSites = dfMacroSites[duplicateMacroSites]
if(len(dfduplicateMacroSites) > 0):
    dfduplicateMacroSites.sort_values(['hab_id'], inplace=True)
    print('Duplicate sites in macros')
    for i, row in dfduplicateMacroSites.iterrows():
        print(round(row['infraSum'],2), row['hab_id'],row['census'], row['habitation'])
    dfduplicateMacroSites.to_excel(f'{fd}/{district}_rural_duplicates.xlsx')
    input('continue?')
# progress sites the macro claims no infra for
missingProgressSites = set(dfProgressSites['hab_id']) - set(dfMacroWithInfra['hab_id'])
if(missingProgressSites):
    print('missing progress sites')
    dfmissingProgressSites = dfProgressSites[dfProgressSites['hab_id'].isin(missingProgressSites)]
    print(dfmissingProgressSites.iloc[:,1:5])
    dfmissingProgressSites.to_excel(f'{fd}/{district}_rural_missingProgressSites.xlsx')
    input('continue?')
dfmacromissingcensus = pd.read_excel('missing_census_macro_habs.xlsx', engine='openpyxl')
pendingMacrocensus = set(dfmacromissingcensus['hab_id']).intersection(dfProgressSites['hab_id'])
if(pendingMacrocensus):
    print('\npending macro census update')
    print(pendingMacrocensus)
# macro rows with infra that have no progress record at all
extraMacroInfraSites = set(dfMacroWithInfra['hab_id']) - set(dfProgressSites['hab_id'])
if(extraMacroInfraSites):
    print('extraMacroInfraSites')
    df = dfMacroWithInfra[dfMacroWithInfra['hab_id'].isin(extraMacroInfraSites)]
    print(df.iloc[:,1:5])
    df.to_excel(f'{fd}/{district}_rural_extraMacroInfraSites.xlsx')
    input('continue?')
# 1: get all hh habs
cs = Consumer.objects.filter(
    site__district=district).exclude(site__census__gt=800000)
# cshabs = cs.values('site__origin__hab_id','site__hab_id','site__census', 'site__habitation', )\
#     .annotate(apl=Count(Q(F('habitation')=='APL')))
# per-habitation APL/BPL consumer counts straight from the database
cshabs = cs.values('site__hab_id', 'site__village', 'site__census', 'site__habitation', )\
    .annotate(
        bplv=Count('apl_bpl', filter=Q(apl_bpl='BPL')),
        aplv=Count('apl_bpl', filter=Q(apl_bpl='APL')))
# 2: are all habs present in macro?
dfcshabs = pd.DataFrame(cshabs)
dfcshabs['hhv'] = dfcshabs['aplv'] + dfcshabs['bplv']
dfcshabs.to_excel(f'{fd}/{district}_rural_cshabs.xlsx')
# sitesToAdd = [x for i, x in dfcshabs.iterrows() if ((x['site__hab_id'] not in dfMacroSites['hab_id'].values) and (
#     (x['site__origin__hab_id'] == None or x['site__origin__hab_id'] not in dfMacroSites['hab_id'].values)))]
csitesToAdd = set(dfcshabs['site__hab_id']) - set(dfMacroSites['hab_id'])
if(csitesToAdd):
    dfcsitesToAdd = dfcshabs[dfcshabs['site__hab_id'].isin(csitesToAdd)]
    dfcsitesToAdd.sort_values('site__census', inplace=True)
    print('\n*** Missing Consumer Sites in Macro')
    dfcsitesToAdd.to_excel(f'{fd}/{district}_rural_sites_to_add_macro.xlsx')
    print(dfcsitesToAdd[['site__village','site__census','site__habitation']])
    input('continue?')
# s_cs_count = dfcsi
dfcscounts = dfcshabs.groupby('site__hab_id').sum()
dfMacroSites = dfMacroSites.set_index('hab_id')
print(f"total hh (macro): {sum(dfMacroSites['hhv'])}")
print(f"total hh (report): {sum(dfcshabs['hhv'])}")
# overwrite the macro's HH counts with the authoritative consumer counts
dfMacroSites[['hhv', 'aplv', 'bplv']] = 0
dfMacroSites.update(dfcscounts)
# dfMacroSites['qtySum'] = dfMacroSites[vcols].sum(axis=1)
print(f"total hh (update): {sum(dfMacroSites['hhv'])}")
dfMacroSites.to_excel(f'{fd}/{district}_rural_data.xlsx')
# print(f"total hh (macro): {sum(dfMacroSites['hhv'])}")
# print(f"total hh (report): {sum(dfcshabs['hhv'])}")
# flatten the two LT slots / two DTR slots into one column per infra type
formatteddata = []
for i, row in dfMacroSites.iterrows():
    ht = row['htwv'] + row['htsv'] + row['htrv']
    ltt1 = row['ltt1']
    ltt2 = row['ltt2']
    lt1 = 0.0
    lt3 = 0.0
    if(ltt1 == LT1P):
        lt1 += row['lt1v']
    if(ltt1 == LT3P):
        lt3 += row['lt1v']
    if(ltt2 == LT1P):
        lt1 += row['lt2v']
    if(ltt2 == LT3P):
        lt3 += row['lt2v']
    dtr1 = row['kvat1']
    dtr2 = row['kvat2']
    dtr25 = 0
    dtr63 = 0
    dtr100 = 0
    if(dtr1 == DTR25):
        dtr25 += row['kva1v']
    if(dtr1 == DTR63):
        dtr63 += row['kva1v']
    if(dtr1 == DTR100):
        dtr100 += row['kva1v']
    if(dtr2 == DTR25):
        dtr25 += row['kva2v']
    if(dtr2 == DTR63):
        dtr63 += row['kva2v']
    if(dtr2 == DTR100):
        dtr100 += row['kva2v']
    hh = row['hhv']
    offgrid = row['offgrid']
    rec = {}
    # keep only habitations that have households, infra, or offgrid supply
    if(hh > 0 or row['infraSum'] > 0 or offgrid>0):
        rec = {
            'block': row['block'],
            'village': row['village'],
            'census': row['census'],
            'habitation': row['habitation'],
            'hhv': row['hhv'], HT: ht, LT3P: lt3, LT1P: lt1,
            DTR100: dtr100, DTR63: dtr63, DTR25: dtr25,
            'infra': row['infraSum'] > 0,
            'hab_id': i
        }
        formatteddata.append(rec)
dfFormatted = pd.DataFrame(formatteddata)
dfFormatted = dfFormatted.join(certs, on='hab_id')
dfFormatted.to_excel(f'{fd}/{district}_rural_formatted.xlsx')
# executed totals across all kept habitations
sExecutedSum = dfFormatted[['hhv', HT, LT3P, LT1P, DTR100, DTR63, DTR25]].sum()
sExecutedSum['infra_habs'] = len(dfFormatted[dfFormatted['infra']])
sExecutedSum['total_habs'] = len(dfFormatted)
sExecutedSum.name = 'executed'
# map ProgressQty field names -> workbook infra labels
pqty_maps = {'ht': HT, 'lt_3p': LT3P, 'lt_1p': LT1P,
             'dtr_100': DTR100, 'dtr_63': DTR63, 'dtr_25': DTR25}
pfields = {'progressqty__'+f: pqty_maps[f] for f in pqty_maps}
# if(len(missingProgressSites) > 0):
#     sMissingSum = dfmissingProgressSites[list(pfields.keys())].sum()
#     sMissingSum.index = [pfields[x] for x in sMissingSum.index]
#     sScopeSum = sMissingSum + sExecutedSum
# else:
#     sScopeSum = sExecutedSum.copy()
# sScopeSum['hhv'] = sum(dfcshabs['hhv'])
# sScopeSum['infra_habs'] = len(dfProgressSites)
# sScopeSum['total_habs'] = len(dfFormatted)
# sScopeSum.name = 'Scope'
# dfExecutedSum = pd.DataFrame([sExecutedSum, sScopeSum])
sExecutedSum.to_excel(f'{fd}/{district}_rural_summary.xlsx')
s_report_psum = pd.Series(
    {pfields[f]: dfProgressSites[f].sum() for f in pfields}, name='report_sum')
s_report_psum['infra_habs'] = len(dfProgressSites)
s_report_psum['total_habs'] = '--'
s_report_psum['hhv'] = '--'
# side-by-side comparison of reported vs executed totals before committing
print(pd.DataFrame([s_report_psum, sExecutedSum]).transpose())
ifUpdate = input('Update? (Y)')
if(ifUpdate == 'Y'):
    alreadycanceled = []
    canceled = []
    updated = []
    ps = ProgressQty.objects.filter(
        site__district=district, site__census__lt=800000)
    dff = dfFormatted.set_index('hab_id')
    dff = dff[dff['infra']>0]
    # write the reviewed quantities back: matched sites are completed,
    # unmatched ones are cancelled (unless awaiting a census update)
    for p in ps:
        if(p.site.hab_id in dff.index):
            s = dff.loc[p.site.hab_id]
            # print('updating', p.site.hab_id)
            p.ht = s[HT]
            p.lt_1p = s[LT1P]
            p.lt_3p = s[LT3P]
            p.dtr_25 = s[DTR25]
            p.dtr_63 = s[DTR63]
            p.dtr_100 = s[DTR100]
            p.status = 'completed'
            updated.append(p)
        else:
            if(p.status == 'canceled'):
                alreadycanceled.append([p, p.cert])
            else:
                if(p.site.hab_id in pendingMacrocensus):
                    continue  # census fix pending -- skip the save below
                else:
                    # print('canceling', p.site.hab_id)
                    p.remark = 'canceled in June 2021'
                    p.status = 'canceled'
                    canceled.append([p, p.cert])
        p.save()
    pprint('')
    pprint('updated')
    pprint(updated)
    pprint('')
    pprint('canceled')
    pprint(canceled)
    pprint('')
    pprint('already canceled')
    pprint(alreadycanceled)
# -*- coding: utf-8 -*-
"""Read five numbers from stdin and print them, their sum and their average."""

# SECURITY(review): eval(input()) executes arbitrary expressions typed by the
# user. Kept for behavioral compatibility (inputs may be ints or floats);
# prefer float(input()) if only plain numbers are expected.
# Improvements over the original: the builtin `sum` is no longer shadowed by
# a local variable, and the dead commented-out alternative solutions are gone.
values = [eval(input()) for _ in range(5)]
total = sum(values)
average = total / 5
print(*values)
print("Sum = {:.1f}".format(total))
print("Average = {:.1f}".format(average))
"""Issue a WebDAV PROPFIND request against an ownCloud server via curl."""
import os  # kept from the original; not used in this snippet
import subprocess

# SECURITY(review): credentials are hard-coded on the command line — they leak
# through shell history and process listings; move them to ~/.netrc or an
# environment variable before deploying.
cmd = '''curl -u root:Dis@init3 http://35.237.28.200/remote.php/dav/files/root/ -X PROPFIND --data '<?xml version="1.0" encoding="UTF-8"?><d:propfind xmlns:d="DAV:"><d:prop xmlns:oc="http://owncloud.org/ns"><d:getcontenttype/><oc:permissions/></d:prop></d:propfind>' '''
# BUG FIX: the `commands` module is Python 2 only (removed in Python 3);
# subprocess.getstatusoutput is the direct replacement, and the Python 2
# print statement is replaced with the print() function.
status, output = subprocess.getstatusoutput(cmd)
print(status, output)
import pkg_resources
pkg_resources.require("matplotlib==1.4.0")
from pandas import *
from ggplot import *
import pprint
import csv
import itertools
import ggplot as gg
import numpy as np
import pandas as pd
from datetime import datetime, date, time
import matplotlib.pyplot as plt

# BUG FIX: the bare name `pandas` was never bound above (only `pd` and the
# star-imported names are), so `pandas.read_csv` raised NameError.
turnstile_weather=pd.read_csv("C:/move - bwlee/Data Analysis/Nano/\
Intro to Data Science/project/code/turnstile_data_master_with_weather.csv")
# Scatter of hourly entries vs exits, coloured by hour of day.
plot=ggplot(turnstile_weather,aes(x='ENTRIESn_hourly',y='EXITSn_hourly',color='Hour')) \
    + geom_point() \
    + scale_color_brewer(type='diverging', palette=4) \
    + xlab("Entries") \
    + ylab("Exits")\
    + ggtitle("Entries vs Exists by hour")
#print(plot)
# Histogram of hourly entries, rainy vs dry periods (line colour).
df = DataFrame({"rain": turnstile_weather[turnstile_weather['rain']==1]['ENTRIESn_hourly'], \
                "no_rain": turnstile_weather[turnstile_weather['rain'] == 0]['ENTRIESn_hourly']}).fillna(0)
df = melt(df)
plot = ggplot(aes(x='value', color='variable'), data=df) \
    + geom_histogram(binwidth=400) \
    + scale_y_log() \
    + ylab("Frequency") \
    + xlab("Entries Per Hour")\
    + ggtitle("Entries Per Hour vs Frequency")
#print(plot)
# Same comparison as a filled histogram; intermediate frames dumped to CSV
# for inspection.
df = DataFrame({"rain": turnstile_weather[turnstile_weather['rain']==1]['ENTRIESn_hourly'], \
                "no_rain": turnstile_weather[turnstile_weather['rain'] == 0]['ENTRIESn_hourly']})
df.to_csv('dump1.csv')
df = melt(df)
df.to_csv('dump2.csv')
#print(df)
plot=ggplot(aes(x='value',fill='variable'),data=df) \
    +geom_histogram(binwidth=1000) \
    +scale_y_log() \
    +ylab("Frequency of Log 10 scale") \
    +xlab("Number of Entries")\
    +ggtitle("Frequency of Hourly Entry, red=No rain, blue=rain")
# Parenthesised print works under both Python 2 and Python 3.
print(plot)
#!/usr/bin/env python
'''
## Course Project
'''
import sys
import matplotlib
matplotlib.use('TkAgg')
from pylab import *
import graph_properties as gp
import networkx as nx
import pycxsimulator
import update_graph as ug
import metric as met
import data_store_ops as ds
# --------------------------------------------
# Adjustable Variables/Properties
# --------------------------------------------
SAVE_TO_PATH = 'data/test_'  # filename prefix for all generated data files
NODES = 100                  # initial node count for the random graph
EDGES = 500                  # initial edge count for the random graph
DEFAULT_EDGE_WEIGHT = 0.05
DISPLAY_GRAPH = True         # True: interactive GUI; False: headless loop in main()
GRAPH_NAME = "ssie-523-complex-modeling"
VERBOSE = True # print final graph to stdout
STATS = True
SAVE = True # save as gml and/or graphml
NUM_ITERATIONS = 500 # stop after N iterations NOT IMPLEMENTED FOR GUI
SHOW_NODE_LABELS = True # show node labels
# --------------------------------------------
# Adjustable Update parameters
# --------------------------------------------
# each node is regarding cultural difference.
alpha = 1 # diffusion constant
beta = 2 # 6 rate of adaptive edge weight change
gamma = 3 # 6 pickiness of nodes
Dt = 0.01 # Delta t
def set_parameters():
    """Copy the module-level tunables into the graph_properties store.

    Each value is read back inside update_graph.update_diffusion.
    """
    for apply_setting, value in (
        (gp.set_alpha, alpha),
        (gp.set_beta, beta),
        (gp.set_gamma, gamma),
        (gp.set_Dt, Dt),
        (gp.set_default_edge_weight, DEFAULT_EDGE_WEIGHT),
    ):
        apply_setting(value)
# --------------------------------------------
# statistics
# --------------------------------------------
def stats(g, show_results=False):
    """Print a handful of summary metrics for graph *g*.

    When *show_results* is true, additionally render the metric histogram
    via the project's `met` module.
    """
    metrics = (
        ('degree histogram:', nx.degree_histogram(g)),
        ('closeness centrality:', nx.closeness_centrality(g)),
        ('method property assortivity', nx.attribute_assortativity_coefficient(g, gp.METHOD)),
        ('Density', met.density(g)),
        ('clustering coefficient', met.get_clustering_coefficient(g)),
    )
    for label, value in metrics:
        print(label)
        print(value)
    if show_results:
        met.get_hist(g)
# --------------------------------------------
# Diffusion model
# --------------------------------------------
def initialization_method(n,m):
    """Build and return the initial graph: random with n nodes / m edges
    (the hand-built alternative below is currently unreachable)."""
    if True: # create a random graph
        g = gp.random_graph(n,m)
    else: # create graph with these properties (unreachable: toggle hard-coded True)
        g = nx.Graph()
        g = gp.add_xnodes(g, 10, gp.AGILE, 5, 1, gp.PEAK) # graph numberOfNodes method risk reliability hype
        g = gp.add_xnodes(g, 10, gp.AGILE, 5, 1, gp.PEAK) # graph numberOfNodes method risk reliability hype
        g = gp.add_xnodes(g, 10, gp.WATERFALL, 2, 2, gp.PLATEAU) # graph numberOfNodes method risk reliability hype
        g = gp.add_xnodes(g, 10, gp.WATERFALL, 2, 2, gp.PLATEAU)
        # NOTE(review): the two lines below look broken — number_of_nodes is a
        # method (not iterable) and nx.add_edge is referenced but never called.
        # Dead code unless the toggle above is flipped; confirm intent first.
        for u,v in g.number_of_nodes/2:
            nx.add_edge
    # Seed every edge with a randomised similarity derived from node attributes.
    for i, j in g.edges_iter():   # edges_iter: networkx 1.x API
        simil = gp.node_similarity(g, i, j)
        simil *= random() * 10    # random() comes from the pylab star-import
        print(simil)
        gp.set_similarity(g, i, j, simil)
        # gp.set_similarity(g, i, j, 0.5)
    print(g)
    stats(g, False)
    return g
def initialize():
    """Create the initial graph, compute its layout, and register it globally.

    Uses the graph_properties setters/getters instead of module globals; also
    stores a working copy for the next simulation step and optionally saves
    the pristine graph to disk.
    """
    node_total = gp.get_node_count()
    edge_total = gp.get_edge_count()
    graph = initialization_method(node_total, edge_total)
    graph.pos = nx.spring_layout(graph)
    gp.set_graph(graph)
    print(graph.nodes(data=True))
    print(graph.edges(data=True))
    gp.set_Next_graph(graph.copy())
    if SAVE:
        ds.save_graph(graph, "data/original1_")
def observe():
    # Thin indirection so the GUI callback list stays stable while the
    # underlying visualisation routine can change.
    observe_diffusion()
def observe_diffusion():
    """Redraw the current graph: node colour from gp.col, edge colour from
    the adaptive edge weight, using the layout stored on the graph."""
    g = gp.get_graph()
    cla()
    # NOTE(review): `cm.division` is not a standard matplotlib colormap name —
    # confirm it exists in this environment. nodes_iter/edges_iter and
    # g.edge[i][j] are networkx 1.x APIs (removed in networkx 2.x).
    nx.draw(g, cmap = cm.division, vmin = 0, vmax = 10,
            node_color = [gp.col(a) for a in g.nodes_iter()],
            with_labels = SHOW_NODE_LABELS,
            edge_cmap = cm.binary, edge_vmin = 0, edge_vmax = 1,
            edge_color = [g.edge[i][j]['weight'] for i, j in g.edges_iter()],
            pos = g.pos)
def update():
    # Delegate one simulation step to update_graph.update_method.
    ug.update_method() # moved to filename: update_graph.py (so it can be worked on individually)
# --------------------------------------------
# Main
# --------------------------------------------
def main(args):
    """Entry point: run the simulation via the GUI, or headless for
    NUM_ITERATIONS steps, then report/persist results per the module flags.

    NOTE(review): `xrange` below is Python 2 only — this module appears to
    target Python 2; confirm before running under Python 3.
    """
    set_parameters()
    # display the graph
    if DISPLAY_GRAPH:
        gp.set_node_count(NODES)
        gp.set_edge_count(EDGES)
        pycxsimulator.GUI().start(func=[initialize, observe, update])
        g = gp.get_graph()
        g.name = GRAPH_NAME
        if VERBOSE:
            ds.print_graph(g)
    else: # run in background
        gp.set_node_count(NODES)
        gp.set_edge_count(EDGES)
        initialize()
        g = gp.get_graph()
        try:
            for i in xrange(NUM_ITERATIONS):
                observe()
                update()
                g = gp.get_graph()
                g.name = GRAPH_NAME
                if VERBOSE:
                    ds.print_graph(g)
        except KeyboardInterrupt:
            # Ctrl-C stops the run but still saves what we have so far.
            g = gp.get_graph()
            g.name = GRAPH_NAME
            ds.save_graphml(g, SAVE_TO_PATH)
            ds.save_graph(g, SAVE_TO_PATH)
    if STATS:
        g = gp.get_graph()
        stats(g, True)
        if SAVE:
            ds.savetxt(ds.get_unique_fn(SAVE_TO_PATH), nx.degree_histogram(g))
    if SAVE:
        # store the graph to a gml file
        ds.save_graphml(g, SAVE_TO_PATH)
        ds.save_graph(g, SAVE_TO_PATH)
# this starts it, check for command line arguments, and call the main method.
if __name__ == '__main__':
    main(None)
|
from flask_wtf import FlaskForm
from wtforms import StringField,SubmitField,SelectField
from wtforms.validators import DataRequired,URL
from flask_wtf.file import FileAllowed,FileField
from project.utils import *
class addeventsform(FlaskForm):
    """Form for adding a new event.

    The nested functions below run at class-definition time (they are called
    from the field declarations) to populate the SelectField choices from the
    pickled events-category dictionary.
    """
    def get_all_main_category(dct):
        # NOTE(review): appears unused by this form; kept for compatibility.
        all_main_category = []
        for key in dct.keys():
            all_main_category.append(key)
        return all_main_category
    def prepare_choices_main_category(event_dictionary):
        # Build (value, label) tuples from the top-level category names.
        lst = []
        for categ in event_dictionary:
            tup = (categ, categ)
            lst.append(tup)
        return lst
    def prepare_choices_sub_category(event_dictionary):
        # Flatten every sub-category of every main category into choices.
        lst = []
        for categ in event_dictionary:
            for subcat in event_dictionary[categ]:
                lst.append((subcat,subcat))
        return lst
    def return_event_category_pkl():
        # load_pickle comes from project.utils (star import at module top).
        pkl_path = 'project/data/events_category/events_category.pkl'
        return load_pickle(pkl_path)
    eventname=StringField(label='event name',validators=[DataRequired()])
    event_main_category = SelectField(label='event main category',choices=prepare_choices_main_category(return_event_category_pkl()))
    eventcategory=SelectField(label='event category',choices=prepare_choices_sub_category(return_event_category_pkl()))
    event_date=StringField('event date')
    event_description=StringField(label='event description',validators=[DataRequired()])
    image_file=FileField(label='event poster',validators=[FileAllowed(['jpg','png'])])
    register_link = StringField(label='registration link',validators=[URL()])
    submit = SubmitField(label='Add Event')
class modifyeventsform(FlaskForm):
    """Form for modifying an existing event.

    NOTE(review): duplicates addeventsform except that it lacks the
    event_main_category field and uses a different submit label — consider a
    shared base form to remove the duplication.
    """
    def get_all_main_category(dct):
        # NOTE(review): appears unused by this form; kept for compatibility.
        all_main_category = []
        for key in dct.keys():
            all_main_category.append(key)
        return all_main_category
    def prepare_choices_main_category(event_dictionary):
        # Build (value, label) tuples from the top-level category names.
        lst = []
        for categ in event_dictionary:
            tup = (categ, categ)
            lst.append(tup)
        return lst
    def prepare_choices_sub_category(event_dictionary):
        # Flatten every sub-category of every main category into choices.
        lst = []
        for categ in event_dictionary:
            for subcat in event_dictionary[categ]:
                lst.append((subcat,subcat))
        return lst
    def return_event_category_pkl():
        # load_pickle comes from project.utils (star import at module top).
        pkl_path = 'project/data/events_category/events_category.pkl'
        return load_pickle(pkl_path)
    eventname=StringField(label='event name',validators=[DataRequired()])
    eventcategory=SelectField(label='event category',choices=prepare_choices_sub_category(return_event_category_pkl()))
    event_date=StringField('event date')
    event_description=StringField(label='event description',validators=[DataRequired()])
    image_file=FileField(label='event poster',validators=[FileAllowed(['jpg','png'])])
    register_link = StringField(label='registration link',validators=[URL()])
    submit = SubmitField(label='modify Event')
import pickle
from sympy import sympify
class Conjecture:
    """A conjectured inequality between a graph invariant (*target*) and an
    arithmetic *expression* of invariants, evaluated against a pickled
    *family* of precomputed graph-invariant dictionaries."""

    def __init__(self, target, inequality, expression, family):
        """
        :param target: name of the invariant being bounded.
        :param inequality: 'upper' (target <= expr) or 'lower' (target >= expr).
        :param expression: whitespace-separated arithmetic expression string.
        :param family: path to a pickle mapping graph name -> invariant dict.
        """
        self.target = target
        self.inequality = inequality
        self.expression = expression.split()
        self.family_name = family
        # NOTE(review): the file handle is never closed; acceptable for a
        # short-lived script but a `with open(...)` block would be safer.
        self.family = pickle.load(open(family, 'rb'))

    def get_inequality(self):
        """Return the comparison operator padded with spaces.

        NOTE(review): falls through (implicitly returns None) for unknown
        inequality values after printing an error — kept for compatibility.
        """
        if self.inequality == 'upper':
            return ' <= '
        elif self.inequality == 'lower':
            return ' >= '
        else:
            print('ERROR: Inequality not detected')

    def get_expression(self):
        """Return the expression tokens, each followed by a single space."""
        # Idiom: generator join replaces the manual += accumulation loop.
        return ''.join(token + ' ' for token in self.expression)

    def get_string(self):
        """Human-readable inequality, with the expression simplified by sympy."""
        return f'{self.target} {self.get_inequality()} {sympify(self.get_expression())}'

    def __str__(self):
        if self.family_name == 'small_connected':
            return f'If G is a connected graph, then {self.get_string()}'
        elif self.family_name == 'tree':
            return f'If G is a tree, then {self.get_string()}'
        else:
            return f'If G is a connected and {self.family_name} graph, then {self.get_string()}'

    def target_value(self, G):
        """Value of the target invariant in graph-record *G* (a dict)."""
        return G[self.target]

    def expression_value(self, G):
        """Numerically evaluate the expression with invariant names
        substituted from *G*; unknown tokens (operators, constants) pass
        through unchanged. Returns 0 on division by zero."""
        string = ''
        for invariant in self.expression:
            if invariant in G:
                string += str(G[invariant])
                string += ' '
            else:
                string += invariant
                string += ' '
        string += ' +.0'  # force float arithmetic
        # SECURITY(review): eval of a constructed string — safe only while
        # expressions come from trusted sources.
        try:
            return eval(string)
        except ZeroDivisionError:
            return 0

    def conjecture_instance(self, G):
        """True when the inequality holds for graph-record *G*."""
        return eval(str(self.target_value(G)) + self.get_inequality() +
                    str(self.expression_value(G)))

    def conjecture_sharp(self, G):
        """True when the bound is attained with equality on *G*."""
        return self.target_value(G) == self.expression_value(G)

    def conjecture_check(self):
        """Check the conjecture across the whole family.

        Returns (False, 0) on the first counterexample, otherwise
        (True, #tight, tight_graph_names, {name: expression_value}).
        NOTE(review): the two return shapes differ — callers must index [0].
        """
        t = 0
        tight = []
        value_dict = dict()
        for G in self.family:
            value_dict[G] = self.expression_value(self.family[G])
            if self.conjecture_instance(self.family[G]) == False:
                return (False, 0)
            elif self.target_value(self.family[G]) == self.expression_value(self.family[G]):
                t += 1
                tight.append(G)
        return (True, t, tight, value_dict)

    def touch(self):
        """Number of family members where the bound is tight."""
        t = 0
        for G in self.family:
            t += int(self.conjecture_sharp(self.family[G]))
        return t

    def conjecture_check_sharp(self):
        """True when the conjecture holds and is tight on more than one graph."""
        return self.touch() > 1 and self.conjecture_check()[0]

    def __eq__(self, other):
        # Idiom fix: return the comparison directly instead of the redundant
        # if/else True/False ladder. Equality is by expression text only.
        return self.get_expression() == other.get_expression()
|
import numpy as np
from constants import Action, move_action_to_deviation as action_to_deviation_map
from utilities import euclidean_dist, manhattan_distance, sgn
class State:
    """Mutable snapshot of a block-stacking puzzle: block centre positions,
    the currently selected block (if any), and the goal stack layout derived
    from goal_config. All positions are (x, y) pixel centres on a grid of
    block_size-pixel cells."""
    def __init__(self, block_positions, selected_index, goal_config, screen_dims, block_size=50):
        """
        :type block_positions: list[tuple(int)]
        :type goal_positions: list[tuple(int)]
        :type selected_index: nullable int
        :type block_size: int
        """
        self.block_positions = block_positions
        self.block_count = len(block_positions)
        self.selected_index = selected_index
        self.goal_config = goal_config
        self.block_size = block_size
        self.screen_dims = screen_dims
        self.goal_positions = self.compute_goal_positions()
    def compute_goal_positions(self):
        """Compute the target stack (a single column in goal_config[0] order)
        centred on the grid-snapped median block position, then shift it in
        block_size steps until it fits inside the screen bounds."""
        block_count = self.block_count
        median_x = sum(self.get_position(idx)[0] for idx in range(self.block_count)) // self.block_count
        median_x = self.block_size//2 + median_x - median_x % self.block_size
        median_y = sum(self.get_position(idx)[1] for idx in range(self.block_count)) // self.block_count
        median_y = self.block_size//2 + median_y - median_y % self.block_size
        goal_position = [None for _ in range(block_count)]
        # NOTE(review): both branches below are identical — the odd/even split
        # looks vestigial; confirm whether even counts need a half-cell offset.
        if block_count % 2 == 1:
            for idx, i in enumerate(self.goal_config[0]):
                goal_position[i] = (median_x, median_y + self.block_size * (block_count // 2 - idx))
        else:
            for idx, i in enumerate(self.goal_config[0]):
                goal_position[i] = (median_x, median_y + self.block_size * (block_count // 2 - idx))
        for _ in range(self.block_count):
            if not State.is_in_bounding_box(goal_position[self.goal_config[0][-1]], block_size=self.block_size, screen_dims=self.screen_dims):
                # move 50 down i.e. +50
                for idx in range(block_count):
                    goal_position[idx] = (goal_position[idx][0], goal_position[idx][1] + self.block_size)
            elif not State.is_in_bounding_box(goal_position[self.goal_config[0][0]], block_size=self.block_size, screen_dims=self.screen_dims):
                # move 50 up, i.e. -50
                for idx in range(block_count):
                    goal_position[idx] = (goal_position[idx][0], goal_position[idx][1] - self.block_size)
        return goal_position
    def get_position(self, block_index):
        # Current (x, y) centre of the given block.
        return self.block_positions[block_index]
    def get_selected(self):
        # Index of the currently selected block, or None.
        return self.selected_index
    def get_goal_position(self, block_index) -> list:
        # Target (x, y) centre for the given block.
        return self.goal_positions[block_index]
    def set_goal_positions(self, goal_positions):
        self.goal_positions = goal_positions
    def get_tuple(self):
        # Hashable summary: (positions, goals, selected index).
        return tuple(self.block_positions), tuple(self.goal_positions), self.selected_index
    def update_selection(self, selection):
        self.selected_index = selection
    def update_state(self, idx, position):
        # Move block idx to an explicit position (no collision checks).
        self.block_positions[idx] = position
    def select(self, idx):
        self.selected_index = idx
    def deselect(self):
        self.selected_index = None
    def copy(self):
        # Public copy entry point; see __deepcopy__ below.
        return self.__deepcopy__()
    def __deepcopy__(self):
        # NOTE(review): the standard protocol is __deepcopy__(self, memo) —
        # copy.deepcopy(state) would raise TypeError. Only State.copy() calls
        # this directly. goal_config.copy() is a shallow copy of the outer list.
        return State(block_positions=self.block_positions.copy(), goal_config=self.goal_config.copy(), selected_index=self.selected_index, screen_dims=tuple(self.screen_dims))
    def __repr__(self):
        return "Positions: %s, Goal: %s, Selected: %s" % (self.block_positions, self.goal_positions, self.selected_index)
    def goal_reached(self):
        """True when the blocks form a single column in goal_config[0] order,
        each exactly block_size above the next (anywhere on screen)."""
        for i in range(self.block_count - 1):
            this_block = self.get_position(self.goal_config[0][i])
            next_block = self.get_position(self.goal_config[0][i + 1])
            val = this_block[0] == next_block[0] and this_block[1] - next_block[1] == self.block_size
            if not val:
                return False
        return True
    def is_action_good(self, move_action, idx):
        """True when moving block idx by move_action keeps it on screen and
        does not land exactly on another block's position."""
        def get_next_state(action):
            new_state: State = self.copy()
            old_position: tuple = self.block_positions[idx]
            new_state.block_positions[idx] = (old_position[0] + action_to_deviation_map[action][0], old_position[1] + action_to_deviation_map[action][1])
            return new_state
        new_block_position = get_next_state(move_action).block_positions[idx]
        in_bounding_box = State.is_in_bounding_box(new_block_position, self.block_size, screen_dims=self.screen_dims)
        is_not_colliding = not any([tuple(new_block_position) == tuple(block_position) for block_position in self.block_positions])
        return in_bounding_box and is_not_colliding
    def is_action_allowed(self, move_action, idx):
        # Good move that additionally does not park on someone else's goal cell.
        return self.is_action_good(move_action, idx) and not self.is_action_blocking_goal(move_action, idx)
    def is_action_blocking_goal(self, move_action, idx):
        """True when moving block idx by move_action would land it on a goal
        cell belonging to a different block."""
        def get_next_state(action):
            new_state: State = self.copy()
            old_position: tuple = self.block_positions[idx]
            new_state.block_positions[idx] = (old_position[0] + action_to_deviation_map[action][0], old_position[1] + action_to_deviation_map[action][1])
            return new_state
        new_block_position = get_next_state(move_action).block_positions[idx]
        return self.is_state_blocking_goal(new_block_position, idx)
    def is_state_blocking_goal(self, new_block_position, idx):
        # Occupying any goal cell is blocking, unless it is this block's own goal.
        am_blocking_goal = any([tuple(goal_position) == tuple(new_block_position) for goal_position in self.goal_positions])
        am_blocking_my_goal = tuple(new_block_position) == tuple(self.goal_positions[idx])
        return am_blocking_goal and not am_blocking_my_goal
    def all_goals_blocked(self):
        """True when every goal cell is occupied by some block (not
        necessarily the right one)."""
        for goalIdx, goalPos in enumerate(self.goal_positions):
            this_goal_blocked = False
            for blockidx, block_position in enumerate(self.block_positions):
                this_goal_blocked = this_goal_blocked or tuple(block_position) == tuple(goalPos)
            if not this_goal_blocked:
                return False
        return True
    def get_target_blocks(self):
        """Map each stack position to the block it should sit on, per
        goal_config[0]; the last entry points back to the second-to-last."""
        target_blocks = {}
        for i in range(len(self.goal_config[0]) - 1):
            target_blocks[i] = self.goal_config[0][i + 1]
        target_blocks[len(self.goal_config[0]) - 1] = self.goal_config[0][-2]
        return target_blocks
    def get_medial_state_repr(self):
        """Coarse state representation on the 50px grid: for a selected block,
        (sign dx, sign dy, manhattan distance) to its goal; otherwise the
        per-block sign vector. Assumes the 25px-offset/50px grid layout."""
        if self.selected_index is not None:
            pos = self.get_position(self.selected_index)
            transformed_pos = (pos[0] - 25) // 50, (pos[1] - 25) // 50
            goal = self.get_goal_position(self.selected_index)
            transformed_goal = (goal[0] - 25) // 50, (goal[1] - 25) // 50
            return sgn(transformed_pos[0] - transformed_goal[0]), sgn(transformed_pos[1] - transformed_goal[1]), manhattan_distance(transformed_pos, transformed_goal)
        else:
            transformed_x = [(pos[0] - 25) // 50 for pos in self.block_positions]
            transformed_y = [(pos[1] - 25) // 50 for pos in self.block_positions]
            goal_x = [(pos[0] - 25) // 50 for pos in self.goal_positions]
            goal_y = [(pos[1] - 25) // 50 for pos in self.goal_positions]
            transformed_pos = [(sgn(ix - gx), sgn(iy - gy)) for ix, iy, gx, gy in zip(transformed_x, transformed_y, goal_x, goal_y)]
            return tuple(transformed_pos)
    def get_medial_state_repr_old(self):
        # Previous representation: sign vector plus goal order and selection.
        transformed_x = [(pos[0] - 25) // 50 for pos in self.block_positions]
        transformed_y = [(pos[1] - 25) // 50 for pos in self.block_positions]
        goal_x = [(pos[0] - 25) // 50 for pos in self.goal_positions]
        goal_y = [(pos[1] - 25) // 50 for pos in self.goal_positions]
        transformed_pos = [(sgn(ix - gx), sgn(iy - gy)) for ix, iy, gx, gy in zip(transformed_x, transformed_y, goal_x, goal_y)]
        return tuple(transformed_pos), tuple(self.goal_config[0]), self.selected_index
    def get_medial_state_repr_older(self):
        # Oldest representation: positions relative to the median cell.
        transformed_x = [(pos[0] - 25) // 50 for pos in self.block_positions]
        transformed_y = [(pos[1] - 25) // 50 for pos in self.block_positions]
        median_x = np.array(np.median(transformed_x), dtype=int)
        median_y = np.array(np.median(transformed_y), dtype=int)
        def sgn(a):
            # Local shadow of utilities.sgn; kept as-is.
            if a < 0:
                return -1
            elif a == 0:
                return 0
            else:
                return 1
        transformed_pos = [(pos[0] - median_x, pos[1] - median_y) for pos in zip(transformed_x, transformed_y)]
        return tuple(transformed_pos), tuple(self.goal_config[0]), self.selected_index
    def get_state_as_tuple_pramodith(self):
        """Alternative state encoding: squared distance and direction letters
        between the selected block and its stack neighbour.

        NOTE(review): the inner else-branch accesses .rect.centery on what is
        a plain (x, y) tuple elsewhere in this class — that path would raise
        AttributeError; confirm which caller (if any) reaches it."""
        target_blocks = self.get_target_blocks()
        some_list = [-1 for _ in range(3)]
        directions = ["-", "-"]
        if self.selected_index is not None:
            if self.selected_index in target_blocks:
                target_id = target_blocks[self.selected_index]
                some_list[0] = np.square(self.block_positions[self.selected_index][0] - self.block_positions[target_id][0]) + np.square(self.block_positions[self.selected_index][1] - self.block_positions[target_id][1])
                if self.block_positions[self.selected_index][0] - self.block_positions[target_id][0] > 0:
                    directions[0] = 'l'
                elif self.block_positions[self.selected_index][0] - self.block_positions[target_id][0] < 0:
                    directions[0] = 'r'
                if self.block_positions[self.selected_index][1] - self.block_positions[target_id][1] > 0:
                    directions[1] = 'u'
                elif self.block_positions[self.selected_index][1] - self.block_positions[target_id][1] < 0:
                    directions[1] = 'd'
            else:
                for key, value in target_blocks.items():
                    if value == self.selected_index:
                        target_id = key
                some_list[0] = np.square(self.block_positions[self.selected_index][0] - self.block_positions[target_id][0]) + np.square(self.block_positions[self.selected_index].rect.centery - self.block_positions[target_id].rect.centery)
                if self.block_positions[self.selected_index][0] - self.block_positions[target_id][0] > 0:
                    directions[0] = 'l'
                elif self.block_positions[self.selected_index][0] - self.block_positions[target_id][0] < 0:
                    directions[0] = 'r'
                if self.block_positions[self.selected_index].rect.centery - self.block_positions[target_id].rect.centery > 0:
                    directions[1] = 'u'
                elif self.block_positions[self.selected_index].rect.centery - self.block_positions[target_id].rect.centery < 0:
                    directions[1] = 'd'
        else:
            distances = []
            for key in target_blocks:
                distances.append(euclidean_dist(self.block_positions[key], self.block_positions[target_blocks[key]]))
            some_list[0] = tuple(distances)
        some_list[1] = tuple(directions)
        some_list[-1] = self.selected_index
        some_list.append(tuple([tuple(x) for x in self.goal_config]))
        return tuple(some_list)
    def get_state_as_tuple(self):
        # curr_state is a n-tuple( (x1, y1), (x2, y2), (x3, y3), (x4, y4), selectedBlockId, (goal_config))
        # NOTE(review): iterating self.block_positions and then indexing with
        # the yielded items only works if block_positions is a dict keyed by
        # block id — with the list used elsewhere this would raise. Confirm
        # which representation callers pass in.
        some_list = [0 for _ in range(self.block_count + 1)]
        for block_id in self.block_positions:
            some_list[block_id] = (self.block_positions[block_id][0], self.block_positions[block_id][1])
        some_list[-1] = self.selected_index
        some_list.append(tuple([tuple(x) for x in self.goal_config]))
        return tuple(some_list)
    def get_state_as_dict(self):
        # JSON-friendly view; -1 encodes "nothing selected".
        # NOTE(review): same dict-keyed assumption as get_state_as_tuple.
        block_pos = self.block_positions
        state = {"positions": {block_id: (block_pos[block_id][0], block_pos[block_id][1]) for block_id in block_pos}, "selected": self.selected_index if self.selected_index is not None else -1}
        return state
    def get_next_state(self, action: tuple, screen_dims):
        # action: [Action, int]
        # PICK/DROP change only the selection; any other action moves the
        # currently selected block (with collision/bounds checks).
        new_state = self.copy()
        if action[0] == Action.PICK:
            new_state.selected_index = action[1]
        elif action[0] == Action.DROP:
            new_state.selected_index = None
        else:
            new_state.block_positions[self.selected_index] = self.get_transformed_location(action[0], self.selected_index, screen_dims)
        return new_state
    def get_rect(self, center):
        # Axis-aligned square of side block_size around a centre point.
        return {"left": center[0] - self.block_size // 2, "right": center[0] + self.block_size // 2, "bottom": center[1] + self.block_size // 2, "top": center[1] - self.block_size // 2}
    @staticmethod
    def are_intersecting(rect1, dx, dy, other_rect):
        # Would rect1, translated by (dx, dy), overlap other_rect?
        return (other_rect["top"] <= rect1["top"] + dy < other_rect["bottom"] and (other_rect["left"] <= rect1["left"] + dx < other_rect["right"] or other_rect["left"] < rect1["right"] + dx <= other_rect["right"])) or (other_rect["top"] < rect1["bottom"] + dy <= other_rect["bottom"] and (other_rect["left"] <= rect1["left"] + dx < other_rect["right"] or other_rect["left"] < rect1["right"] + dx <= other_rect["right"]))
    @staticmethod
    def is_in_bounding_box(next_pos, block_size, screen_dims):
        # A centre is valid when the whole block stays on screen.
        screen_width, screen_height = screen_dims
        return (block_size / 2) <= next_pos[0] <= (screen_width - block_size / 2) and (block_size / 2) <= next_pos[1] <= (screen_height - block_size / 2)
    def get_transformed_location(self, action, sel_block_id, screen_dims):
        """Return the selected block's new centre after *action*, or its
        original centre when the move would collide or leave the screen."""
        if action in action_to_deviation_map:
            dx, dy = action_to_deviation_map[action]
        else:
            raise IOError("Invalid Action", action)
        rectangle = self.get_position(sel_block_id)
        not_intersections = [not State.are_intersecting(self.get_rect(rectangle), dx, dy, self.get_rect(other_block)) for id, other_block in enumerate(self.block_positions) if sel_block_id != id]
        orig_pos = rectangle
        if all(not_intersections):
            next_pos = (orig_pos[0] + dx, orig_pos[1] + dy)
            if self.is_in_bounding_box(next_pos, self.block_size, screen_dims):
                return next_pos
        return orig_pos
def test_get_goal_position():
    """Goal stacks computed off-screen must be shifted back inside the bounds."""
    # Blocks hugging the top edge: the stack is pushed downwards.
    top_row = State(block_positions=[[75, 25], [125, 25], [175, 25], [225, 25], [275, 25]],
                    selected_index=None, goal_config=[[3, 2, 0, 1, 4]], screen_dims=(350, 350))
    top_row.compute_goal_positions()
    assert top_row.goal_positions == [(175, 125), (175, 75), (175, 175), (175, 225), (175, 25)]
    # Blocks hugging the bottom edge: the stack is pushed upwards.
    bottom_row = State(block_positions=[[75, 325], [125, 325], [175, 325], [225, 325], [275, 325]],
                       selected_index=None, goal_config=[[3, 2, 0, 1, 4]], screen_dims=(350, 350))
    bottom_row.compute_goal_positions()
    assert bottom_row.goal_positions == [(175, 225), (175, 175), (175, 275), (175, 325), (175, 125)]
def test_get_medial_position_rep():
    """Smoke-test the medial state representation with a selected block.

    BUG FIX: State.__init__ requires a screen_dims argument (it has no
    default), so the previous call raised TypeError before the
    representation could be computed.
    """
    medial_state_rep = State(block_positions=[[10, 20], [20, 10], [30, 30]], selected_index=1,
                             goal_config=[[0, 2, 1]], screen_dims=(350, 350)).get_medial_state_repr()
    print("Medial: ", medial_state_rep)
if __name__ == '__main__':
    # Run the goal-position regression check when executed directly.
    test_get_goal_position()
|
"""
Write a python program to help an airport manager to generate few statistics based on the ticket details available for a day.
Go through the below program and complete it based on the comments mentioned in it.
Note: Perform case sensitive string comparisons wherever necessary.
"""
#PF-Assgn-55
#Sample ticket list - ticket format: "flight_no:source:destination:ticket_no"
#Note: flight_no has the following format - "airline_name followed by three digit number
#Global variable
# Shared, read-only sample data used by every function below.
ticket_list=["AI567:MUM:LON:014","AI077:MUM:LON:056", "BA896:MUM:LON:067", "SI267:MUM:SIN:145","AI077:MUM:CAN:060","SI267:BLR:MUM:148","AI567:CHE:SIN:015","AI077:MUM:SIN:050","AI077:MUM:LON:051","SI267:MUM:SIN:146"]
def find_passengers_flight(airline_name="AI"):
    """Return the number of tickets whose flight number belongs to
    *airline_name* (prefix match on the flight_no field)."""
    return sum(1 for ticket in ticket_list
               if ticket.split(":")[0].startswith(airline_name))
def find_passengers_destination(destination):
    """Return the number of passengers travelling to *destination*
    (exact, case-sensitive match on the destination field)."""
    return sum(1 for ticket in ticket_list
               if ticket.split(":")[2] == destination)
def find_passengers_per_flight():
    """Return per-flight passenger counts as ["flight_no:count", ...].

    Entries appear in first-seen flight order, matching the original
    de-duplication behaviour.
    Performance fix: the previous implementation called list.count inside a
    loop (O(n^2)); Counter tallies in a single O(n) pass and preserves
    first-insertion order (dicts are ordered since Python 3.7).
    """
    from collections import Counter
    counts = Counter(ticket.split(":")[0] for ticket in ticket_list)
    return [str(flight) + ":" + str(n) for flight, n in counts.items()]
def sort_passenger_list():
    """Return the per-flight passenger list sorted by descending count.

    BUG FIX: the previous implementation sorted the counts as *strings*
    (lexicographic order — wrong once any count reaches 10) and, when two
    flights shared the same count, inserted duplicate entries into the
    result. Sorting numerically with a key is correct and stable.
    """
    entries = find_passengers_per_flight()
    return sorted(entries, key=lambda entry: int(entry.split(":")[1]), reverse=True)
#Provide different values for airline_name and destination and test your program.
# With the sample data above: 6 AI passengers, 4 travelling to LON,
# and flights listed by descending passenger count.
print(find_passengers_flight("AI"))
print(find_passengers_destination("LON"))
#find_passengers_per_flight()
print(sort_passenger_list())
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from pants.option.option_types import BoolOption
from pants.option.subsystem import Subsystem
class SoapSubsystem(Subsystem):
    # Registers the `soap` options scope; currently exposes only the
    # `tailor` toggle controlling automatic `wsdl_sources` target creation.
    options_scope = "soap"
    help = "General SOAP/WSDL codegen settings."
    tailor = BoolOption(
        default=True,
        help="If true, add `wsdl_sources` targets with the `tailor` goal.",
        advanced=True,
    )
|
from django.urls import path, include
from django.contrib.auth import views as auth_views
from . import views
# Route table: delegate all account management to django-allauth and expose
# this app's home view.
urlpatterns = [
    path("accounts/", include("allauth.urls")),
    path("home/", views.home, name="home"),
]
from unittest import TestCase
from svtools.bedpe import Bedpe
from svtools.cluster import Cluster
class ClusterTests(TestCase):
    """Unit tests for svtools.cluster.Cluster built from BEDPE records."""
    def test_can_add(self):
        # A cluster seeded with b's coordinates/strands should accept b; then
        # perturb one attribute at a time and check acceptance flips.
        bedpe = [ '1', '200', '300', '2', '300', '400', '777_1', '57', '+', '-', 'BND', 'PASS', 'MISSING', 'SVTYPE=BND;AF=0.2' ]
        b = Bedpe(bedpe)
        c = Cluster()
        c.chrom_a = b.c1
        c.chrom_b = b.c2
        c.min_a = b.s1
        c.max_a = b.e1
        c.min_b = b.s2
        c.max_b = b.e2
        c.strand_a = b.o1
        c.strand_b = b.o2
        self.assertTrue(c.can_add(b, 1))
        # Non-empty cluster must also match the SV event type.
        c.size = 1
        c.sv_event = 'DEL'
        self.assertFalse(c.can_add(b, 1))
        c.sv_event = 'BND'
        self.assertTrue(c.can_add(b, 1))
        # Chromosome mismatches on either breakend are rejected.
        c.chrom_a = 'X'
        self.assertFalse(c.can_add(b, 1))
        c.chrom_a = b.c1
        c.chrom_b = 'X'
        self.assertFalse(c.can_add(b, 1))
        c.chrom_b = b.c2
        # Interval bounds outside the padding window are rejected.
        c.min_a = 305
        self.assertFalse(c.can_add(b, 1))
        c.min_a = b.s1
        c.max_a = 150
        self.assertFalse(c.can_add(b, 1))
        c.max_a = b.e1
        c.min_b = 405
        self.assertFalse(c.can_add(b, 1))
        c.min_b = b.s1
        c.max_b = 150
        self.assertFalse(c.can_add(b, 1))
    def test_add(self):
        # Adding a second overlapping record should grow the cluster's
        # intervals to the union and refresh the AF-derived filter value.
        bedpe1 = [ '1', '200', '300', '2', '300', '400', '777_1', '57', '+', '-', 'BND', 'PASS', 'MISSING', 'SVTYPE=BND;AF=0.2' ]
        b1 = Bedpe(bedpe1)
        bedpe2= [ '1', '195', '305', '2', '295', '405', '777_1', '57', '+', '-', 'BND', 'PASS', 'MISSING', 'SVTYPE=BND;AF=0.3' ]
        b2 = Bedpe(bedpe2)
        c = Cluster()
        c.add(b1, None)
        self.assertEqual(c.size, 1)
        self.assertEqual(c.sv_event, 'BND')
        self.assertEqual(c.filter, '0.2')
        self.assertEqual(c.chrom_a, '1')
        self.assertEqual(c.min_a, 200)
        self.assertEqual(c.max_a, 300)
        self.assertEqual(c.chrom_b, '2')
        self.assertEqual(c.min_b, 300)
        self.assertEqual(c.max_b, 400)
        self.assertEqual(c.strand_a, '+')
        self.assertEqual(c.strand_b, '-')
        c.add(b2, None)
        self.assertEqual(c.size, 2)
        self.assertEqual(c.sv_event, 'BND')
        self.assertEqual(c.filter, '0.3')
        self.assertEqual(c.chrom_a, '1')
        self.assertEqual(c.min_a, 195)
        self.assertEqual(c.max_a, 305)
        self.assertEqual(c.chrom_b, '2')
        self.assertEqual(c.min_b, 295)
        self.assertEqual(c.max_b, 405)
        self.assertEqual(c.strand_a, '+')
        self.assertEqual(c.strand_b, '-')
    def test_get_cluster_string(self):
        # An empty cluster cannot be rendered; after one add the rendering
        # matches the underlying BEDPE record's string form.
        bedpe = [ '1', '200', '300', '2', '300', '400', '777_1', '57', '+', '-', 'BND', 'PASS', 'MISSING', 'SVTYPE=BND;AF=0.2' ]
        b = Bedpe(bedpe)
        c = Cluster()
        with self.assertRaises(ValueError):
            c.get_cluster_string()
        c.add(b, None)
        self.assertEqual(c.get_cluster_string(), str(b))
|
from django.conf import settings
def settings_context_processor(request):
    """Context processor exposing the Django settings object to templates
    under the name ``settings``."""
    context = {'settings': settings}
    return context
|
"""Read an integer from stdin and print its binary representation."""
n = int(input())
# format(n, "b") prints "0" for zero and terminates for negative inputs too
# (the previous repeated-division loop hung forever on negatives, because in
# Python -1 // 2 == -1). Output for non-negative inputs is unchanged.
print(format(n, "b"))
|
import gi
# Pin the GTK 3 API before any `from gi.repository import Gtk` elsewhere.
gi.require_version('Gtk', '3.0')
|
import dash_bootstrap_components as dbc
from dash import html
# Demo layout: a small default spinner above a custom 3rem spinner,
# separated by a horizontal rule.
spinners = html.Div(
    [
        dbc.Spinner(size="sm"),
        html.Hr(),
        dbc.Spinner(spinner_style={"width": "3rem", "height": "3rem"}),
    ]
)
|
import torch
from models.model import MyCNN
from models.model import ExampleCNN
from datasets.dataloader import make_test_dataloader
import os
from tqdm import tqdm
def test(model_name, device, base_path, save_path):
    """Evaluate previously-saved model weights on the test split.

    :param model_name: "ExampleCNN" selects ExampleCNN, anything else MyCNN.
    :param device: torch device to run inference on.
    :param base_path: project root containing data/test.
    :param save_path: directory holding weight.pth.
    :return: test accuracy as a float percentage.
    """
    data_dir = os.path.join(base_path, "data", "test")
    weights_file = os.path.join(save_path, "weight.pth")
    # Rebuild the architecture that was trained, then restore its weights.
    model = ExampleCNN() if model_name == "ExampleCNN" else MyCNN()
    model.load_state_dict(torch.load(weights_file))
    model = model.to(device)
    loader = make_test_dataloader(data_dir, 2)
    correct = 0
    model.eval()
    with torch.no_grad():
        for data, target in tqdm(loader, desc="Testing"):
            data, target = data.to(device), target.to(device)
            predictions = model(data).data.max(1)[1]
            correct += (predictions == target.data).sum()
    accuracy = 100. * correct / len(loader.dataset)
    print(f'Test accuracy: {accuracy:.4f}%')
    return accuracy.item()
if __name__ == '__main__':
    # Hyperparameters identifying which training run's weights to evaluate.
    cuda_device = 0
    batch_size = 32
    epochs = 40
    learning_rate = 0.01
    model_name = "ExampleCNN"
    base_path = os.path.dirname(os.path.abspath(__file__))
    device = torch.device(f'cuda:{cuda_device}' if torch.cuda.is_available() else 'cpu')
    state_name = f"{batch_size}_{epochs}_{learning_rate}"
    save_name = "train_result"
    save_path = os.path.join(base_path, save_name, state_name)
    # IMPROVEMENT: os.makedirs creates the full directory tree in one call
    # and exist_ok avoids the race-prone exists()+mkdir pairs used before.
    os.makedirs(save_path, exist_ok=True)
    test_accuracy = test(
        model_name=model_name,
        device=device,
        base_path=base_path,
        save_path=save_path
    )
import pandas as pd
import numpy as np
import py_spatial
import rsp_reader
from weather import Weather_Station
from cleanfirst import Vehicle_Cleaner
class Vehicle(Vehicle_Cleaner):
'''
This is the class where the bulk of the cleaning and merging of datafiles
is performed. It is built on top of the class which is used to determine the
new restricted GPS points.
'''
def __init__(self, section, vehicle, speed, gps_coords, verbose=False, path=''):
Vehicle_Cleaner.__init__(self, section, vehicle, verbose=verbose, path=path)
self.speed = str(speed)
self.is_concrete = float(self.info.loc['is_concrete'])
self.filename_rsp = path + self.info.loc['rsp_filename']
self.filename = path + self.info.loc[''.join([self.vehicle, '_', self.speed, 'file'])]
self.county = self.info.loc['county']
self.route = self.info.loc['route']
self.hpgps_file = path + self.info.loc['HPGPS_file']
self.ws_time = self.info.loc['ws_time']
self.ws_actualtime = self.info.loc['ws_actualtime']
self.ws_file = path + self.info.loc['ws_file']
self.vehicle_type = self.vehicle.lower()
# using a get method here as a placeholder until the input master is changed
self.gpr_file = path + self.info.get('GPR_file', 'none')
self.gps_coords = gps_coords
self.verbose = verbose
def clean_vehicle_data(self):
"""
This function takes the information from the truck 55mph file and creates a dataframe
which we'll use later to find the bounds for our GPS coordinates.
It is assumed that the most restrictive GPS coordinates will be attached to the truck at 55mph
If while cleaning the data, it is found that one of the other vehicles seems more restrictive
(i.e. if for example the f450 seems to be starting it's cruise control earlier or later) you can replace
the hhdt with that vehicle and get the new restricted gps coordinates.
"""
# make sure that the info received from argv is usable
speed = str(self.speed)
# load all the needed info from the master excel file
start_lat = self.gps_coords[0][0]
start_lng = self.gps_coords[0][1]
end_lat = self.gps_coords[1][0]
end_lng = self.gps_coords[1][1]
skiprow = int(self.info.loc[''.join([self.vehicle_type, '_lineskip'])])
high_kph = int(self.info.loc[''.join([self.vehicle_type, '_', speed, 'highspeed'])])
low_kph = int(self.info.loc[''.join([self.vehicle_type, '_', speed, 'lowspeed'])])
# filename = self.info.loc[''.join([self.vehicle_type,'_',speed,'file'])]
is_truck = self.vehicle_type == 'hhdt'
df = pd.read_csv(str(self.filename), skiprows=skiprow)
df['useful_data'] = False
if is_truck:
df.loc[(df['Wheel-Based Vehicle Speed (kph)'] > low_kph) & (df['Wheel-Based Vehicle Speed (kph)'] < high_kph), "useful_data"] = True
df = df.rename(columns={'Wheel-Based Vehicle Speed (kph)': 'Vehicle Speed (km/hr)'})
else:
df.loc[(df['Vehicle Speed (km/hr)'] > low_kph) & (df['Vehicle Speed (km/hr)'] < high_kph), "useful_data"] = True
df = df[(df['useful_data'] == True)]
df['indicator'] = df['Time'].diff()
df['Switch'] = float('nan')
# for below, because it's a float, strict > doesn't work so add an arbitrary small number to the value we're looking for (ex. 1/10 of the mean)
df.loc[df['indicator'] > (np.mean(df['Time'].diff() + np.mean(df['Time'].diff() / 10))), "Switch"] = 1
df.loc[(df['Switch'] == 1), "Switch"] = df['Switch'][(df['Switch'] == 1)].cumsum(skipna=True)
# this fills in the missing data so we know which run each point of data belongs in
df['Switch'] = df['Switch'].fillna(method='ffill')
# the previous cumsum() function doesn't have a 0 value, so we now label the remaining data to run 0
df['Switch'] = df['Switch'].fillna(value=0)
# cumsum() creates a floating point value, so convert the type from float to int to be used for labeling runs
df['Switch'] = df['Switch'].astype(int)
# for the cutoff, I just used an arbitrary small number. The user will set the larger cutoff later in the code
cutoff = 100
# we initialize a new dataframe for our good run data to be put into
dfnew = pd.DataFrame()
run_data_list = []
if self.verbose:
print '((index,distance from start),(index,distance from end),run #,number of points)'
for i in range(int(df['Switch'].max() + 1)):
if df['Switch'].value_counts()[i] >= int(cutoff):
start = py_spatial.find_closest_gps(start_lat, start_lng, df.loc[df['Switch'] == i, 'Latitude'], df.loc[df['Switch'] == i, 'Longitude'])
end = py_spatial.find_closest_gps(end_lat, end_lng, df.loc[df['Switch'] == i, 'Latitude'], df.loc[df['Switch'] == i, 'Longitude'])
if start[0] < end[0]: # may need to add more conditions if this doesn't return correct sections
# probably a better way to find it, but this gives the index of the first point in i
first_index = np.argmin(df.loc[df['Switch'] == i, 'Switch'])
start_index = first_index + start[0]
end_index = first_index + end[0]
dfnew = dfnew.append(df.loc[start_index:end_index]) # ,ignore_index=True)
speed1 = np.argmax(dfnew.loc[dfnew['Switch'] == i, ('Vehicle Speed (km/hr)')].value_counts())
activate = False
if (dfnew.loc[dfnew['Switch'] == i, 'Vehicle Speed (km/hr)'][start_index] != speed1) and (self.vehicle_type != 'hhdt'):
start_index = dfnew.loc[(dfnew['Switch'] == i) & (dfnew['Vehicle Speed (km/hr)'] == speed1)].index[0]
activate = True
if (dfnew.loc[dfnew['Switch'] == i, 'Vehicle Speed (km/hr)'][end_index] != speed1) and (self.vehicle_type != 'hhdt'):
end_index = dfnew.loc[(dfnew['Switch'] == i) & (dfnew['Vehicle Speed (km/hr)'] == speed1)].index[-1]
activate = True
if activate:
dfnew.loc[dfnew['Switch'] == i] = dfnew.loc[start_index:end_index]
# the distance info printed may be slightly off from what is actually happening, since it doesn't take into
# account the cruise control, but the gps reduction function does.
start_distance = py_spatial.distance_on_unit_sphere(start_lat, start_lng, df['Latitude'][start_index], df['Longitude'][start_index])
end_distance = py_spatial.distance_on_unit_sphere(end_lat, end_lng, df['Latitude'][end_index], df['Longitude'][end_index])
# print(i,end_index - start_index+1)
run_data_list.append(((i, end_index - start_index + 1), ([df['Latitude'][start_index], df['Longitude'][start_index]]),
([df['Latitude'][end_index], df['Longitude'][end_index]]), (start_distance, end_distance)))
if self.verbose:
print 'Run #, Data Points, Distance from Start, Distance from End'
for run in run_data_list:
print run[0][0], run[0][1], run[3][0], run[3][1]
cutoff = raw_input('What is the maximum acceptable distance from the specified GPS location? ')
else:
# TODO see if we are okay with 10m distance. This is the default. Change if needed
cutoff = 10
self.run_list = [x[0][0] for x in run_data_list if x[3][1] <= int(cutoff) and x[3][0] <= int(cutoff)]
self.dfnew = dfnew.loc[dfnew['Switch'].isin(self.run_list)]
def add_fc(self, gas_density=6.71):
"""
This looks at the vehicle type then calculates the fuel economy.
the default value for gas density is set at 6.71lbs/gallon.
We may want to use something different after we measure it on our samples.
"""
print 'Adding fuel consumption data'
if self.vehicle_type in ['carg', 'suv']:
self.dfnew['IFC (MPG)'] = self.dfnew.apply(lambda x: 14.7 * gas_density * 453.592 * (x['Vehicle Speed (km/hr)'] * 0.621371) / ((3600 * x['Air Flow Rate from Mass Air Flow Sensor (g/s)'])), axis=1)
needed_params = ['Time', 'Vehicle Speed (km/hr)', 'Air Flow Rate from Mass Air Flow Sensor (g/s)', 'IFC (MPG)', 'Fuel Level Input (%)', 'Latitude', 'Longitude', 'Altitude',
'Velocity', 'Heading', 'Date', 'Time.1', 'Switch', 'Universal Time', 'Rounded Time']
elif self.vehicle_type in ['hhdt']:
self.dfnew['IFC (MPG)'] = self.dfnew.apply(lambda x: x['Engine Instantaneous Fuel Economy (km/L)'] * 0.621371 * 3.78, axis=1)
needed_params = ['Time', 'Vehicle Speed (km/hr)', 'Engine Instantaneous Fuel Economy (km/L)', 'IFC (MPG)', 'Latitude', 'Longitude', 'Altitude',
'Velocity', 'Heading', 'Date', 'Time.1', 'Switch', 'Universal Time', 'Rounded Time']
elif self.vehicle_type in ['f450', 'card']:
self.dfnew['IFC (MPG)'] = self.dfnew.apply(lambda x: x['Vehicle Speed (km/hr)'] / x['Engine Fuel Rate (L/h)'] * 0.621371 * 3.78, axis=1) # 3.78 is liters per gallon
needed_params = ['Time', 'Vehicle Speed (km/hr)', 'Engine Fuel Rate (L/h)', 'IFC (MPG)', 'Fuel Level Input (%)', 'Latitude', 'Longitude', 'Altitude',
'Velocity', 'Heading', 'Date', 'Time.1', 'Switch', 'Universal Time', 'Rounded Time']
else:
print 'Fuel consumption is broken'
time_offset = self._get_local_time()
# there might be a daylight savings time issue, will need to check
def get_universal_time(cur_time):
return cur_time / 60.0
self.dfnew['Universal Time'] = self.dfnew['Time'].apply(get_universal_time) + time_offset
# we round the time to an int, that way we can merge on that column when merging with the weather data
self.dfnew['Rounded Time'] = self.dfnew['Universal Time'].map(round).astype(int)
self.dfnew = self.dfnew[needed_params]
def merge_obd_weather(self, column_list=['Time', 'Out', 'Density', '2nd', 'Effective Wind']):
'''
This function merges the weather station data with the OBD data
'''
print 'Reading in weather data'
# below we merge the weather and OBD datasets, it fills the weather data for the rounded time
self.ws = Weather_Station(self.ws_file, self.ws_time, self.ws_actualtime, self.gps_coords, column_list)
dfmerged = pd.merge(self.dfnew, self.ws.dfw, on='Rounded Time', how='outer')
# next line gets rid of the data where we have weather station data that is not relevant for our runs
dfmerged.dropna(subset=['Vehicle Speed (km/hr)'], inplace=True)
# the next 2 lines fill in the weather data for when the weather station stopped too early or started too late
# it fills the closest weather station data to those points in time
dfmerged.fillna(method='ffill', inplace=True)
dfmerged.fillna(method='bfill', inplace=True)
self.dfnew = dfmerged
def add_info_to_df(self, name, df_other):
"""
This will take the df we want then add the column of the name to that df from the other df
both df's will need to have gps points.
This function can be run multiple times to continue adding items. It acts in-place.
name: what we'll call our newest column
df: the main dataframe, probably the OBD reader df in this case
df_other: the dataframe to merge. Likely for elevation or IRI/MPD
"""
self.dfnew[name] = float('nan')
for run in self.run_list:
run = int(run)
for gps in zip(df_other['Latitude'], df_other['Longitude']):
loc, dist = py_spatial.find_closest_gps(float(gps[0]), float(gps[1]), self.dfnew.loc[self.dfnew['Switch'] == run, 'Latitude'], self.dfnew.loc[self.dfnew['Switch'] == run, 'Longitude'])
self.dfnew.loc[np.where(self.dfnew['Switch'] == run)[0][loc], name] = float(df_other.loc[(df_other['Latitude'] == gps[0]) & (df_other['Longitude'] == gps[1]), name])
# we fill the values for each run so that values from earlier runs don't accidentally get used
self.dfnew.loc[self.dfnew['Switch'] == run, name] = self.dfnew.loc[self.dfnew['Switch'] == run, name].fillna(method='ffill')
# in case there were any values that occurred before the profiler started recording, they'll be set to nearest
self.dfnew.loc[self.dfnew['Switch'] == run, name] = self.dfnew.loc[self.dfnew['Switch'] == run, name].fillna(method='bfill')
def add_rsp_file_info(self, iri_avg_dist, mpd_avg_dist):
'''
This function adds the mpd and iri to the dataframe
iri_avg_dist: the averaging distance (in feet) to be used for the IRI
mpd_avg_dist: the averaging distance (in feet) to be used for the MPD
'''
print 'Reading in pavement profile data (this step may take some time)'
iri_avg_dist = int(iri_avg_dist * 3.28084)
mpd_avg_dist = int(mpd_avg_dist * 3.28084)
df_dict = rsp_reader.extract_data(self.filename_rsp, filename_info='dict.txt',)
mpddf = rsp_reader.info_btw_pts(df_dict[5409], self.gps_coords[0][0], self.gps_coords[0][1], self.gps_coords[1][0], self.gps_coords[1][1], path='First Texture', distance=mpd_avg_dist)
iridf = rsp_reader.info_btw_pts(df_dict[5406], self.gps_coords[0][0], self.gps_coords[0][1], self.gps_coords[1][0], self.gps_coords[1][1], path='RWP IRI', distance=iri_avg_dist)
def lat_lng_maker(df, gps_name):
'''
Helper function to split the GPS column into Lat and Lng columns
'''
df['Latitude'] = df[gps_name].map(lambda x: x[0])
df['Longitude'] = df[gps_name].map(lambda x: x[1])
return df
mpddf = lat_lng_maker(mpddf, 'Start_GPS')
iridf = lat_lng_maker(iridf, 'Start_GPS')
self.add_info_to_df('RWP IRI', iridf)
self.dfnew.rename(columns={'RWP IRI': 'RWP IRI %s' % iri_avg_dist}, inplace=True)
if not self.is_concrete:
self.add_info_to_df('First Texture', mpddf)
self.dfnew.rename(columns={'First Texture': 'MPD (microns) %s' % mpd_avg_dist}, inplace=True)
else:
self.dfnew['First Texture'] = 0
self.dfnew.rename(columns={'First Texture': 'MPD (microns) %s' % mpd_avg_dist}, inplace=True)
def add_elevations_gmaps(self, grade_avg_dist):
'''
This adds the grade from google maps into the data.
grade_avg_dist: the averaging distance for the grade (in meters)
can be run multiple times to add different averaging distances
'''
print 'Reading in elevation data from Googlemaps'
df_elev = py_spatial.elev_from_gmaps(self.gps_coords[0][0], self.gps_coords[0][1], self.gps_coords[1][0], self.gps_coords[1][1], grade_avg_dist)
self.add_info_to_df('GM-Grade', df_elev)
self.dfnew.rename(columns={'GM-Grade': 'GM-Grade %s' % grade_avg_dist}, inplace=True)
def add_hpgps(self, grade_avg_dist):
'''
This adds the grade from the hpgps into the data.
grade_avg_dist: the averaging distance for the grade (in meters)
can be run multiple times.
If there is no hpgps file and this is called, it will not change the dataframe
'''
if self.hpgps_file != 'none':
df_hpgps = py_spatial.create_subsections_grade(self.hpgps_file, (self.gps_coords[0][0], self.gps_coords[0][1]), (self.gps_coords[1][0], self.gps_coords[1][1]), distance=grade_avg_dist)
self.add_info_to_df('HPG-Grade', df_hpgps)
self.dfnew.rename(columns={'HPG-Grade': 'HPG-Grade %s' % grade_avg_dist}, inplace=True)
else:
pass
def add_gpr_gps(self, grade_avg_dist):
'''
This adds the grade from the GPR van into the data.
grade_avg_dist: the averaging distance for the grade (in meters)
can be run multiple times.
If there is no hpgps file and this is called, it will not change the dataframe
'''
if self.gpr_file != 'none':
df_gpr = py_spatial.import_gps_data(self.gpr_file, (self.gps_coords[0][0], self.gps_coords[0][1]), (self.gps_coords[1][0], self.gps_coords[1][1]), grade_avg_dist)
self.add_info_to_df('GPR-Grade', df_gpr)
self.dfnew.rename(columns={'GPR-Grade': 'GPR-Grade %s' % grade_avg_dist}, inplace=True)
def average_data(self, sections=None):
"""
This function can be used to average the data across each replicate or to average
the vehicle parameters for each Lat/Lng reading.
Lat/lng are read once every 0.2seconds but the vehicle parameters occur once
every 0.04 seconds. Typing sections = 'max' averages on each GPS reading.
If sections is set as 1, this takes the average over the entire run
"""
if sections == 1:
grouped = self.dfnew
grouped = grouped.groupby(['Switch'])
grouped = grouped.agg(np.average).sort_values('Time').reset_index()
return grouped
elif sections in ['max', 'Max', 'MAX']:
grouped = self.dfnew
grouped = grouped.groupby(['Switch', 'Latitude', 'Longitude'])
grouped = grouped.agg(np.average).sort_values('Time').reset_index()
return grouped
else:
return None
def create_subsections(self, distance=100):
'''
We likely want to subsection runs such that we take advantage of all the data collected
Not that this does not make inplace changes to the dataframe so that it can be run multiple times
with different distances.
distance: the distance (in meters) to average for subsections
'''
# ignore the chained assignment copy error which is not affected by this code
pd.options.mode.chained_assignment = None
self.dfnew['Dist_from_start'] = self.dfnew.apply(lambda x: py_spatial.distance_on_unit_sphere(self.gps_coords[0][0], self.gps_coords[0][1], x['Latitude'], x['Longitude']), axis=1)
# there is only a new gps every 0.2s, so we average all the other readings until the GPS location changes
dfmax = self.average_data(sections='Max')
df2 = pd.DataFrame()
for value in set(dfmax['Switch']):
df1 = dfmax.loc[dfmax['Switch'] == value]
df1['speeddist'] = df1['Time'].diff() * 0.911344 * df1['Vehicle Speed (km/hr)'] * 0.3048
df1['speeddist'] = df1['speeddist'].cumsum() + df1['Dist_from_start'].iloc[0]
df1['speeddist'] = df1['speeddist'].fillna(value=df1['Dist_from_start'].iloc[0])
start_point = (int(df1['speeddist'].iloc[0] / float(distance)) + 1) * distance
end_point = int(df1['speeddist'].iloc[-1] / float(distance)) * distance
df1, dist_list = py_spatial.add_distance_to_df(df1, [start_point, end_point], distance)
df1.index = df1['speeddist']
df1 = df1.sort_values(by='speeddist').interpolate(method='values')
# df1 = df1.fillna(value = 0)
df1['groups'] = df1['speeddist'].map(lambda x: int(x / distance))
df1['weights'] = df1['speeddist'].diff().shift(-1)
df3 = pd.DataFrame()
for column in df1.columns:
df3[column] = df1.loc[(df1['groups'] >= start_point / distance) & (df1['groups'] < end_point / distance)].groupby(df1['groups']).apply(lambda x: np.average(x[column], weights=x['weights']))
# print len(df3)
df2 = df2.append(df3)
dflatlng = df1.loc[df1['speeddist'].isin(dist_list)].groupby('speeddist').mean()
dflatlng = dflatlng.loc[:, ['Latitude', "Longitude", 'groups']].loc[(df1['groups'] >= start_point / distance) & (df1['groups'] < end_point / distance)]
return df2.sort_values(['groups', 'Switch']), dflatlng
def excel_output(self, df, subsection, name_file=False):
'''
This function exports the dataframe to an excel sheet
Will likely want to run this twice to get the lat/lng coords for the sections as well.
This method is not bound to the dataframe intentionally such that it can be called multiple times
on one instance. We may want to save with different subsection lengths.
'''
filename_to_excel = ''.join([self.section, self.vehicle_type, self.speed, int(subsection), '.xlsx'])
if name_file:
name_okay = raw_input('The file will be named %s, is this okay? (y/n)' % (filename_to_excel))
if name_okay in ['y', 'Y', 'Yes', 'yes', 'YES']:
print 'Writing to Excel'
writer = pd.ExcelWriter(filename_to_excel)
df.to_excel(writer)
writer.save()
print 'Finished writing to excel'
else:
new_name = raw_input('Please name your file: ')
print 'Writing to Excel'
writer = pd.ExcelWriter(''.join([new_name, '.xlsx']))
df.to_excel(writer)
writer.save()
print 'Finished writing %s to excel' % (new_name + '.xlsx')
else:
print 'Writing to Excel'
writer = pd.ExcelWriter(self.path + 'data/' + filename_to_excel)
df.to_excel(writer)
writer.save()
print 'Finished writing to excel'
def _hour_converter_lt(self, x):
'''
This is a simple helper function to get the hour of the day
'''
return int(x[:x.find(':')]) * 60 + int(x[x.find(':') + 1:x.find(':') + 3]) + float(x[-2:]) / 60
def _get_local_time(self):
'''
This function is needed to get the offset for the time in the OBD file.
'''
with open(self.filename, 'r') as f:
first_line = f.readline()
first_line = first_line.split()[-2:]
# the time in the excel file is not in military time. the following takes care of the military time issue
if first_line[-1] == 'AM':
if first_line[-2][:2] == '12':
return self._hour_converter_lt(first_line[0]) - 60 * 12
else:
return self._hour_converter_lt(first_line[0])
else:
if first_line[-2][:2] == '12':
return self._hour_converter_lt(first_line[0])
else:
return self._hour_converter_lt(first_line[0]) + 12 * 60
|
import numpy as np
def rle2mask(mask_rle, shape):
    '''
    mask_rle: run-length as string formated (start length)
    shape: (width,height) of array to return
    Returns numpy array, 1 - mask, 0 - background
    '''
    tokens = mask_rle.split()
    # Starts are 1-indexed in the RLE format; shift to 0-indexed.
    run_starts = np.asarray(tokens[0::2], dtype=int) - 1
    run_lengths = np.asarray(tokens[1::2], dtype=int)
    flat = np.zeros(shape[0] * shape[1], dtype=np.uint8)
    for start, length in zip(run_starts, run_lengths):
        flat[start:start + length] = 1
    # RLE is column-major, hence the transpose after reshaping.
    return flat.reshape(shape).T
def mask2rle(x):
    """Encode a binary mask as [start1, len1, start2, len2, ...].

    Starts are 1-indexed positions in the column-major (transposed,
    flattened) order, matching the companion rle2mask decoder.
    """
    positions = np.where(x.T.flatten() == 1)[0]
    rle = []
    previous = -2
    for pos in positions:
        if pos > previous + 1:
            # Gap before this pixel: open a new run (1-indexed, length 0).
            rle.extend((pos + 1, 0))
        rle[-1] += 1
        previous = pos
    return rle
|
# -*- coding: utf-8 -*-
# Author: Simone Marsili <simomarsili@gmail.com>
# License: BSD 3 clause
"""A little parser for alignments of biological sequences."""
import pkg_resources
from lilbio.funcs import uppercase_only
from lilbio.parser import parse, write
# Distribution name as registered with setuptools; used for version lookup.
project_name = 'little-bio-parser'
# Resolve the installed package version at import time.
__version__ = pkg_resources.require(project_name)[0].version
__copyright__ = 'Copyright (C) 2017 Simone Marsili'
__license__ = 'BSD 3 clause'
__author__ = 'Simone Marsili (simo.marsili@gmail.com)'
# Public API re-exported from the submodules above.
__all__ = ['parse', 'write', 'uppercase_only']
|
from flask import Flask,render_template,request,send_file
import os
from pymongo import MongoClient
from flask_pymongo import PyMongo
import csv
# SECURITY NOTE(review): MongoDB credentials are hard-coded in this
# connection string; they should be moved to environment variables.
client=MongoClient("mongodb+srv://HerokuUser:herokupassword@cluster0-cglnu.mongodb.net/test?retryWrites=true&w=majority")
db=client.get_database("OflUsers")
# Collection holding one document per language (_id is the language name).
rec=db.fileUploads
# Currently selected language; doubles as the Mongo document _id.
_id="English"
# Accumulates one upload's words/translations before it is written to Mongo.
TranslationLibrary=dict()
app=Flask(__name__)
@app.route('/',methods=['GET'])
def renderIndex():
    """Serve the landing page template (templates/index.html)."""
    return render_template('index.html')
@app.route('/download',methods=['POST','GET'])
def handledownload():
    """POST: store a JSON payload of word->translation pairs under the
    payload's 'language' key. GET: export the stored pairs for the most
    recently posted language as a CSV download.
    """
    # The selected language persists across requests via this module global.
    global _id
    if request.method=='POST':
        data=request.get_json()
        print(data)
        # making the id to be non array
        _id=data['language']
        del data['language']
        # Collect each remaining field's value into a list per key.
        for key,val in data.items():
            if key in TranslationLibrary.keys():
                TranslationLibrary[key].append(val)
            else:
                TranslationLibrary[key]=[val]
        TranslationLibrary['_id']=_id
        writeToDatabase(TranslationLibrary,_id)
        print(TranslationLibrary)
        # Reset the accumulator so the next upload starts clean.
        TranslationLibrary.clear()
        return "{message:success}"
    else:
        output = rec.find_one({'_id': _id})
        # NOTE(review): if no document exists for _id, this branch falls
        # through and the function returns None, which Flask rejects.
        if output:
            with open('output.csv','w',newline="",encoding="utf-8") as f:
                write=csv.writer(f)
                col1=output['Word_in_English']
                col2=output['translation']
                # Only emit rows when both columns line up.
                if len(col1)>1 and len(col1)==len(col2):
                    write.writerow(['Word_in_English','translation'])
                    for i in range(len(col1)):
                        write.writerow([col1[i],col2[i]])
                f.close()
            return send_file('output.csv',as_attachment=True,cache_timeout=1)
def writeToDatabase(TranslationDictionary,language):
    """Append the dictionary's word/translation lists to the language's
    Mongo document, creating the document on first upload.
    """
    output = rec.find_one({'_id': language})
    if output:
        print(TranslationDictionary['Word_in_English'])
        # Document exists: push the new entries onto both array fields.
        rec.find_one_and_update(
            {'_id': language},
            { '$push': {
                'Word_in_English': {
                    '$each': TranslationDictionary['Word_in_English']
                }
            }})
        rec.find_one_and_update(
            {'_id': language},
            {
                '$push': {
                    'translation': {
                        '$each': TranslationDictionary['translation']
                    }
                }
            })
    else:
        # First upload for this language: store the whole dictionary as-is.
        rec.insert_one(TranslationDictionary)
# Run the Flask development server when executed directly.
if __name__ == "__main__":
    app.run(debug=True)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-03-20 01:57
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: only changes model Meta options
    # (default ordering and verbose plural name); no schema change.
    dependencies = [
        ('water_watch_api', '0004_auto_20180307_1813'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='sensordata',
            options={'ordering': ['id', 'sensor_data_dateTime'], 'verbose_name_plural': 'sensor data'},
        ),
        migrations.AlterModelOptions(
            name='station',
            options={'ordering': ['id', 'station_name']},
        ),
    ]
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 20 15:19:45 2019
@author: matthew
This script is written to replicate the function fft_meanspec written by Adam
booth in matlab.
as of 9/16, I'm still trying to figure out how to get this code working. When
I ran it on the Non-landslide DEM it took FOREVER to evaluate and looks like
there are some issues, for isntance it has significantly lower power in the non landslide terrane
"""
def fft_mean_spec(DEM, w, dx, normalize, plots):
    '''
    Compute the mean 2D FFT power spectrum of a DEM over a moving window.
    DEM: 2D array of elevations; -9999.0 is treated as nodata
    w: moving-window size in pixels
    dx: grid spacing (same units as the DEM coordinates)
    normalize: if 1, each window is scaled to unit variance before the FFT
    plots: currently unused; figures are always drawn at the end
    Returns (Pmat, Pvec, fvec, fmat): mean periodogram, power sorted by
    frequency, the matching sorted frequency vector, and the radial
    frequency matrix.
    '''
    import math
    import numpy as np
    from Hann2d import Hann2D
    import progress
    import matplotlib.pyplot as plt
    from scipy import signal
    from detrend_tp import detrend_tp
    DEM[DEM == -9999.0] = np.nan
    [nrows, ncols] = np.shape(DEM) # find the dimension of the DEM
    center = int(w/2)+1 # center of moving window
    tpow = 2**(math.ceil(math.log(w)/math.log(2))) # how much zero padding
    Pmat = np.zeros([tpow, tpow]) # initialize the output grid
    #calculate frequency bin size (frequency goes from zero to nyquist) as
    #defined by 1/(2*dz) in tpow/2
    df = 1/(dx*tpow)
    # create a matrix of radial frequencies
    xc = tpow/2
    xc = int(xc)
    yc = (xc)
    x = np.array(list(range(int(tpow))))
    y = np.array(list(range(int(tpow))))
    #An annoying extra step required because of different
    #python indexing
    # x = np.delete(x,(0),axis = 0)
    # y = np.delete(y,(0),axis = 0)
    [cols, rows] = np.meshgrid(x, y)
    # %column and row indices
    #%Frequency matrix. Note that since fmat contains an even number of rows
    #%and columns, xc and yc are rounded up in row and column indices (shifted
    #%down and to the right). The first row and first column therefore contain
    #%the Nyquist frequency in the x- and y-directions, while the last row and
    #%column stop at one bin (df) below the Nyquist:
    fmat = ((df*(rows-yc))**2 +\
            (df*(cols-xc))**2)**(0.5) #raise to exponent.
    #Do a #2D fft in a moving window of size w x w, summing on the go
    # bar = Bar("Processing outer FFT loop", max = (nrows-center+1))
    # for the test
    # m = 24
    # n = 24
    counter = 0
    for m in range(center,(nrows-center+1)):
        progress.progress(m,(nrows-center+1 - center),'Doing long job')
        for n in range(center,(ncols-center+1)):
            # This next step creates problems, when I go between matlab and python
            # previous to this step all goes well, so I'm not entirely sure
            # what the issue is.
            win = DEM[(m-center+1):(m+center-1),\
                      (n-center+1):(n+center-1)]
            # Skip windows containing any nodata cells.
            if np.sum(np.isnan(win)) == 0:
                counter = counter + 1
                win = signal.detrend(win, type = 'linear')
                # win = detrend_tp(win)
                # %(Optional) Normalize so data has unit variance:
                if normalize == 1:
                    win = win/np.std(win)
                #Variance of the detrended patch
                win_var = np.var(win)
                #window with Hann Raised cosine windo
                win,_ = Hann2D(win)
                #################################### FFT 2d ####################################
                win = np.fft.fftshift(np.fft.fft2(win,[tpow, tpow]))
                #calculate the Discrete fourier periodogram Ampl^2
                win = win*np.conj(win)/(tpow**4)
                win = win.real #necessary b/c otherwise pythong doesn't drop the imaginary
                #%Set power to zero at the zero frequency (DC). After windowing
                #%the data, its mean may no longer be zero, so this ensures that
                #%the first-order trend is removed:
                win[xc,yc]= 0
                # %Normalize so that the sum of the periodogram equals the
                # %variance of the detrended local patch. This corrects for the
                # %reduction in variance caused by the windowing function:
                win = win_var*win/np.sum(win)
                # Sum up Pout each time through loop for averaging later:
                Pmat = Pmat + win
    #%Generate sorted freqency and power vectors. Note: these vectors
    #%are redundant and could be reduced in size by half, but as coded below
    #%they sum to the variance of the original data:
    #divide by the total number of times through the loop to get mean
    Pmat = Pmat/counter
    # BUG FIX: np.reshape's third positional argument is 'order' (a string
    # 'C'/'F'/'A'); passing 1 raises at runtime. The MATLAB-style
    # reshape(..., N, 1) intent is simply a flat vector here.
    Pvec = np.reshape(Pmat, tpow*tpow)
    fvec = np.reshape(fmat, tpow*tpow)
    # Sort power by increasing radial frequency.
    fp = np.column_stack([fvec,Pvec])
    fp = fp[fp[:,0].argsort(),]
    fvec = fp[:,0]
    Pvec = fp[:,1]
    # Pvec = np.sort(Pvec)
    # fvec = np.sort(fvec)
    # Pvec = Pvec[::-1] #reverse the order of Pvec
    # fp = np.column_stack([fvec,Pvec])
    # fvec = fp[:,0]
    # Pvec = fp[:,1]
    plt.figure(1)
    plt.imshow(np.log(Pmat))
    plt.figure(2)
    plt.loglog(fvec,Pvec,'.')
    return (Pmat, Pvec, fvec, fmat)
|
""" SciKitOpt's Bayesian Optimization implementation from https://scikit-optimize.github.io/stable/auto_examples/bayesian-optimization.html """
from __future__ import print_function
from collections import OrderedDict
import numpy as np
try:
from skopt import gp_minimize
from kernel_tuner import util
bayes_opt_present = True
except Exception:
BayesianOptimization = None
bayes_opt_present = False
from kernel_tuner.strategies import minimize
supported_methods = ["poi", "ei", "ucb", "gp_hedge"]
def tune(runner, kernel_options, device_options, tuning_options):
    """ Find the best performing kernel configuration in the parameter space
    :params runner: A runner from kernel_tuner.runners
    :type runner: kernel_tuner.runner
    :param kernel_options: A dictionary with all options for the kernel.
    :type kernel_options: kernel_tuner.interface.Options
    :param device_options: A dictionary with all options for the device
        on which the kernel should be tuned.
    :type device_options: kernel_tuner.interface.Options
    :param tuning_options: A dictionary with all options regarding the tuning
        process.
    :type tuning_options: kernel_tuner.interface.Options
    :returns: A list of dictionaries for executed kernel configurations and their
        execution times. And a dictionary that contains a information
        about the hardware/software environment on which the tuning took place.
    :rtype: list(dict()), dict()
    """
    if not bayes_opt_present:
        raise ImportError("Error: optional dependency Bayesian Optimization not installed")
    # Strategy options with defaults: initial random samples and total evals.
    init_points = tuning_options.strategy_options.get("popsize", 20)
    n_iter = tuning_options.strategy_options.get("max_fevals", 100)
    #defaults as used by Scikit Python package
    acq = tuning_options.strategy_options.get("method", "gp_hedge")
    # Scale tunable parameters so the GP operates on normalized inputs.
    tuning_options["scaling"] = True
    results = []
    counter = []
    #function to pass to the optimizer
    def func(args):
        # Progress indicator: print the evaluation count every 50 calls.
        counter.append(1)
        if len(counter) % 50 == 0:
            print(len(counter), flush=True)
        val = minimize._cost_func(args, kernel_options, tuning_options, runner, results)
        return val
    bounds, _, _ = minimize.get_bounds_x0_eps(tuning_options)
    res = gp_minimize(func, bounds, acq_func=acq, n_calls=n_iter, n_initial_points=init_points, n_jobs=-1)
    if tuning_options.verbose:
        print(res)
    return results, runner.dev.get_environment()
|
from selenium import webdriver
import time
import random
# Earlier experiment (kept as an inert string): window-handle switching demo.
"""driver=webdriver.Firefox()
driver.get("https://www.baidu.com/")
time.sleep(5)
f=driver.current_window_handle
driver.get("https://blog.csdn.net/u014801403/article/details/79085085")
time.sleep(3)
all=driver.window_handles
for i in all:
    if not i==f:
        driver.switch_to(i)
m=driver.current_window_handle
s=driver.title
print(s)"""
# Search Baidu for a term, then click one of the first nine result links
# at random.
driver=webdriver.Firefox()
driver.get("https://www.baidu.com/")
driver.find_element_by_id("kw").send_keys(u"上海")
driver.find_element_by_id("su").click()
# Wait for the results page to load before collecting result links.
time.sleep(5)
m=driver.find_elements_by_xpath("//h3/a")
x=random.randint(0,8)
z=0
#t=m[x].get_attribute("href")
m[x].click()
#print(t)
#driver.get(t)
time.sleep(3)
"""for i in m:
    print (i.get_attribute("href"))
    z+=1
    print(z)"""
driver.quit()
import random
class Card:
    """One game card: a category, a mana cost, and its effect text."""

    def __init__(self, typeCard, mp, detail):
        self.typeCard = typeCard
        self.mp = mp
        self.detail = detail

    def show(self):
        # Render the card in the game's standard one-line format.
        line = "[{}] Mp {} [ Detail : {} ]".format(self.typeCard, self.mp, self.detail)
        print(line)
class Deck:
    """The game's 12-card deck; rebuilds itself when drawn empty."""

    # (typeCard, mp, detail) for every card of a fresh deck, in build order.
    _CARD_SPECS = [
        ('At card', 0, 'At+1'),
        ('At card', 1, 'At+2'),
        ('At card', 0, 'At+1'),
        ('AD card', 0, 'Hp+3'),
        ('AD card', 0, 'Mp+2'),
        ('AD card', 0, 'Mp+1'),
        ('Df card', 0, 'Shield+2'),
        ('Df card', 0, 'Shield+1'),
        ('Df card', 1, 'Shield+3'),
        ('AD card', 2, 'Hp+5'),
        ('Df card', 2, 'Shield+4'),
        ('At card', 2, 'At+3'),
    ]

    def __init__(self):
        self.cards = []
        self.build()

    def build(self):
        # Append one Card per spec, preserving the original ordering.
        for type_card, mp, detail in self._CARD_SPECS:
            self.cards.append(Card(type_card, mp, detail))

    def show(self):
        for card in self.cards:
            card.show()

    def shuffle(self):
        # Forward Fisher-Yates: swap each position with a random earlier one
        # (same random.randint call sequence as the original implementation).
        for idx in range(len(self.cards)):
            swap_with = random.randint(0, idx)
            self.cards[idx], self.cards[swap_with] = self.cards[swap_with], self.cards[idx]

    def drawCard(self):
        # Rebuild automatically once the deck runs out.
        if not self.cards:
            self.build()
        return self.cards.pop()
|
# Square roots: cmath.sqrt returns a complex number, so negative inputs
# work too (the result's real/imaginary parts are printed separately).
import cmath
num=float(input('escribe el número '))
num_sqrt=cmath.sqrt(num)
print('la raíz de {0} es: {1}. parte entera: {2} Parte imaginaria: {3}'.format(num, num_sqrt,num_sqrt.real,num_sqrt.imag))
|
# coding=UTF-8
__author__ = 'zhengandy'
# import MySQLdb
import os
import xlrd
import sys
import re
import hashlib
import simplejson
import time
from PreCondition import cfgValue
import pymysql
# Python 2-only hack: restore setdefaultencoding (removed by site.py) so
# implicit str<->unicode conversions use UTF-8 instead of ASCII.
reload(sys)
sys.setdefaultencoding('utf-8') # @UndefinedVariable
def get_md5_value(src):
    '''
    Return the hex MD5 digest of *src*.
    Mostly used to hash credentials for login.
    '''
    digest = hashlib.md5()
    digest.update(src)
    return digest.hexdigest()
def Json2Dict(json):
    """Parse a JSON string into the corresponding Python object.

    Mostly used to convert GetInfo API results. Uses the standard-library
    parser, which is behaviorally equivalent to the previous
    simplejson.loads for standard JSON and removes the third-party
    dependency.
    """
    # Local import because the parameter name shadows the json module.
    from json import loads
    return loads(json)
class dbOperation:
    """MySQL helpers: backup/restore through external tools and raw SQL execution.

    Connection settings are read from the project's PreCondition.cfgValue at
    construction time.
    """

    def __init__(self):
        self.host = cfgValue.dbHOST
        self.user = cfgValue.dbUSER
        self.psw = cfgValue.dbPASSWD
        self.dbname = cfgValue.dbName
        self.port = cfgValue.dbPORT
        self.btool = cfgValue.btool  # mysqldump-style backup tool path
        self.rtool = cfgValue.rtool  # mysql-style restore tool path

    def BackupDB(self, target):
        """Dump the configured database into *target* (an .sql file path)."""
        print('Start to backup')
        # NOTE(review): shell command built by interpolation; acceptable for
        # trusted config values, but a shell-injection risk otherwise.
        command = '%s -h%s -u%s -p%s %s > %s' % (self.btool, self.host, self.user, self.psw, self.dbname, target)
        try:
            os.system(command)
        except Exception as e:
            print(e)

    def RestoreDB(self, source):
        """Load *source* (an .sql file) into the configured database."""
        print('Start to restore sql')
        command = '%s -h%s -u%s -p%s -P3306 %s < %s' % (self.rtool, self.host, self.user, self.psw, self.dbname, source)
        try:
            os.system(command)
        except Exception as e:
            print(e)

    def execSqlCommand(self, sql):
        """Execute *sql* and return all fetched rows, or None on error.

        The cursor and connection are now closed in finally blocks so they
        are not leaked when execute/fetch raises (the original left them
        open on any exception).
        """
        try:
            conn = pymysql.connect(host=self.host, user=self.user, passwd=self.psw, db=self.dbname)
            try:
                cur = conn.cursor()
                try:
                    cur.execute(sql)
                    return cur.fetchall()
                finally:
                    cur.close()
            finally:
                conn.close()
        except Exception as e:
            # Matches the original message format; assumes a pymysql error
            # whose args are (errno, message).
            print('Mysql Error %d: %s' % (e.args[0], e.args[1]))
class parseExcelData:
    """Read test-case rows from one sheet of an Excel workbook (via xlrd)."""

    def __init__(self, casefile, sheetName):
        self.excelFile = casefile
        print(self.excelFile)
        self.sheetName = sheetName

    def getCases(self):
        """Return every data row (all rows after the header) as a list of lists.

        The workbook is expected at self.excelFile (e.g. common/TestCase.xlsx)
        with the sheet named self.sheetName.
        """
        try:
            data = xlrd.open_workbook(self.excelFile)
        except Exception as e:
            print(e)
            # Re-raise: the original fell through and crashed with a
            # NameError on `data`; propagating the real error is clearer.
            raise
        table = data.sheet_by_name(self.sheetName)
        rows = table.nrows
        # Row 0 is the header; collect the values of every later row.
        List = []
        for i in range(1, rows):
            List.append(table.row_values(i))
        return List
class regCheck:
    """Small helpers around the re module for checking one pattern against one string."""

    def __init__(self, patt, data):
        self.patt = patt  # regex pattern (string)
        self.data = data  # subject text

    def getDict(self):
        """Match *patt* at the start of *data*; return the named-group dict.

        Returns None (after printing the error) when the pattern does not
        match or is invalid — same contract as before, with the py2-only
        `except X, e` syntax fixed.
        """
        try:
            matched = re.compile(self.patt).match(self.data)
            # matched is None on failure; .groupdict() then raises and is caught.
            return matched.groupdict()
        except Exception as e:
            print(e)

    def reString(self):
        """Return re.findall(patt, data), or False if the search raised."""
        try:
            return re.findall(self.patt, self.data)
        except Exception:
            # Narrowed from a bare except; still returns False on bad patterns.
            return False
def send_the_Mail(fileTosend, mailto):
    """
    Email *fileTosend* as an attachment to every address in *mailto*.

    :param fileTosend: path of the report file to attach
    :param mailto: list of recipient email addresses
    """
    # Import the needed email libraries locally (kept from the original).
    from email.mime.text import MIMEText
    from email.mime.application import MIMEApplication
    from email.mime.multipart import MIMEMultipart
    from smtplib import SMTP

    time_strf = time.strftime("%Y-%m-%d %X", time.localtime())
    # Sender and subject (subject carries the send timestamp).
    send_from = 'ceshi@echiele.com'
    subject = "TestReport For XW API %s." % time_strf
    # Build the multipart message: headers, preamble, body text, attachment.
    msg = MIMEMultipart()
    msg['Subject'] = subject
    msg['From'] = send_from
    msg['To'] = ",".join(mailto)
    # Preamble is shown by clients that cannot render multipart mail.
    msg.preamble = 'Multipart message.\n'
    part = MIMEText("Dear Receiver,\n\nThis is the latest XW API test report,and it is an automated sent email. \nNo need to reply... it won't be answered anyway.\nAny issue please contact with the sender, \n\nThanks!")
    msg.attach(part)
    # Attach the report; `with` closes the handle even if reading fails
    # (the original leaked it on error).
    with open("%s" % fileTosend, "rb") as fp:
        part = MIMEApplication(fp.read())
    part.add_header('Content-Disposition', 'attachment', filename="%s" % fileTosend)
    msg.attach(part)
    # Connect, upgrade to TLS, authenticate and send.
    # SECURITY(review): credentials are hard-coded in source — move them to
    # configuration or environment variables instead of the repository.
    sp = SMTP()
    sp.connect('smtp.exmail.qq.com')
    sp.set_debuglevel(1)
    sp.starttls()
    sp.login('ceshi@echiele.com', 'cs123456')
    sp.sendmail(msg['From'], mailto, msg.as_string())
    sp.quit()
def FilePath(filename):
    """Search the project tree for *filename* and return its full path.

    Walks every directory under the project root (the parent of this file's
    directory) and returns the path of the first file whose basename equals
    *filename*; returns None when nothing matches (previously an implicit
    fall-through).
    """
    cudir = os.path.dirname(os.path.abspath(__file__))
    rootdir = os.path.dirname(cudir)
    # os.walk yields (parent dir, subdirectory names, file names) triples.
    for parent, dirnames, filenames in os.walk(rootdir):
        for each in filenames:
            if filename == each:
                print("the full name of the file is: " + os.path.join(parent, filename))
                return os.path.join(parent, filename)
    return None
# if __name__ == '__main__':
#
# # conn = pymysql.connect(host='127.0.0.1', user='root', passwd="123456", db='xw')
# # # conn = pymysql.connect(host='127.0.0.1', port=3306, user='root', passwd=None, db='mysql')
# # cur = conn.cursor()
# # cur.execute("SELECT price FROM price where id = 1")
# # # print cur.description
# # r = cur.fetchall()
# # # print r
# # # ...or...
# # #for r in cur:
# # print r
# #
# # cur.close()
# # conn.close() |
"""empty message
Revision ID: 783a4b75539d
Revises: 8e9a1fd625aa
Create Date: 2020-09-12 15:39:01.370992
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '783a4b75539d'
down_revision = '8e9a1fd625aa'
branch_labels = None
depends_on = None
def upgrade():
    """Apply this revision: no schema changes (empty auto-generated migration)."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade():
    """Revert this revision: nothing to undo (empty auto-generated migration)."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#Program that loads a question list from an external file and stores it in a dictionary
import math
import sys
from janome.tokenizer import Tokenizer
import rospy
from std_msgs.msg import String
#text = String()
t = Tokenizer()
qa_dict = {}
def get_Cos_up(v1, v2):
    """Count how many tokens of v1 also occur in v2 (similarity numerator).

    Each occurrence in v1 counts once, exactly as the original loop did.
    Membership is now tested against a set of v2 — O(1) per token instead
    of scanning the whole list each time.
    """
    lookup = set(v2)
    return sum(1 for word in v1 if word in lookup)
def get_Cos_under(list):
    """Return sqrt(len(list)) — one norm term of the similarity denominator.

    NOTE(review): the parameter name shadows the builtin `list`; kept
    unchanged for interface compatibility.
    """
    size = len(list)
    return math.sqrt(size)
def get_Cos_sim(v1, v2):
    """Cosine-style similarity: overlap / (sqrt(len(v1)) * sqrt(len(v2))).

    Bug fix: due to operator precedence the original computed
    up / under(v1) * under(v2) — the second norm MULTIPLIED the result
    instead of dividing it. Also guards against empty token lists, where
    the original raised ZeroDivisionError.
    """
    denom = get_Cos_under(v1) * get_Cos_under(v2)
    if denom == 0:
        return 0.0
    return float(get_Cos_up(v1, v2)) / denom
def get_Surface(words):
    """Return the surface form of every (janome) token in *words*, in order."""
    return [token.surface for token in words]
def callback(data):
    """ROS subscriber callback: print the stored answer whose question best matches.

    Tokenizes the incoming sentence, scores it against every question in the
    global qa_dict by cosine-like token overlap, and prints the answer with
    the highest score (or a not-found message when nothing scores above 0).
    """
    text = data.data
    print(text)
    # On Python 2 the payload arrives as a UTF-8 byte string; the guard also
    # lets the code run where data.data is already a unicode/str object.
    if isinstance(text, bytes):
        text = text.decode('utf-8')
    print(text)
    word_surface = get_Surface(t.tokenize(text))  # tokens of the incoming question
    best_score = 0
    answer = 0
    for q, a in qa_dict.items():
        q_surfaces = get_Surface(t.tokenize(q))
        score = get_Cos_sim(word_surface, q_surfaces)
        if score > best_score:
            best_score = score
            answer = a
    # `answer is not 0` relied on CPython small-int identity; use != instead.
    if answer != 0:
        print(answer)
    else:
        print('答えは見つかりません')
def listener():
    """Start the ROS node and dispatch 'chatter' String messages to callback."""
    rospy.init_node('listener', anonymous=True)
    rospy.Subscriber('chatter', String, callback)
    rospy.spin()  # blocks until the node is shut down
if __name__ == '__main__':
    # Load the question/answer dictionary from an external file, then listen.
    with open('./dic.txt', 'r') as f: # open the dictionary file
        qa_list = f.readlines()
    for qa in qa_list:
        qa = qa.rstrip().decode('utf-8').split(',') # strip the newline, decode, then split into question and answer at the comma (Python 2: lines are byte strings)
        qa_dict[qa[0]] = qa[1] # store the question as the key, the answer as the value
    listener()
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 7 20:17:24 2018

@author: user
Common subjects: read two groups of subjects from stdin and print their
union, intersection, difference (Y-X) and symmetric difference, each sorted.
"""


def _read_subjects(prompt):
    """Print *prompt*, then read lines until the sentinel "end"; return a set."""
    print(prompt)
    subjects = set()
    while True:
        entry = input()
        if entry == "end":
            break
        subjects.add(entry)
    return subjects


# The two identical input loops are now one helper.
x = _read_subjects("Enter group X's subjects:")
y = _read_subjects("Enter group Y's subjects:")
# Union, intersection, Y-only, and symmetric difference
# ((x|y)-(x&y) == x^y), each printed as a sorted list as before.
print(sorted(x | y))
print(sorted(x & y))
print(sorted(y - x))
print(sorted(x ^ y))
class Plant:
    """Simple record describing one plant-care action."""

    def __init__(self, name, type, actiontype, date, time):
        """Fields of a model Plant.

        NOTE: `type` and `time` keep the original parameter names for
        interface compatibility even though they shadow builtins locally.
        """
        self.name = name
        self.type = type
        self.actiontype = actiontype
        self.date = date
        self.time = time

    def __repr__(self):
        """Debug-friendly representation listing every field."""
        return ("Plant(name=%r, type=%r, actiontype=%r, date=%r, time=%r)"
                % (self.name, self.type, self.actiontype, self.date, self.time))
|
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.preprocessing import MinMaxScaler
# Load the raw ratings plus architect/user metadata.
# NOTE(review): paths are relative to the working directory — confirm.
rating = pd.read_csv('data/ratings.csv')
architect = pd.read_csv('data/architects.csv')
user = pd.read_csv('data/users.csv');
# Join ratings with architect info and drop address/contact columns that
# are irrelevant to the recommender.
architect_rating = pd.merge(rating, architect, on='architect_id')
cols = ['Registration', 'Country', 'Address 2', 'Address 3', 'Company', 'WorkPhone', 'City', 'State', 'Postcode', 'Member Type']
architect_rating.drop(cols, axis=1, inplace=True)
architect_rating.head()  # no-op outside a notebook; kept from the original
# Number of ratings received per architect.
rating_count = (architect_rating.
    groupby(by = ['architect_id'])['rating'].
    count().
    reset_index().
    rename(columns = {'rating': 'rating_count'})
)
rating_count.head()
# Keep only architects with at least `threshold` ratings.
threshold = 5
rating_count = rating_count.query('rating_count >= @threshold')
user_rating = pd.merge(rating_count, architect_rating, left_on='architect_id', right_on='architect_id', how='left')
# Number of ratings given per user.
user_count = (user_rating.
    groupby(by = ['user_id'])['rating'].
    count().
    reset_index().
    rename(columns = {'rating': 'rating_count'})
    [['user_id', 'rating_count']]
)
# Keep only users with at least `threshold` ratings.
threshold = 5
user_count = user_count.query('rating_count >= @threshold')
combined = user_rating.merge(user_count, left_on='user_id', right_on='user_id', how='inner')
print('Number of unique architects: ', combined['architect_id'].nunique())
print('Number of unique users: ', combined['user_id'].nunique())
# Scale ratings into [0, 1] to match the sigmoid output of the autoencoder.
scaler = MinMaxScaler()
combined['rating'] = combined['rating'].values.astype(float)
rating_scaled = pd.DataFrame(scaler.fit_transform(combined['rating'].values.reshape(-1,1)))
combined['rating'] = rating_scaled
combined.head()
# Build the (user x architect) rating matrix; missing entries become 0.
combined = combined.drop_duplicates(['user_id', 'architect_id'])
user_architect_matrix = combined.pivot(index='user_id', columns='architect_id', values='rating')
user_architect_matrix.fillna(0, inplace=True)
# Remember the index/column order so matrix positions map back to real ids.
users = user_architect_matrix.index.tolist()
architects = user_architect_matrix.columns.tolist()
#df.as_matrix() deprecated as of v0.23.0 using df.values
user_architect_matrix = user_architect_matrix.values
# TF1-style graph mode (placeholders/sessions) via the compat shim.
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
# Autoencoder dimensions: input width = number of architects, two hidden layers.
num_input = combined['architect_id'].nunique()
num_hidden_1 = 10
num_hidden_2 = 5
X = tf.placeholder(tf.float64, [None, num_input])
# Randomly-initialized weights and biases for the encoder/decoder layers.
weights = {
    'encoder_h1': tf.Variable(tf.random_normal([num_input, num_hidden_1], dtype=tf.float64)),
    'encoder_h2': tf.Variable(tf.random_normal([num_hidden_1, num_hidden_2], dtype=tf.float64)),
    'decoder_h1': tf.Variable(tf.random_normal([num_hidden_2, num_hidden_1], dtype=tf.float64)),
    'decoder_h2': tf.Variable(tf.random_normal([num_hidden_1, num_input], dtype=tf.float64)),
}
biases = {
    'encoder_b1': tf.Variable(tf.random_normal([num_hidden_1], dtype=tf.float64)),
    'encoder_b2': tf.Variable(tf.random_normal([num_hidden_2], dtype=tf.float64)),
    'decoder_b1': tf.Variable(tf.random_normal([num_hidden_1], dtype=tf.float64)),
    'decoder_b2': tf.Variable(tf.random_normal([num_input], dtype=tf.float64)),
}
def encoder(x):
    """Map a batch of rating vectors down to the bottleneck representation."""
    # Layer 1: num_input -> num_hidden_1, sigmoid activation.
    h1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['encoder_h1']), biases['encoder_b1']))
    # Layer 2: num_hidden_1 -> num_hidden_2 (the code layer).
    h2 = tf.nn.sigmoid(tf.add(tf.matmul(h1, weights['encoder_h2']), biases['encoder_b2']))
    return h2
def decoder(x):
    """Reconstruct the full rating vector from the bottleneck representation."""
    # Layer 1: num_hidden_2 -> num_hidden_1, sigmoid activation.
    h1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['decoder_h1']), biases['decoder_b1']))
    # Layer 2: num_hidden_1 -> num_input (reconstruction in [0, 1]).
    h2 = tf.nn.sigmoid(tf.add(tf.matmul(h1, weights['decoder_h2']), biases['decoder_b2']))
    return h2
# Wire up the autoencoder graph: the reconstruction target is the input itself.
encoder_op = encoder(X)
decoder_op = decoder(encoder_op)
y_pred = decoder_op
y_true = X
loss = tf.losses.mean_squared_error(y_true, y_pred)
optimizer = tf.train.RMSPropOptimizer(0.03).minimize(loss)
# Precision metric placeholders (defined but not evaluated below).
eval_x = tf.placeholder(tf.int32, )
eval_y = tf.placeholder(tf.int32, )
pre, pre_op = tf.metrics.precision(labels=eval_x, predictions=eval_y)
init = tf.global_variables_initializer()
local_init = tf.local_variables_initializer()
pred_data = pd.DataFrame()
print(pred_data)
with tf.Session() as session:
    epochs = 50
    batch_size = 10
    session.run(init)
    session.run(local_init)
    num_batches = int(user_architect_matrix.shape[0] / batch_size)
    print(num_batches)
    user_architect_matrix = np.array_split(user_architect_matrix, num_batches)
    for i in range(epochs):
        avg_cost = 0
        for batch in user_architect_matrix:
            _, l = session.run([optimizer, loss], feed_dict={X: batch})
            # BUG FIX: accumulate the batch loss `l`, not the constant 1 —
            # the original always printed "Loss: 1.0" regardless of training.
            avg_cost += l
        avg_cost /= num_batches
        print("epoch: {} Loss: {}".format(i + 1, avg_cost))
    # Re-assemble the full matrix and predict every user's ratings.
    user_architect_matrix = np.concatenate(user_architect_matrix, axis=0)
    preds = session.run(decoder_op, feed_dict={X: user_architect_matrix})
    pred_data = pred_data.append(pd.DataFrame(preds))
# Reshape predictions to long format and map matrix indices back to real ids.
pred_data = pred_data.stack().reset_index(name='rating')
pred_data.rename(columns={'level_0': 'user_id', 'level_1': 'architect_id'}, inplace=True)
pred_data['user_id'] = pred_data['user_id'].map(lambda value: users[value])
pred_data['architect_id'] = pred_data['architect_id'].map(lambda value: architects[value])
# Drop (user, architect) pairs the user already rated, then keep each
# user's ten highest-predicted architects.
keys = ['user_id', 'architect_id']
index_1 = pred_data.set_index(keys).index
index_2 = combined.set_index(keys).index
top_ten_ranked = pred_data[~index_1.isin(index_2)]
top_ten_ranked = top_ten_ranked.sort_values(['user_id', 'rating'], ascending=[True, False])
top_ten_ranked = top_ten_ranked.groupby('user_id').head(10)
print(top_ten_ranked.loc[top_ten_ranked['user_id'] == 5])
print(rating.loc[rating['user_id'] == 5].sort_values(by=['rating'], ascending=False))
|
# To write a test, we define a function whose name starts with test_
# After that we use the assert keyword, which checks whether the value immediately following it is truthy
def test_something():
    # Trivial sanity check: any truthy value makes the assert pass.
    result = True
    assert result
def test_equal_string():
    # Build the greeting from its two parts, then compare with the full literal.
    parts = ["Hello, ", "world"]
    greetings = "".join(parts)
    assert greetings == "Hello, world"
def test_numbers():
    # 73 + 42 should equal 115.
    total = sum((73, 42))
    assert total == 115
# After that we run the code with pytest from the console
# >> pytest basic_test.py
# ============================= test session starts ==============================
# collected 3 items
# basic_test.py ... [100%]
# =========================== 3 passed in 0.03 seconds =========================== |
import argparse
from glob import glob
from os.path import join

from pyrosetta import *
from pyrosetta.rosetta.core.simple_metrics.metrics import TotalEnergyMetric, InteractionEnergyMetric
from pyrosetta.rosetta.core.simple_metrics.per_residue_metrics import PerResidueEnergyMetric
from pyrosetta.rosetta.core.select.residue_selector import ChainSelector, ResidueIndexSelector
def parse_args():
    """Parse command-line options: the folder of PDBs to analyze and a reference PDB.

    Bug fix: `argparse` was used here but never imported anywhere in the
    file, so this function always raised NameError (import added at the top
    of the file).
    """
    info = "Design a protease around a peptide sequence"
    parser = argparse.ArgumentParser(description=info)
    parser.add_argument("-d", "--directory", required=True,
                        help="Pick a folder to analyze")
    parser.add_argument("-ref", "--reference", required=True,
                        help="Pick a PDB file to compare against")
    args = parser.parse_args()
    return args
# Parse CLI options, start PyRosetta, and collect the PDB files to score.
args = parse_args()
init()
pdbs = glob(join(args.directory, '*.pdb'))
pose = pose_from_pdb(args.reference)
# Full-atom score function plus total-energy and per-residue-energy metrics.
sf = get_fa_scorefxn()
prem = PerResidueEnergyMetric()
prem.set_scorefunction(sf)
tem = TotalEnergyMetric()
tem.set_scorefunction(sf)
# Score every PDB: each entry is [path, total energy, per-residue energy map].
results = []
for p in pdbs:
    pp = pose_from_pdb(p)
    results.append([p, tem.calculate(pp), prem.calculate(pp)])
s=prem.calculate(pose)  # reference pose per-residue energies (currently unused below)
# Write per-residue energies for residues 1..115 of each scored pose.
with open('res_results.txt','w') as w:
    for r in results:
        res_scores = []
        for i in range(1,116):
            res_scores.append(r[2][i])
        ol = [r[0], r[1]] + res_scores
        w.write(','.join([str(x) for x in ol])+'\n')
# Interaction energy of each residue (1..114) against chain B.
# NOTE(review): this loop covers 114 residues while the loop above covers
# 115 — confirm whether the ranges were meant to match.
csb = ChainSelector('B')
with open('res_interactions.csv','w') as w:
    for p in pdbs:
        pp = pose_from_pdb(p)
        tot_e = tem.calculate(pp)
        this_res = [p, tot_e]
        for i in range(1,115):
            ris = ResidueIndexSelector(str(i))
            inte = InteractionEnergyMetric()
            inte.set_scorefunction(sf)
            inte.set_residue_selectors(ris, csb)
            this_res.append(inte.calculate(pp))
        w.write(','.join([str(x) for x in this_res])+'\n')
import torch
import torch.nn as nn
class SetConvLayer(torch.nn.Module):
    """Set-convolution layer: pools a set of per-element features into one vector.

    A linear layer produces data-dependent weights from `x`; these are
    combined with a learned, column-softmaxed mixing matrix via a batched
    (Khatri-Rao style) product, and the result weights and pools `feature`.
    """

    def __init__(self, cfg, in_dim, out_dim):
        super(SetConvLayer, self).__init__()
        self.cfg = cfg
        self.fc = nn.Linear(in_dim, out_dim, bias=True)
        self.w = nn.Parameter(torch.ones(in_dim, out_dim), requires_grad=True)
        # Kaiming init matches the intended ReLU nonlinearity; bias starts at zero.
        torch.nn.init.kaiming_uniform_(self.fc.weight, nonlinearity='relu')
        torch.nn.init.zeros_(self.fc.bias)

    def forward(self, x, feature):
        """x: (N, I) inputs; feature: (N, I) per-element features. Returns (1, O)."""
        num_elems = x.shape[0]
        data_w = self.fc(x)                              # (N, O)
        lhs = data_w.unsqueeze(1).permute(2, 0, 1)       # (O, N, 1)
        mix = torch.softmax(self.w, dim=0)               # (I, O), columns sum to 1
        rhs = mix.unsqueeze(-1).permute(1, 2, 0)         # (O, 1, I)
        combined = torch.bmm(lhs, rhs).permute(1, 2, 0)  # (N, I, O)
        # Weight the features, sum over the set and feature dims, average by N.
        pooled = torch.sum(combined * feature.unsqueeze(-1), (0, 1)).view(1, -1)
        return pooled / num_elems
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.