import sys
from datetime import datetime
# Shared data
# keg data dictionary
# value is list( volume in liters, empty weight in kg )
keg_data = {
'half_bbl': (58.6, 13.6),
'tall_qtr_bbl': (29.3, 10),
'short_qtr_bbl': (29.3, 10),
'sixth_bbl': (19.5, 7.5),
'corny': (18.9, 4),
}
# CO2 tank data dictionary
# data from https://www.cryofx.com/media-NEW/co2-tanks/20-Lb-Co2-Tank-Dimensions.jpg.jpg
# value is list( contents net weight in kg, tare weight in kg )
co2_data = {
"two_and_a_half_pound_aluminum": (1.133, 2.268),
"two_and_a_half_pound_steel": (1.133, 3.629),
"five_pound_aluminum": (2.268, 3.629),
"five_pound_steel": (2.268, 4.536),
"ten_pound_aluminum": (4.536, 6.804),
"ten_pound_steel": (4.536, 10.433),
"fifteen_pound_aluminum": (6.804, 8.165),
"fifteen_pound_steel": (6.804, 13.608),
"twenty_pound_aluminum": (9.072, 11.340),
"twenty_pound_steel": (9.072, 14.288),
"thirty_five_pound_aluminum": (15.876, 16.783),
"thirty_five_pound_steel": (15.876, 24.948),
"fifty_pound_aluminum": (22.680, 22.226),
"fifty_pound_steel": (22.680, 40.823),
"seventy_five_pound_steel": (34.019, 62.596),
"one_hundred_pound_steel": (45.359, 86.183),
}
# Breakout board port data
# Value is list( pd_sck, dout )
# DEPRECATED: do not use
breakout_ports = {
'1': (6, 5),
'2': (13, 12),
'3': (27, 17),
'4': (25, 22),
}
# Helper functions
def debug_msg(c, message):
# TODO: drop this entirely, and use logging.Logger() instead
# replace this function with a logging handle helper like get_logger()
if c.debug:
print("%s %s::%s: %s" % (datetime.now().isoformat(' '), c.__class__.__name__, sys._getframe(1).f_code.co_name, message))
def as_degC(temp):
return u'%s\u00b0C' % '{0:.1f}'.format(float(temp) / 1000.0)
def as_degF(temp):
real_c = float(temp) / 1000.0
deg_f = real_c * (9.0/5.0) + 32.0
return u'%s\u00b0F' % '{0:.1f}'.format(deg_f)
def as_kg(val):
return "%s kg" % "{0:.2f}".format(val / 1000.0)
def as_pint(val):
return '%s pt.' % int(val / 473)
def format_weight(val, mode, tare=None, cap=None):
if mode == None:
mode = 'as_kg_gross'
if mode == 'as_kg_gross':
return as_kg(val)
elif mode == 'as_kg_net':
if tare == None:
raise ValueError('tare must not be None when using as_kg_net')
else:
return as_kg(val - tare)
elif mode == 'as_pint':
if tare == None:
raise ValueError('tare must not be None when using as_pint')
else:
return as_pint(val - tare)
elif mode == 'as_pct':
if tare == None:
raise ValueError('tare must not be None when using as_pct')
        elif cap == None:
            raise ValueError('cap must not be None when using as_pct')
else:
return "%s%%" % int(((val - tare) / cap) * 100)
else:
raise ValueError('bad mode %s' % mode)
def fill_bar_color(percent):
    if percent > 0.5:
        return "green"
    if 0.2 < percent <= 0.5:
        return "yellow"
    if 0 <= percent <= 0.2:
        return "red"
# default in case something breaks
return "gray"
def get_keg_fill_percent(w, cap, tare):
keg_cap = cap * 1000
keg_tare = tare * 1000
net_w = max((w - keg_tare), 0)
fill_percent = net_w / keg_cap
return fill_percent
def get_index_from_port(port, hx_list):
try:
ports = breakout_ports[port]
except KeyError:
return None
index = None
for conf in hx_list:
if conf.get('pd_sck', None) == ports[0] and conf.get('dout', None) == ports[1]:
index = hx_list.index(conf)
return index
def get_port_from_index(index, hx_list):
try:
conf = hx_list[index]
except IndexError:
return None
port = None
for port_key in breakout_ports.keys():
if conf.get('pd_sck', None) == breakout_ports[port_key][0] and conf.get('dout', None) == breakout_ports[port_key][1]:
port = port_key
return port
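
# Hedged usage sketch (not part of the original module) showing how the shared data
# and helpers above fit together; the 20 kg scale reading is an invented example value.
if __name__ == '__main__':
    cap_l, tare_kg = keg_data['sixth_bbl']   # volume in liters, empty weight in kg
    reading_g = 20000                        # example raw scale reading in grams
    print(format_weight(reading_g, 'as_kg_net', tare=tare_kg * 1000))
    print(format_weight(reading_g, 'as_pct', tare=tare_kg * 1000, cap=cap_l * 1000))
    print(fill_bar_color(get_keg_fill_percent(reading_g, cap_l, tare_kg)))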
|
from django import forms
from .models import Post
class PostForm(forms.ModelForm):
class Meta:
model = Post
fields = ('first', 'second', 'third', 'nickname')
nation_name = forms.CharField(max_length=100,)
|
#import glob
import os
#import sys
#import pandas
#import numpy
#import math
#import tensorflow as tf
#import matplotlib.pyplot as plt
class Config:
def __init__(self):
self.ROOT_PATH = os.path.abspath(__file__ + "/../../")
self.LIBRARY_PATH = os.path.join(self.ROOT_PATH, 'K3S')
self.DATA_PATH = os.path.join(self.ROOT_PATH, 'data')
self.DATA_PROCESSED_PATH = os.path.join(self.DATA_PATH, 'processed')
self.LOG_LOCATION = os.path.join(self.ROOT_PATH, 'temp', 'logs')
self.RUN_LOCATION = os.path.join(self.ROOT_PATH, 'temp', 'runs')
@staticmethod
def getDbUserConfig():
return {'userName': 'k3s_user', 'host':'localhost', 'password': 'b15m1llah'}
|
#!/usr/bin/env python
import mattermost
from threading import Lock
from flask import request
from flask import Flask, jsonify
app = Flask(__name__)
import configparser
import sys
import time
import random
import shelve
import datetime
from insults import list_of_insults
GAME_MUTEX = Lock()
# Read in the config
config = configparser.ConfigParser()
config.read('config.ini')
# Setup Mattermost connection
mm = mattermost.MMApi(config["mattermost"]["url"])
mm.login(bearer=config["mattermost"]["token"])
user = mm.get_user()
# Setup randomkick config
active_users_since = int(config["randomkick"]["active_users_since_minutes"])
# Setup duel config
duel_max_game_ticks = int(config["duel"]["max_game_tick"])
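# For reference, a config.ini with the sections and keys read above might look like
# the following (the values are placeholders, not taken from the original project):
#
#   [mattermost]
#   url = https://mattermost.example.com
#   token = <bot access token>
#
#   [randomkick]
#   active_users_since_minutes = 60
#
#   [duel]
#   max_game_tick = 12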
# Setup shelve storage
with shelve.open('stats') as db:
if 'russianroulette' not in db:
db['russianroulette'] = []
if 'randomkick' not in db:
db['randomkick'] = []
if 'duel' not in db:
db['duel'] = []
if 'insult' not in db:
db['insult'] = []
def eprint(msg):
print(msg, file=sys.stderr)
@app.route("/randomkick", methods=["POST"])
def randomkick():
# Make sure the bot is in the channel
channel = request.form["channel_id"]
channel_name = request.form["channel_name"]
try:
mm.add_user_to_channel(channel, user["id"])
except mattermost.ApiException:
return "I do not have permission to join this channel"
# Get all users that have posted recently
curr_millis = int(round(time.time() * 1000))
delay_millis = active_users_since * 60 * 1000
recent_posts = list(get_posts_for_channel(channel, curr_millis-delay_millis))
recent_users = set([x["user_id"] for x in recent_posts if x["user_id"] != user["id"]])
# Get all channel members
channel_members = set([x["user_id"] for x in mm.get_channel_members(channel)])
# Find the intersection
possible_victims = channel_members & recent_users
# Pick one
    victim = mm.get_user(random.choice(list(possible_victims)))  # choose from a list; random.sample on a set is deprecated
# Notify the channel
mm.create_post(channel, f"Goodbye @{victim['username']}, he was randomly kicked by @{request.form['user_name']}")
# Save stats
with shelve.open('stats', writeback=True) as db:
victim_name = victim['username']
kicker_name = request.form['user_name']
db['randomkick'].append({
"timestamp": datetime.datetime.now().isoformat(),
"channel_name": channel_name,
"kicker": kicker_name,
"victim": victim_name
})
# Kick them
mm.remove_user_from_channel(channel, victim["id"])
return f"You just killed @{victim['username']}, do you feel happy now?"
@app.route("/russianroulette", methods=["POST"])
def russianroulette():
# Make sure the bot is in the channel
channel = request.form["channel_id"]
channel_name = request.form["channel_name"]
try:
mm.add_user_to_channel(channel, user["id"])
except mattermost.ApiException:
return "I do not have permission to join this channel"
# 1/6 chance...
    if random.randint(0, 5) == 0:
message = f"BANG, @{request.form['user_name']} shot themselves."
# Kick the user
mm.remove_user_from_channel(channel, request.form["user_id"])
else:
message = "_click_"
# Save stats
with shelve.open('stats', writeback=True) as db:
player_name = request.form['user_name']
db['russianroulette'].append({
"timestamp": datetime.datetime.now().isoformat(),
"channel_name": channel_name,
"player": player_name,
"died": message != "_click_"
})
return jsonify({
"response_type": "in_channel",
"text": message
})
@app.route("/duel", methods=["POST"])
def duel():
# Get the channel, the user and the victim
channel = request.form['channel_id']
channel_name = request.form["channel_name"]
caller = request.form['user_id']
caller_name = request.form['user_name']
victim_name = request.form['text']
# Verify that there is an argument (the user to pass the bomb to)
if victim_name == '':
return "Use /duel (otheruser) to challenge another user to a game of russian roulette"
# Remove leading @
if victim_name[0] == "@":
victim_name = victim_name[1:]
# Try to find the user
try:
victim = mm.get_user_by_username(victim_name)
except mattermost.ApiException:
return f"Could not find the user '{victim_name}'"
# Make sure the bot is in the channel
channel = request.form["channel_id"]
try:
mm.add_user_to_channel(channel, user["id"])
except mattermost.ApiException:
return "I do not have permission to join this channel"
# Make sure the victim is in the channel
channel_members = set([x["user_id"] for x in mm.get_channel_members(channel)])
if victim['id'] not in channel_members:
return f"@{victim['username']} is not in this channel"
with GAME_MUTEX:
mm.create_post(channel, f"@{caller_name} challenges @{victim['username']} for a game of russian roulette")
# If it ducks like a quack
players = [{"username": caller_name, "id": caller}, victim]
game_tick = 0
time.sleep(3)
someone_died = False
while(not someone_died and game_tick < duel_max_game_ticks):
player = players[game_tick % 2]
# 1/6 chance...
if random.randint(0,5) == 0 or game_tick == 1:
mm.create_post(channel, f"@{player['username']} takes the gun... **BANG**!")
someone_died = True
else:
mm.create_post(channel, f"@{player['username']} takes the gun... _click_")
game_tick += 1
time.sleep(3)
mm.remove_user_from_channel(channel, player["id"])
# Save stats
with shelve.open('stats', writeback=True) as db:
db['duel'].append({
"timestamp": datetime.datetime.now().isoformat(),
"channel_name": channel_name,
"starter": caller_name,
"victim": victim_name,
"gameTick": game_tick
})
return "https://www.youtube.com/watch?v=h1PfrmCGFnk"
@app.route("/insult", methods=["POST"])
def insult():
# Verify that there is an argument
if request.form['text'] == '':
return "Use /insult (name) to insult another user"
insult = random.choice(list_of_insults)
insultee = request.form['text']
# Save stats
with shelve.open('stats', writeback=True) as db:
channel_name = request.form['channel_name']
insulter = request.form['user_name']
db['insult'].append({
"timestamp": datetime.datetime.now().isoformat(),
"channel_name": channel_name,
"insulter": insulter,
"insultee": insultee,
"insult": insult
})
return jsonify({
"response_type": "in_channel",
"text": f"{insultee}, {insult}"
})
@app.route("/stats", methods=["GET"])
def stats():
ret = {}
with shelve.open("stats") as db:
ret = dict(db)
return jsonify(ret)
# Based on the mattermost library, but that has no "since" argument
def get_posts_for_channel(channel_id, since):
data_page = mm._get("/v4/channels/"+channel_id+"/posts", params={"since":str(since)})
for order in data_page["order"]:
yield data_page["posts"][order]
|
def testData():
otest = open('./21/test.txt', 'r')
test = otest.readlines()
oanswer = open('./21/answer.txt', 'r')
answer = oanswer.readline()
status = False
print("Runs test data")
result = runCode(test)
if result == int(answer): #not always int
status = True
print("Correct answer: " + answer + "My answer: " + str(result))
return status
def runCode(data):
print("Runs code")
foodlist = []
all_allergens = []
all_ings = []
all_recipes = []
definite = {}
#a list of numbers
for line in data:
print(line)
recipe = []
foods, allergens = line.strip().split(" (")
ings = foods.split(" ")
allergens = allergens.replace("contains ", '')
allergens = allergens.replace(")", '')
allergenlist = allergens.split(", ")
recipe = [ings, allergenlist]
all_recipes.append(recipe)
for a in allergenlist:
if a not in all_allergens:
all_allergens.append(a)
for i in ings:
if i not in all_ings:
all_ings.append(i)
print("Allergens and ingredients:")
print(all_allergens)
print(all_ings)
grid = [[True] * len(all_ings) for i in range(len(all_allergens))]
for recipe in all_recipes:
for i, alg in enumerate(all_allergens):
if alg in recipe[1]:
for j, ing in enumerate(all_ings):
if ing not in recipe[0]:
grid[i][j] = False
while len(definite) < len(all_allergens):
for i, row in enumerate(grid):
if row.count(True) == 1:
index = row.index(True)
definite[i] = index
for j, r in enumerate(grid):
grid[j][index] = False
to_remove = []
#Remove the trues from all_ings
for alg in definite:
a = definite[alg]
v = all_ings[a]
to_remove.append(v)
for ing in to_remove:
all_ings.remove(ing)
occurrences = 0
#Loop over all_ings to find the leftovers
for ing in all_ings:
for rec in all_recipes:
occurrences += rec[0].count(ing)
'''
    Goal: For each allergen, find the single ingredient that could still contain it. Start with rows
    (allergens) that have only one candidate ingredient left; that ingredient must be the match.
    Save each match in a dict with the allergen as key, then remove that ingredient from every row,
    which may leave other rows with a single candidate.
    In the end, collect the ingredients that remain unmatched and sum how many times they appear
    across all recipes.
'''
print(occurrences)
return occurrences
def match(recipe1, recipe2):
#Recipe is a list of ingredients
counter = 0
found = ''
#print(recipe1)
for ing in recipe1:
#print(ing)
if ing in recipe2:
found = ing
counter+=1
return counter, found
#Runs testdata
testResult = testData()
if testResult == True:
print("Test data parsed. Tries to run puzzle.")
opuzzle = open('./21/input.txt', 'r')
puzzle = opuzzle.readlines()
finalResult = runCode(puzzle)
print(finalResult)
else:
print("Test data failed. Code is not correct. Try again.")
|
# a121_catch_a_turtle.py
#-----import statements-----
import turtle as trtl
import random
#-----game configuration----
t=trtl.Turtle()
score_writer=trtl.Turtle()
t.speed(0)
spot_color = "pink"
score=0
score_writer.speed(0)
#-----initialize turtle-----
t.shape("circle")
t.shapesize(2)
t.fillcolor("black")
score_writer.penup()
score_writer.goto(-380,290)
#-----game functions--------
def update_score():
    global score
    score += 1
    # Show the running score with the dedicated writer turtle, and echo it to the console.
    score_writer.clear()
    score_writer.write(score, font=("Arial", 24, "normal"))
    print(score)
def spot_clicked(x,y):
xpos=random.randint(-400,400)
ypos=random.randint(-300,300)
t.penup()
t.goto(xpos,ypos)
update_score()
#-----events----------------
t.onclick(spot_clicked)
wn = trtl.Screen()
wn.mainloop()
|
from django.test import TestCase
from pulp import *
import random
from .models import Nb_creneaux
from .models import plne
class plne_test(TestCase):
def test_1(self):
n = random.randint(0,Nb_creneaux-1)
index = []
for i in range(Nb_creneaux):
index.append(i)
random.shuffle(index)
creneaux = [i for i in range(Nb_creneaux)]
pref= [[1 for i in range(n)] for j in range(Nb_creneaux)]
for k in range(n):
pref[index[k]][k] = 0
t=[(i,j) for i in range(n) for j in range(Nb_creneaux)]
sol = plne(creneaux,t,pref,n)
for i in range(n):
for j in range(Nb_creneaux):
                if pref[j][i] == 1:
                    self.assertEqual(sol[(i, j)].value(), 0)
                else:
                    self.assertEqual(sol[(i, j)].value(), 1)
|
# -*- coding: utf-8 -*-
"""Points models."""
from tour.database import Column, Model, db
categories = ['Park', 'Museum', 'Restaurant']
class Point(Model):
"""A point model of the app."""
__tablename__ = 'points'
id = db.Column(db.Integer, primary_key=True)
name = Column(db.String(80), nullable=False)
category = Column(db.String(80), nullable=False)
public = Column(db.Boolean(), default=False)
latitude = Column(db.String(15), nullable=False)
longitude = Column(db.String(15), nullable=False)
def __init__(self, name, category, public, latitude, longitude):
"""Create instance."""
db.Model.__init__(self, name=name, category=category)
# if category not in categories:
# self.category = '--'
# else:
# self.category = category[0]
self.category = category[0]
self.name = name[0]
self.public = public
self.latitude = latitude
self.longitude = longitude
def __repr__(self):
"""Represent instance as a unique string."""
return '<Point({name!r})>'.format(name=self.name)
|
import time
import random
# Decorator for a function that takes parameters: times the wrapped call
def get_exec_time(func):
    def wrapper(a, b):
        begin_time = time.time()
        result = func(a, b)  # call the wrapped function and keep its result
        end_time = time.time()
        use_time = end_time - begin_time
        print(use_time)
        return result
    return wrapper
@get_exec_time
def func1(a, b):
sleep_time = random.randint(a, b)
print(sleep_time)
time.sleep(sleep_time)
func1(3, 4)
|
class SentenceAnalyser:
def __init__(self):
self.sentence={"Clauses":0 , "Complete sentences":0 , "Questions":0}
def __str__(self):
string = ""
for i in self.sentence:
string += i + ":" + str(self.sentence[i]) + '\n'
return string
def analyse_sentences(self, decoded_sequence):
        # Remove spaces so that consecutive punctuation marks separated only by
        # spaces are recognized as a single sentence boundary.
        decoded_sequence = decoded_sequence.replace(' ', '')
        chars = []
        # Put every character of the string into a list so it can be mutated below.
        for i in decoded_sequence:
            chars += i
        i = 0
        # Collapse runs of consecutive punctuation, keeping only the first mark,
        # because a run of punctuation does not represent more than one sentence.
        while i < len(chars) - 1:
            if (chars[i] in ['?', '.', ',']) and (chars[i + 1] in ['?', '.', ',']):
                del(chars[i + 1])
            else:
                i += 1
        # Count the remaining punctuation marks and record their meaning in the dictionary.
        for i in chars:
            if i == '?':
                self.sentence['Questions'] += 1
            elif i == '.':
                self.sentence['Complete sentences'] += 1
            elif i == ',':
                self.sentence['Clauses'] += 1
|
#!/usr/bin/env python
# encoding: utf-8
"""
Created by John DiBaggio on 2017-06-22
Prints a message like the following:
417929742755482295 rabbit pairs after 86 months with 1 pairs produced per litter from rabbits of age 2+ months and
rabbits dying after 18 months. Calculated in 0.000448942184448 seconds
"""
__author__ = 'johndibaggio'
import sys
import time
argv = list(sys.argv)
input_file = open(argv[1], 'r+')
output_file = open(argv[2], 'w+')
conditions = input_file.read().split(" ")
n = int(conditions[0])
k = 1
m = int(conditions[1])
memo = {}
def calc_rabbit_pairs(month_n, months_death, multiplier=1):
rabbits_next = calc_rabbit_pairs_linear_recurrence_dynamic(month_n, months_death, multiplier)
total_rabbits = 0
for rabbits_month_i in rabbits_next:
total_rabbits += rabbits_month_i
return total_rabbits
def calc_rabbit_pairs_linear_recurrence_dynamic(month_n, months_death, multiplier=1):
"""
Calculate number of rabbit pairs after month `month_n` with `multiplier` pairs produced per litter, using dynamic
    programming (faster), i.e. memoization: answers computed for a set of conditions are stored and then reused
    for the same conditions rather than recomputed
:type month_n: int
:param month_n: nth month_n after which point we want to know the number of rabbit pairs
:type multiplier: int
:param multiplier: number of rabbit pairs produced per litter
:rtype: array
:return: array, numbers of pairs of rabbits of age 0 months, 1 month, ... n-1 months
"""
    args = (month_n, months_death, multiplier)  # key on all conditions so cached answers are only reused when they match
if args in memo:
print('using data store')
return memo[args] # Use previously computed value for conditions
rabbits_current = [0] * months_death
# Compute value for new conditions
if month_n == 1: # new baby rabbit pair
rabbits_current[0] = 1
# print('current:')
# print(rabbits_current)
ans = rabbits_current
# elif month_n == 2: # new mature rabbit pair (will produce offspring in subsequent months)
# rabbits_current[1] = 1
# ans = rabbits_current
else:
rabbits_prev = calc_rabbit_pairs_linear_recurrence_dynamic(month_n-1, months_death, multiplier)
# print('\nprevious:')
# print(rabbits_prev)
rabbits_current[1] = rabbits_prev[0]
rabbits_current[0] += multiplier * rabbits_prev[months_death - 1] # produce offspring from oldest rabbits
for i in range(1, months_death - 1): # kill off oldest rabbits
rabbits_current[i + 1] = rabbits_prev[i] # age the mature rabbits by 1 month
rabbits_current[0] += multiplier * rabbits_current[i + 1] # produce offspring from mature rabbits
# print('current:')
# print(rabbits_current)
ans = rabbits_current
memo[args] = ans # Store the computed value for new conditions
return ans
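# Hedged sanity check (an assumption, not part of the original script): for the
# well-known sample case of this problem -- 6 months, rabbits dying after 3 months,
# 1 pair per litter -- the recurrence should give 4 pairs. The memo is cleared
# afterwards so the timed run below starts fresh.
assert calc_rabbit_pairs(6, 3, 1) == 4
memo.clear()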
time_start = time.time()
rabbit_pair_count = calc_rabbit_pairs(n, m, k)
time_end = time.time()
time_elapsed = time_end - time_start
print "{} rabbit pairs after {} months with {} pairs produced per litter from rabbits of age 2+ months and rabbits " \
"dying after {} months. Calculated in {} seconds".format(rabbit_pair_count, n, k, m, time_elapsed)
output_file.write(str(rabbit_pair_count))
output_file.close()
input_file.close()
|
# Removing the duplicate values in a sequence,
# but preserve the order of the remaining items.
# Solution for hashable sequence
def dedupe(items):
seen = set()
for item in items:
if item not in seen:
yield item
seen.add(item)
# Solution for unhashable types (such as dicts)
def dedupe(items, key=None):
seen = set()
for item in items:
if key is None:
val = item
else:
# the purpose of the key argument is to specify a
# function that converts sequence items into a
# hashable type for the purposes of duplicate detection.
val = key(item)
if val not in seen:
yield item
seen.add(val)
# Solution use itertools
from itertools import filterfalse
def dedupe(iterable, key=None):
'''List unique elements, preserving order.
Remember all elements ever seen.'''
seen = set()
if key is None:
for item in filterfalse(seen.__contains__, iterable):
seen.add(item)
yield item
else:
for item in iterable:
val = key(item)
if val not in seen:
yield item
seen.add(val)
a = [{'x': 1, 'y': 2}, {'x': 1, 'y': 3}, {'x': 1, 'y': 2}, {'x': 2, 'y': 4}]
print(list(dedupe(a, key=lambda d: (d['x'], d['y']))))
print(list(dedupe(a, key=lambda d: d['x'])))
'''Discussion
The use of a generator function in this recipe reflects the fact
that you might want the function to be extremely general
purpose—not necessarily tied directly to list processing.
For example, if you want to read a file,
eliminating duplicate lines, you could simply do this:
with open(somefile,'r') as f:
for line in dedupe(f):
...
The specification of a key function mimics similar functionality
in built-in functions such as sorted() , min() , and max() .
'''
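# A runnable version of the file example from the discussion above, using io.StringIO
# as a stand-in for a real file so the snippet stays self-contained.
import io
fake_file = io.StringIO("a\nb\na\nc\nb\n")
print(list(dedupe(fake_file)))  # ['a\n', 'b\n', 'c\n']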
|
"""Write a function that takes as input an English sentence (a string) and prints the total
number of vowels and the total number of consonants in the sentence. The function
returns nothing. Note that the sentence could have special characters like dots, dashes,
and so on"""
import string
def eliminate_bad_characters(sentence):
bad_characters = string.whitespace + string.punctuation
new_sentence = ""
for i in sentence:
if i in bad_characters:
new_sentence += ""
else:
new_sentence += i
return new_sentence
def count_vowels(sentence):
vowels = 'aeiou'
vowel_count = 0
for i in sentence:
if i in vowels:
vowel_count += 1
return vowel_count
def count_consonants(sentence):
consonants = "bcdfghjklmnpqrstvwxyz"
consonant_count = 0
for i in sentence:
if i in consonants:
consonant_count += 1
return consonant_count
def count_consonant_and_vowels():
english_sentence = input("Enter a sentence ")
new_sentence = eliminate_bad_characters(english_sentence)
number_of_vowels = count_vowels(new_sentence)
number_of_consonants = count_consonants(new_sentence)
print("Number of vowels is: {}\nNumber of consonants is: {}\n".format(number_of_vowels, number_of_consonants))
def main():
count_consonant_and_vowels()
if __name__ == '__main__':
main()
|
"""
==============
Risk Observers
==============
This module contains tools for observing risk exposure during the simulation.
"""
from collections import Counter
from typing import Dict
import pandas as pd
from vivarium.framework.engine import Builder
from vivarium.framework.event import Event
from vivarium_public_health.metrics.utilities import get_age_bins, get_prevalent_cases, get_state_person_time
class CategoricalRiskObserver:
""" An observer for a categorical risk factor.
Observes category person time for a risk factor.
By default, this observer computes aggregate categorical person time
over the entire simulation. It can be configured to bin these into
age_groups, sexes, and years by setting the ``by_age``, ``by_sex``,
and ``by_year`` flags, respectively.
This component can also observe the number of simulants in each age
group who are alive and in each category of risk at the specified
    sample date each year (the sample date defaults to July 1, and can
be set in the configuration).
Here is an example configuration to change the sample date to Dec. 31:
.. code-block:: yaml
{risk_name}_observer:
sample_date:
month: 12
day: 31
"""
configuration_defaults = {
'metrics': {
'risk': {
'by_age': False,
'by_year': False,
'by_sex': False,
'sample_exposure': {
'sample': False,
'date': {
'month': 7,
'day': 1,
}
},
}
}
}
def __init__(self, risk: str):
"""
Parameters
----------
risk :
the type and name of a risk, specified as "type.name". Type is singular.
"""
self.risk = risk
self.configuration_defaults = {
'metrics': {
f'{self.risk}': CategoricalRiskObserver.configuration_defaults['metrics']['risk']
}
}
@property
def name(self):
return f'categorical_risk_observer.{self.risk}'
# noinspection PyAttributeOutsideInit
def setup(self, builder: Builder):
self.data = {}
self.config = builder.configuration[f'metrics'][f'{self.risk}']
self.clock = builder.time.clock()
self.categories = builder.data.load(f'risk_factor.{self.risk}.categories')
self.age_bins = get_age_bins(builder)
self.person_time = Counter()
self.sampled_exposure = Counter()
columns_required = ['alive']
if self.config.by_age:
columns_required += ['age']
if self.config.by_sex:
columns_required += ['sex']
self.population_view = builder.population.get_view(columns_required)
self.exposure = builder.value.get_value(f'{self.risk}.exposure')
builder.value.register_value_modifier('metrics', self.metrics)
builder.event.register_listener('time_step__prepare', self.on_time_step_prepare)
def on_time_step_prepare(self, event: Event):
pop = pd.concat([self.population_view.get(event.index), pd.Series(self.exposure(event.index), name=self.risk)],
axis=1)
for category in self.categories:
state_person_time_this_step = get_state_person_time(pop, self.config, self.risk, category,
self.clock().year, event.step_size, self.age_bins)
self.person_time.update(state_person_time_this_step)
if self._should_sample(event.time):
sampled_exposure = get_prevalent_cases(pop, self.config.to_dict(), self.risk, event.time, self.age_bins)
self.sampled_exposure.update(sampled_exposure)
def _should_sample(self, event_time: pd.Timestamp) -> bool:
"""Returns true if we should sample on this time step."""
should_sample = self.config.sample_exposure.sample
if should_sample:
            sample_date = pd.Timestamp(year=event_time.year, **self.config.sample_exposure.date.to_dict())
should_sample &= self.clock() <= sample_date < event_time
return should_sample
# noinspection PyUnusedLocal
def metrics(self, index: pd.Index, metrics: Dict) -> Dict:
metrics.update(self.person_time)
metrics.update(self.sampled_exposure)
return metrics
def __repr__(self):
return f"CategoricalRiskObserver({self.risk})"
|
# coding=utf-8
import os
import sys
sys.path.append(os.environ.get('PY_DEV_HOME'))
from webTest_pro.common.preInit import preinit
# from webTest_pro.common.logger import logger
if __name__ == '__main__':
preinit()
|
"""empty message
Revision ID: fd0fb00bfd08
Revises: 19c4b18cd911
Create Date: 2019-05-29 00:41:00.880720
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'fd0fb00bfd08'
down_revision = '19c4b18cd911'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('Users_That_Day_db', sa.Column('F2', postgresql.ARRAY(sa.Integer()), nullable=True))
op.add_column('Users_That_Day_db', sa.Column('F3', postgresql.ARRAY(sa.Integer()), nullable=True))
op.add_column('Users_That_Day_db', sa.Column('M2', postgresql.ARRAY(sa.Integer()), nullable=True))
op.add_column('Users_That_Day_db', sa.Column('M3', postgresql.ARRAY(sa.Integer()), nullable=True))
op.add_column('Users_That_Day_db', sa.Column('S2', postgresql.ARRAY(sa.Integer()), nullable=True))
op.add_column('Users_That_Day_db', sa.Column('S3', postgresql.ARRAY(sa.Integer()), nullable=True))
op.add_column('Users_That_Day_db', sa.Column('Su2', postgresql.ARRAY(sa.Integer()), nullable=True))
op.add_column('Users_That_Day_db', sa.Column('Su3', postgresql.ARRAY(sa.Integer()), nullable=True))
op.add_column('Users_That_Day_db', sa.Column('T2', postgresql.ARRAY(sa.Integer()), nullable=True))
op.add_column('Users_That_Day_db', sa.Column('T3', postgresql.ARRAY(sa.Integer()), nullable=True))
op.add_column('Users_That_Day_db', sa.Column('Th2', postgresql.ARRAY(sa.Integer()), nullable=True))
op.add_column('Users_That_Day_db', sa.Column('Th3', postgresql.ARRAY(sa.Integer()), nullable=True))
op.add_column('Users_That_Day_db', sa.Column('W2', postgresql.ARRAY(sa.Integer()), nullable=True))
op.add_column('Users_That_Day_db', sa.Column('W3', postgresql.ARRAY(sa.Integer()), nullable=True))
op.add_column('number_users_db', sa.Column('number_usersF1', sa.Integer(), nullable=True))
op.add_column('number_users_db', sa.Column('number_usersF2', sa.Integer(), nullable=True))
op.add_column('number_users_db', sa.Column('number_usersF3', sa.Integer(), nullable=True))
op.add_column('number_users_db', sa.Column('number_usersM1', sa.Integer(), nullable=True))
op.add_column('number_users_db', sa.Column('number_usersM2', sa.Integer(), nullable=True))
op.add_column('number_users_db', sa.Column('number_usersM3', sa.Integer(), nullable=True))
op.add_column('number_users_db', sa.Column('number_usersS1', sa.Integer(), nullable=True))
op.add_column('number_users_db', sa.Column('number_usersS2', sa.Integer(), nullable=True))
op.add_column('number_users_db', sa.Column('number_usersS3', sa.Integer(), nullable=True))
op.add_column('number_users_db', sa.Column('number_usersSu1', sa.Integer(), nullable=True))
op.add_column('number_users_db', sa.Column('number_usersSu2', sa.Integer(), nullable=True))
op.add_column('number_users_db', sa.Column('number_usersSu3', sa.Integer(), nullable=True))
op.add_column('number_users_db', sa.Column('number_usersT1', sa.Integer(), nullable=True))
op.add_column('number_users_db', sa.Column('number_usersT2', sa.Integer(), nullable=True))
op.add_column('number_users_db', sa.Column('number_usersT3', sa.Integer(), nullable=True))
op.add_column('number_users_db', sa.Column('number_usersTh1', sa.Integer(), nullable=True))
op.add_column('number_users_db', sa.Column('number_usersTh2', sa.Integer(), nullable=True))
op.add_column('number_users_db', sa.Column('number_usersTh3', sa.Integer(), nullable=True))
op.add_column('number_users_db', sa.Column('number_usersW1', sa.Integer(), nullable=True))
op.add_column('number_users_db', sa.Column('number_usersW2', sa.Integer(), nullable=True))
op.add_column('number_users_db', sa.Column('number_usersW3', sa.Integer(), nullable=True))
op.drop_column('number_users_db', 'number_usersSu')
op.drop_column('number_users_db', 'number_usersTh')
op.drop_column('number_users_db', 'number_usersS')
op.drop_column('number_users_db', 'number_usersM')
op.drop_column('number_users_db', 'number_usersF')
op.drop_column('number_users_db', 'number_usersW')
op.drop_column('number_users_db', 'number_usersT')
op.add_column('users', sa.Column('fifth', sa.Integer(), nullable=True))
op.add_column('users', sa.Column('firstam', sa.Integer(), nullable=True))
op.add_column('users', sa.Column('firstpm', sa.Integer(), nullable=True))
op.add_column('users', sa.Column('forth', sa.Integer(), nullable=True))
op.add_column('users', sa.Column('postcall', sa.Boolean(), nullable=True))
op.add_column('users', sa.Column('second', sa.Integer(), nullable=True))
op.add_column('users', sa.Column('seventh', sa.Integer(), nullable=True))
op.add_column('users', sa.Column('sixth', sa.Integer(), nullable=True))
op.add_column('users', sa.Column('third', sa.Integer(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('users', 'third')
op.drop_column('users', 'sixth')
op.drop_column('users', 'seventh')
op.drop_column('users', 'second')
op.drop_column('users', 'postcall')
op.drop_column('users', 'forth')
op.drop_column('users', 'firstpm')
op.drop_column('users', 'firstam')
op.drop_column('users', 'fifth')
op.add_column('number_users_db', sa.Column('number_usersT', sa.INTEGER(), autoincrement=False, nullable=True))
op.add_column('number_users_db', sa.Column('number_usersW', sa.INTEGER(), autoincrement=False, nullable=True))
op.add_column('number_users_db', sa.Column('number_usersF', sa.INTEGER(), autoincrement=False, nullable=True))
op.add_column('number_users_db', sa.Column('number_usersM', sa.INTEGER(), autoincrement=False, nullable=True))
op.add_column('number_users_db', sa.Column('number_usersS', sa.INTEGER(), autoincrement=False, nullable=True))
op.add_column('number_users_db', sa.Column('number_usersTh', sa.INTEGER(), autoincrement=False, nullable=True))
op.add_column('number_users_db', sa.Column('number_usersSu', sa.INTEGER(), autoincrement=False, nullable=True))
op.drop_column('number_users_db', 'number_usersW3')
op.drop_column('number_users_db', 'number_usersW2')
op.drop_column('number_users_db', 'number_usersW1')
op.drop_column('number_users_db', 'number_usersTh3')
op.drop_column('number_users_db', 'number_usersTh2')
op.drop_column('number_users_db', 'number_usersTh1')
op.drop_column('number_users_db', 'number_usersT3')
op.drop_column('number_users_db', 'number_usersT2')
op.drop_column('number_users_db', 'number_usersT1')
op.drop_column('number_users_db', 'number_usersSu3')
op.drop_column('number_users_db', 'number_usersSu2')
op.drop_column('number_users_db', 'number_usersSu1')
op.drop_column('number_users_db', 'number_usersS3')
op.drop_column('number_users_db', 'number_usersS2')
op.drop_column('number_users_db', 'number_usersS1')
op.drop_column('number_users_db', 'number_usersM3')
op.drop_column('number_users_db', 'number_usersM2')
op.drop_column('number_users_db', 'number_usersM1')
op.drop_column('number_users_db', 'number_usersF3')
op.drop_column('number_users_db', 'number_usersF2')
op.drop_column('number_users_db', 'number_usersF1')
op.drop_column('Users_That_Day_db', 'W3')
op.drop_column('Users_That_Day_db', 'W2')
op.drop_column('Users_That_Day_db', 'Th3')
op.drop_column('Users_That_Day_db', 'Th2')
op.drop_column('Users_That_Day_db', 'T3')
op.drop_column('Users_That_Day_db', 'T2')
op.drop_column('Users_That_Day_db', 'Su3')
op.drop_column('Users_That_Day_db', 'Su2')
op.drop_column('Users_That_Day_db', 'S3')
op.drop_column('Users_That_Day_db', 'S2')
op.drop_column('Users_That_Day_db', 'M3')
op.drop_column('Users_That_Day_db', 'M2')
op.drop_column('Users_That_Day_db', 'F3')
op.drop_column('Users_That_Day_db', 'F2')
# ### end Alembic commands ###
|
import unittest
from katas.kyu_7.count_the_ones import hamming_weight
class CountTheOnesTestCase(unittest.TestCase):
def test_equal_1(self):
self.assertEqual(hamming_weight(10), 2)
def test_equal_2(self):
self.assertEqual(hamming_weight(21), 3)
def test_equal_3(self):
self.assertEqual(hamming_weight(7052), 7)
def test_equal_4(self):
self.assertEqual(hamming_weight(9667), 7)
def test_equal_5(self):
self.assertEqual(hamming_weight(738), 5)
def test_equal_6(self):
self.assertEqual(hamming_weight(476), 6)
def test_equal_7(self):
self.assertEqual(hamming_weight(870), 6)
def test_equal_8(self):
self.assertEqual(hamming_weight(9154), 6)
def test_equal_9(self):
self.assertEqual(hamming_weight(2095), 6)
def test_equal_10(self):
self.assertEqual(hamming_weight(1959), 8)
def test_equal_11(self):
self.assertEqual(hamming_weight(7293), 9)
def test_equal_12(self):
self.assertEqual(hamming_weight(7364), 6)
def test_equal_13(self):
self.assertEqual(hamming_weight(1835), 7)
def test_equal_14(self):
self.assertEqual(hamming_weight(1303), 6)
def test_equal_15(self):
self.assertEqual(hamming_weight(6192), 4)
def test_equal_16(self):
self.assertEqual(hamming_weight(1852), 7)
def test_equal_17(self):
self.assertEqual(hamming_weight(1489), 6)
|
from EmpNode import MyNode
class LinkedList(MyNode):
def __init__(self,head = None):
self.__head = head
def isEmpty(self):
return self.__head == None
def AddNew(self,data):
myNode = MyNode(data)
if self.isEmpty() == True:
self.__head = myNode
else:
current = self.__head
while current.getNext() != None:
current = current.getNext()
current.setNext(myNode)
    def WeeklyWages(self, numhours):
        # Placeholder: the weekly wage calculation is not implemented yet.
        raise NotImplementedError("WeeklyWages is not implemented yet")
|
import tensorflow as tf
import numpy as np
from dps.utils import Parameterized, Param
from dps.utils.tf import build_gradient_train_op
class Optimizer(Parameterized):
def __init__(self, agents):
self.agents = agents
def trainable_variables(self, for_opt):
return [v for agent in self.agents for v in agent.trainable_variables(for_opt=for_opt)]
class StochasticGradientDescent(Optimizer):
opt_steps_per_update = Param(1)
sub_batch_size = Param(0)
lr_schedule = Param()
max_grad_norm = Param(None)
noise_schedule = Param(None)
def __init__(self, agents, alg, **kwargs):
super(StochasticGradientDescent, self).__init__(agents)
self.alg = alg
def build_update(self, context):
tvars = self.trainable_variables(for_opt=True)
# `context.objective` is the quantity we want to maximize, but TF minimizes, so use negative.
self.train_op, train_recorded_values = build_gradient_train_op(
-context.objective, tvars, self.alg, self.lr_schedule, self.max_grad_norm,
self.noise_schedule)
context.add_recorded_values(train_recorded_values, train_only=True)
def update(self, n_rollouts, feed_dict, fetches):
sess = tf.get_default_session()
for epoch in range(self.opt_steps_per_update):
record = epoch == self.opt_steps_per_update-1
if not self.sub_batch_size:
_fetches = [self.train_op, fetches] if record else self.train_op
fetched = sess.run(_fetches, feed_dict=feed_dict)
else:
for is_final, fd in self.subsample_feed_dict(n_rollouts, feed_dict):
_fetches = [self.train_op, fetches] if (record and is_final) else self.train_op
fetched = sess.run(_fetches, feed_dict=fd)
return fetched[1]
def subsample_feed_dict(self, n_rollouts, feed_dict):
updates_per_epoch = int(np.floor(n_rollouts / self.sub_batch_size))
permutation = np.random.permutation(n_rollouts)
offset = 0
for i in range(updates_per_epoch):
indices = permutation[offset:offset+self.sub_batch_size]
fd = {}
for k, v in feed_dict.items():
if isinstance(v, np.ndarray):
fd[k] = v[:, indices, ...]
else:
fd[k] = v
yield i == updates_per_epoch-1, fd
offset += self.sub_batch_size
|
import numpy as np
import pandas as pd
from sklearn.svm import SVR
from sklearn.model_selection import KFold
import optuna
class SVRCV(object):
model_cls = SVR
def __init__(self, n_trials=100):
self.n_trials = n_trials
def fit(self, X, y):
if isinstance(X, np.ndarray):
X = pd.DataFrame(X)
y = pd.DataFrame(y)
elif isinstance(X, pd.DataFrame):
X = X.reset_index(drop=True)
y = y.reset_index(drop=True)
self.X = X
self.y = y
study = optuna.create_study(direction='maximize')
study.optimize(self, n_trials=self.n_trials)
self.best_trial = study.best_trial
print()
print("Bset score:", round(self.best_trial.value, 2))
print("Best params:", self.best_trial.params)
print()
self.best_model = self.model_cls(**self.best_trial.params)
self.best_model.fit(self.X, self.y)
def predict(self, X):
if isinstance(X, pd.Series):
X = X.values.reshape(1, -1)
return self.best_model.predict(X)
def score(self, X, y):
return self.best_model.score(X, y)
def kfold_cv(self, model, splits=5):
scores = []
kf = KFold(n_splits=splits, shuffle=True)
for train_index, test_index in kf.split(self.X):
X_train, X_test = self.X.iloc[train_index], self.X.iloc[test_index]
y_train, y_test = self.y.iloc[train_index], self.y.iloc[test_index]
model.fit(X_train, y_train)
scores.append(model.score(X_test, y_test))
score = np.array(scores).mean()
return score
def __call__(self, trial):
kernel = trial.suggest_categorical('kernel', ['rbf', 'linear'])
C = trial.suggest_loguniform('C', 1e0, 1e2)
epsilon = trial.suggest_loguniform('epsilon', 1e-1, 1e1)
model = self.model_cls(kernel=kernel, C=C,
epsilon=epsilon, gamma='auto')
score = self.kfold_cv(model)
return score
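
# Hedged usage sketch, not part of the original module; load_diabetes is just a small
# scikit-learn toy dataset chosen here for illustration.
if __name__ == '__main__':
    from sklearn.datasets import load_diabetes
    X, y = load_diabetes(return_X_y=True)
    reg = SVRCV(n_trials=10)   # keep the hyperparameter search short for a quick demo
    reg.fit(X, y)
    print("R^2 of the selected model on the training data:", reg.score(reg.X, reg.y))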
|
#!/usr/bin/env python3
import logging
import os, re
from datetime import datetime
from threading import Thread, Lock, Event
from urllib.request import urlopen
from urllib.parse import urljoin, urlsplit
from bs4 import BeautifulSoup as BS
import signal
maxAttempt = 3
outDir = '/Volumes/flood3/RSS'
bs4FMT = 'lxml'
urlBase = 'http://data.remss.com'
URLs = {'gmi' : urljoin( urlBase, '{}/bmaps_v{:04.1f}/' ),
'tmi' : urljoin( urlBase, '{}/bmaps_v{:04.1f}/' ),
'amsre' : urljoin( urlBase, '{}/bmaps_v{:02.0f}/' ),
'ssmi' : urljoin( urlBase, '{}/{}/bmaps_v{:02.0f}/' )}
EXT = '.gz'
log = logging.getLogger(__name__)
log.setLevel( logging.DEBUG )
log.addHandler( logging.StreamHandler() )
log.handlers[0].setLevel( logging.INFO )
log.handlers[0].setFormatter(
logging.Formatter( '[%(levelname)-4.4s] %(message)s' )
)
STOP = Event()
def sigHandler(*args, **kwargs):
STOP.set()
signal.signal( signal.SIGTERM, sigHandler )
signal.signal( signal.SIGINT, sigHandler )
class NLock( object ):
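    """
    A simple counting lock used to cap the number of simultaneous downloads:
    each acquire() increments a shared counter and, once the counter reaches
    nMax, blocks on an internal lock until a release() frees a slot.
    """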
__n = 0
__nMax = 0
def __init__(self, nMax = 2):
self.__lock1 = Lock()
self.__lock2 = Lock()
self.nMax = nMax
@property
def n(self):
return self.__n
@property
def nMax(self):
return self.__nMax
@nMax.setter
def nMax(self, val):
if isinstance(val, int):
with self.__lock1:
self.__nMax = 1 if (val < 1) else val
def __enter__(self, *args, **kwargs):
self.acquire( *args, **kwargs )
def __exit__(self, *args, **kwargs):
self.release(*args, **kwargs)
def acquire(self, *args, **kwargs):
with self.__lock1:
self.__n += 1
check = self.__n >= self.__nMax
if check:
self.__lock2.acquire()
def release(self, *args, **kwargs):
with self.__lock1:
self.__n -= 1
if self.__lock2.locked():
self.__lock2.release()
class RemoteFile(Thread):
DAILY = False
DAY3 = False
WEEKLY = False
MONTHLY = False
DATE = None
RESP = None
SIZE = None
def __init__(self, URL):
super().__init__()
self.log = logging.getLogger(__name__)
self.URL = URL
self.file = URL.split('/')[-1]
self._filePath = None
fileInfo = self.file.split('_')
if (len(fileInfo) == 3) and ('d3d' in fileInfo[-1]): # Then is a 3-day file
self.DAY3 = True
elif ('weeks' in self.URL):
self.WEEKLY = True
self.date = self._parseDate( fileInfo[1].split('v')[0] )
def checkDate(self, **kwargs):
if not self.date: return False # If date not defined, return False
if isinstance(kwargs.get('start', None), datetime) and (self.date < kwargs['start']):
return False
if isinstance(kwargs.get('end', None), datetime) and (self.date >= kwargs['end']):
return False
status = []
if kwargs.get('daily', False):
status.append( self.DAILY )
if kwargs.get('day3', False):
status.append( self.DAY3 )
if kwargs.get('weekly', False):
status.append( self.WEEKLY )
if kwargs.get('monthly', False):
status.append( self.MONTHLY )
if len(status) == 0: status.append(True)
return any( status )
def localPath(self, outDir):
remotePath = urlsplit( self.URL ).path
localPath = remotePath[1:].replace('/', os.path.sep)
return os.path.join( outDir, localPath )
def download( self, outDir = None, filePath = None ):
if not outDir and not filePath:
raise Exception('Must enter output directory or file path')
elif not filePath:
self._filePath = self.localPath( outDir )
else:
self._filePath = filePath
self.start()
self.join()
def run(self):
if self.open():
if self.getSize() and not self._checkSize(self._filePath):
try:
data = self.RESP.read()
except:
self.error('Failed to download data: {}'.format(self.URL))
else:
with open(self._filePath, 'wb') as fid:
fid.write( data )
self.close()
def open(self):
if not self.RESP:
try:
self.RESP = urlopen( self.URL )
except:
self.log.error('Failed to open URL: {}'.format(self.URL))
return False
return True
def close(self):
if self.RESP:
try:
self.RESP.close()
except:
self.log.warning('Failed to close remote: {}'.format(self.URL))
return False
return True
def getSize(self):
if self.open():
try:
self.SIZE = int( self.RESP.getheader('Content-Length') )
except:
self.log.error('Failed to get remote file size: {}'.format(self.URL))
else:
return True
return False
def _checkSize( self, filePath ):
if os.path.isfile( filePath ):
localSize = os.stat( filePath ).st_size
if (localSize == self.SIZE):
log.info('Local file exists and is same size, skipping download: {}'.format(self.URL))
return True
else:
log.info('Local file exists but wrong size, will redownload: {}'.format(self.URL))
else:
os.makedirs( os.path.dirname(filePath), exist_ok = True )
log.info('Local file not exist, will download: {}'.format(self.URL))
return False
def _parseDate(self, date):
try:
date = datetime.strptime(date, '%Y%m') # Try to parse Year/month from string
except:
pass
else:
self.MONTHLY = True # On success, set MONTHLY flag
if isinstance(date, str): # If date is still string instance, then try to parse year/month/day
try:
date = datetime.strptime(date, '%Y%m%d') # Parse year/month/day
except:
log.error('Failed to parse date from URL: {}'.format(self.URL)) # Log issue
return None # Return None
self.DAILY = (not self.MONTHLY) and (not self.WEEKLY) and (not self.DAY3)
return date
def scraper( URL, ext = EXT, **kwargs ):
'''
Purpose:
Generator function to dive through all directories until
path with given extension is found. Function is recursive
will call itself into directories on remote, only yielding
value when path ends in extension
Inputs:
URL : URL to hunt for data files in
Keywords:
ext : File extension to look for
Various other keys for filtering by daily, 3-day, weekly,
and monthly files, and for start/end date filtering.
    See the argparse arguments at the bottom of this script for the full keyword names.
'''
log = logging.getLogger(__name__)
html = urlopen( URL ).read()
parse = BS(html, bs4FMT)
path = urlsplit( URL ).path # Path in input URL
for link in parse.find_all('a'): # Loop over all links
log.debug( link['href'] ) # Debug
if path in link['href']: # If path from input URL is in href path, then is file in current directory or directory; used to filter [To Parent] link
URL = urljoin( URL, link['href'] ) # Update URL with new path
log.debug( URL ) # Log url
if link.text.endswith( ext ): # If the URL ends with requested extension
remote = RemoteFile(URL) # Initialize RemoteFile object
if remote.checkDate(**kwargs): # If date checks out
yield remote # Yield RemoteFile object
else: # Else, assume is directory
yield from scraper( URL, ext = ext, **kwargs ) # Recursive call to generator to dive into directory
def downloadFiles(instrument, version, outDir, **kwargs):
log = logging.getLogger(__name__)
URL = URLs.get(instrument[0].lower(), None)
if not URL:
log.critical( 'Instrument not supported: {}'.format( instrument ) )
return False
lock = NLock( kwargs.pop('threads', 2) )
URL = URL.format( *instrument, version )
if isinstance( kwargs.get('start', None), str ):
kwargs['start'] = datetime.strptime(kwargs['start'], '%Y%m%d')
if isinstance( kwargs.get('end', None), str ):
kwargs['end'] = datetime.strptime(kwargs['end'], '%Y%m%d')
for remote in scraper( URL, **kwargs ):
with lock:
remote.download( outDir = outDir )
if STOP.is_set(): break
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('outdir', type=str, help='Output directory for downloaded data')
parser.add_argument('version', type=float, help='Data version to download')
    parser.add_argument('instrument', type=str, nargs='+', help='Instrument to download data for. If multiple possible instruments, such as ssmi, enter the sensor name followed by the instrument number; e.g. ssmi f08')
parser.add_argument('-t', '--threads', type=int, default=2, help='Number of simultaneous downloads to allow')
parser.add_argument('-s', '--start', type=str, help='ISO 8601 string specifying start date; e.g., 19980101. Start date is inclusive.')
parser.add_argument('-e', '--end', type=str, help='ISO 8601 string specifying end date; e.g., 19980102. End date is exclusive.')
parser.add_argument('-d', '--daily', action='store_true', help='Set to only download daily data')
parser.add_argument('-d3', '--day3', action='store_true', help='Set to only download 3-day data')
parser.add_argument('-w', '--weekly', action='store_true', help='Set to only download weekly data')
parser.add_argument('-m', '--monthly', action='store_true', help='Set to only download monthly data')
args = parser.parse_args()
downloadFiles( args.instrument, args.version, args.outdir,
threads = args.threads,
start = args.start,
end = args.end,
daily = args.daily,
day3 = args.day3,
weekly = args.weekly,
monthly = args.monthly)
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import datetime
import re
import time
from pathlib import Path
import pytest
from freezegun import freeze_time
from pants.base.build_environment import get_buildroot
from pants.base.exiter import PANTS_FAILED_EXIT_CODE, PANTS_SUCCEEDED_EXIT_CODE, ExitCode
from pants.goal.run_tracker import RunTracker
from pants.testutil.option_util import create_options_bootstrapper
from pants.util.contextutil import environment_as
from pants.util.osutil import getuser
from pants.version import VERSION
@freeze_time(datetime.datetime(2020, 1, 1, 12, 0, 0), as_kwarg="frozen_time")
def test_run_tracker_timing_output(tmp_path: Path, **kwargs) -> None:
frozen_time = kwargs["frozen_time"]
buildroot = tmp_path.as_posix()
with environment_as(PANTS_BUILDROOT_OVERRIDE=buildroot):
ob = create_options_bootstrapper([])
run_tracker = RunTracker(ob.args, ob.bootstrap_options)
run_tracker.start(run_start_time=time.time(), specs=["::"])
frozen_time.tick(delta=datetime.timedelta(seconds=1))
run_tracker.end_run(PANTS_SUCCEEDED_EXIT_CODE)
timings = run_tracker.get_cumulative_timings()
assert timings[0]["label"] == "main"
assert timings[0]["timing"] == 1.0
@pytest.mark.parametrize(
"exit_code,expected",
[(PANTS_SUCCEEDED_EXIT_CODE, "SUCCESS"), (PANTS_FAILED_EXIT_CODE, "FAILURE")],
)
@freeze_time(datetime.datetime(2020, 1, 10, 12, 0, 1), as_kwarg="frozen_time")
def test_run_information(exit_code: ExitCode, expected: str, tmp_path: Path, **kwargs) -> None:
frozen_time = kwargs["frozen_time"]
buildroot = tmp_path.as_posix()
with environment_as(PANTS_BUILDROOT_OVERRIDE=buildroot):
spec = "test/example.py"
ob = create_options_bootstrapper(["list", spec])
run_tracker = RunTracker(ob.args, ob.bootstrap_options)
specs = [spec]
run_tracker.start(run_start_time=time.time(), specs=specs)
run_information = run_tracker.run_information()
assert run_information["buildroot"] == get_buildroot()
assert run_information["path"] == get_buildroot()
# freezegun doesn't seem to accurately mock the time zone,
# (i.e. the time zone used depends on that of the machine that
# executes the test), so we can only safely assert that the
# month and year appear in the human-readable string contained
# in the "datetime" key
assert "Jan" in run_information["datetime"]
assert "2020" in run_information["datetime"]
assert run_information["timestamp"] == 1578657601.0
assert run_information["user"] == getuser()
assert run_information["version"] == VERSION
assert re.match(f"pants.*{spec}", run_information["cmd_line"])
assert run_information["specs_from_command_line"] == [spec]
frozen_time.tick(delta=datetime.timedelta(seconds=1))
run_tracker.end_run(exit_code)
run_information_after_ended = run_tracker.run_information()
assert run_information_after_ended["outcome"] == expected
@freeze_time(datetime.datetime(2020, 1, 10, 12, 0, 1), as_kwarg="frozen_time")
def test_anonymous_telemetry(monkeypatch, tmp_path: Path, **kwargs) -> None:
frozen_time = kwargs["frozen_time"]
buildroot = tmp_path.as_posix()
with environment_as(PANTS_BUILDROOT_OVERRIDE=buildroot):
ob = create_options_bootstrapper([])
opts = ob.bootstrap_options
monkeypatch.setattr(opts, "_goals", ["test", "customgoal", "lint"])
run_tracker = RunTracker(ob.args, opts)
run_tracker.start(run_start_time=time.time(), specs=[])
frozen_time.tick(delta=datetime.timedelta(seconds=1))
run_tracker.end_run(PANTS_SUCCEEDED_EXIT_CODE)
repo_id = "A" * 36
telemetry = run_tracker.get_anonymous_telemetry_data(repo_id)
# Check that all keys have non-trivial values.
for key in (
"run_id",
"timestamp",
"duration",
"outcome",
"platform",
"python_implementation",
"python_version",
"pants_version",
"repo_id",
"machine_id",
"user_id",
"standard_goals",
"num_goals",
):
assert bool(telemetry.get(key))
# Verify a few easy-to-check values.
assert telemetry["timestamp"] == "1578657601.0"
assert telemetry["duration"] == "1.0"
assert telemetry["outcome"] == "SUCCESS"
assert telemetry["standard_goals"] == ["test", "lint"]
assert telemetry["num_goals"] == "3"
def test_anonymous_telemetry_with_no_repo_id(tmp_path: Path) -> None:
buildroot = tmp_path.as_posix()
with environment_as(PANTS_BUILDROOT_OVERRIDE=buildroot):
ob = create_options_bootstrapper([])
run_tracker = RunTracker(ob.args, ob.bootstrap_options)
run_tracker.start(run_start_time=time.time(), specs=[])
run_tracker.end_run(PANTS_SUCCEEDED_EXIT_CODE)
repo_id = ""
telemetry = run_tracker.get_anonymous_telemetry_data(repo_id)
# Check that these keys have non-trivial values.
for key in (
"run_id",
"timestamp",
"duration",
"outcome",
"platform",
"python_implementation",
"python_version",
"pants_version",
):
assert bool(telemetry.get(key))
for key in ("repo_id", "machine_id", "user_id"):
assert telemetry.get(key) == ""
|
from fastapi import APIRouter
from . import user
from . import todo_list
api = APIRouter(prefix='/api')
api.include_router(user.api)
api.include_router(todo_list.api)
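# Hedged usage note (not part of the original module): the aggregated router is
# typically mounted on the application elsewhere, for example in a hypothetical main.py:
#
#   from fastapi import FastAPI
#   from .routers import api   # the module path here is an assumption
#
#   app = FastAPI()
#   app.include_router(api)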
|
#this program is used to do the sorting of values by merge sort
from util import utility
try:
lst = [int(x) for x in input("enter the number with space ").split()]
    print(utility.merge_sort(lst))  # calls the method and prints the output
except ValueError:
print("ENTER THE INT VALUES")
|
city0 = "Karachi"
city1 = "Lahore"
city2 = "Islamabad"
city3 = "Quetta"
city4 = "Peshawar"
print("Welcome to city " + city3)
cities = ["Karachi","Lahore","Islamabad","Quetta","Peshawar"]
print("Welcome to city " + cities[4])
|
#!/usr/bin/env /data/mta/Script/Python3.8/envs/ska3-shiny/bin/python
#####################################################################################
# #
# run_iru_gyro_bias.py: update iru gyro bias database #
# #
# author: t. isobe (tisobe@cfa.harvard.edu) #
# #
# last updae: Mar 12, 2021 #
# #
#####################################################################################
import os
import sys
import re
import string
import math
import numpy
import time
import astropy.io.fits as pyfits
import Chandra.Time
import Ska.engarchive.fetch as fetch
import random
#
#--- reading directory list
#
path = '/data/mta/Script/IRU/Scripts/house_keeping/dir_list'
with open(path, 'r') as f:
data = [line.strip() for line in f.readlines()]
for ent in data:
atemp = re.split(':', ent)
var = atemp[1].strip()
line = atemp[0].strip()
exec("%s = %s" %(var, line))
#
#--- append paths to private folders to the python path
#
sys.path.append(bin_dir)
sys.path.append(mta_dir)
#
#--- import functions
#
import mta_common_functions as mcf
#
#--- temp writing file name
#
rtail = int(time.time() * random.random())
zspace = '/tmp/zspace' + str(rtail)
#
#--- some data
#
bias_list = ['aogbias1', 'aogbias2', 'aogbias3']
#----------------------------------------------------------------------------------
#-- run_iru_gyro_bias: update iru gyro bias database --
#----------------------------------------------------------------------------------
def run_iru_gyro_bias(tstart, tstop):
"""
update iru gyro bias database
input: tstart --- starting time
tstop --- stopping time
output: <data_dir>/iru_gyro_bias_year<year>.fits
"""
if tstart == '':
[y_list, b_list, e_list] = find_start_and_stop_time()
for k in range(0, len(y_list)):
year = y_list[k]
tstart = b_list[k]
tstop = e_list[k]
append_new_data(year, tstart, tstop)
else:
date = Chandra.Time.DateTime(0.5*(tstart + tstop)).date
atemp = re.split(':', date)
year = int(atemp[0])
append_new_data(year, tstart, tstop)
#----------------------------------------------------------------------------------
#-- append_new_data: append the new data potion to the existing database --
#----------------------------------------------------------------------------------
def append_new_data(year, tstart, tstop):
"""
    append the new data portion to the existing database
input: year --- this year
tstart --- starting time in seconds from 1998.1.1
tstop --- stopping time in seconds from 1998.1.1
output: <data_dir>/iru_gyro_bias_year<year>.fits
"""
#
#--- read the input data fits file
#
out = read_data(tstart, tstop)
#
#--- compute avg and std of roll, pitch, and yaw biases; default is one hour avg/std
#
results = compute_avg(out)
#
#--- convert the table into fits file
#
create_new_data_fits(results)
mfile = data_dir + 'iru_gyro_bias_year' + str(year) + '.fits'
#
#--- if the fits data file exists, append. otherwise, create it
#
if os.path.isfile(mfile):
append_fits_table(mfile, 'bias.fits', 'temp.fits')
cmd = 'mv temp.fits ' + mfile
os.system(cmd)
mcf.rm_file('bias.fits')
else:
cmd = 'mv bias.fits ' + mfile
os.system(cmd)
#----------------------------------------------------------------------------------
#-- find_start_and_stop_time: find starting and stopping time from the existing data
#----------------------------------------------------------------------------------
def find_start_and_stop_time():
"""
find starting and stopping time from the existing data
input: none but read from <data_dir>/iru_gyro_bias_year<year>.fits
output: a list of year
a list of starting time in seconds from 1998.1.1
a list of stopping time in seconds from 1998.1.1
    Note: most of the time each list contains only one value. However, when
    the data span crosses a year boundary, the lists contain two entries.
"""
#
#--- check today's date
#
date = time.strftime("%Y:%j:00:00:00", time.gmtime())
atemp = re.split(':', date)
year = int(atemp[0])
#
#--- check whether this year's data file exists
#
dfile = data_dir + 'iru_gyro_bias_year' + str(year) + '.fits'
if os.path.isfile(dfile):
fout = pyfits.open(dfile)
data = fout[1].data
dtime = data['time']
dlast = dtime[-1]
fout.close()
tend = Chandra.Time.DateTime(date).secs
return [[year], [dlast], [tend]]
#
#--- if not, check last year's file
#
else:
dfile2 = data_dir + 'iru_gyro_bias_year' + str(year-1) + '.fits'
if os.path.isfile(dfile2):
fout = pyfits.open(dfile2)
data = fout[1].data
dlast = data['time'][-1]
fout.close()
ybound = str(year) + ':001:00:00:00'
stime = Chandra.Time.DateTime(ybound).secs
etime = Chandra.Time.DateTime(date).secs
return [[year-1, year], [dlast, stime], [stime, etime]]
else:
#
#--- start from the beginning
#
return [[1999], [48902399.0], [63071999.0]]
#----------------------------------------------------------------------------------
#-- read_data: extract needed data from sot database --
#----------------------------------------------------------------------------------
def read_data(tstart, tstop):
"""
extract needed data from sot database
input: tstart --- starting time in seconds from 1998.1.1
tstop --- stopping time in seconds from 1998.1.1
output: data --- a list of arrays of data
"""
save = []
for msid in bias_list:
out = fetch.MSID(msid, tstart, tstop)
val = out.vals
save.append(val)
data = [out.times] + save
return data
#----------------------------------------------------------------------------------
#-- compute_avg: compute avg and std of given data --
#----------------------------------------------------------------------------------
def compute_avg(data, span=3600.0):
"""
compute avg and std of given data
input: data --- a list of data arrays
            span    --- time span over which to compute the avg/std; default: 3600 sec
output: out --- a list of arrays of the data
"""
c_len = len(data)
#
#--- assume that the first array is time in seconds
#
dtime = data[0]
start = dtime[0]
tend = dtime[-1]
stop = start + span
aout = []
for k in range(0, c_len*2):
aout.append([])
#
#--- take avg and std of span interval of data
#
while start < tend:
index = (dtime >= start) & (dtime < stop)
m = 0
for k in range(0, c_len):
select = data[k][index]
avg = numpy.mean(select)
std = numpy.std(select)
aout[m].append(avg)
m += 1
aout[m].append(std)
m += 1
start = stop
stop = start + span
#
#--- convert the lists into arrays
#
out = []
for ent in aout:
out.append(numpy.array(ent))
return out
#----------------------------------------------------------------------------------
#-- create_new_data_fits: create a new fits file ---
#----------------------------------------------------------------------------------
def create_new_data_fits(data, out='bias.fits'):
"""
create a new fits file
input: data --- a list of arrays of data
                    [time (avg/std), roll (avg/std), pitch (avg/std), yaw (avg/std)]
            out     --- output fits file name; default: bias.fits
output: out --- fits file
"""
#
#--- skip time 'std'
#
col1 = pyfits.Column(name='time', format='E', array=data[0])
col2 = pyfits.Column(name='roll_bias_avg', format='E', array=data[2])
col3 = pyfits.Column(name='roll_bias_std', format='E', array=data[3])
col4 = pyfits.Column(name='pitch_bias_avg', format='E', array=data[4])
col5 = pyfits.Column(name='pitch_bias_std', format='E', array=data[5])
col6 = pyfits.Column(name='yaw_bias_avg', format='E', array=data[6])
col7 = pyfits.Column(name='yaw_bias_std', format='E', array=data[7])
cols = pyfits.ColDefs([col1, col2, col3, col4, col5, col6, col7])
tbhdu = pyfits.BinTableHDU.from_columns(cols)
tbhdu.writeto(out)
#----------------------------------------------------------------------------------
#-- append_fits_table: append one fits table file to another                      --
#----------------------------------------------------------------------------------
def append_fits_table(file1, file2, outname, extension = 1):
"""
    Append one fits table file to another.
the output table will inherit column attributes of the first fits table
Input: file1 --- fits table
file2 --- fits table (will be appended to file1)
outname --- the name of the new fits file
Output: a new fits file "outname"
"""
t1 = pyfits.open(file1)
t2 = pyfits.open(file2)
#
#-- find numbers of rows (two different ways as examples here)
#
nrow1 = t1[extension].data.shape[0]
nrow2 = t2[extension].header['naxis2']
#
#--- total numbers of rows to be created
#
nrows = nrow1 + nrow2
hdu = pyfits.BinTableHDU.from_columns(t1[extension].columns, nrows=nrows)
#
#--- append by the field names
#
for name in t1[extension].columns.names:
hdu.data.field(name)[nrow1:] = t2[extension].data.field(name)
#
#--- write new fits data file
#
hdu.writeto(outname)
t1.close()
t2.close()
#----------------------------------------------------------------------------------
if __name__ == "__main__":
#
#--- tstart and tstop are in seconds from 1998.1.1
#
if len(sys.argv) == 3:
tstart = float(sys.argv[1])
tstop = float(sys.argv[2])
else:
tstart = ''
tstop = ''
run_iru_gyro_bias(tstart, tstop)
# for year in range(1999, 2018):
# if year == 1999:
# date1 = "1999:244:00:00:00"
# else:
# date1 = str(year) + ':001:00:00:00'
#
# date2 = str(year+1) + ':001:00:00:00'
#
# tstart = Chandra.Time.DateTime(date1).secs
# tstop = Chandra.Time.DateTime(date2).secs
#
# print("Running: " + str(date1) + '<-->' + str(date2))
#
# run_iru_gyro_bias(tstart, tstop)
|
import numpy as np
import scipy.spatial.distance as dist
from permaviss.sample_point_clouds.examples import torus3D, take_sample
from permaviss.spectral_sequence.MV_spectral_seq import create_MV_ss
from permaviss.spectral_sequence.local_chains_class import local_chains
from permaviss.simplicial_complexes.vietoris_rips import vietoris_rips
from permaviss.simplicial_complexes.differentials import complex_differentials
from permaviss.persistence_algebra.PH_classic import persistent_homology
def test_zig_zags():
# creating and saving new point cloud ############
X = torus3D(1000, 1, 3)
point_cloud = take_sample(X, 300)
output_file = open("test/spectral_sequence/torus3D.txt", "w")
for row in point_cloud:
np.savetxt(output_file, row)
output_file.close()
# using old point cloud ###################
# saved_data = np.loadtxt("test/spectral_sequence/failing_1.txt")
# no_points = int(np.size(saved_data,0) / 3)
# point_cloud = np.reshape(saved_data, (no_points, 3))
max_r = 1
max_dim = 3
max_div = 2
overlap = max_r * 1.01
p = 3
# compute ordinary persistent homology
Dist = dist.squareform(dist.pdist(point_cloud))
C, R = vietoris_rips(Dist, max_r, max_dim)
Diff = complex_differentials(C, p)
PerHom, _, _ = persistent_homology(Diff, R, max_r, p)
###########################################################################
# compute spectral sequence
MV_ss = create_MV_ss(point_cloud, max_r, max_dim, max_div, overlap, p)
###########################################################################
# check that first page representatives are cycles
for n_deg in range(MV_ss.no_columns):
if n_deg == 0:
no_covers = MV_ss.nerve[0]
else:
no_covers = len(MV_ss.nerve[n_deg])
for nerve_spx_index in range(no_covers):
for mdeg, hom in enumerate(
MV_ss.Hom[0][n_deg][nerve_spx_index][1:]):
if hom.dim > 0:
trivial_image = np.matmul(
MV_ss.zero_diff[n_deg][nerve_spx_index][mdeg+1],
hom.coordinates)
if np.any(trivial_image % p):
raise(RuntimeError)
# end if
# end if
# end for
# end for
# end for
# TEST commutativity of zig-zags
########################################################################
for start_n_dim in range(1, MV_ss.no_columns):
for start_deg in range(MV_ss.no_rows):
Sn_dim, Sdeg = start_n_dim, start_deg
for k, chains in enumerate(
MV_ss.Hom_reps[MV_ss.no_pages - 1][start_n_dim][
start_deg][:-1]):
# calculate Cech differential of chains in (Sn_dim, Sdeg)
diff_im = MV_ss.cech_diff(Sn_dim - 1, Sdeg, chains)
# calculate vertical differential of (Sn_dim - 1, Sdeg + 1)
Sn_dim, Sdeg = Sn_dim - 1, Sdeg + 1
next_chains = MV_ss.Hom_reps[MV_ss.no_pages - 1][
start_n_dim][start_deg][k + 1]
vert_im = local_chains(len(next_chains.ref))
for nerve_spx_index, ref in enumerate(next_chains.ref):
if len(ref) > 0:
# compute vertical image of next_chains locally
vert_im.add_entry(nerve_spx_index, ref, np.matmul(
MV_ss.zero_diff[Sn_dim][nerve_spx_index][Sdeg],
next_chains.coord[nerve_spx_index].T).T % p)
# end if
# end for
# check that d_v(next_chains) + d_cech(chains) = 0
zero_chains = vert_im + diff_im
for coord in zero_chains.coord:
if len(coord) > 0:
if np.any(coord % p):
raise(ValueError)
# end for
# end for
# end for
if __name__ == "__main__":
test_zig_zags()
|
#!/usr/bin/env python
"""
"""
import sys
from collections import defaultdict
GTF_HEADER = ['seqname', 'source', 'feature', 'start', 'end', 'score',
'strand', 'frame']
gff_in_fn = sys.argv[1]
# optional second argument: restrict to records whose source column matches it
# (the original script appears to filter on source)
source_filter = sys.argv[2] if len(sys.argv) > 2 else None
# column index lookup; the 9th GFF column holds the group / transcript id
col = {name: i for i, name in enumerate(GTF_HEADER + ['group'])}
# per-group storage: (seqname, group) -> list of (start, end) CDS pairs
cds = defaultdict(list)
strands = {}
with open(gff_in_fn) as fh:
    for line in fh:
        if line.startswith('#'):
            continue
        sl = line.split()
        if len(sl) <= col['group']:
            continue
        if source_filter is not None and sl[col['source']] != source_filter:
            continue
        if sl[col['feature']] != 'CDS':
            continue
        key = (sl[col['seqname']], sl[col['group']])
        cds[key].append((int(sl[col['start']]), int(sl[col['end']])))
        strands[key] = sl[col['strand']]
# print one line per group: seqname, strand, then the CDS coordinates sorted by start
for (seqname, group), coords in sorted(cds.items()):
    coords.sort()
    fields = [seqname, strands[(seqname, group)]]
    for start, end in coords:
        fields.extend([str(start), str(end)])
    print(' '.join(fields))
|
shopping_list={
"warzywniak":["marchew", "ogórek", "sałata"],
"zoologiczny":["jedzenie dla kota", 'jedzenie dla psa'],
"piekarnia":["chleb", "ciasto"],
"mięsny":["szynka", "kurczak"],
"komputerowy":['pendrive']
}
product_list=[]
for store in shopping_list:
    products = ", ".join(shopping_list[store])
    print(f"Idę do {store.upper()} i kupuję tam: {products.upper()}")
    product_list += shopping_list[store]
print(f"W sumie kupiłem {len(product_list)} produktów.")
|
# -*- coding:utf-8 -*-
from django import forms
from .models import UserProfile
DEPT_CHOICES = (
('HWL', '核物理研究室'),
('WSW', '物理生物学研究室'),
('SKX', '水科学研究室'),
('JSQ', '加速器物理与射频技术部'),
('FEL', '自由电子激光技术部'),
('SLK', '束流测量与控制技术部'),
('JXG', '机械工程技术部'),
('DYJ', '电源技术部'),
('GYS', '公用设施技术部'),
('JAJ', '上海光源建安技术部'),
('SMK', '生命科学研究部'),
('WLH', '物理与环境科学研究部'),
('CLN', '材料与能源科学研究部'),
('XJC', '先进成像与工业应用研究部'),
('SXJ', '束线机械工程技术部'),
('SXG', '束线光学工程技术部'),
('YYJ', '应用加速器研究室'),
('FYD', '反应堆物理部'),
('RYJ', '熔盐机械工程技术部'),
('DXT', '反应堆系统工程技术部'),
('RYH', '熔盐化学与工程技术部'),
('DCL', '堆材料与工程技术一部'),
('FSH', '放射化学与工程技术部'),
('HAQ', '核安全与工程技术部'),
('DYG', '堆材料与工程技术二部'),
('HNJ', '核能建安工程技术部'),
('BGS', '办公室'),
('KYC', '科研管理处'),
('DKX', '大科学装置管理部'),
('HNG', '核能管理部'),
('RJC', '人事教育处'),
('CWC', '财务处'),
('TJC', '科研条件处'),
('KFC', '科技开发处'),
('YJS', '研究生部'),
('ZGC', '综合管理处'),
('JAB', '技术安全技术部'),
('XXZ', '信息中心'),
('ZCG', '经营性资产管理中心'),
('HQF', '后勤服务中心'),
('HXH', '核学会'),
('RHT', '日环投资公司'),
('SLG', '世龙科技公司'),
('FZZ', '辐照中心'),
)
class UserProfileForm(forms.ModelForm):
last_name = forms.CharField(max_length=30, label="姓")
first_name = forms.CharField(max_length=30, label="名")
department = forms.CharField(max_length=3, label="所属部门", widget=forms.Select(choices=DEPT_CHOICES))
cellphone = forms.CharField(max_length=15, label="手机号码")
class Meta:
model = UserProfile
fields = ('last_name', 'first_name', 'cellphone', 'department')
|
#========================================
# author: Changlong.Zang
# mail: zclongpop123@163.com
# time: Tue Sep 19 14:40:48 2017
#========================================
import pymel.core as pm
import maya.OpenMaya as OpenMaya
#--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
def get_dag_tree(root):
    '''
    Iterate the DAG hierarchy under `root` depth-first and yield each node as an MObject.
    '''
root_pml_node = pm.PyNode(root)
iterator = OpenMaya.MItDag()
iterator.reset(root_pml_node.__apiobject__())
while not iterator.isDone():
yield iterator.item()
iterator.next()
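if __name__ == '__main__':
    # Minimal usage sketch; assumes a DAG node named 'root_GRP' exists in the scene
    # (the name is a placeholder, not part of the original module).
    for mobj in get_dag_tree('root_GRP'):
        print(OpenMaya.MFnDagNode(mobj).fullPathName())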
|
# -*- coding: utf-8 -*-
"""
**Introduction to Complex Networks in Systems Biology**
Computational Assignment 2 - Large-Scale Structure
Due date: 14/05
Group: Camila Sanz, Matías Zanini.
"""
################################################################################
# PAQUETES
################################################################################
import numpy as np #matemática, simil maltab
import networkx as nx
import matplotlib.pyplot as plt
import random
import math
import plfit
from collections import Counter
from scipy.optimize import curve_fit
# Evitar acumulación de mensajes de warning en el display
import warnings
warnings.filterwarnings("ignore")
#%%
################################################################################
# PUNTO 1
################################################################################
'''
Part a)
'''
path = 'D:/Redes 2020/TC02/TC02_data/' #Colocar la ruta donde están guardados los archivos
path2='D:/Redes 2020/TC02/graficos/'
filename=['as-22july06_edgelist','netscience_edgelist','power_enlaces','yeast_Y2H']
grafos=[]
for file in filename:
with open(path+file+'.txt') as f:#abrimos el archivo
data=f.readlines()
for i in np.arange(len(data)):#transformamos en una lista de tuples
if file==filename[1]:
data[i]=data[i].strip().split(';')#el segundo archivo tiene separación con ";"
else:
data[i]=data[i].strip().split()#el resto tiene separación con espacio
data[i]=tuple(data[i][0:2])#ignoramos los pesos
G=nx.Graph()#inicializamos el grafo
G.add_edges_from(data)#agregamos los nodos y enlaces
grafos.append(G)#Guardamos los 4 grafos en una lista
#Distribución de grado P(k) (hacemos un histograma y un scatter plot)
for i in np.arange(len(grafos)):
#histograma:
grados=grafos[i].degree
x_degree=[j for k,j in grados]
x_log=np.log10(x_degree)
logbins = np.logspace(np.min(x_log),np.max(x_log),13)
plt.figure()
hist,bins,bars=plt.hist(x_degree,bins=logbins,density=True,facecolor='blue', alpha=0.5, ec='black')
plt.xscale('log')
plt.yscale('log')
plt.title('Histograma de grado '+filename[i])
plt.xlabel('k (log scale)')
plt.ylabel('P(k) (log scale)')
plt.savefig(path2+'hist_'+filename[i]+'.png')
plt.show()
plt.close()
#scatter plot
N=grafos[i].number_of_nodes()
count=Counter(x_degree)
count_orden=sorted(count.items())
k=[]
p_k=[]
for n,m in count_orden:
k.append(n)
p_k.append(m/N)
plt.figure()
plt.scatter(k,p_k)
plt.xscale('log')
plt.yscale('log')
plt.ylim((np.min(p_k)-10**-(abs(int(math.log10(np.min(p_k))))+1),np.max(p_k)+1))
plt.title('Distribución de probabilidad '+filename[i])
plt.xlabel('k (log scale)')
plt.ylabel('P(k) (log scale)')
plt.savefig(path2+'scatter_'+filename[i]+'.png')
plt.show()
plt.close()
#%%
'''
Part b)
Qualitatively, the degree distributions of the Internet network (as-22july06) and of the
protein network (yeast_Y2H) are the ones that best follow a power law.
To verify these observations, we can perform the corresponding fits (part d).
'''
'''
Part c)
All of the networks show finite-size (border) effects. On the one hand, none of them has nodes
with k=0. On the other hand, if we qualitatively draw a straight line on the log-log histograms,
starting from the minimum degree where such a line can be traced, we would expect hubs of higher
degree than those actually present in the network.
Qualitatively, the network with the strongest border effect appears to be the Internet network,
followed by the protein network.
We verify this last estimate at the end of the script, where we compare the maximum degree K of
each network with the maximum K expected for networks that follow a power law.
'''
#%%
'''
Part d)
'''
#Guardamos kminimo y gamma de cada red para ver cuantitavente las estimaciones del ejercicio c).
kminimo=[]
gammas=[]
for i in np.arange(len(grafos)):
grados=grafos[i].degree
x_degree=[j for k,j in grados]
fit=plfit.plfit(x_degree)
plt.figure()
fit.plotpdf()
plt.title('Ajuste '+filename[i])
plt.xlabel('k (log scale)')
plt.ylabel('P(k) (log scale)')
plt.savefig(path2+filename[i]+'_ajuste.png')
plt.show()
plt.close()
xmin = fit._xmin
alpha = fit._alpha
print('Red '+filename[i]+': K_min = '+str(xmin)+'; Gamma = '+str(alpha))
gammas.append(alpha)
kminimo.append(xmin)
#%%
'''
Part e)
Networks that follow a power law are scale invariant: the same behaviour is observed at every
scale, which also means the system has no characteristic scale. It therefore makes little sense
to report the mean degree of the nodes, since the fluctuations of this quantity are of the same
order of magnitude as, or larger than, the quantity itself.
To see this we can do:
'''
for i in [0,3]:#tomamos las 2 redes que siguen una power law
print('Red '+filename[i])
aux=[m for n,m in grafos[i].degree() if m>=kminimo[i]]
promK=np.mean(aux)
promK2=np.mean(np.array(aux)**2)
sigma=(promK2-promK**2)**(1/2)
print('Sigma_k: '+str(sigma))
print('k = '+str(promK)+'+-'+str(sigma))
#%%
#Efectos de bordes en el grado máximo de la red:
for i in np.arange(len(grafos)):
print('Red '+filename[i])
aux=[m for n,m in grafos[i].degree()]
print('K máximo real: '+str(np.max(aux)))
estimado=kminimo[i]*(grafos[i].number_of_nodes()**(1/(gammas[i]-1)))
print('K máximo estimado: '+str(estimado))
#%%
################################################################################
# PUNTO 2
################################################################################
#Definimos una función útil para utilizar durante el punto:
def moneda_cargada(p):
'''
    Return True with probability p and False otherwise (a biased coin flip).
'''
cara = random.random()
if cara <= p:
return True
else:
return False
#%%
'''
Part a)
'''
# Armamos la red random cuyos nodos se conectan con probabilidad constante p:
def red_erdos(p, n):
nodos = np.arange(1, n+1, 1)
red_er = nx.Graph() # Inicializamos el grafo
red_er.add_nodes_from(nodos)
enlaces_list = []
for fila in nodos:
# Nos movemos por arriba de la diagonal de la matriz de adyacencia para no contar 2 veces los pares de nodos:
for columna in range(int(fila)+1, len(nodos)+1):
if moneda_cargada(p):
# Añadimos a la lista de enlaces la tupla si sale favorable la tirada con probabilidad p:
enlaces_list.append( (fila, columna) )
red_er.add_edges_from(enlaces_list)
return red_er
p = 0.2 # Probabilidad con la que se conectan dos nodos.
n = 1e4 # Cantidad de Nodos en la red
red_er = red_erdos(p, n)
# i.
m = red_er.number_of_edges()
m_teo = p*n*(n-1)/2 # Valor que se espera para la cantidad de enlaces en una red del tipo E-R
print('La cantidad m de enlaces difiere del valor esperado en un', np.around(abs(m - m_teo)/m_teo * 100, 4), ' %')
'''
The relation m = p*n*(n-1)/2 is expected because, for a large number of nodes, the probability p
gives the fraction of node pairs that are actually linked out of all possible pairs. In turn, the
total number of possible pairs is the binomial coefficient C(n, 2) = n*(n-1)/2, i.e. the number of
distinct pairs that can be formed from n nodes.
'''
# ii.
'''
Since the mean degree <k> of the network is the average of the degrees of all nodes, the relation
<k> = 1/n * sum(k_i) = 2*m/n follows immediately. The factor 2 appears because, when summing all
degrees, every edge is inevitably counted twice (the network is undirected).
'''
k_med = 2*m / n # Grado medio de la red
k_med_teo = p * (n-1) # Valor esperado para el grado medio en una red del tipo E-R
print('El grado medio <k> difiere del valor esperado en un', np.around(abs(k_med - k_med_teo)/k_med_teo * 100, 4), '%')
'''
This relation holds as an immediate consequence of the one used in i.
Substituting it into the calculation of <k> gives 2*[p*n*(n-1)/2]/n, which after simplifying
yields <k> = p*(n-1).
'''
#%%
red_er_lib = nx.erdos_renyi_graph(int(n), p) # Creamos una red E-R utilizando la librería networkx
# i.
m = red_er_lib.number_of_edges()
m_teo = p*n*(n-1)/2 # Valor que se espera para la cantidad de enlaces en una red del tipo E-R
print('La cantidad m de enlaces difiere del valor esperado en un', np.around(abs(m - m_teo)/m_teo * 100, 4), ' %')
k_med = 2*m / n # Grado medio de la red
k_med_teo = p * (n-1) # Valor esperado para el grado medio en una red del tipo E-R
print('El grado medio <k> difiere del valor esperado en un', np.around(abs(k_med - k_med_teo)/k_med_teo * 100, 4), '%')
'''
As with our own code, the E-R network generated with the networkx library satisfies the same
relations for m and <k>.
'''
#%%
'''
Part b)
'''
def red_random(k0, n):
# Creamos una nueva red del tipo cliqué de grado k0, es decir, hay k0 nodos todos enlazados entre sí:
red_rand = nx.complete_graph(k0)
for ki in range(k0, n):
# Ahora, creamos una lista de nodos al azar (no repetidos) de la red con la que se conectará el nuevo nodo:
enlaces = random.sample(list(red_rand.nodes()), k = k0)
for i in enlaces:
red_rand.add_edge(i, ki) # Agregamos cada enlace para el nuevo nodo.
return red_rand
'''
This gives an iterative algorithm that builds a network whose initial nodes have degree k0,
adding at each step a new node of degree k0 that connects to the existing nodes at random.
'''
k0 = 5 # Establecer el grado inicial de la red
n = int(1e4) # Establecer el número de nodos que se desea que tenga la red.
red_rand = red_random(k0, n)
#%%
'''
Part c)
'''
'''
The Barabasi-type network is similar to the one generated in part b). Here, however, each added
node links to the existing nodes with a probability that depends on their degree: the higher the
degree, the higher the probability that the new node connects to it. Following Barabasi:
p(k_i) = k_i / sum(k_i).
'''
def red_barabasi(k0, k, n):
if k>k0:
raise ValueError('No se puede añadir un nodo con un grado k mayor que el inicial k0 sin repetir enlaces.')
red_barab = nx.complete_graph(k0)
for ki in range(k0, n):
grado_arr = np.asarray(red_barab.degree())[:,1] # Genera un array con los grados de la red
probs = grado_arr / sum(grado_arr) # Array con las probabilidades de cada grado segun Barabasi: p(k_i) = ki / sum(k_i)
'''
        Now build a list of (non-repeated) nodes, chosen with the probabilities given by probs,
        to which the new node will connect:
'''
enlaces = np.random.choice(np.asarray(red_barab.nodes()), size = k, p = probs, replace = False)
for i in enlaces:
red_barab.add_edge(i, ki) # Agregamos cada enlace para el nuevo nodo.
return red_barab
# Creamos una nueva red del tipo cliqué de grado k0, es decir, hay k0 nodos todos enlazados entre sí:
k0 = 5 # Establecer el grado inicial de la red
n = int(1e4) # Establecer el número de nodos que se desea que tenga la red.
k = 5 # Establecer el grado de los nodos que se agregarán en cada paso. IMPORTANTE: k <= k0
red_barab = red_barabasi(k0, k, n)
# i.
'''
Now we compare the number m of edges in the network with n*k; in particular we could use k = k0:
'''
m = red_barab.number_of_edges() # Cantidad de enlaces
m_teo_b = n*k # Valor esperado para el número de enlaces en una red del tipo Barabasi
print('El número de enlaces m difiere del valor esperado en un', np.around(abs(m-m_teo_b)/m_teo_b * 100, 4), '%')
'''
As we can see, the two values are comparable. This is due to the appearance of hubs, as expected:
nodes with many edges (high degree) tend to capture the newly added nodes, so for a large number
of nodes the edge count is dominated by these hubs. Since each step adds one node of degree k, and
most of those edges end up on the hubs, the total number of edges is close to k times the number
of steps. Moreover, because n >> k0 > k, the number of steps is n - k0, which is approximately n,
so m is approximately k*n.
'''
#%%
'''
Part d)
'''
'''
Case 1. Comparison with "as-22july06_edgelist":
'''
# Ponemos los valores de n y m que obtuvimos para el ejercicio 1:
n1 = 22941
m1 = 48372
p = m1 / (n1*(n1-1)/2) # Calculamos el p que tendría asociado una red erdos-renyi de n nodos y m enlaces.
red_er_1 = red_erdos(p, n1) #Red erdos-renyi para estos valores de n y m
red_rand_1 = red_random(5, n1) # Elegimos k0 = 5 de forma arbitraria para inicializar la red random.
k1 = int(m1/n1) # Calculamos el grado k de cada nodo agregado a la red tipo barabasi.
red_barab_1 = red_barabasi(5, k1, n1) # Elegimos k0 = 5 de forma arbitraria para inicializar la red random.
#%%
# Graficamos:
grados_er = np.asarray(red_er_1.degree())[:,1]
#grados_er_log = np.log10(grados_er)
#bins_er = np.logspace(np.min(grados_er), np.max(grados_er), 13)
bins_er = np.arange(np.min(grados_er), np.max(grados_er)+1, 1)
plt.figure()
hist, bins, bars = plt.hist(grados_er, bins = bins_er, align = 'mid', density = True, facecolor='blue', alpha=0.5, ec='black')
#plt.xscale('log')
plt.yscale('log')
plt.title('Distribución de grado para red tipo Erdos-Renyi simulando la red "as-22july06_edgelist" ')
plt.xlabel('k (linear scale)')
plt.ylabel('P(k) (log scale)')
plt.grid()
plt.show()
'''
We observe a Poisson-like distribution whose maximum is reached at degree k=4. This is expected,
since the mean degree of the network is 2*m1/n1, approximately 4.23.
Because no hubs appeared, a logarithmic scale on the x axis (bins) was not needed: the node
degrees are low (the maximum is 15).
'''
#%%
grados_rand = np.asarray(red_rand_1.degree())[:,1]
grados_rand_log = np.log10(grados_rand)
bins_rand = np.logspace(np.min(grados_rand_log), np.max(grados_rand_log), 20)
plt.figure()
hist, bins, bars = plt.hist(grados_rand, bins = bins_rand, align = 'mid', density = True, facecolor='blue', alpha=0.5, ec='black')
plt.xscale('log')
plt.yscale('log')
plt.title('Distribución de grado para red tipo Random simulando la red "as-22july06_edgelist" ')
plt.xlabel('k (log scale)')
plt.ylabel('P(k) (log scale)')
plt.grid()
plt.show()
#%%
grados_barab = np.asarray(red_barab_1.degree())[:,1]
grados_barab_log = np.log10(grados_barab)
bins_barab = np.logspace(np.min(grados_barab_log), np.max(grados_barab_log), 20)
plt.figure()
hist, bins, bars = plt.hist(grados_barab, bins = bins_barab, align = 'mid', density = True, facecolor='blue', alpha=0.5, ec='black')
plt.xscale('log')
plt.yscale('log')
plt.title('Distribución de grado para red tipo Barabasi simulando la red "as-22july06_edgelist" ')
plt.xlabel('k (log scale)')
plt.ylabel('P(k) (log scale)')
plt.grid()
plt.show()
'''
This histogram clearly shows a scale-free decay. This was expected, since the Barabasi model is a
better approximation to real networks than the random models.
'''
#######################################################################################################################
#%%
'''
Case 2. Comparison with "netscience_edgelist":
'''
n2 = 1450
m2 = 2727
p = m2 / (n2 *(n2 - 1)/2) # Calculamos el p que tendría asociado una red erdos-renyi de n nodos y m enlaces.
red_er_2 = red_erdos(p, n2) #Red erdos-renyi para estos valores de n y m
red_rand_2 = red_random(5, n2) # Elegimos k0 = 5 de forma arbitraria para inicializar la red random.
k2 = int(m2/n2) # Calculamos el grado k de cada nodo agregado a la red tipo barabasi.
red_barab_2 = red_barabasi(5, k2, n2) # Elegimos k0 = 5 de forma arbitraria para inicializar la red random.
#%%
# Graficamos:
grados_er = np.asarray(red_er_2.degree())[:,1]
#grados_er_log = np.log10(grados_er)
#bins_er = np.logspace(np.min(grados_er), np.max(grados_er), 13)
bins_er = np.arange(np.min(grados_er), np.max(grados_er)+1, 1)
plt.figure()
hist, bins, bars = plt.hist(grados_er, bins = bins_er, align = 'mid', density = True, facecolor='blue', alpha=0.5, ec='black')
#plt.xscale('log')
plt.yscale('log')
plt.title('Distribución de grado para red tipo Erdos-Renyi simulando la red "netscience_edgelist" ')
plt.xlabel('k (linear scale)')
plt.ylabel('P(k) (log scale)')
plt.grid()
plt.show()
'''
Analogous to the previous case: a Poisson-like curve is observed.
'''
#%%
grados_rand = np.asarray(red_rand_2.degree())[:,1]
grados_rand_log = np.log10(grados_rand)
bins_rand = np.logspace(np.min(grados_rand_log), np.max(grados_rand_log), 20)
plt.figure()
hist, bins, bars = plt.hist(grados_rand, bins = bins_rand, align = 'mid', density = True, facecolor='blue', alpha=0.5, ec='black')
plt.xscale('log')
plt.yscale('log')
plt.title('Distribución de grado para red tipo Random simulando la red "netscience_edgelist" ')
plt.xlabel('k (log scale)')
plt.ylabel('P(k) (log scale)')
plt.grid()
plt.show()
#%%
grados_barab = np.asarray(red_barab_2.degree())[:,1]
grados_barab_log = np.log10(grados_barab)
bins_barab = np.logspace(np.min(grados_barab_log), np.max(grados_barab_log), 12)
plt.figure()
hist, bins, bars = plt.hist(grados_barab, bins = bins_barab, align = 'mid', density = True, facecolor='blue', alpha=0.5, ec='black')
plt.xscale('log')
plt.yscale('log')
plt.title('Distribución de grado para red tipo Barabasi simulando la red "netscience_edgelist" ')
plt.xlabel('k (log scale)')
plt.ylabel('P(k) (log scale)')
plt.grid()
plt.show()
'''
Although this histogram again shows the expected scale-free decay, the last points depart from
this behaviour. This may be because this network has far fewer nodes than the previous one, which
increases the statistical error.
'''
#######################################################################################################################
#%%
'''
Case 3. Comparison with "power_enlaces":
'''
# Ponemos los valores de n y m que obtuvimos para el ejercicio 1:
n3 = 4941
m3 = 6594
p = m3 / (n3*(n3-1)/2) # Calculamos el p que tendría asociado una red erdos-renyi de n nodos y m enlaces.
red_er_3 = red_erdos(p, n3) #Red erdos-renyi para estos valores de n y m
red_rand_3 = red_random(5, n3) # Elegimos k0 = 5 de forma arbitraria para inicializar la red random.
k3 = int(m3/n3) # Calculamos el grado k de cada nodo agregado a la red tipo barabasi.
red_barab_3 = red_barabasi(5, k3, n3) # Elegimos k0 = 5 de forma arbitraria para inicializar la red random.
#%%
# Graficamos:
grados_er = np.asarray(red_er_3.degree())[:,1]
#grados_er_log = np.log10(grados_er)
#bins_er = np.logspace(np.min(grados_er), np.max(grados_er), 13)
bins_er = np.arange(np.min(grados_er), np.max(grados_er)+1, 1)
plt.figure()
hist, bins, bars = plt.hist(grados_er, bins = bins_er, align = 'mid', density = True, facecolor='blue', alpha=0.5, ec='black')
#plt.xscale('log')
plt.yscale('log')
plt.title('Distribución de grado para red tipo Erdos-Renyi simulando la red "power_enlaces" ')
plt.xlabel('k (linear scale)')
plt.ylabel('P(k) (log scale)')
plt.grid()
plt.show()
'''
Analogous to the previous cases: a Poisson-like curve is observed.
'''
#%%
grados_rand = np.asarray(red_rand_3.degree())[:,1]
grados_rand_log = np.log10(grados_rand)
bins_rand = np.logspace(np.min(grados_rand_log), np.max(grados_rand_log), 20)
plt.figure()
hist, bins, bars = plt.hist(grados_rand, bins = bins_rand, align = 'mid', density = True, facecolor='blue', alpha=0.5, ec='black')
plt.xscale('log')
plt.yscale('log')
plt.title('Distribución de grado para red tipo Random simulando la red "power_enlaces" ')
plt.xlabel('k (log scale)')
plt.ylabel('P(k) (log scale)')
plt.grid()
plt.show()
#%%
grados_barab = np.asarray(red_barab_3.degree())[:,1]
grados_barab_log = np.log10(grados_barab)
bins_barab = np.logspace(np.min(grados_barab_log), np.max(grados_barab_log), 13)
plt.figure()
hist, bins, bars = plt.hist(grados_barab, bins = bins_barab, align = 'mid', density = True, facecolor='blue', alpha=0.5, ec='black')
plt.xscale('log')
plt.yscale('log')
plt.title('Distribución de grado para red tipo Barabasi simulando la red "power_enlaces" ')
plt.xlabel('k (log scale)')
plt.ylabel('P(k) (log scale)')
plt.grid()
plt.show()
'''
Again, for the Barabasi-type network, a scale-free distribution is found, as expected.
'''
#######################################################################################################################
#%%
'''
Case 4. Comparison with "yeast_Y2H":
'''
# Ponemos los valores de n y m que obtuvimos para el ejercicio 1:
n4 = 2018
m4 = 2930
p = m4 / (n4*(n4-1)/2) # Calculamos el p que tendría asociado una red erdos-renyi de n nodos y m enlaces.
red_er_4 = red_erdos(p, n4) #Red erdos-renyi para estos valores de n y m
red_rand_4 = red_random(5, n4) # Elegimos k0 = 5 de forma arbitraria para inicializar la red random.
k4 = int(m4/n4) # Calculamos el grado k de cada nodo agregado a la red tipo barabasi.
red_barab_4 = red_barabasi(5, k4, n4) # Elegimos k0 = 5 de forma arbitraria para inicializar la red random.
#%%
# Graficamos:
grados_er = np.asarray(red_er_4.degree())[:,1]
#grados_er_log = np.log10(grados_er)
#bins_er = np.logspace(np.min(grados_er), np.max(grados_er), 13)
bins_er = np.arange(np.min(grados_er), np.max(grados_er)+1, 1)
plt.figure()
hist, bins, bars = plt.hist(grados_er, bins = bins_er, align = 'mid', density = True, facecolor='blue', alpha=0.5, ec='black')
#plt.xscale('log')
plt.yscale('log')
plt.title('Distribución de grado para red tipo Erdos-Renyi simulando la red "yeast_Y2H" ')
plt.xlabel('k (linear scale)')
plt.ylabel('P(k) (log scale)')
plt.grid()
plt.show()
'''
Analogous to the previous cases: a Poisson-like curve is observed.
'''
#%%
grados_rand = np.asarray(red_rand_4.degree())[:,1]
grados_rand_log = np.log10(grados_rand)
bins_rand = np.logspace(np.min(grados_rand_log), np.max(grados_rand_log), 18)
plt.figure()
hist, bins, bars = plt.hist(grados_rand, bins = bins_rand, align = 'mid', density = True, facecolor='blue', alpha=0.5, ec='black')
plt.xscale('log')
plt.yscale('log')
plt.title('Distribución de grado para red tipo Random simulando la red "yeast_Y2H" ')
plt.xlabel('k (log scale)')
plt.ylabel('P(k) (log scale)')
plt.grid()
plt.show()
#%%
grados_barab = np.asarray(red_barab_4.degree())[:,1]
grados_barab_log = np.log10(grados_barab)
bins_barab = np.logspace(np.min(grados_barab_log), np.max(grados_barab_log), 13)
plt.figure()
hist, bins, bars = plt.hist(grados_barab, bins = bins_barab, align = 'mid', density = True, facecolor='blue', alpha=0.5, ec='black')
plt.xscale('log')
plt.yscale('log')
plt.title('Distribución de grado para red tipo Barabasi simulando la red "yeast_Y2H" ')
plt.xlabel('k (log scale)')
plt.ylabel('P(k) (log scale)')
plt.grid()
plt.show()
'''
Although it does not follow the scale-free trend as well as in the previous case, the
Barabasi-type network still shows this behaviour satisfactorily.
'''
'''
As a final conclusion, Random and Erdos-Renyi networks are not satisfactory for predicting the
behaviour of real networks: none of the networks of this type that we studied developed hubs,
whereas in real networks such hubs (high-degree nodes) do appear.
Barabasi-type networks, by construction, give higher priority to connecting new nodes to the
existing high-degree nodes. This favours not only the appearance of hubs but also the scale-free
behaviour observed in real networks.
'''
#%%
################################################################################
# PUNTO 3
################################################################################
#Usamos el mismo código que en el Punto 2, inciso c)
# Creamos una nueva red del tipo cliqué de grado k0, es decir, hay k0 nodos todos enlazados entre sí:
k0 = 7 # Establecer el grado inicial de la red
n = int(10000) # Establecer el número de nodos que se desea que tenga la red.
red_barab = nx.complete_graph(k0)
k = 7 # Establecer el grado de los nodos que se agregarán en cada paso. IMPORTANTE: k <= k0
t=0#paso
#Guardamos en listas los valores de los pasos y los grados para t=5 y t=95
step_5=[]
grado_5=[]
step_95=[]
grado_95=[]
if k>k0:
raise ValueError('No se puede añadir un nodo con un grado k mayor que el inicial k0 sin repetir enlaces.')
for ki in range(k0, n):
t=t+1
grado_arr = np.asarray(red_barab.degree())[:,1] # Genera un array con los grados de la red
probs = grado_arr / sum(grado_arr) # Array con las probabilidades de cada grado segun Barabasi: p(k_i) = ki / sum(k_i)
'''
    Now build a list of (non-repeated) nodes, chosen with the probabilities given by probs,
    to which the new node will connect:
'''
enlaces = np.random.choice(np.asarray(red_barab.nodes()), size = k, p = probs, replace = False)
for i in enlaces:
red_barab.add_edge(i, ki) # Agregamos cada enlace para el nuevo nodo.
if t>=5:
grado_5.append(red_barab.degree[k0+5-1])#restamos 1 porque comenzamos a contar del nodo 0
step_5.append(t)
if t>=95:
grado_95.append(red_barab.degree[k0+95-1])#restamos 1 porque comenzamos a contar del nodo 0
step_95.append(t)
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.scatter(step_5, grado_5, s=10, c='C0', label='t=5')
ax1.scatter(step_95,grado_95, s=10, c='C2', label='t=95')
ax1.set_xscale('log')
ax1.set_yscale('log')
plt.legend(loc='upper left')
plt.ylabel('Grado (k)')
plt.xlabel('t (paso)')
plt.savefig(path2+'kvst_5_95.png')
plt.show()
plt.close()
'''
We can see that, at first, the time evolution curves of the degree for the selected nodes are
unstable, and that as more nodes are added to the network they adopt an approximately linear
behaviour (on a log scale) with a similar slope. This is clearest for the t=5 curve; for the node
added at a later step (t=95) the curve only stabilises for t>100.
We can then estimate the exponent (the slope on the adopted scale) of these curves.
'''
#fiteamos
#dividimos por el t0 de cada una para que las curvas comiencen del mismo x0-y0
x5=np.divide(step_5,step_5[0])
x95=np.divide(step_95,step_95[0])
def exp_func(x, a):
return k * (x**a)
popt5, pcov5 = curve_fit(exp_func, x5,grado_5)
popt95, pcov95 = curve_fit(exp_func, x95, grado_95)
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.scatter(x5, grado_5, s=10,c='C0', label='t=5')
ax1.scatter(x95,grado_95, s=10,c='C2', label='t=95')
ax1.plot(x5, exp_func(x5, *popt5), 'r-',label='Fit: a=%5.3f (t=5)' % tuple(popt5))
ax1.plot(x95, exp_func(x95, *popt95), 'r--',label='Fit: a=%5.3f (t=95)' % tuple(popt95))
ax1.set_xscale('log')
ax1.set_yscale('log')
plt.legend(loc='upper left')
plt.ylabel('Grado (k)')
plt.xlabel('t/t0 (paso)')
plt.savefig(path2+'kvst_5_95_fit.png')
plt.show()
plt.close()
'''
As we can see, the slope of both curves is close to 0.48 +- 0.03 (this value may vary slightly
because of the random way the network is generated; we recommend increasing the number of steps
to obtain more stable curves, especially for t=95). We estimate that, for t > t0 + 100, this
behaviour is independent of the node chosen.
The hubs of the network generally correspond to the oldest nodes (those added in the first steps,
i.e. with the smallest t0), which increase their connectivity faster than the newer ones.
Because the probability of creating an edge between a new node and an old one is proportional to
the degree of the latter, a "rich-get-richer" effect arises: nodes with more connections have a
higher probability of gaining a new edge. In this way the hubs increase their degree, and hence
their connection probability, at every iteration.
'''
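#%%
# Side note (a sketch, not part of the original analysis): for the Barabasi-Albert model the
# expected growth of a node's degree is k_i(t) = k * (t / t_i)**beta with beta = 1/2, so the
# fitted exponents (~0.48) can be compared against that theoretical curve.
beta_teo = 0.5
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.scatter(x5, grado_5, s=10, c='C0', label='t=5')
ax1.scatter(x95, grado_95, s=10, c='C2', label='t=95')
ax1.plot(x5, exp_func(x5, beta_teo), 'k--', label='theory: beta = 1/2')
ax1.set_xscale('log')
ax1.set_yscale('log')
plt.legend(loc='upper left')
plt.ylabel('Grado (k)')
plt.xlabel('t/t0 (paso)')
plt.show()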
#%%
|
import urllib
def read_text(src):
    # src: path to the text file whose contents should be checked
    quotes = open(src, "r")
    contents_file = quotes.read()
    print contents_file
    quotes.close()
    profanity_check(contents_file)
def profanity_check(text_check):
    connection = urllib.urlopen("http://www.wdyl.com/profanity?q=" + urllib.quote(text_check))
    output = connection.read()
    print output
    connection.close()
read_text("quotes.txt")  # placeholder path -- replace with the file to check
|
from django.contrib import admin
from .models import Checklistdocument
# Register your models here.
admin.site.register(Checklistdocument)
|
class Print_options:
def display_float_question(self, question):
while True:
try:
                answer = float(input(question))
except ValueError:
print("Sorry, dat begreep ik niet, probeer het opnieuw")
continue
else:
break
        return answer
def print_result(self, party):
if party.cake_count > 0:
print(f"De taart die u kunt nemen is: {party.pie.name}")
print(f"U heeft hiervoor {party.cake_count} cakes voor nodig")
print(f"Dit kost u: {party.cost}")
print(f"Uw budget was: {party.budget}")
print(f"U houd {party.left_over_pieces} stukken over en {party.left_over_money} aan geld")
else:
print("er is geen taart voor u beschikbaar")
|
__author__ = 'aoboturov'
from sklearn.cross_validation import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import roc_auc_score
from competition_tutorial import prepare_data_features, FEATURES
RANDOM_STATE = 42
train, target_with_features = prepare_data_features(lambda df: train_test_split(df, test_size=.2, random_state=RANDOM_STATE),
[1, 0])
X = train[FEATURES]
y = train['label']
TEST_SIZE = 0.2
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=TEST_SIZE, random_state=RANDOM_STATE)
print(X.shape, y.shape)
print(X_train.shape, y_train.shape)
print(X_val.shape, y_val.shape)
clf_new = SVC(random_state=RANDOM_STATE, probability=True, kernel='poly', coef0=3, degree=3)
# kernel='rbf' -> 0.886971791423
# kernel='linear' -> 0.5
# kernel='polynomial', coef0=0, degree=2 -> 0.5
# kernel='polynomial', coef0=0, degree=3 -> 0.5
# kernel='polynomial', coef0=3, degree=3 -> 0.5
clf_new.fit(X_train, y_train)
y_pred_new = clf_new.predict_proba(X_val)[:, 1]
print(roc_auc_score(y_val, y_pred_new))
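# A hedged alternative to the manual kernel sweep recorded in the comments above: the same
# comparison expressed as a grid search (a sketch; the parameter grid is an assumption and
# not part of the original experiment).
from sklearn.model_selection import GridSearchCV
param_grid = {'kernel': ['rbf', 'linear', 'poly'], 'degree': [2, 3], 'coef0': [0, 3]}
search = GridSearchCV(SVC(random_state=RANDOM_STATE, probability=True),
                      param_grid, scoring='roc_auc', cv=3)
search.fit(X_train, y_train)
print(search.best_params_, search.best_score_)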
|
while True:
x = int(input())
if x == 0:
break
for n in range(1, x+1):
if n == x:
print(n)
else:
print(n, end=' ')
|
if __name__ == '__main__':
student_list =[]
for _ in range(int(input())):
name = input()
score = float(input())
student = [name, score]
student_list.append( student )
number_list = sorted(list(set(map(lambda x : x[1], student_list))))
# print (number_list)
second_lowest = number_list[1]
second_lowest_ppl = []
for student in student_list:
if student[1] == second_lowest:
second_lowest_ppl.append(student[0])
sorted_list = sorted(second_lowest_ppl)
print('\n'.join(sorted_list))
|
# Generated by Django 2.1.2 on 2019-01-07 10:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0033_auto_20190102_1033'),
]
operations = [
migrations.RenameField(
model_name='category',
old_name='direction',
new_name='direction_cat',
),
migrations.AlterField(
model_name='photo',
name='photo',
            field=models.ImageField(help_text='Приймаються файли (фото) лише у форматі JPEG!', null=True, upload_to='products_photo', verbose_name='Фото продукту'),
),
]
|
def take_umbrella(weather, rain_chance):
if weather=='rainy':
return True
if weather=='sunny' and rain_chance>0.50:
return True
elif weather=='cloudy'and rain_chance>0.20:
return True
else:
return False
'''
Write a function take_umbrella() that takes two arguments: a string representing
the current weather and a float representing the chance of rain today.
Your function should return True or False based on the following criteria.
You should take an umbrella if it's currently raining or if it's cloudy and the
chance of rain is over 0.20.
You shouldn't take an umbrella if it's sunny unless it's more likely to rain than not.
The options for the current weather are sunny, cloudy, and rainy.
For example, take_umbrella('sunny', 0.40) should return False.
As an additional challenge, consider solving this kata using only logical operators and not
using any if statements.
'''
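# One possible answer to the additional challenge above (a sketch, not the only solution):
# the same decision table written with logical operators only.
def take_umbrella_no_if(weather, rain_chance):
    return (weather == 'rainy'
            or (weather == 'sunny' and rain_chance > 0.50)
            or (weather == 'cloudy' and rain_chance > 0.20))
assert take_umbrella_no_if('sunny', 0.40) == take_umbrella('sunny', 0.40) == False
assert take_umbrella_no_if('rainy', 0.0) is True
assert take_umbrella_no_if('cloudy', 0.21) is True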
|
from MarbleManager import marbleManager
commands = [
"**help:** you just used it",
"**ping:** pong",
"**register:** register and recieve a random amount of marbles from 20 to 40",
"**collection:** shows your current marble amount"
]
async def processCommand(message, commandPrefix):
command = message.content[1:] #the message without the commandPrefix
if command == "help":
helpMessage = ""
for x in commands:
helpMessage += x + "\n\n"
await message.channel.send(helpMessage)
elif command == "ping":
await message.channel.send("pong")
elif command == "register":
await marbleManager.register(message.author, message.channel)
elif command == "collection":
await marbleManager.getCollection(message.author, message.channel)
elif command.split()[0] == "coinflip":
parts = command.split()
if len(parts) == 2:
if not parts[1].isdigit():
await message.channel.send("You didn't specify a number correctly dumbo.")
else:
#good to go
await marbleManager.coinflip(message.author, message.channel, int(parts[1]))
else:
await message.channel.send("coinflip should have 2 parts seperated by a space dumbass.")
else:
await message.channel.send("Command not recognized. " + commandPrefix + "help to see commands.")
|
import sys
sys.path.append('../500_common')
import lib
import lib_ss
if False:
images = lib.get_images("data/result.html")
else:
soup = lib_ss.main("/Users/nakamurasatoru/git/d_genji/genji_curation/src/500_common/Chrome3post", "Profile 3", 60)
images = lib.get_images_by_soup(soup)
manifest = "https://kotenseki.nijl.ac.jp/biblio/100266039/manifest"
areas = ["2840,390,2160,3010", "550,390,2160,3010"]
# 2840,387,2160,3012/full/0/default.jpg
countMax = 20
# token = lib.get_token("../token.yml")
token = "eyJhbGciOiJSUzI1NiIsImtpZCI6IjA4MGU0NWJlNGIzMTE4MzA5M2RhNzUyYmIyZGU5Y2RjYTNlNmU4ZTciLCJ0eXAiOiJKV1QifQ.eyJuYW1lIjoi5Lit5p2R6KaaIiwicGljdHVyZSI6Imh0dHBzOi8vbGgzLmdvb2dsZXVzZXJjb250ZW50LmNvbS8tWHQ1NENUT1pEdVEvQUFBQUFBQUFBQUkvQUFBQUFBQUFBQUEvQUFLV0pKTjg3RWs3MVZqeTZyWTNpeTh6bmFFR0FqeFlpdy9waG90by5qcGciLCJpc3MiOiJodHRwczovL3NlY3VyZXRva2VuLmdvb2dsZS5jb20vY29kaC04MTA0MSIsImF1ZCI6ImNvZGgtODEwNDEiLCJhdXRoX3RpbWUiOjE2MDkzMzYyMDgsInVzZXJfaWQiOiJvZ2Z0UkpaeGxDZzZIRDZMelNPWGZ4ZlBXYUEzIiwic3ViIjoib2dmdFJKWnhsQ2c2SEQ2THpTT1hmeGZQV2FBMyIsImlhdCI6MTYwOTQ1MzI5NiwiZXhwIjoxNjA5NDU2ODk2LCJlbWFpbCI6InNhLnRvcnUuYTQxNjIzQGdtYWlsLmNvbSIsImVtYWlsX3ZlcmlmaWVkIjp0cnVlLCJmaXJlYmFzZSI6eyJpZGVudGl0aWVzIjp7Imdvb2dsZS5jb20iOlsiMTA0ODEzNDEzMzM0OTI0ODM4NDQzIl0sImVtYWlsIjpbInNhLnRvcnUuYTQxNjIzQGdtYWlsLmNvbSJdfSwic2lnbl9pbl9wcm92aWRlciI6Imdvb2dsZS5jb20ifX0.oVA1q6JloDC2erOftmaxKytyUdtRr_7Q3d9ooNfkqGDRKN6ax6AKFNbqkkhucckWzAA4FoDrc7x2LQNkXAJ9LUNn6awMCkkT6aaHEJboiEA0NWxwLR12H3MGfduECFSMGt7eZyp51-NsuvnawzmKPkAns0fpvBZHSNSjrSVQEs1fJ86aT_WxY8LksWPlTwM7CoLq_P4bGpbtrMJvNS9ngnZ-_UavMhNuF3L8qLYkPLj4LHER4Bm0JcqkrwUh1d1UEM2-YPo8KyYoBQEIYwNUfusxa0LeHxiMz3ba2bb0XkrR0dA0ndbf7vGtp9LXz6gyT710vMCnsd0up8EaNXv_Kw"
lib.post(manifest, areas, countMax, token, images, "Manifest")
|
# Generated by Django 2.2.6 on 2019-11-06 18:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('work', '0023_dprqty'),
]
operations = [
migrations.RemoveField(
model_name='dprinfra',
name='site',
),
migrations.AddField(
model_name='dprqty',
name='changeid',
field=models.CharField(blank=True, max_length=50, null=True, unique=True),
),
migrations.AddField(
model_name='dprqty',
name='created_at',
field=models.DateTimeField(auto_now_add=True, null=True),
),
migrations.AddField(
model_name='dprqty',
name='updated_at',
field=models.DateTimeField(auto_now=True, null=True),
),
migrations.AddField(
model_name='progressqty',
name='changeid',
field=models.CharField(blank=True, max_length=50, null=True, unique=True),
),
migrations.AddField(
model_name='progressqty',
name='created_at',
field=models.DateTimeField(auto_now_add=True, null=True),
),
migrations.AddField(
model_name='progressqty',
name='updated_at',
field=models.DateTimeField(auto_now=True, null=True),
),
migrations.AddField(
model_name='progressqtyextra',
name='changeid',
field=models.CharField(blank=True, max_length=50, null=True, unique=True),
),
migrations.AddField(
model_name='progressqtyextra',
name='created_at',
field=models.DateTimeField(auto_now_add=True, null=True),
),
migrations.AddField(
model_name='progressqtyextra',
name='updated_at',
field=models.DateTimeField(auto_now=True, null=True),
),
migrations.AddField(
model_name='shiftedqty',
name='changeid',
field=models.CharField(blank=True, max_length=50, null=True, unique=True),
),
migrations.AddField(
model_name='shiftedqty',
name='created_at',
field=models.DateTimeField(auto_now_add=True, null=True),
),
migrations.AddField(
model_name='shiftedqty',
name='updated_at',
field=models.DateTimeField(auto_now=True, null=True),
),
migrations.AddField(
model_name='shiftedqtyextra',
name='changeid',
field=models.CharField(blank=True, max_length=50, null=True, unique=True),
),
migrations.AddField(
model_name='shiftedqtyextra',
name='created_at',
field=models.DateTimeField(auto_now_add=True, null=True),
),
migrations.AddField(
model_name='shiftedqtyextra',
name='updated_at',
field=models.DateTimeField(auto_now=True, null=True),
),
migrations.AddField(
model_name='site',
name='changeid',
field=models.CharField(blank=True, max_length=50, null=True, unique=True),
),
migrations.AddField(
model_name='site',
name='created_at',
field=models.DateTimeField(auto_now_add=True, null=True),
),
migrations.AddField(
model_name='site',
name='updated_at',
field=models.DateTimeField(auto_now=True, null=True),
),
migrations.AddField(
model_name='siteextra',
name='changeid',
field=models.CharField(blank=True, max_length=50, null=True, unique=True),
),
migrations.AddField(
model_name='siteextra',
name='created_at',
field=models.DateTimeField(auto_now_add=True, null=True),
),
migrations.AddField(
model_name='siteextra',
name='updated_at',
field=models.DateTimeField(auto_now=True, null=True),
),
migrations.AddField(
model_name='surveyqty',
name='changeid',
field=models.CharField(blank=True, max_length=50, null=True, unique=True),
),
migrations.AddField(
model_name='surveyqty',
name='created_at',
field=models.DateTimeField(auto_now_add=True, null=True),
),
migrations.AddField(
model_name='surveyqty',
name='updated_at',
field=models.DateTimeField(auto_now=True, null=True),
),
migrations.DeleteModel(
name='DprHH',
),
migrations.DeleteModel(
name='DprInfra',
),
]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
# from django.contrib.gis.db import models
from django.utils import timezone
class FeatureType(models.Model):
feature_name = models.CharField(max_length=200)
def __str__(self):
return "{}".format(self.feature_name)
class Meta:
ordering = ['feature_name']
class ChangeType(models.Model):
change_type = models.CharField(max_length=50)
def __str__(self):
return "{}".format(self.change_type)
class Meta:
ordering = ['change_type']
class City(models.Model):
city_name = models.CharField(max_length=30)
class MonthFolder(models.Model):
folder_name = models.CharField(max_length=100)
folder_path = models.TextField()
completed = models.BooleanField(default=False)
create_date = models.DateTimeField(default=timezone.now)
edit_date = models.DateTimeField(auto_now=True)
class Meta:
ordering = ["id"]
# Create your models here.
class Geodatabase(models.Model):
folder = models.ForeignKey(
MonthFolder
)
city = models.ForeignKey(
City
)
geodatabase_name = models.CharField(max_length=200)
create_date = models.DateTimeField(default=timezone.now)
edit_date = models.DateTimeField(auto_now=True)
def __str__(self):
return "{}".format(self.geodatabase_name)
class Meta:
ordering = ['city']
class FeatureClass(models.Model):
geodatabase = models.ForeignKey(
Geodatabase,
)
feature_type = models.ForeignKey(
FeatureType,
)
change_type = models.ForeignKey(
ChangeType,
)
create_date = models.DateTimeField(default=timezone.now)
edit_date = models.DateTimeField(auto_now=True)
class Meta:
ordering = ['feature_type']
class Feature(models.Model):
feature_class = models.ForeignKey(FeatureClass)
rmwid = models.CharField(max_length=25)
qc_comments = models.CharField(max_length=200, null=True)
qc_approved = models.NullBooleanField()
class Meta:
ordering = ['rmwid']
class FeatureChange(models.Model):
feature = models.ForeignKey(
Feature,
)
change_field = models.CharField(max_length=100, null=True, blank=True)
old_value = models.CharField(max_length=255, null=True, blank=True)
new_value = models.CharField(max_length=255, null=True, blank=True)
|
class Building:
graph = None
floors = None
def __init__(self, graph, floors):
self.graph = graph
self.floors = floors
class Graph:
points = []
connections = []
def __init__(self, points, connections):
self.points = points
self.connections = connections
# download data from db is planned
class Point:
id = -1
name = None
floor_index = -1
x = -1
y = -1
def __init__(self, id, name, floor_index, x, y):
self.id = id
self.name = name
self.floor_index = floor_index
self.x = x
self.y = y
class PointConnection:
point1 = None
point2 = None
connection_weight = -1
floor_index = -1
trans_floor_marker = False
def __init__(self, point1, point2, connection_weight, floor_index):
self.point1 = point1
self.point2 = point2
self.connection_weight = connection_weight
self.floor_index = floor_index
class Floor:
floor_index = -1
picture_path = None
def __init__(self, floor_index, picture_path):
self.floor_index = floor_index
self.picture_path = picture_path
class Path:
points = []
connections = []
floors = []
def __init__(self, points, connections, floors):
self.points = points
self.connections = connections
self.floors = floors
class Router:
building = None
def __init__(self, building):
self.building = building
    def find_best_path(self, start, finish):
        # TODO: implement the search with Dijkstra's algorithm over self.building.graph
        best_path = Path([], [], [])  # placeholder empty path until the search is implemented
        return best_path  # return an instance of class Path
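# A minimal sketch of how find_best_path could be implemented with Dijkstra's algorithm over
# the Graph/PointConnection structures above. This is an illustration under the assumptions
# that connections are undirected, connection_weight is a non-negative cost, and finish is
# reachable from start; it is not the project's actual implementation.
import heapq
def dijkstra_sketch(graph, start_id, finish_id):
    # adjacency list: point id -> list of (neighbour id, weight, connection)
    adjacency = {p.id: [] for p in graph.points}
    for conn in graph.connections:
        adjacency[conn.point1.id].append((conn.point2.id, conn.connection_weight, conn))
        adjacency[conn.point2.id].append((conn.point1.id, conn.connection_weight, conn))
    distances = {p.id: float('inf') for p in graph.points}
    previous = {}  # point id -> (previous point id, connection used)
    distances[start_id] = 0
    queue = [(0, start_id)]
    while queue:
        dist, current = heapq.heappop(queue)
        if current == finish_id:
            break
        if dist > distances[current]:
            continue  # stale queue entry
        for neighbour, weight, conn in adjacency[current]:
            candidate = dist + weight
            if candidate < distances[neighbour]:
                distances[neighbour] = candidate
                previous[neighbour] = (current, conn)
                heapq.heappush(queue, (candidate, neighbour))
    # walk back from finish to start to recover the ordered path
    point_by_id = {p.id: p for p in graph.points}
    path_points, path_connections = [point_by_id[finish_id]], []
    node = finish_id
    while node != start_id:
        node, conn = previous[node]
        path_points.append(point_by_id[node])
        path_connections.append(conn)
    path_points.reverse()
    path_connections.reverse()
    floors = sorted({p.floor_index for p in path_points})  # floor indices crossed by the path
    return Path(path_points, path_connections, floors)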
|
# Problem description: http://www.geeksforgeeks.org/partition-a-set-into-two-subsets-such-that-the-difference-of-subset-sums-is-minimum/
def find_minimum_partition(array, set1, total_sum):
# construct all possible values for set 1 from array
if not array:
sum_set1 = sum(set1)
sum_set2 = total_sum - sum_set1
return abs(sum_set1 - sum_set2)
else:
return min(find_minimum_partition(array[:-1], set1 + [array[-1]], total_sum),
find_minimum_partition(array[:-1], set1, total_sum))
arr = [3, 1, 4, 2, 2, 1]
print(find_minimum_partition(arr, [], sum(arr)))
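# The recursive search above is exponential in len(array). A common alternative (a sketch of the
# standard subset-sum dynamic program, under the assumption that all values are non-negative
# integers) runs in roughly O(len(array) * total_sum):
def find_minimum_partition_dp(array):
    total_sum = sum(array)
    reachable = {0}  # subset sums achievable so far
    for value in array:
        reachable |= {s + value for s in reachable}
    # the best split puts one subset as close to total_sum / 2 as possible
    return min(abs(total_sum - 2 * s) for s in reachable)
assert find_minimum_partition_dp(arr) == find_minimum_partition(arr, [], sum(arr))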
|
# -*- coding: utf-8 -*-
class Solution:
    def calculateMinimumHP(self, dungeon):
        # bottom-up DP over a single row: result[j] holds the minimum HP needed to
        # enter cell (i, j) and still reach the bottom-right cell alive.
        result = [float("inf") for _ in range(len(dungeon[0]))]
        result[-1] = 1
        for i in reversed(range(len(dungeon))):
            result[-1] = max(result[-1] - dungeon[i][-1], 1)
            for j in reversed(range(len(dungeon[0]) - 1)):
                # need enough HP for the better of the two onward moves (right or down)
                result[j] = max(min(result[j], result[j + 1]) - dungeon[i][j], 1)
        return result[0]
if __name__ == "__main__":
solution = Solution()
assert 7 == solution.calculateMinimumHP(
[
[-2, -3, 3],
[-5, -10, 1],
[10, 30, -5],
]
)
|
from django.http import HttpResponse
from django.contrib.auth import authenticate,login,get_user_model
from django.shortcuts import render,redirect
from .forms import ContactForm,LoginForm,RegisterForm
# Create your views here.
def home_page(request):
context = {
"title":"Hello world!",
"content":"Welcome to home page",
}
# if request.user.is_authenticated()
return render(request,"home_page.html",context)
def about_page(request):
context = {
"title":"About Page",
"content":"Welcome to about page"
}
return render(request,"home_page.html",context)
def content_page(request):
contact_form=ContactForm(request.POST or None)
context = {
"title":"Content Page",
"content":"Welcome to content page",
"form":contact_form
}
if contact_form.is_valid():
print(contact_form.cleaned_data)
return render(request,"contact/view.html",context)
def login_page(request):
form=LoginForm(request.POST or None)
# print(request.user.is_authenticated())
context={
"form":form
}
if form.is_valid():
username = form.cleaned_data.get("username")
password = form.cleaned_data.get("password")
seo_specialist = authenticate(username=username, password=password)
if seo_specialist is not None:
login(request,seo_specialist)
context['form']=LoginForm()
return redirect("/login")
return render(request,"auth/login.html",context)
User = get_user_model()
def register_page(request):
form =RegisterForm(request.POST or None)
context={
"form":form
}
if form.is_valid():
print(form.cleaned_data)
username = form.cleaned_data.get("username")
email = form.cleaned_data.get("email")
password = form.cleaned_data.get("password")
new_user=User.objects.create_user(username,email,password)
print(new_user)
return render(request,"auth/register.html",context)
# def index(request):
# html_= """
# <!doctype html>
# <html lang="en">
# <head>
# <!-- Required meta tags -->
# <meta charset="utf-8">
# <meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
# <!-- Bootstrap CSS -->
# <link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.4.1/css/bootstrap.min.css" integrity="sha384-Vkoo8x4CGsO3+Hhxv8T/Q5PaXtkKtu6ug5TOeNV6gBiFeWPGFN9MuhOf23Q9Ifjh" crossorigin="anonymous">
# <title>Hello, world!</title>
# </head>
# <body>
# <h1>Hello, world!</h1>
# <!-- Optional JavaScript -->
# <!-- jQuery first, then Popper.js, then Bootstrap JS -->
# <script src="https://code.jquery.com/jquery-3.4.1.slim.min.js" integrity="sha384-J6qa4849blE2+poT4WnyKhv5vZF5SrPo0iEjwBvKU7imGFAV0wwj1yYfoRSJoZ+n" crossorigin="anonymous"></script>
# <script src="https://cdn.jsdelivr.net/npm/popper.js@1.16.0/dist/umd/popper.min.js" integrity="sha384-Q6E9RHvbIyZFJoft+2mJbHaEWldlvI9IOYy5n3zV9zzTtmI3UksdQRVvoxMfooAo" crossorigin="anonymous"></script>
# <script src="https://stackpath.bootstrapcdn.com/bootstrap/4.4.1/js/bootstrap.min.js" integrity="sha384-wfSDF2E50Y2D1uUdj0O3uMBJnjuUD4Ih7YwaYd1iqfktj0Uod8GCExl3Og8ifwB6" crossorigin="anonymous"></script>
# </body>
# </html>
# """
# return HttpResponse(html_)
|
from django.db import models
from multiselectfield import MultiSelectField
from datetime import datetime, date
# Create your models here.
class Task(models.Model):
name = models.CharField(max_length=255)
genre_choice = [
("Drama", "Drama"),
("Romance", "Romance"),
("Action", "Action"),
("Thriler", "Thriler"),
("Mistery", "Mistery"),
("Fiction", "Fiction"),
("Fantasy", "Fantasy"),
]
genre = MultiSelectField(max_length=255, choices = genre_choice, default='1')
rating = models.CharField(max_length=255, null=True, blank=True)
tanggal = models.DateField(default=datetime.now)
upload_img = models.ImageField(default='', upload_to="images/")
deskripsi = models.TextField(default='')
class Game(models.Model):
judul = models.CharField(max_length=255)
genre = models.CharField(max_length=255)
tanggal = models.CharField(max_length=255)
deskripsi = models.TextField(default='')
class Merch(models.Model):
nama = models.CharField(max_length=255)
kategori = models.CharField(max_length=255)
harga = models.CharField(max_length=255)
stock = models.CharField(max_length=255)
deskripsi = models.TextField(default='')
def tanggal(self):
return self.tanggal.strftime('%Y-%m-%d')
|
#coding:utf-8
#!/usr/bin/env python
from gclib.json import json
from game.models.account import account
from game.models.user import user
from game.routine.gift import gift as giftR
def request(request):
"""
Request to add a friend
"""
usr = request.user
friendid = request.GET['friend_id']
friendid = int(friendid)
if friendid == usr.roleid:
return {'msg':'friend_can_not_self'}
friend = user.get(int(friendid))
if friend != None:
usrNw = usr.getNetwork()
data = usrNw.addFriendRequest(friend)
if data.has_key('msg'):
return data
return {'friend':data}
return {'msg':'friend_not_exist'}
def friend_anwser(request):
"""
Respond to a friend request
"""
usr = request.user
request_id = request.GET['request_id']
option = request.GET['option']
usrNw = usr.getNetwork()
return usrNw.friendRequestAnswer(request_id, option)
def search(request):
"""
Search for a friend
"""
usr = request.user
friendname = request.GET['friend_name']
friendid = account.getRoleid(friendname)
if friendid == 0:
return {'friend': {}}
friend = user.get(friendid)
if friend != None:
return {'friend':friend.getFriendData()}
else:
return {'friend': {}}
def delete(request):
"""
Delete a friend
"""
usr = request.user
friendid = request.GET['friend_id']
usrNw = usr.getNetwork()
friend = user.get(friendid)
if not friend:
return {'msg':'friend_not_exist'}
return usrNw.deleteFriend(friend)
def message(request):
"""
Leave a message
"""
friendid = int(request.GET['friend_id'])
msg = request.GET['message']
usr = request.user
toUser = None
if friendid == usr.roleid:
toUser = usr
else:
toUser = user.get(friendid)
if toUser:
usrNw = usr.getNetwork()
toUserNw = toUser.getNetwork()
if toUserNw.isBan(usr.roleid):
return {'msg':'user_is_in_ban'}
usrNw.sendMessage(toUser, msg)
return {}
return {'msg':'friend_not_exist'}
def get_message(request):
"""
Refresh messages
"""
friendid = request.GET['friend_id']
friend = user.get(friendid)
if not friend:
return {'msg':'friend_not_exist'}
friendNw = friend.getNetwork()
return {'message':friendNw.message}
def message_delete(request):
"""
Delete a message
"""
messageid = request.GET['message_id']
usr = request.user
usrNw = usr.getNetwork()
usrNw.deleteMessage(messageid)
return {'message_delete': messageid}
def mail(request):
"""
Send a private mail
"""
friendid = request.GET['friend_id']
mail = request.GET['mail']
usr = request.user
if friendid == usr.roleid:
return {'msg':'friend_can_not_self'}
toUser = user.get(friendid)
if toUser:
toUserNw = toUser.getNetwork()
if toUserNw.isBan(usr.roleid):
return {'msg':'user_is_in_ban'}
usrNw = usr.getNetwork()
usrNw.sendMail(toUser, mail)
return {}
return {'msg':'friend_not_exist'}
def delete_mail(request):
"""
Delete a private mail
"""
friendid = request.GET['friend_id']
mailid = request.GET['mail_id']
usr = request.user
usrNw = usr.getNetwork()
usrNw.deleteMail(friendid, mailid)
return {'mailid': mailid}
def delete_friend_mail(request):
"""
Delete all private mail from a friend
"""
friendid = request.GET['friend_id']
usr = request.user
usrNw = usr.getNetwork()
usrNw.deleteFriendMail(friendid)
return {}
def email_read(request):
"""
Mark an email as read
"""
emailid = request.GET['id']
usr = request.user
usrNw = usr.getNetwork()
ret = usrNw.emailMarkReaded(emailid)
if ret.has_key(emailid):
return ret
return {'update_email':ret}
def email_open(request):
"""
Open an email
"""
emailid = request.GET['id']
usr = request.user
usrNw = usr.getNetwork()
return usrNw.emailOpen(emailid)
def email_delete(request):
"""
Delete an email
"""
emailid = request.GET['id']
usr = request.user
usrNw = usr.getNetwork()
return usrNw.emailDelete(emailid)
def ban(request):
"""
Block a friend
"""
banid = request.GET['ban_id']
usr = request.user
banUser = user.get(banid)
if banUser:
usrNw = usr.getNetwork()
usrNw.ban(banid, banUser.name)
return {}
return {'msg':'friend_not_exist'}
def yell(request):
"""
World chat
"""
message = request.GET['message']
usr = request.user
usrNw = usr.getNetwork()
return usrNw.yell(usr.name, message)
def gift(request):
"""
Send a gift
"""
item = request.GET['item']
usr = request.user
friendid = request.GET['friendid']
return giftR.send_gift(usr, item, friendid)
def gift_ladder(request):
"""
Gift leaderboard
"""
tp = request.GET['type']
begin = request.GET['begin']
end = request.GET['end']
usr = request.user
return giftR.gift_ladder(usr, tp, begin, end)
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as pat
import math
class Ellipsoid:
def __init__(self, a, b, c, p, xc):
self.a = a
self.b = b
self.c = c
self.p = p
self.xc = xc
self.w = c
fig = plt.figure()
# Draw constraints
self.ax = fig.add_subplot(1, 1, 1)
plt.axis([-2, 6, -2, 6])
x = np.linspace(0, 5)
y = -(a[3][0] / a[3][1]) * x + b[3] / a[3][1]
plt.plot(x, y)
y = -(a[4][0] / a[4][1]) * x + b[4] / a[4][1]
plt.plot(x, y)
plt.axvline(x=1)
def draw_ellipse(self, p, xc):
vals, vecs = np.linalg.eig(p)
e = pat.Ellipse(xy=xc, width=vals[0], height=vals[1], angle=math.degrees(math.asin(vecs[1, 0])), fill=False)  # Ellipse expects degrees
self.ax.add_patch(e)
def solve(self):
p = self.p
xc = self.xc
n = 2
w = self.w
for i in range(60):
self.draw_ellipse(p, xc)
idxs = np.where(np.greater(np.matmul(self.a, xc), self.b))[0]
if len(idxs) > 0:
w = self.a[idxs[0]]
else:
w = self.c
v = np.matmul(p, w) / np.sqrt(np.matmul(w, np.matmul(p, w)))
xc = xc - v / (n + 1)
p = (n * n / (n * n - 1)) * (p - ((2 * np.matmul(np.transpose([v]), [v])) / (n + 1)))
return xc
# Constrains => A*X <= B
a = np.array([[-1, 0], [0, -1], [1, 0], [0, 1], [1, 1]])
b = np.array([0, 0, 1, 1, 1.5])
c = np.array([-1, -2])
p = np.array([[4, 0], [0, 4]])
xc = np.array([0, 0])
ellipse = Ellipsoid(a, b, c, p, xc)
print(ellipse.solve())
plt.show()
|
'''5206. Remove all adjacent duplicates in the string'''
s = "deeedbbcccbdaa"
k = 3
i=0
n=len(s)
while(i+k<=n):
'''if s[i:i+k]==s[i]*k:
#1. Compare using slicing combined with string multiplication
#2. Or compare the characters one by one, count the equal ones and compare the count with k
#3. Or append the characters one by one to a list, apply set() and check
#whether the resulting length is 1
#The latter two approaches take more time than the first one
s=s[:i]+s[i+k:]
n=n-k
if i-k>0:
i-=k
else:
i=0
'''
count = 1
preI = i
while count < k and s[preI] == s[preI + 1]:
count += 1
preI += 1
'''
count=0
for j in range(i,i+k-1):
if s[j+1]==s[j]:
count+=1
else:
break
if j==i+k-2:
count+=1
'''
if count==k:
s=s[:i]+s[i+k:]
n=n-k
if i-k>=0:
i=i-k
else:
i=0
else:
i+=1
print(s)
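# Hedged aside (not part of the original solution): the slicing approach above
# rebuilds the string on every removal. A common O(n) alternative keeps a stack
# of [character, run length] pairs and drops a run once it reaches k.
def remove_k_duplicates(string, k):
    stack = []  # each entry: [character, current run length]
    for ch in string:
        if stack and stack[-1][0] == ch:
            stack[-1][1] += 1
            if stack[-1][1] == k:
                stack.pop()
        else:
            stack.append([ch, 1])
    return "".join(ch * count for ch, count in stack)
print(remove_k_duplicates("deeedbbcccbdaa", 3))  # prints "aa", same as the loop above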
|
import binary
import flipMove
# encode -> binary <- decode
# decode/encode
def language():
language = input('binary, flipMove: ')
if 'binary'.startswith(language.lower()):
if binary.question():
print('done')
if 'flipMove'.startswith(language.lower()):
flipMove()
language()
|
import torchvision.transforms as transforms
import config as cf
from autoaugment import DogBreedPolicy
def transform_training():
transform_train = transforms.Compose([
transforms.Resize(227),
#transforms.RandomCrop(32, padding=4),
# transforms.RandomHorizontalFlip(),
#DogBreedPolicy(),
transforms.ToTensor(),
]) # meanstd transformation
return transform_train
def transform_testing():
transform_test = transforms.Compose([
transforms.Resize((227, 227)),
#transforms.RandomCrop(32, padding=4),
# transforms.RandomHorizontalFlip(),
# CIFAR10Policy(),
transforms.ToTensor(),
])
return transform_test
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-03-24 02:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('treatment_sheets', '0005_auto_20160323_0547'),
]
operations = [
migrations.AlterField(
model_name='txsheet',
name='comment',
field=models.TextField(default='', max_length=10000),
),
]
|
import argparse
import os
import shutil
import time
from pathlib import Path
from PIL import Image
import cv2
import torch
import torch.backends.cudnn as cudnn
from numpy import random
import numpy as np
import math
from models.experimental import attempt_load
from utils.datasets import LoadStreams, LoadImages
from utils.general import (
check_img_size, non_max_suppression, apply_classifier, scale_coords,
xyxy2xywh, plot_one_box, strip_optimizer, set_logging)
from utils.torch_utils import select_device, load_classifier, time_synchronized
from detect import detect
def rotation(img):
h, w = img.shape[:2]
blured = cv2.blur(img,(5,5))
mask = np.zeros((h+2, w+2), np.uint8)
cv2.floodFill(blured, mask, (w-1,h-1), (255,255,255), (2,2,2),(3,3,3),8)
cv2.floodFill(blured, mask, (0,0), (255,255,255), (2,2,2),(3,3,3),8)
cv2.floodFill(blured, mask, (0,h-1), (255,255,255), (2,2,2),(3,3,3),8)
cv2.floodFill(blured, mask, (w-1,0), (255,255,255), (2,2,2),(3,3,3),8)
gray = cv2.cvtColor(blured,cv2.COLOR_BGR2GRAY)
ret, binary = cv2.threshold(gray,250,255,cv2.THRESH_BINARY)
image=[]
for i in range(len(binary)):
for j in range(len(binary[0])):
if(binary[i][j]==0):
image.append([j,i])
image=np.float32(image)
rect = cv2.minAreaRect(image)
box = cv2.boxPoints(rect)
print(box)
left_point_x = np.min(box[:, 0])
right_point_x = np.max(box[:, 0])
top_point_y = np.min(box[:, 1])
bottom_point_y = np.max(box[:, 1])
left_point_y = box[:, 1][np.where(box[:, 0] == left_point_x)][0]
right_point_y = box[:, 1][np.where(box[:, 0] == right_point_x)][0]
top_point_x = box[:, 0][np.where(box[:, 1] == top_point_y)][0]
bottom_point_x = box[:, 0][np.where(box[:, 1] == bottom_point_y)][0]
#
cv2.circle(img,(int(left_point_x),int(left_point_y)),3,(255,0,0),-1)
cv2.circle(img,(int(right_point_x),int(right_point_y)),3,(255,0,0),-1)
cv2.circle(img,(int(top_point_x),int(top_point_y)),3,(255,0,0),-1)
cv2.circle(img,(int(bottom_point_x),int(bottom_point_y)),3,(255,0,0),-1)
cv2.line(img,(int(left_point_x),int(left_point_y)),(int(top_point_x),int(top_point_y)),(255,0,0),2,8)
cv2.line(img,(int(top_point_x),int(top_point_y)),(int(right_point_x),int(right_point_y)),(255,0,0),2,8)
cv2.line(img,(int(right_point_x),int(right_point_y)),(int(bottom_point_x),int(bottom_point_y)),(255,0,0),2,8)
cv2.line(img,(int(bottom_point_x),int(bottom_point_y)),(int(left_point_x),int(left_point_y)),(255,0,0),2,8)
#
if((right_point_x-bottom_point_x)**2+(right_point_y-bottom_point_y)**2>=(right_point_x-top_point_x)**2+(right_point_y-top_point_y)**2):
if(int(box[0][0])==int(box[1][0]) or int(box[0][1])==int(box[1][1])):
angle1=90
else:
angle1=math.atan((bottom_point_y-right_point_y)/(right_point_x-bottom_point_x))/3.1415926535*180
else:
angle1=180+math.atan((right_point_y-top_point_y)/(top_point_x-right_point_x))/3.1415926535*180
#angle=-rect[2]
#cv2.imshow("binary",binary)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
return angle1
###################################################################################
'''def rotation(img):
h, w = img.shape[:2]
blured = cv2.blur(img,(5,5))
mask = np.zeros((h+2, w+2), np.uint8)
cv2.floodFill(blured, mask, (w-1,h-1), (255,255,255), (2,2,2),(3,3,3),8)
cv2.floodFill(blured, mask, (0,0), (255,255,255), (2,2,2),(3,3,3),8)
cv2.floodFill(blured, mask, (0,h-1), (255,255,255), (2,2,2),(3,3,3),8)
cv2.floodFill(blured, mask, (w-1,0), (255,255,255), (2,2,2),(3,3,3),8)
gray = cv2.cvtColor(blured,cv2.COLOR_BGR2GRAY)
ret, binary = cv2.threshold(gray,250,255,cv2.THRESH_BINARY)
image=[]
for i in range(len(binary)):
for j in range(len(binary[0])):
if(binary[i][j]==0):
image.append([j,i])
image=np.float32(image)
rect = cv2.minAreaRect(image)
angle=-rect[2]
return angle
#cv2.imshow("binary", binary)
#cv2.waitKey(0)
#cv2.destroyAllWindows()'''
################################################################################
def detectreal(model,input):
parser = argparse.ArgumentParser()
parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)')
parser.add_argument('--source', type=str, default='./tt1.jpg', help='source') # file/folder, 0 for webcam
parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
parser.add_argument('--conf-thres', type=float, default=0.1, help='object confidence threshold')
parser.add_argument('--iou-thres', type=float, default=0.3, help='IOU threshold for NMS')
parser.add_argument('--device', default='cpu', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--view-img', action='store_true', help='display results')
parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
parser.add_argument('--save-dir', type=str, default='inference/output', help='directory to save results')
parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')
parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
parser.add_argument('--augment', action='store_true', help='augmented inference')
parser.add_argument('--update', action='store_true', help='update all models')
opt = parser.parse_args()
half = False
device = 'cpu'
imgsz = 640
imgsz = check_img_size(imgsz, s=model.stride.max()) # check img_size
dataset = LoadImages(input)
names = model.module.names if hasattr(model, 'module') else model.names
colors = [[random.randint(0, 255) for _ in range(3)] for _ in range(len(names))]
img = torch.from_numpy(dataset.img).to(device)
img = img.half() if half else img.float() # uint8 to fp16/32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
returnvalue = []
if img.ndimension() == 3:
img = img.unsqueeze(0)
pred = model(img, augment=opt.augment)[0]
pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms)
if pred[0] is not None:
    print(pred[0])
    for ix in pred[0]:
        if int(ix[5]) == 67:
            #print(ix[5])
            angle1 = rotation(dataset.img0[int(ix[1]):int(ix[3]), int(ix[0]):int(ix[2])])
            print(angle1)
            ix = ix.numpy()
            returnvalue.append(angle1)
            returnvalue.append(int((ix[0] + ix[2]) / 2))
            returnvalue.append(int((ix[1] + ix[3]) / 2))
            #ix.append(angle1)
            cv2.circle(dataset.img0, (int((ix[0] + ix[2]) / 2), int((ix[1] + ix[3]) / 2)), 5, (255, 0, 0), 5)
            returnimg = Image.fromarray(np.uint8(dataset.img0))
            return np.array(returnvalue), returnimg
return np.array([0, 0]), Image.fromarray(np.uint8(dataset.img0))
|
import WH
conn = WH.WH()
conn.connect()
r = conn.add_allowed_host(40466,"https","nunahealth.com")
print(r.text)
|
#!/usr/bin/env python
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
#
"""
XML as plain object module.
This module is a set of utility functions for parsing XML input
into plain list/dict/string types.
These plain XML objects in turn can be emitted through YAML
for instance as bare YAML without python objects.
The motivating usage is to dump XML to YAML, manually edit
files as YAML, and emit XML output back.
The original XML file is supposed to be preserved except
for comments and if requested spaces between elements.
Note that there are alternative modules with nearly the same
functionality, but none of them both provide simple plain objects
and preserve the initial XML content for non structured XML.
XML namespaces are preserved for attributes/elements
and re-emitted as is.
WARNING: from the original XML documents, DTD specification,
XML comments and processing entities will be discarded.
Also system external entities are not allowed and will
generate an exception.
If one needs some of these features, it's probably
not the right usage for this module. File an issue if unsure.
:Example:
>>> import xmlplain, sys
>>> _ = sys.stdout.write(open("tests/example-1.xml").read())
<example>
<doc>This is an example for xmlobj documentation. </doc>
<content version="beta">
<kind>document</kind>
<class>example</class>
<structured/>
<elements>
<item>Elt 1</item>
<doc>Elt 2</doc>
<item>Elt 3</item>
<doc>Elt 4</doc>
</elements>
</content>
</example>
>>> root = xmlplain.xml_to_obj(open("tests/example-1.xml"), strip_space=True, fold_dict=True)
>>> xmlplain.obj_to_yaml(root, sys.stdout)
example:
doc: 'This is an example for xmlobj documentation. '
content:
'@version': beta
kind: document
class: example
structured: ''
elements:
- item: Elt 1
- doc: Elt 2
- item: Elt 3
- doc: Elt 4
>>> xmlplain.xml_from_obj(root, sys.stdout)
<?xml version="1.0" encoding="UTF-8"?>
<example>
<doc>This is an example for xmlobj documentation. </doc>
<content version="beta">
<kind>document</kind>
<class>example</class>
<structured></structured>
<elements>
<item>Elt 1</item>
<doc>Elt 2</doc>
<item>Elt 3</item>
<doc>Elt 4</doc>
</elements>
</content>
</example>
"""
from __future__ import print_function
__version__ = '1.6.0'
import yaml, sys, xml, io
import contextlib
import xml.sax.saxutils
try:
from collections import OrderedDict
except ImportError: # pragma: no cover # python 2.6 only
from ordereddict import OrderedDict
def xml_to_events(inf, handler=None, encoding="UTF-8", process_content=None):
"""
Generates XML events tuples from the input stream.
The generated events consist of pairs: (type, value)
where type is a single char identifier for the event and
value is a variable length tuple.
Events correspond to xml.sax events with the exception that
attributes are generated as events instead of being part of
the start element event.
The XML stream is parsed with xml.sax.make_parser().
:param inf: input stream file or string or bytestring
:param handler: events receiver implementing the append() method or None,
in which case a new list will be generated
:param encoding: encoding used when the input is a byte string
:param process_content: a function to apply to the cdata content (str for
python3 or unicode for python2) after the XML reader content generation
:return: returns the handler or the generated list
The defined XML events tuples in this module are:
- ("[", ("",)) for the document start
- ("]", ("",)) for the document end
- ("<", (elt_name,)) for an element start
- (">", (elt_name,)) for an element end
- ("@", (attr_name, attr_value)) for an attribute associated to the
last start element
- ("|", (content,)) for a CDATA string content
- ("#", (whitespace,)) for an ignorable whitespace string
.. seealso: xml_from_events(), xml.sax.parse()
"""
class EventGenerator(xml.sax.ContentHandler):
def __init__(self, handler, process_content=None):
self.handler = handler
self.process_content = process_content
def startElement(self, name, attrs):
self.handler.append(("<", (name,)))
# Enforce a stable order as sax attributes are unordered
for attr in sorted(attrs.keys()):
handler.append(("@", (attr, attrs[attr])))
def endElement(self, name):
self.handler.append((">", (name,)))
def startDocument(self):
self.handler.append(("[", ("",)))
def endDocument(self):
self.handler.append(("]", ("",)))
def characters(self, content):
if self.process_content != None:
content = self.process_content(content)
self.handler.append(("|", (content,)))
class EntityResolver(xml.sax.handler.EntityResolver):
def resolveEntity(self, publicId, systemId):
raise Exception("invalid system entity found: (%s, %s)" % (publicId, systemId))
if handler == None: handler = []
parser = xml.sax.make_parser()
parser.setFeature(xml.sax.handler.feature_namespaces, False)
parser.setFeature(xml.sax.handler.feature_namespace_prefixes, False)
parser.setFeature(xml.sax.handler.feature_external_ges, True)
parser.setEntityResolver(EntityResolver())
parser.setContentHandler(EventGenerator(handler, process_content=process_content))
if sys.version_info[0] == 2 and isinstance(inf, unicode):
inf = inf.encode(encoding)
if sys.version_info[0] >= 3 and isinstance(inf, str):
inf = inf.encode(encoding)
if isinstance(inf, bytes):
src = xml.sax.xmlreader.InputSource()
src.setEncoding(encoding)
src.setByteStream(io.BytesIO(inf))
parser.parse(src)
else:
parser.parse(inf)
return handler
def xml_from_events(events, outf=None, encoding='UTF-8', process_content=None):
"""
Outputs the XML document from the events tuples.
From the given events tuples lists as specified in xml_to_events(),
generated a well formed XML document.
The XML output is generated through xml.saxutils.XMLGenerator().
:param events: events tuples list or iterator
:param outf: output file stream or None for bytestring output
:param encoding: output encoding
:param process_content: a function to apply to the cdata content (str for
python3 or unicode for python2) before being processed by the XML
writer
:return: created byte string when outf is None
.. note: unknown events types are ignored
.. seealso: xml_to_events(), xml.sax.saxutils.XMLGenerator()
"""
class SaxGenerator():
def __init__(self, sax_receiver, process_content=None):
self.sax_receiver = sax_receiver
self.process_content = process_content
def append(self, evt):
kind, value = evt
if kind == '[':
self.sax_receiver.startDocument()
self.start = None
return
if kind == '@':
self.start[1][value[0]] = value[1]
return
if self.start != None:
self.sax_receiver.startElement(*self.start)
self.start = None
if kind == ']':
self.sax_receiver.endDocument()
elif kind == '<':
self.start = (value[0], OrderedDict())
elif kind == '>':
self.sax_receiver.endElement(value[0])
elif kind == '|':
content = value[0]
if self.process_content != None:
content = self.process_content(content)
self.sax_receiver.characters(content)
elif kind == '#':
self.sax_receiver.ignorableWhitespace(value[0])
class QuotingWriter():
def __init__(self, parent, encoding):
self.parent = parent
self.input_encoding = encoding
# '\r' must be quoted to &#13; in the output
# XMLGenerator() does not, hence we do it there
self.quoting = [(b'\r', b'&#13;')]
self.binary = True
try:
self.parent.write(b'')
except TypeError as e:
self.binary = False
def write(self, content):
assert(isinstance(content, bytes))
for k, v in self.quoting:
content = content.replace(k, v)
if not self.binary:
content = content.decode(self.input_encoding)
return self.parent.write(content)
getvalue = None
if outf == None:
outf = io.BytesIO()
getvalue = outf.getvalue
writer = QuotingWriter(outf, encoding=encoding)
generator = xml.sax.saxutils.XMLGenerator(writer, encoding=encoding)
generator = SaxGenerator(generator, process_content=process_content)
for evt in events: generator.append(evt)
if getvalue:
return getvalue()
def xml_to_obj(inf, encoding="UTF-8", strip_space=False, fold_dict=False, process_content=None):
"""
Generate a plain object representation from the XML input.
The representation consists of lists of plain
elements which are either XML elements as dict
{ elt_name: children_list } or XML CDATA text contents as
plain strings.
This plain object for a XML document can be emitted to
YAML for instance with no python dependency.
When the 'fold_dict' option is given, an elements list may be
simplified into a multiple-key ordered dict or a single text content.
Note that in this case, some OrderedDict Python objects may be generated;
one should then use the obj_to_yaml() method in order to get a bare
YAML output.
When the 'strip_space' option is given, non-leaf text content
is stripped; this is in most cases safe when managing structured
XML, though note that this changes your XML document content.
Generally one would use this in conjunction with pretty=True
when emitting back the object to XML with xml_from_obj().
:param inf: input stream file or string or bytestring
:param encoding: encoding used when the input is bytes string
:param strip_space: strip spaces from non-leaf text content
:param fold_dict: optimized unambiguous lists of dict into ordered dicts
:param process_content: a function to apply to the cdata content (str for
python3 or unicode for python2) after the XML reader content generation
:return: the root of the generated plain object, actually a single key dict
:Example:
>>> import xmlplain, yaml, sys
>>> root = xmlplain.xml_to_obj(open("tests/example-1.xml"), strip_space=True)
>>> yaml.safe_dump(root, sys.stdout, default_flow_style=False, allow_unicode=True)
example:
- doc: 'This is an example for xmlobj documentation. '
- content:
- '@version': beta
- kind: document
- class: example
- structured: ''
- elements:
- item: Elt 1
- doc: Elt 2
- item: Elt 3
- doc: Elt 4
>>> root = xmlplain.xml_to_obj(open("tests/example-1.xml"), strip_space=True, fold_dict=True)
>>> xmlplain.obj_to_yaml(root, sys.stdout)
example:
doc: 'This is an example for xmlobj documentation. '
content:
'@version': beta
kind: document
class: example
structured: ''
elements:
- item: Elt 1
- doc: Elt 2
- item: Elt 3
- doc: Elt 4
.. seealso: xml_from_obj()
"""
class ObjGenerator():
def __init__(self, strip_space=False, fold_dict=False):
self.value = None
self.strip_space = strip_space
self.fold_dict = fold_dict
def get_value(self):
return self.value
def strip_space_elts(self, elts):
# Only strip space when not a leaf
if len(elts) <= 1: return elts
elts = [e for e in
[s.strip() if not isinstance(s, dict) else s for s in elts]
if e != ""]
return elts
def fold_dict_elts(self, elts):
if len(elts) <= 1: return elts
# Simplify into an OrderedDict if there is no mixed text and no key duplicates
keys = ['#' if not isinstance(e, dict) else list(e.keys())[0] for e in elts]
unique_keys = list(set(keys))
if len(unique_keys) == len(keys) and '#' not in unique_keys:
return OrderedDict([list(elt.items())[0] for elt in elts])
return elts
def fold_trivial(self, elts):
if isinstance(elts, list):
if len(elts) == 0: return ""
if len(elts) == 1: return elts[0]
return elts
def process_children(self):
name, children = list(self.stack[-1].items())[0]
children = self.children()
if self.strip_space: children = self.strip_space_elts(children)
if self.fold_dict: children = self.fold_dict_elts(children)
children = self.fold_trivial(children)
self.stack[-1][name] = children
def children(self):
return list(self.stack[-1].values())[0]
def push_elt(self, name):
elt = {name: []}
self.children().append(elt)
self.stack.append(elt)
def pop_elt(self, name):
self.stack.pop()
def append_attr(self, name, value):
self.children().append({'@%s' % name: value})
def append_content(self, content):
children = self.children()
if len(children) > 0 and not isinstance(children[-1], dict):
children[-1] += content
else:
children.append(content)
def append(self, event):
kind, value = event
if kind == '[':
self.stack = [{'_': []}]
elif kind == ']':
self.value = self.children()[0]
elif kind == '<':
self.push_elt(value[0])
elif kind == '>':
self.process_children()
self.pop_elt(value[0])
elif kind == '@':
self.append_attr(value[0], value[1])
elif kind == '|':
self.append_content(value[0])
return xml_to_events(inf, ObjGenerator(strip_space=strip_space,
fold_dict=fold_dict),
encoding=encoding,
process_content=process_content).get_value()
def events_filter_pretty(events, handler=None, indent=" "):
"""
Augment an XML event list for pretty printing.
This is a filter function taking an event stream and returning the
augmented event stream including ignorable whitespaces for an indented
pretty print. the generated events stream is still a valid events stream
suitable for xml_from_events().
:param events: the input XML events stream
:param handler: events receiver implementing the append() method or None,
in which case a new list will be generated
:param indent: the base indent string, defaults to 2-space indent
:return: the handler if not None or the newly created events list
.. seealso: xml_from_event()
"""
class EventFilterPretty():
def __init__(self, handler, indent=" "):
self.handler = handler
self.indent = indent
def filter(self, events):
events = iter(events)
lookahead = []
depth = 0
while True:
if len(lookahead) == 0:
while True:
e = next(events, None)
if e == None: break
lookahead.append(e)
if e[0] in [">", "]"]: break
if len(lookahead) == 0: break
kinds = list(next(iter(zip(*lookahead))))
if kinds[0] == "<" and not "<" in kinds[1:]:
if depth > 0: self.handler.append(('#', ('\n',)))
self.handler.append(('#', (self.indent * depth,)))
while lookahead[0][0] != ">": self.handler.append(lookahead.pop(0))
self.handler.append(lookahead.pop(0))
if depth == 0: self.handler.append(('#', ('\n',)))
else:
if kinds[0] == "<":
if depth > 0: self.handler.append(('#', ('\n',)))
self.handler.append(('#', (self.indent * depth,)))
self.handler.append(lookahead.pop(0))
depth += 1
elif kinds[0] == ">":
depth -= 1
self.handler.append(('#', ('\n',)))
self.handler.append(('#', (self.indent * depth,)))
self.handler.append(lookahead.pop(0))
if depth == 0: self.handler.append(('#', ('\n',)))
elif kinds[0] == "|":
self.handler.append(('#', ('\n',)))
self.handler.append(('#', (self.indent * depth,)))
self.handler.append(lookahead.pop(0))
else:
self.handler.append(lookahead.pop(0))
assert(next(events, None) == None) # assert all events are consumed
if handler == None: handler = []
EventFilterPretty(handler).filter(events)
return handler
def events_from_obj(root, handler=None):
"""
Creates an XML events stream from plain object.
Generates an XML event stream suitable for xml_from_events() from
a well formed XML plain object and pass it through the append()
method to the receiver or to a newly created list.
:param root: root of the XML plain object
:param handler: events receiver implementing the append() method or None,
in which case a new list will be generated
:return: the handler if not None or the created events list
.. seealso: xml_from_events()
"""
class EventGenerator():
def __init__(self, handler):
self.handler = handler
def gen_content(self, token):
self.handler.append(('|', (token,)))
def gen_elt(self, name, children):
self.handler.append(('<', (name,)))
self.gen_attrs_or_elts(children)
self.handler.append(('>', (name,)))
def gen_attr(self, name, value):
self.handler.append(('@', (name, value)))
def gen_attr_or_elt(self, name, children):
if name[0] == "@":
self.gen_attr(name[1:], children)
else:
self.gen_elt(name, children)
def gen_attrs_or_elts(self, elts):
if isinstance(elts, list):
for elt in elts: self.gen_attrs_or_elts(elt)
elif isinstance(elts, dict):
for name, children in elts.items(): self.gen_attr_or_elt(name, children)
else: self.gen_content(elts)
def generate_from(self, root):
assert(isinstance(root, dict))
assert(len(root.items()) == 1)
(name, children) = list(root.items())[0]
self.handler.append(('[', ("",)))
self.gen_elt(name, children)
self.handler.append((']', ("",)))
if handler == None: handler = []
EventGenerator(handler).generate_from(root)
return handler
def xml_from_obj(root, outf=None, encoding='UTF-8', pretty=True, indent=" ", process_content=None):
"""
Generate an XML output from a plain object
Generates the XML representation of the plain object as
generated by this module.
This function does the opposite of xml_to_obj().
:param root: the root of the plain object
:param outf: output file stream or None for bytestring output
:param encoding: the encoding to be used (default to "UTF-8")
:param pretty: does indentation when True
:param indent: base indent string (default to 2-space)
:param process_content: a function to apply to the cdata content (str for
python3 or unicode for python2) before being processed by the XML
writer
:return: created byte string when outf is None
.. seealso xml_to_obj()
"""
events = events_from_obj(root)
if pretty: events = events_filter_pretty(events, indent=indent)
return xml_from_events(events, outf, encoding=encoding, process_content=process_content)
def obj_to_yaml(root, outf=None, encoding="UTF-8", process_string=None):
"""
Output an XML plain object to yaml.
Output an object to yaml with some specific
management for OrderedDict, Strings and Tuples.
The specific treatment for these objects are
there in order to preserve the XML ordered structure
while generating a bare yaml file without any python object.
Note that reading back the emitted YAML object should be done
though obj_from_yaml() in order to preserve dict order.
To be used as an alternative to a bare yaml.dump if one
needs an editable YAML view of the XML plain object.
:param root: root of the plain object to dump
:param outf: output file stream or None for bytestring output
:param encoding: output bytestring or file stream encoding
:param process_string: a function to apply to strings (str for
python3 or unicode for python2) before the YAML writer output
:return: None or the generated byte string if stream is None
"""
class LocalDumper(yaml.SafeDumper):
def dict_representer(self, data):
return self.represent_dict(data.items())
def represent_scalar(self, tag, value, style=None):
if tag == 'tag:yaml.org,2002:str':
if process_string != None:
value = process_string(value)
# force strings with newlines to output as block mode
if tag == 'tag:yaml.org,2002:str' and style != '|' and value.find('\n') >= 0:
style = '|'
return yaml.SafeDumper.represent_scalar(self, tag, value, style)
LocalDumper.add_representer(OrderedDict, LocalDumper.dict_representer)
return yaml.dump(root, outf, allow_unicode=True, default_flow_style=False,
encoding=encoding, Dumper=LocalDumper)
def obj_from_yaml(inf, encoding="UTF-8", process_string=None):
"""
Read a YAML object, possibly holding a XML plain object.
Returns the XML plain obj from the YAML stream or string.
The dicts read from the YAML stream are stored as
OrderedDict such that the XML plain object elements
are kept in order.
:param inf: input YAML file stream or string or bytestring
:param encoding: encoding of the input when a byte stream or byte string
:param process_string: a function to apply to strings (str for
python3 or unicode for python2) after the YAML reader input
:return: the constructed plain object
"""
class LocalLoader(yaml.SafeLoader):
def map_constructor(self, node):
self.flatten_mapping(node)
return OrderedDict(self.construct_pairs(node))
def str_constructor(self, node):
value = yaml.SafeLoader.construct_yaml_str(self, node)
encoded = False
if sys.version_info[0] == 2 and isinstance(value, bytes):
encoded = True
value = value.decode('ascii')
if process_string != None:
value = process_string(value)
if encoded:
value = value.encode('ascii')
return value
LocalLoader.add_constructor('tag:yaml.org,2002:map', LocalLoader.map_constructor)
LocalLoader.add_constructor('tag:yaml.org,2002:str', LocalLoader.str_constructor)
# Yaml assumes utf-8/utf-16 encoding only on reading,
# hence decode first if the requested encoding is not utf-8
if encoding.upper() != "UTF-8":
if hasattr(inf, 'read'):
inf = inf.read()
if isinstance(inf, bytes):
inf = inf.decode(encoding)
return yaml.load(inf, Loader=LocalLoader)
if __name__ == "__main__":
import argparse, sys, os
if "--doctest" in sys.argv:
import doctest
test = doctest.testmod()
sys.exit(0 if test.failed == 0 else 1)
parser = argparse.ArgumentParser()
parser.add_argument('--version', action='version', version='xmlplain version %s (path: %s, python: %s)' % (__version__, __file__, sys.version.split()[0]))
parser.add_argument("--doctest", action="store_true", help="run documentation tests")
parser.add_argument("--test", action="store_true", help="run in test mode, filter exceptions")
parser.add_argument("--string", action="store_true", help="read from or write to string first")
parser.add_argument("--in-process", nargs=2, help="2 arguments 'str_in' 'str_out' for processing on read")
parser.add_argument("--out-process", nargs=2, help="2 arguments 'str_in' 'str_out' for processing on write")
parser.add_argument("--in-encoding", default="UTF-8", help="encoding for input")
parser.add_argument("--out-encoding", default="UTF-8", help="encoding for output")
parser.add_argument("--bin", action="store_true", help="read from or write to byte stream or string")
parser.add_argument("--inf", default="xml", help="input format, one of: xml, yml, evt (default: xml)")
parser.add_argument("--outf", default="xml", help="output format, one of: xml, yml, evt, py (default: xml)")
parser.add_argument("--pretty", action='store_true', help="pretty parse/unparse")
parser.add_argument("--filter", default="obj", help="intermefdiate filter, one of: obj, evt (default: obj)")
parser.add_argument("input", nargs='?', help="input file or stdin")
parser.add_argument("output", nargs='?', help="output file or stdout")
args = parser.parse_args()
if args.inf not in ["xml", "yml", "py"]: parser.exit(2, "%s: error: argument to --inf is invalid\n" % parser.prog)
if args.outf not in ["xml", "yml", "py"]: parser.exit(2, "%s: error: argument to --outf is invalid\n" % parser.prog)
if args.filter not in ["obj", "evt"]: parser.exit(2, "%s: error: argument to --filter is invalid\n" % parser.prog)
if args.filter == "evt" and args.inf not in ["xml", "py"]: parser.exit(2, "%s: error: input format incompatible with filter\n" % parser.prog)
if args.filter == "evt" and args.outf not in ["xml", "py"]: parser.exit(2, "%s: error: output format incompatible with filter\n" % parser.prog)
if args.input == None or args.input == "-": args.input = sys.stdin
else: args.input = open(args.input, "rb") if args.bin else open(args.input, "r")
if args.output == None or args.output == "-": args.output = sys.stdout
else: args.output = open(args.output, "wb") if args.bin else open(args.output, "w")
in_process = None
if args.in_process:
in_process = lambda x: x.replace(args.in_process[0], args.in_process[1])
out_process = None
if args.out_process:
out_process = lambda x: x.replace(args.out_process[0], args.out_process[1])
if args.inf == "py":
if args.filter == "obj":
root = eval(args.input.read())
else:
events = eval(args.input.read())
elif args.inf == "xml":
if args.string:
args.input = args.input.read()
if not args.bin and isinstance(args.input, bytes):
args.input = args.input.decode(args.in_encoding)
if args.filter == "evt":
if not args.test:
events = xml_to_events(args.input, process_content=in_process, encoding=args.in_encoding)
else:
try:
events = xml_to_events(args.input, process_content=in_process, encoding=args.in_encoding)
except Exception as e:
events = events_from_obj({ "exception": str(e).encode("utf-8").decode("utf-8")})
else:
if not args.test:
root = xml_to_obj(args.input, strip_space=args.pretty, fold_dict=args.pretty,
process_content=in_process, encoding=args.in_encoding)
else:
try:
root = xml_to_obj(args.input, strip_space=args.pretty, fold_dict=args.pretty,
process_content=in_process, encoding=args.in_encoding)
except Exception as e:
root = { "exception": str(e).encode("utf-8").decode("utf-8")}
elif args.inf == "yml":
if args.string:
args.input = args.input.read()
if not args.bin and isinstance(args.input, bytes):
args.input = args.input.decode(args.in_encoding)
root = obj_from_yaml(args.input, encoding=args.in_encoding, process_string=in_process)
if args.outf == "xml":
if args.filter == "obj":
if args.string:
string = xml_from_obj(root, outf=None, pretty=args.pretty, process_content=out_process, encoding=args.out_encoding)
if sys.version_info[0] >= 3 and args.bin == False: string = string.decode(args.out_encoding)
args.output.write(string)
else:
xml_from_obj(root, args.output, pretty=args.pretty, process_content=out_process, encoding=args.out_encoding)
else:
xml_from_events(events, args.output, process_content=out_process, encoding=args.out_encoding)
elif args.outf == "yml":
if args.filter == "obj":
if args.string:
string = obj_to_yaml(root, outf=None, encoding=args.out_encoding, process_string=out_process)
if sys.version_info[0] >= 3 and args.bin == False: string = string.decode(args.out_encoding)
args.output.write(string)
else:
obj_to_yaml(root, args.output, encoding=args.out_encoding, process_string=out_process)
elif args.outf == "py":
if args.filter == "obj":
args.output.write(str(root))
else:
args.output.write(str(events))
|
import sys
import rosbag
if __name__ == '__main__':
inputfile = sys.argv[1]
if len(sys.argv) == 3:
outputfile = sys.argv[2]
else:
outputfile = inputfile[:-4] + '-converted.bag'
with rosbag.Bag(outputfile, 'w') as outbag:
for topic, msg, t in rosbag.Bag(inputfile).read_messages():
if 'merged_map' in topic:
msg.id = msg.id.replace('RoughOcTree-', 'RoughOcTree-S-')
outbag.write(topic, msg, t)
|
#Batch export of orthophotos based on individual cameras or user selected cameras
#creates custom menu item
#compatibility Agisoft PhotoScan Pro 1.1.0
#no arguments required
import os
import time
import random
import PhotoScan
from PySide import QtCore, QtGui
def intersect(p0, pn, l0, l):
    # ray/plane intersection: plane through point p0 with normal pn, ray starting at l0 along direction l
    d = ((p0 - l0) * pn) / (l * pn)
    return d * l + l0
class ExportOrthoDlg(QtGui.QDialog):
def __init__(self, parent):
QtGui.QDialog.__init__(self, parent)
self.blend_types = {"Average": PhotoScan.BlendingMode.AverageBlending, "Mosaic": PhotoScan.BlendingMode.MosaicBlending, "Min intensity": PhotoScan.BlendingMode.MinBlending, "Max Intensity": PhotoScan.BlendingMode.MaxBlending}
self.setWindowTitle("Export individual orthophotos")
self.btnQuit = QtGui.QPushButton("Quit")
self.btnQuit.setFixedSize(130,50)
self.btnP1 = QtGui.QPushButton("Export")
self.btnP1.setFixedSize(130,50)
self.pBar = QtGui.QProgressBar()
self.pBar.setTextVisible(False)
self.pBar.setFixedSize(150, 50)
self.resTxt = QtGui.QLabel()
self.resTxt.setText("Export resolution (m/pix):")
self.resTxt.setFixedSize(130, 25)
self.blendTxt = QtGui.QLabel()
self.blendTxt.setText("Blending mode:")
self.blendTxt.setFixedSize(130, 25)
self.blendCmb = QtGui.QComboBox() #texture type values
self.blendCmb.setFixedSize(100, 25)
for type in self.blend_types.keys():
self.blendCmb.addItem(type)
self.resEdt = QtGui.QLineEdit()
self.resEdt.setPlaceholderText("export resolution (m/pix), e.g 0.01")
self.resEdt.setFixedSize(100, 25)
self.selTxt = QtGui.QLabel()
self.selTxt.setText("Export for:")
self.selTxt.setFixedSize(100, 25)
self.radioBtn_all = QtGui.QRadioButton("all cameras")
self.radioBtn_sel = QtGui.QRadioButton("selected cameras")
self.radioBtn_rnd = QtGui.QRadioButton("random 10 cameras")
self.radioBtn_all.setChecked(True)
self.radioBtn_rnd.setChecked(False)
self.radioBtn_sel.setChecked(False)
layout = QtGui.QGridLayout() #creating layout
layout.addWidget(self.resTxt, 0, 1)
layout.addWidget(self.resEdt, 0, 2)
layout.addWidget(self.blendTxt, 1, 1)
layout.addWidget(self.blendCmb, 1, 2)
layout.addWidget(self.selTxt, 0, 0)
layout.addWidget(self.radioBtn_all, 1, 0)
layout.addWidget(self.radioBtn_sel, 2, 0)
layout.addWidget(self.radioBtn_rnd, 3, 0)
layout.addWidget(self.btnP1, 4, 1)
layout.addWidget(self.btnQuit, 4, 2)
layout.addWidget(self.pBar, 3, 0, 5, 1)
self.setLayout(layout)
proc_exp = lambda : self.exp_ortho()
QtCore.QObject.connect(self.btnP1, QtCore.SIGNAL("clicked()"), proc_exp)
QtCore.QObject.connect(self.btnQuit, QtCore.SIGNAL("clicked()"), self, QtCore.SLOT("reject()"))
self.exec()
def surf_height(self, chunk, photo):
points_h = list()
point_cloud = chunk.point_cloud
points = point_cloud.points
npoints = len(points)
num_valid = 0
point_index = 0
for proj in point_cloud.projections[photo]:
track_id = proj.track_id
while point_index < npoints and points[point_index].track_id < track_id:
point_index += 1
if point_index < npoints and points[point_index].track_id == track_id:
if not points[point_index].valid:
continue
v = points[point_index].coord
vt = chunk.transform.matrix.mulp(v)
if chunk.crs:
vt = chunk.crs.project(vt)
points_h.append(vt[2])
num_valid += 1
points_h.sort()
height = points_h[num_valid // 2]
return height
def exp_ortho(self):
doc = PhotoScan.app.document
chunk = doc.chunk
path = doc.path.rsplit("\\", 1)[0]
if not chunk.model:
PhotoScan.app.messageBox("No mesh generated!\n")
return False
try:
resolution = float(self.resEdt.text())
except(ValueError):
PhotoScan.app.messageBox("Incorrect export resolution! Please use point delimiter.\n")
print("Script aborted.")
return False
print("Export started...") #information message
self.btnP1.setDisabled(True)
self.btnQuit.setDisabled(True)
self.pBar.setMinimum(0)
self.pBar.setMaximum(100)
export_list = list()
if self.radioBtn_sel.isChecked():
for photo in chunk.cameras:
if photo.selected:
export_list.append(photo)
elif self.radioBtn_all.isChecked():
export_list = list(chunk.cameras)
elif self.radioBtn_rnd.isChecked():
random_cams = random.sample(range(len(chunk.cameras)), 10) #number of random cameras
for i in range(0, len(random_cams)):
export_list.append(chunk.cameras[random_cams[i]])
for photo in chunk.cameras:
photo.enabled = False
blending_mode = self.blend_types[self.blendCmb.currentText()]
processed = 0
t0 = time.time()
for i in range (0, len(chunk.cameras)):
photo = chunk.cameras[i]
photo.enabled = False
PhotoScan.app.update()
for photo in export_list:
if not photo.transform:
continue
x0 = x1 = x2 = x3 = PhotoScan.Vector((0.0,0.0,0.0))
width = photo.sensor.width
height = photo.sensor.height
calibration = photo.sensor.calibration
# vectors corresponding to photo corners
v0 = PhotoScan.Vector(( -calibration.cx / calibration.fx, -calibration.cy / calibration.fy, 1))
v1 = PhotoScan.Vector(( (width - calibration.cx) / calibration.fx, -calibration.cy / calibration.fy, 1))
v2 = PhotoScan.Vector(( -calibration.cx / calibration.fx, (height - calibration.cy) / calibration.fy, 1))
v3 = PhotoScan.Vector(( (width - calibration.cx) / calibration.fx, (height - calibration.cy) / calibration.fy, 1))
vc = photo.center
v0.size = v1.size = v2.size = v3.size = vc.size = 4
v0[3] = v1[3] = v2[3] = v3[3] = 0
vc[3] = 1
M = chunk.transform.matrix * photo.transform
v0_gc = M * v0
v1_gc = M * v1
v2_gc = M * v2
v3_gc = M * v3
vc_gc = chunk.transform.matrix * vc
v0_gc.size = v1_gc.size = v2_gc.size = v3_gc.size = vc_gc.size = 3
# surface normal
cen_p = photo.center
cen_t = chunk.transform.matrix.mulp(cen_p)
if chunk.crs:
cen_t = chunk.crs.project(cen_t)
h = self.surf_height(chunk, photo)
vloc = PhotoScan.Vector((cen_t[0], cen_t[1], h))
vloc_h = PhotoScan.Vector((cen_t[0], cen_t[1], h))
vloc_h[2] += 1
if chunk.crs:
vloc_gc = chunk.crs.unproject(vloc)
vloc_h_gc = chunk.crs.unproject(vloc_h)
surf_n = vloc_h_gc - vloc_gc
else:
vloc_gc = vloc
vloc_h_gc = vloc_h
surf_n = vloc_h - vloc
surf_n.normalize()
v0_gc.normalize()
v1_gc.normalize()
v2_gc.normalize()
v3_gc.normalize()
#intersection with the surface
x0 = intersect(vloc_gc, surf_n, vc_gc, v0_gc)
x1 = intersect(vloc_gc, surf_n, vc_gc, v1_gc)
x2 = intersect(vloc_gc, surf_n, vc_gc, v2_gc)
x3 = intersect(vloc_gc, surf_n, vc_gc, v3_gc)
if chunk.crs:
x0 = chunk.crs.project(x0)
x1 = chunk.crs.project(x1)
x2 = chunk.crs.project(x2)
x3 = chunk.crs.project(x3)
x_0 = min(x0[0], x1[0], x2[0], x3[0])
x_1 = max(x0[0], x1[0], x2[0], x3[0])
y_0 = min(x0[1], x1[1], x2[1], x3[1])
y_1 = max(x0[1], x1[1], x2[1], x3[1])
x_0 -= (x_1 - x_0) / 20.
x_1 += (x_1 - x_0) / 20.
y_0 -= (y_1 - y_0) / 20.
y_1 += (y_1 - y_0) / 20.
reg = (x_0, y_0, x_1, y_1)
photo.enabled = True
PhotoScan.app.update()
p_name = photo.photo.path.rsplit("/", 1)[1].rsplit(".",1)[0]
p_name = "ortho_" + p_name
if chunk.crs:
proj = chunk.crs ##export in chunk coordinate system
else:
proj = PhotoScan.Matrix().diag([1,1,1,1]) #TopXY
d_x = d_y = resolution
#recalculating WGS84 resolution from degrees into meters if required
if chunk.crs:
if not ('PROJCS' in proj.wkt):
crd = photo.reference.location
#longitude
v1 = PhotoScan.Vector((crd[0], crd[1], 0) )
v2 = PhotoScan.Vector((crd[0] + 0.001, crd[1], 0))
vm1 = chunk.crs.unproject(v1)
vm2 = chunk.crs.unproject(v2)
res_x = (vm2 - vm1).norm() * 1000
#latitude
v2 = PhotoScan.Vector( (crd[0], crd[1] + 0.001, 0))
vm2 = chunk.crs.unproject(v2)
res_y = (vm2 - vm1).norm() * 1000
pixel_x = pixel_y = resolution #export resolution (meters/pix)
d_x = pixel_x / res_x
d_y = pixel_y / res_y
if chunk.exportOrthophoto(path + "\\" + p_name + ".tif", format = "tif", blending = blending_mode, color_correction = False, projection = proj, region = reg, dx = d_x, dy = d_y, write_world = True):
processed +=1
photo.enabled = False
self.pBar.setValue(int(processed / len(export_list) * 100))
for i in range (0, len(chunk.cameras)):
photo = chunk.cameras[i]
photo.enabled = True
PhotoScan.app.update()
self.btnP1.setDisabled(False)
self.btnQuit.setDisabled(False)
t1 = time.time()
t1 -= t0
t1 = int(t1)
PhotoScan.app.messageBox("Processing finished.\nProcessed "+ str(processed) +" images to orthophotos.\nProcessing time: "+ str(t1) +" seconds.\nPress OK.") #information message
return 1
def main():
global doc
doc = PhotoScan.app.document
app = QtGui.QApplication.instance()
parent = app.activeWindow()
dlg = ExportOrthoDlg(parent)
PhotoScan.app.addMenuItem("Custom/Export individual orthophotos", main)
|
from django.shortcuts import render, reverse
from .forms import CommentForm
from django.core import serializers
from django.shortcuts import get_object_or_404
from django.http import HttpResponseRedirect, JsonResponse
from posts.models import Post
def comment_create_view(request, post_id , *args, **kwargs):
posts = get_object_or_404(Post, id=post_id)
if request.is_ajax() and request.method == 'POST':
form = CommentForm(request.POST)
if form.is_valid():
instance = form.save(commit=False)
instance.post = posts
instance.user = request.user
instance.save()
ser_instance = serializers.serialize('json', [instance, ])
return JsonResponse({'instance': ser_instance}, status=200)
else:
return JsonResponse({'error': form.errors}, status=400)
return JsonResponse({'error': ''}, status=400)
# posts = get_object_or_404(Post, id=post_id)
# form = CommentForm(request.POST or None)
# next_url = request.POST.get('next') or None
# print(next_url)
# if form.is_valid():
# comment = form.save(commit=False)
# comment.post = posts
# comment.user = request.user
# comment.save()
# if next_url != None:
# return redirect(next_url)
# form = CommentForm()
# return HttpResponseRedirect(reverse('postdetails', args=[post_id]))
# context = {
# 'form': form
# }
# return render(request, 'comments/comment.html', context)
|
class BuiltinFunctionDemo:
"""
Built-in function demo
"""
def __init__(self):
self._name = None
"""
def get_name(self):
return self._name
def set_name(self, value):
self._name = value
def del_name(self):
del self._name
name = property(get_name, set_name, del_name, "name")
"""
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@name.deleter
def name(self):
del self._name
test = BuiltinFunctionDemo()
test.name = 'anyu'
print(test.name)
del test.name
print(test.name)  # raises AttributeError: the deleter above removed _name
|
# django_file_system_searcher/serializers.py
from rest_framework import serializers
from .models import LightroomImageFileInfo, LightroomCatalog, ImageToFileInfo
class LightroomImageFileInfoSerializer(serializers.ModelSerializer):
class Meta:
model = LightroomImageFileInfo
fields = [
"lightroom_catalog",
"id", "status",
"root_id", "folder_id", "file_id",
"root_name", "root_rel_path_from_catalog",
"folder_path_from_root", "root_absolute_path",
"file_original_name", "file_base_name", "file_extension",
"print_path",
]
class LightroomCatalogSerializer(serializers.ModelSerializer):
class Meta:
model = LightroomCatalog
fields = [
"id", "parent",
"hostname", "database_file_name", "full_database_file_path", "is_backup",
"created",
]
class ImageToFileInfoSerializer(serializers.ModelSerializer):
class Meta:
model = ImageToFileInfo
fields = [
"id", "lightroom_catalog", "lightroom_image_file_info", "file_info",
"certainty", "created",
]
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import tensorflow as tf
import numpy as np
def to_tensor(array, dtype=tf.float32):
    #return tf.convert_to_tensor(array, dtype=dtype)
    if 'tensorflow.python.framework.ops.Tensor' not in str(type(array)):
        return tf.convert_to_tensor(array, dtype=dtype)
    return array
class Struct(object):
def __init__(self, **kwargs):
for key, val in kwargs.items():
setattr(self, key, val)
def to_np(array, dtype=np.float32):
if 'scipy.sparse' in str(type(array)):
array = array.todense()
return np.array(array, dtype=dtype)
def rot_mat_to_euler(rot_mats):
# Converts rotation matrices to Euler angles
# Careful with extreme cases of Euler angles like [0.0, pi, 0.0]
sy = tf.sqrt(rot_mats[:, 0, 0] * rot_mats[:, 0, 0] + rot_mats[:, 1, 0] * rot_mats[:, 1, 0])
return tf.atan2(-rot_mats[:, 2, 0], sy)
|
w = 64
w_ave = 71.9
w_sd = 10.61
cv = w_sd/w_ave
print(cv)
print(1.2)
coefficient1 = w/(w_ave + 2*w_sd)
print(coefficient1)
coefficient2 = w_ave/(w_ave + 2*w_sd)
print(coefficient2)
|
import datetime
import random
class SimpleScheduler(object):
def __init__(self, setting_dict, right_now=None):
self.frequency = setting_dict.get('check_freq', 1) # how often the action occurs (Never, Daily, Weekly, Monthly)
self.specific_time = setting_dict.get('check_time', 0) # whether the action should take place at a specific or random time (boolean)
self.day = setting_dict.get('check_weekday', 0) # the weekday when the action should occur, Monday=0, Sunday=6
self.daynum = setting_dict.get('check_day', 1) # the days from month end that the action should occur [-16, 16]
self.hour = setting_dict.get('check_hour', 3) # the hour the action should occur (integer)
self.minute = setting_dict.get('check_minute', 0) # the minute the action should occur (integer)
self.trigger_time = datetime.datetime.now().replace(year=2224) # the time of the next update check
self.leeway = 15 # the number of minutes past the action time that the action can still take place
if right_now is None:
right_now = datetime.datetime.now()
self.set_trigger(right_now)
def set_trigger(self, right_now):
        # use right_now's year and month
if self.frequency == 1:
            # the initial trigger time will be this day, at the user's specified hour and minute (using defaults if not provided)
# if the user wants a specific time, then use that, otherwise use a random time
self.trigger_time = self.set_trigger_time(right_now)
elif self.frequency == 2:
            # the initial trigger time will be this year and month, but the day is the one the user has chosen, along with the user's
            # specified hour and minute (using defaults if not provided)
right_now_weekday = right_now.weekday()
delta_days = self.day - right_now_weekday
# mon = 0, tue = 1, wed = 2, thu = 3, fri = 4, sat = 5, sun = 6
# if the user wants a specific time, then use that, otherwise use a random time
self.trigger_time = self.set_trigger_time( right_now + datetime.timedelta(days=delta_days) )
elif self.frequency == 3:
            # the initial trigger time will be this year and month, but the day number is the one the user has chosen, along with the user's
            # specified hour and minute (using defaults if not provided)
# End of this current month plus or minus the number of days the user has chosen in settings
month = max([1, (right_now.month + 1) % 13])
year = right_now.year if month != 1 else right_now.year + 1
trigger_day = right_now.replace(year=year, month=month, day = 1) + datetime.timedelta(days=self.daynum-1)
# today, with day replaced by 1 and adding 1 to the month plus the number of days the user has chosen
# minus an extra day to rebase it to month-end
# if the user wants a specific time, then use that, otherwise use a random time
self.trigger_time = self.set_trigger_time(trigger_day)
# if the trigger time is before the current time, then step it to the next period
while self.trigger_time < right_now:
self.step_trigger()
def set_trigger_time(self, trigger_time):
        ''' Applies either the user's desired time, or a random one, to the trigger '''
if self.specific_time:
new_trigger = trigger_time.replace(hour=self.hour, minute=self.minute)
else:
new_trigger = trigger_time.replace(hour=random.randint(0,23), minute=random.randint(0,59))
return new_trigger
def step_trigger(self):
''' Progress the trigger time from its current position to its next position '''
if self.frequency == 1:
            # jump one day ahead from the current trigger date
self.trigger_time = self.trigger_time + datetime.timedelta(days=1)
elif self.frequency == 2:
            # jump 7 days ahead from the current trigger date
self.trigger_time = self.trigger_time + datetime.timedelta(days=7)
elif self.frequency == 3:
if self.daynum > 0:
# if the daynum is 1 to 16 then just add one month to the existing month and set the day to be the users chosen date
month = (self.trigger_time.month % 12) + 1
year = self.trigger_time.year + 1 if month == 1 else self.trigger_time.year
self.trigger_time = self.trigger_time.replace(year=year, month = month, day = self.daynum)
else:
# if the daynum is negative, that is, the user wants the update to run a certain number of days BEFORE month-end,
# then jump to the first day of the month two months ahead of the current one, and then move back one day to get
# next months month-end date, then subtract the number of days the user has chosen
month = (((self.trigger_time.month % 12) + 1) % 12) + 1
if month < 3:
year = self.trigger_time.year + 1
else:
year = self.trigger_time.year
self.trigger_time = self.trigger_time.replace(year=year, month = month, day = 1) + datetime.timedelta(days=self.daynum-1)
def check_trigger(self):
right_now = datetime.datetime.now()
# check if the current time is between the trigger time and the trigger time plus leeway
if self.trigger_time < right_now < self.trigger_time + datetime.timedelta(minutes=self.leeway):
# time is currently after the trigger time, but within the leeway
self.step_trigger()
return True
else:
return False
def test(settings=None):
if settings is not None:
x = settings
else:
x = {'check_freq':3, 'check_time':0, 'check_weekday':1, 'check_day': 5, 'check_hour':22, 'check_minute':00}
right_now = datetime.datetime.now()
for z in range(3700):
right_now += datetime.timedelta(days=1)
        print(z)
        print(right_now)
        s = SimpleScheduler(x, right_now)
        print('%s\n' % s.trigger_time)
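# A minimal polling sketch (hedged: run_update is a hypothetical callback and
# `import time` would be needed in addition to the imports above):
#
#   sched = SimpleScheduler({'check_freq': 1, 'check_time': 1, 'check_hour': 3, 'check_minute': 30})
#   while True:
#       if sched.check_trigger():
#           run_update()
#       time.sleep(60)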
|
import math, random
import pygame as pg
from pygame.sprite import *
from utils import Bullet, media_path
TRANSPARENT = (0, 0, 0, 0)
class Player(Sprite):
def __init__(self, pos, size):
Sprite.__init__(self)
# Create Player Image
self.original_img = self.make_image(size)
self.image = self.original_img.copy()
self.rect = self.image.get_rect(center=pos)
# Player Variables
self.true_pos = list(self.rect.center)
self.angle = 0
self.speed = 200
self.bullets = Group()
self.rapid_fire = 0
# Health
self.max_health = 100
self.health = 100
# Sounds
self.shoot = pg.mixer.Sound(media_path('gunshot.wav'))
self.hit = pg.mixer.Sound(media_path('hit.wav'))
def make_image(self, size):
img = pg.Surface(size).convert_alpha()
img.fill(TRANSPARENT)
rect = img.get_rect()
pg.draw.rect(img, pg.Color('black'), [rect.center[0] - 5, 25, 10, 50])
pg.draw.ellipse(img, pg.Color('black'), rect.inflate(-10, -10))
pg.draw.ellipse(img, pg.Color('tomato'), rect.inflate(-20, -20))
return img
def update(self, dt):
# Keys and mouse
pos = pg.mouse.get_pos()
keys = pg.key.get_pressed()
# Movement
vec = pg.math.Vector2(pos[0] - self.true_pos[0], pos[1] - self.true_pos[1])
if vec.length() > 5:
# Rotate towards the mouse cursor
self.rotate(pos)
# Move towards the mouse cursor
direction = vec.normalize()
if keys[pg.K_w]:
self.true_pos[0] += direction[0] * self.speed * dt
self.true_pos[1] += direction[1] * self.speed * dt
if keys[pg.K_s]:
self.true_pos[0] -= direction[0] * self.speed * dt
self.true_pos[1] -= direction[1] * self.speed * dt
self.rect.center = self.true_pos
# Keep the player within the screen area
self.clamp()
# Update bullets
self.bullets.update(dt)
# Rapid fire:
if pg.mouse.get_pressed()[2]:
if self.rapid_fire % 10 == 0:
self.shoot_bullet()
self.rapid_fire += 1
def rotate(self, pos):
offset = (pos[1] - self.rect.centery, pos[0] - self.rect.centerx)
self.angle = 90 - math.degrees(math.atan2(*offset))
self.image = pg.transform.rotate(self.original_img, self.angle)
self.rect = self.image.get_rect(center=self.rect.center)
def shoot_bullet(self):
self.shoot.play()
pos = pg.mouse.get_pos()
vec = pg.math.Vector2(pos[0] - self.true_pos[0], pos[1] - self.true_pos[1]).normalize()
gun_pos = (self.rect.centerx + (vec.x * 25), self.rect.centery + (vec.y * 25))
self.bullets.add(Bullet(gun_pos, self.angle))
def clamp(self):
screen_rect = pg.display.get_surface().get_rect()
if not screen_rect.contains(self.rect):
self.rect.clamp_ip(screen_rect)
self.true_pos = list(self.rect.center)
def check_collision(self, enemies):
for enemy in enemies:
# Check if enemy has bullets
if enemy.bullets:
for bullet in enemy.bullets.sprites():
if self.rect.colliderect(bullet.rect):
# self.hit.play()
bullet.kill()
self.health -= 5
|
#coding=utf-8
# dict = {'Alice': 2341, 'Beth': 9102, 'Cecil': 3258}
# dict = sorted(dict.items(),key=lambda item:item[1],reverse=1)
# for i in range(0,2):
# print(dict[i])
|
from django.shortcuts import render
from django.http import HttpResponse
from .models import TodoList, Item
# Create your views here.
def home(request, id):
todo_list = TodoList.objects.get(id=id)
item_list = todo_list.item_set.all()
context = {
'todo_list': todo_list,
'item_list': item_list,
}
return render(request, 'main/home.html', context)
|
"""
Exercício 3: Elefantes
Este exercício tem duas partes:
Implemente a função incomodam(n) que devolve uma string contendo "incomodam " (a palavra seguida de um espaço) n vezes. Se n não for um inteiro estritamente positivo, a função deve devolver uma string vazia. Essa função deve ser implementada utilizando recursão.
Utilizando a função acima, implemente a função elefantes(n) que devolve uma string contendo a letra da música "Um elefante incomoda muita gente" de 1 até n elefantes. Se n não for maior que 1, a função deve devolver uma string vazia. Essa função também deve ser implementada utilizando recursão.
Observe que, para um elefante, você deve escrever por extenso e no singular ("Um elefante..."); para os demais, utilize números e o plural ("2 elefantes...").
Dica: lembre-se que é possível juntar strings com o operador "+". Lembre-se também que é possível transformar números em strings com a função str().
Dica: Será que neste caso a base da recursão é diferente de n == 1 n==1?
No exemplo de execução abaixo, note que há uma diferença entre como a string é e como ela é interpretada. Na função print o símbolo "\n" é interpretado como quebra de linha
"""
def incomodam(n):
if n < 1:
return ""
elif n == 1:
return "incomodam "
    else:
        return "incomodam " + incomodam(n-1)
def elefantes(n):
if n < 1:
return ""
    elif n != 1:
        return elefantes(n-1) + f"{n} elefantes {incomodam(n)}muito mais\n{n} elefantes incomodam muita gente\n"
else:
return "Um elefante incomoda muita gente\n"
|
''' This file contains the script to run the Random Forest Classifier.
'''
import Utils
import numpy
# sklearn's old cross_validation module has been replaced by model_selection
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestClassifier
print("Loading training data...")
class_names, y, X = Utils.load_training_data()
# Instantiate a random forest classifier
my_random_forest = RandomForestClassifier(n_estimators=100, n_jobs=3)
# To check for overfitting, run cross-validation on the training data
# cv = number of folds
print("Running cross validations...")
cross_validation_score = cross_val_score(my_random_forest, X, y, cv=5, n_jobs=1)
print("Accuracy of all classes: " + str(numpy.mean(cross_validation_score)))
# Build a forest from training data
print("Building random forest...")
my_random_forest.fit(X,y)
# Now, load test data
print("Loading test data...")
images_for_test, X_test = Utils.load_test_data()
print("Predicting test classes...")
prediction = my_random_forest.predict_proba(X_test)
print("Writing results to a file...")
total_predictions, correct_predictions = Utils.export_results('Results.csv', images_for_test, class_names, prediction)
prediction_accuracy = (correct_predictions/total_predictions)
print('Number of images predicted: {}'.format(total_predictions))
print('Number of correct predictions: {}'.format(correct_predictions))
print('Prediction accuracy: {}'.format(prediction_accuracy))
|
from rest_framework import generics, serializers
from .models import Match
from .serializers import MatchSerializer
class MatchListApi(generics.ListAPIView):
"""
match queryset/list API
"""
model = Match
serializer_class = MatchSerializer
def get_queryset(self):
queryset = self.model.objects.all()
kwargs = {}
for key, vals in self.request.GET.lists():
if key not in [x.name for x in self.model._meta.fields] and key not in ['page_size', 'page', 'ordering', 'teams']:
raise serializers.ValidationError("Invalid query param passed: " + str(key))
for v in vals:
kwargs[key] = v
if 'page_size' in kwargs:
kwargs.pop('page_size')
if 'page' in kwargs:
kwargs.pop('page')
if 'ordering' in kwargs:
kwargs.pop('ordering')
if 'teams' in kwargs:
kwargs['team1__in'] = kwargs['teams'].split(',')
kwargs['team2__in'] = kwargs['teams'].split(',')
kwargs.pop('teams')
print (kwargs)
queryset = queryset.filter(**kwargs)
if self.request.query_params.get('ordering', None) not in [None, ""]:
ordering = self.request.query_params.get('ordering', None)
return queryset.order_by(ordering)
return queryset
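# Example request (hypothetical host and field names; `city` is assumed to be a Match
# field, while team1/team2 are the foreign keys referenced above):
#   GET /matches/?city=Mumbai&teams=CSK,MI&ordering=-id&page=2
# -> filters on city, restricts both team1 and team2 to the listed teams, and orders by id descending.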
|
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 9 18:36:43 2020
@author: Barmando
"""
import flask
import base64
import hashlib
import hmac
import json
import sys
import xmltodict
from flask import request
from flask import Response
from Modules.GUI.GUI2 import Ui_MainWindow
from PyQt5 import QtWidgets
from constants import Twitter_, Spotify_, Twitch_, HTML_, Weather_, TEMP_ON, GUI_, Youtube_, Game_, Raspi_
app = flask.Flask("Webhooks listener")
app.config["DEBUG"] = False
# Logger
# import logging
# logger = logging.getLogger("werkzeug")
# logger.setLevel(logging.ERROR)
quotes = ["But the Earth refused to die",
"Come meet me by the river \n See how time it flows",
"A blood black nothingness began to spin ",
"A system of cells interlinked within cells interlinked within cells interlinked within one stem",
"And dreadfully distinct against the dark, a tall white fountain played",
]
@app.route('/twitch/user/<username>', methods=["GET","POST"])
def notifs_event(username):
raw_data = request.get_data()
try:
data = request.json
except Exception as e:
print(e)
data = {}
return Response(status=400)
signed = False
signature = request.headers.get("x-hub-signature", "=").split("=")[-1]
hash = hmac.new(str.encode(twitch_thread.Client_ID), msg=raw_data, digestmod=hashlib.sha256).hexdigest()
if hash == signature:
signed = True
if request.method == 'GET':
return request.args.get("hub.challenge")
elif signed:
twitch_thread.incoming_data(data, username)
return Response(status=200)
else:
print("Sign ature could not be verified")
return Response(status=401)
@app.route('/twitter/webhooks', methods=["GET", "POST"])
def twitter_requests():
if request.method == 'GET':
crc=request.args.get('crc_token')
validation = hmac.new(
key=bytes(twitter_thread.CONSUMER_SECRET, 'utf-8'),
msg=bytes(crc, 'utf-8'),
digestmod=hashlib.sha256
)
digested = base64.b64encode(validation.digest())
response = {
'response_token': 'sha256=' + format(str(digested)[2:-1])
}
return json.dumps(response)
else:
#print(request.get_json())
twitter_thread.tweetAnalyzer(request.get_json())
return Response(status=200)
@app.route('/youtube/user/<username>', methods=["GET", "POST"])
def youtube_webhooks(username):
if request.method == 'GET':
return Response(str(request.args.get("hub.challenge")), status=200, mimetype="text/plain")
else:
try:
print("Data for" + username + "\n")
data = xmltodict.parse(request.data)
# print(data)
yt_thread.incomming_Data(data)
return Response(status=200)
        except Exception:
            print("Error while parsing XML data")
            return Response(status=200)
if __name__ == "__main__":
appThread = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow, app)
if Twitch_:
twitch_thread = ui.launchTwitchThread()
if Twitter_:
twitter_thread = ui.launchTwitterThread()
if HTML_:
HTML_thread = ui.launchHTMLThread()
if Spotify_:
spotify_thread = ui.launchSpotifyThread()
if Weather_:
weather_thread = ui.launchWeatherThread()
if TEMP_ON:
temp_thread = ui.launchTemperatureThread()
if Youtube_:
yt_thread = ui.launchYoutubeThread()
if Game_:
game_thread = ui.launchGameTrackerThread()
if Raspi_:
raspi_thread = ui.launchRaspiThread()
if GUI_:
MainWindow.show()
sys.exit(appThread.exec_())
|
from multiprocessing import Pool
import numpy as np
import torch
from torchtext import data
"""
Example (a colleciton of text is one)
"""
class Example(data.Example):
@classmethod
def fromlist(cls, data, fields, step=None, noise_generators=None):
ex = super().fromlist(data, fields)
if noise_generators is not None:
for i, (name, field) in enumerate(fields):
if noise_generators[i] is not None:
setattr(ex, name + '_n', noise_generators[i](getattr(
ex, name)))
if step is not None:
setattr(ex, 'id', step)
return ex
"""
Text Field
"""
class Symbols(data.Field):
def __init__(self,
reverse_tokenize,
additional_tokens=None,
noise_gen=None,
**kwargs):
super().__init__(**kwargs)
self.reverse_tokenizer = reverse_tokenize
self.additional_tokens = additional_tokens if additional_tokens is not None else []
self.name = 'symbols'
self.noise_generator = noise_gen
def set_noise_generator(self, noise_gen=None):
self.noise_generator = noise_gen
def apply_noise(self, batch):
return [self.noise_generator(ex) for ex in batch]
def process(self, batch, device=None):
padded = self.pad(batch)
tensor = self.numericalize(padded, device=device)
return tensor
def extend_padding(self, batch, maxlen):
new_batch = batch.new_zeros(batch.size(0), maxlen).fill_(
self.vocab.stoi[self.pad_token])
new_batch[:, :batch.size(1)] = batch
return new_batch
def reverse(self,
batch,
width=1,
return_saved_time=False,
reverse_token=False):
if not self.batch_first:
batch.t_()
with torch.cuda.device_of(batch):
batch = batch.tolist()
batch = [[self.vocab.itos[ind] for ind in ex]
for ex in batch] # denumericalize
def trim(s, t):
sentence = []
for w in s:
if w == t:
break
sentence.append(w)
return sentence
        batch = [trim(ex, self.eos_token)
                 for ex in batch]  # trim past first eos
def filter_special(tok):
return tok not in (self.init_token, self.pad_token)
batch_filtered = [list(filter(filter_special, ex)) for ex in batch]
if not reverse_token:
return batch_filtered
output = [self.reverse_tokenizer(ex) for ex in batch_filtered]
return output
"""
COCO image features field (only useful for image caption experiments)
"""
class Features(data.Field):
def __init__(self, map_size=7, feature_size=512, workers=8, **kwargs):
super().__init__(**kwargs)
self.map_size = map_size
self.feature_size = feature_size
self.name = 'features'
self.reverse_tokenizer = lambda x: x[0]
self.tokenizer = lambda x: [x]
self.data_dir = None
self.noise_generator = None
def set_datapath(self, data_dir):
self.data_dir = data_dir
def process(self, batch, device=None):
if self.data_dir is None:
raise FileNotFoundError('Must set an image path first')
with Pool(8) as pool:
arr = np.array(
pool.map_async(np.load,
[self.data_dir + '/' + x[0]
for x in batch]).get())
tensor = torch.from_numpy(arr).to(device).view(
-1, self.feature_size,
self.map_size * self.map_size).transpose(1, 2).contiguous()
return tensor
def reverse(self, batch):
return ['image features' for _ in range(batch.size(0))]
|
from django.conf.urls.defaults import patterns
urlpatterns = patterns('bluenotepad.public.views',
(r'^dataset/(?P<filename>.+)$', 'dataset'),
)
|
import math
from MyQueue import Queue
class PriorityQueue(Queue):
def __init__(self, _sortFunction = None, _ascending = True, _initialElements = []):
super().__init__()
self.ascending = _ascending
setattr(PriorityQueue, "sortFunction", _sortFunction)
if(_sortFunction is None):
self.sortFunction = self.defaultsortFunction
for el in _initialElements:
self.enqueue(el)
def defaultsortFunction(self, item1, item2):
# item1 must be at a lower level in tree than item2
if(self.ascending):
return item1 < item2
else:
return item1 > item2
def topDownHeapify(self, i):
focus = i
left = 2 * i + 1
right = 2 * i + 2
arr = self._queue
if(left < self.size and self.sortFunction(arr[left], arr[focus])):
focus = left
if(right < self.size and self.sortFunction(arr[right], arr[focus])):
focus = right
if(focus != i):
arr[i], arr[focus] = arr[focus], arr[i]
self.topDownHeapify(focus)
def bottomUpHeapify(self):
i = self.size - 1
arr = self._queue
while(i > 0):
parent_idx = math.floor((i-1)/2)
if(self.sortFunction(arr[i],arr[parent_idx])):
arr[i], arr[parent_idx] = arr[parent_idx], arr[i]
i = parent_idx
self._queue = arr
def enqueue(self, item):
super().enqueue(item)
self.bottomUpHeapify()
    def dequeue(self):
        # Save the highest-priority element, move the last element to the root,
        # shrink the underlying list, then restore the heap property
        # (assumes MyQueue keeps self.size in sync with len(self._queue))
        top = self._queue[0]
        last = self._queue.pop()
        if self.size > 0:
            self._queue[0] = last
            self.topDownHeapify(0)
        return top
def __str__(self):
q = [str(x) for x in self._queue]
return '[' + ','.join(q) + ']'
def MysortFunction(pq, item1, item2):
# item1 must be at a lower level in tree than item2
if(pq.ascending):
return item1['roll'] < item2['roll']
else:
return item1['roll'] > item2['roll']
pq = PriorityQueue(_ascending=False, _initialElements=[9,12,13,22,29,56,85,90])
print(pq)
pq.dequeue()
pq.dequeue()
pq.dequeue()
pq.dequeue()
pq.dequeue()
pq.dequeue()
pq.dequeue()
pq.dequeue()
print(pq.size)
# pq.enqueue({"roll":9, "name": 'U'})
# pq.enqueue({"roll":12, "name": 'Y'})
# pq.enqueue({"roll":13, "name": 'E'})
# pq.enqueue({"roll":21, "name": 'V'})
# pq.enqueue({"roll":22, "name": '0'})
# pq.enqueue({"roll":45, "name": '0'})
# pq.enqueue({"roll":86, "name": 'L'})
# pq.enqueue({"roll":90, "name": 'I'})
print(pq)
|
from dronekit import connect, VehicleMode, LocationGlobalRelative, APIException, Command
import time
import socket
import exceptions
import math
import argparse # to read values (like the connection string) from the command line
from pymavlink import mavutil
#####################functions####
def connectMyCopter():
parser=argparse.ArgumentParser(description='commands')
parser.add_argument('--connect') ##Sec
args = parser.parse_args()
connection_string=args.connect
if not connection_string:
import dronekit_sitl
sitl = dronekit_sitl.start_default()
connection_string = sitl.connection_string()
vehicle=connect(connection_string,wait_ready=True) #To connect the vehicle using ip address and wait ready means it will pass command only when the connection is set up.
return vehicle
def arm_and_takeoff(targetHeight):
while vehicle.is_armable != True:
print ("Waiting for vehicle to become armable")
time.sleep(1)
print("Vehicle is now armable.")
vehicle.mode = VehicleMode("GUIDED")
while vehicle.mode != "GUIDED":
print("wainting for drone to enter GUIDED mode ")
time.sleep(1)
print("Vehicle now in GUIDED mode.")
vehicle.armed = True
while vehicle.armed == False:
print ("Waiting for vehicle to become armed")
time.sleep(1)
print("Look out! props are spinning!!")
vehicle.simple_takeoff(targetHeight) ##meters
while True:
print("Current altitude: %d"%vehicle.location.global_relative_frame.alt)
if vehicle.location.global_relative_frame.alt >= .95*targetHeight:
break
time.sleep(1)
print("Target altitude reached !!")
return None
##send a velocity command with +x being the heading of the drone
def send_local_ned_velocity(vx, vy, vz):
msg = vehicle.message_factory.set_position_target_local_ned_encode(
0,
0,0,
mavutil.mavlink.MAV_FRAME_BODY_OFFSET_NED,
0b0000111111000111, #BITMASK ->Consider only the velocities
0,0,0, #-- POSITION
vx, vy, vz, #--VELOCITY
0,0,0, ##-- ACCELERATIONS
0,0
)
    vehicle.send_mavlink(msg)
vehicle.flush()
##send a velocity command with +x being TRUE North of the earth
def send_global_ned_velocity(vx, vy, vz):
msg = vehicle.message_factory.set_position_target_local_ned_encode(
0, #time_boot_ms(not used )
0,0, #target system, target component
mavutil.mavlink.MAV_FRAME_LOCAL_NED, ##is LOCAL to the earth
0b0000111111000111, #BITMASK ->Consider only the velocities
0,0,0, #-- POSITION
vx, vy, vz, #--VELOCITY in m/s
0,0,0, ##-- ACCELERATIONS (not supported yet, ignored in GCS_Mavlink ???)
0,0 ## yaw, yaw_rate (not supported yet, ignored in GCS_Mavlink ???)
)
    vehicle.send_mavlink(msg)
vehicle.flush()
vehicle =connectMyCopter()
vehicle.wait_ready('autopilot_version')
arm_and_takeoff(10)
time.sleep(2)
counter=0
while counter < 5:
send_local_ned_velocity(5, 0, 0)
time.sleep(1)
print("Moving North relative to front of drone")
counter = counter + 1
time.sleep(2)
counter=0
while counter < 5:
send_local_ned_velocity(0, -5, 0)
time.sleep(1)
print("Moving West relative to front of drone")
counter = counter + 1
counter=0
while counter < 5:
send_global_ned_velocity(5, 0, 0)
time.sleep(1)
print("Moving TRUE North relative to front of drone")
counter = counter + 1
time.sleep(2)
counter=0
while counter < 5:
send_global_ned_velocity(0, -5, 0)
time.sleep(1)
print("Moving TRUE West relative to front of drone")
counter = counter + 1
##UP and DOWN ####
counter=0
while counter < 5:
send_local_ned_velocity(0, 0, -5)
time.sleep(1)
print("Moving UP")
counter = counter + 1
time.sleep(2)
counter=0
while counter < 5:
send_global_ned_velocity(0, 0, 5)
time.sleep(1)
print("Moving DOWN")
counter = counter + 1
while True:
time.sleep(1)
|
import sys
from .cmd_main import run_vvc_command
sys.exit(run_vvc_command(sys.argv[1:]))
|
from pico2d import *
import random
import game_framework
import game_world
from game_object import GameObject
class Missile(GameObject):
# image = None
RUN_SPEED_PPS = 200
def __init__(self, x, y, dx, dy, size):
super(Missile, self).__init__()
self.x, self.y = x, y
self.dx, self.dy = dx, dy
self.size = size
self.w = 2 * size
self.h = 2 * size
self.fps = 8 + random.randint(0, 20)
self.frame = random.randint(0, 23)
self.init_image(Missile, 'fireball.png', 24)
# def draw(self):
# self.image.clip_draw(128 * self.frame, 0, 128, 128, self.x, self.y, 2 * self.size, 2 * self.size)
def update(self):
super(Missile,self).update_frame()
if game_world.isPaused():
return
self.x += Missile.RUN_SPEED_PPS * game_framework.frame_time * self.dx
self.y += Missile.RUN_SPEED_PPS * game_framework.frame_time * self.dy
if self.x < -self.size or \
self.y < -self.size or \
self.x > get_canvas_width() + self.size or \
self.y > get_canvas_height() + self.size:
game_world.remove_object(self)
def isInField(self, width, height):
if (self.x < 0): return False
if (self.y < 0): return False
if (self.x > width): return False
if (self.y > height): return False
return True
|
n = int(input("Enter size of pyramid: "))
for i in range(n,0,-1):
for j in range(i):
print("*",end=' ')
print('\r')
|
import os
import sys
from pathlib import Path
import shlex
import sc2
portconfig = sc2.portconfig.Portconfig()
gameid = os.environ["sc2_match_id"]
# Ensure SC2 gid and write permission
os.chown("/replays", -1, 1500)
os.chmod("/replays", 0o775)
commands = [
[
"cd" # home directory
], [
"cd", "repo"
], [
"python3", "start_bot.py",
os.environ["sc2_map_name"],
os.environ["sc2_races"],
portconfig.as_json,
]
]
if "sc2_step_time_limit" in os.environ:
commands[-1] += ["--step-time-limit", os.environ["sc2_step_time_limit"]]
if "sc2_game_time_limit" in os.environ:
commands[-1] += ["--game-time-limit", os.environ["sc2_game_time_limit"]]
if os.fork() == 0:
commands[-1] += ["--master"]
commands[-1] += ["--log-path", f"/replays/{gameid}_0.log"]
commands[-1] += ["--replay-path", f"/replays/{gameid}_0.SC2Replay"]
os.execlp("runuser", "-l", "user0", "-c", " && ".join(" ".join(shlex.quote(c) for c in cmd) for cmd in commands))
else:
# HACK: Delay the joining client so the host has time to start up
import time; time.sleep(5)
commands[-1] += ["--log-path", f"/replays/{gameid}_1.log"]
commands[-1] += ["--replay-path", f"/replays/{gameid}_1.SC2Replay"]
os.execlp("runuser", "-l", "user1", "-c", " && ".join(" ".join(shlex.quote(c) for c in cmd) for cmd in commands))
|
import uuid
import base64
from cookies import Cookies
from google.appengine.api import memcache
def authenticate(handler):
return check_cookie(handler)
def process_auth(handler, userId):
return check_cookie(handler, userId)
def check_cookie(handler, userId=None):
cookies = Cookies(handler)
    if 'sessionid' in cookies and memcache.get(cookies['sessionid']):
        sessionId = cookies['sessionid']
        userId = str(base64.b64decode(memcache.get(sessionId))).strip()
        memcache.set(sessionId, memcache.get(sessionId))
        return userId
    elif userId:
        uniqueId = str(uuid.uuid1())
        cookies['sessionid'] = uniqueId
        memcache.add(uniqueId, str(base64.b64encode(userId)))
        return userId
return userId
return None
|
from django.conf.urls import url
from django.urls import include, path
from . import views
app_name='notes_app'
urlpatterns = [
url(r'^$',views.all_notes , name='all_notes'),
path("analysis", views.algorithm_analysis, name='algorithm_analysis'),
path("render_pdf", views.render_pdf, name='render_pdf' )
]
|
#concatenate all the features together.
#'note_pairs_wnsimilarity.csv' # wordnet::similarity package
import pandas as pd
features_path='/Users/gary/Documents/2020Fall/IntroNLP/project/'
feature_files=['OntoNotes_SensesPairs.csv','note_pairs_wnsimilarity.csv','OntoNotes_SensesPairs_WNFeatures.csv',
'WN21mapWn16_topic_similarity.csv','WN21mapWn16_WN_Domain_feature.csv','WN_OED_Map_Feature_OntoNotes_SensesPairs.csv']
df_sensepari=pd.read_csv(features_path+feature_files[0])
df_sensepari= df_sensepari[['Pos', 'Sense1', 'Sense2', 'Merge']]
df_sensepari=df_sensepari.drop_duplicates()
print('df_sensepari',len(df_sensepari))
df_wnpackage=pd.read_csv(features_path+feature_files[1])
print('df_wnpackage',len(df_wnpackage))
df_features_tmp = df_sensepari.merge(df_wnpackage,left_on=['Sense1', 'Sense2','Merge'],
right_on=['sense1', 'sense2','merge'], how='left')
df_features_tmp=df_features_tmp[['Pos', 'Sense1', 'Sense2', 'Merge',
'lch', 'hso', 'jcn', 'leskvalue', 'linvalue', 'resvalue', 'vecvalue',
'wupvalue']]
df_features_tmp=df_features_tmp.drop_duplicates()
print('df_features_tmp',len(df_features_tmp))
df_wncorpus=pd.read_csv(features_path+feature_files[2])
print('df_wncorpus',len(df_wncorpus))
df_features_tmp1 = df_features_tmp.merge(df_wncorpus,left_on=['Pos', 'Sense1', 'Sense2', 'Merge'],
right_on=['Pos', 'Sense1', 'Sense2', 'Merge'], how='left')
df_features_tmp1.columns
print('df_features_tmp1',len(df_features_tmp1))
df_features_tmp1=df_features_tmp1[['Pos', 'Sense1', 'Sense2', 'Merge', 'lch', 'hso', 'jcn', 'leskvalue',
'linvalue', 'resvalue', 'vecvalue', 'wupvalue',
'pertainyms', 'antonyms', 'deriv', 'lemmas',
'verbgroup', 'verbframe', 'hyper_min', 'hyper_max']]
df_features_tmp1=df_features_tmp1.drop_duplicates()
#topic feature
df_topfea=pd.read_csv(features_path+feature_files[3])
#replace '.' with '#' (regex=False so the dot is matched literally rather than as a regex wildcard)
df_topfea['sense1_wn21']=df_topfea['sense1_wn21'].str.replace('.','#',regex=False)
df_topfea['sense2_wn21']=df_topfea['sense2_wn21'].str.replace('.','#',regex=False)
df_topfea
print('df_topfea',len(df_topfea))
df_features_tmp2 = df_features_tmp1.merge(df_topfea[['pos','sense1_wn21','sense2_wn21','topic_similarity']],left_on=['Pos', 'Sense1', 'Sense2'],
right_on=['pos', 'sense1_wn21','sense2_wn21'], how='left')
print('df_features_tmp2',len(df_features_tmp2))
df_features_tmp2=df_features_tmp2[['Pos', 'Sense1', 'Sense2', 'Merge', 'lch', 'hso', 'jcn', 'leskvalue',
'linvalue', 'resvalue', 'vecvalue', 'wupvalue', 'pertainyms',
'antonyms', 'deriv', 'lemmas', 'verbgroup', 'verbframe', 'hyper_min',
'hyper_max', 'topic_similarity']]
df_features_tmp2=df_features_tmp2.drop_duplicates()
#domain feature
df_domainfea=pd.read_csv(features_path+feature_files[4])
df_domainfea['sense1_wn21']=df_domainfea['sense1_wn21'].str.replace('.','#',regex=False)
df_domainfea['sense2_wn21']=df_domainfea['sense2_wn21'].str.replace('.','#',regex=False)
print('df_domainfea',len(df_domainfea))
df_features_tmp3 = df_features_tmp2.merge(df_domainfea[['pos','sense1_wn21','sense2_wn21','wn_domain_feature']],left_on=['Pos', 'Sense1', 'Sense2'],
right_on=['pos', 'sense1_wn21','sense2_wn21'], how='left')
df_features_tmp3.columns
print('df_features_tmp3',len(df_features_tmp3))
df_features_tmp3=df_features_tmp3[['Pos', 'Sense1', 'Sense2', 'Merge', 'lch', 'hso', 'jcn', 'leskvalue',
'linvalue', 'resvalue', 'vecvalue', 'wupvalue', 'pertainyms',
'antonyms', 'deriv', 'lemmas', 'verbgroup', 'verbframe', 'hyper_min',
'hyper_max', 'topic_similarity', 'wn_domain_feature']]
df_features_tmp3=df_features_tmp3.drop_duplicates()
#wn-oed mapping feature
df_wnoedfea=pd.read_csv(features_path+feature_files[5])
df_wnoedfea
print('df_wnoedfea',len(df_wnoedfea))
df_features = df_features_tmp3.merge(df_wnoedfea[['Pos', 'Sense1', 'Sense2', 'Merge','wn_oed_feature']],left_on=['Pos', 'Sense1', 'Sense2','Merge'],
right_on=['Pos', 'Sense1', 'Sense2', 'Merge'], how='left')
df_features=df_features[['Pos', 'Sense1', 'Sense2', 'Merge', 'lch', 'hso', 'jcn', 'leskvalue',
'linvalue', 'resvalue', 'vecvalue', 'wupvalue', 'pertainyms',
'antonyms', 'deriv', 'lemmas', 'verbgroup', 'verbframe', 'hyper_min',
'hyper_max', 'topic_similarity', 'wn_domain_feature', 'wn_oed_feature']]
df_features=df_features.drop_duplicates()
df_features
print('df_features',len(df_features))
#Because of NaN values there may still be duplicated rows in df_features;
#drop duplicates again after replacing NaN with other values.
#Update the verb-group feature using results from the Perl script,
#which was used to get the following features: antonyms, deriv, pertainyms, verbgroups
file_path='/Users/gary/Documents/2020Fall/IntroNLP/project/note_pairs_wncourpus_similarity.csv'
df_perl=pd.read_csv(file_path)
df_perl_vg=df_perl[df_perl[' vgrp1']==df_perl[' vgrp2']]
for i in range(len(df_perl_vg)):
sense1=df_perl_vg.loc[df_perl_vg.index[i],'sense1']
sense2=df_perl_vg.loc[df_perl_vg.index[i],' sense2']
df_features.loc[((df_features['Sense1']==sense1)&(df_features['Sense2']==sense2)),'verbgroup']=1
df_features.to_csv('feature_space.csv',index=False)
|
from _beatbox import _tPartnerNS, _tSObjectNS
from _beatbox import Client as BaseClient
from marshall import marshall
from types import TupleType, ListType
from xmltramp import Namespace
import copy
import re
_tSchemaInstanceNS = Namespace('http://www.w3.org/2001/XMLSchema-instance')
_tSchemaNS = Namespace('http://www.w3.org/2001/XMLSchema')
DEFAULT_FIELD_TYPE = "string"
querytyperegx = re.compile('(?:from|FROM) (\S+)')
class QueryRecord(dict):
def __getattr__(self, n):
try:
return self[n]
except KeyError:
return dict.__getattr__(self, n)
def __setattr__(self, n, v):
self[n] = v
class QueryRecordSet(list):
def __init__(self, records, done, size, **kw):
for r in records:
self.append(r)
self.done = done
self.size = size
for k, v in kw.items():
setattr(self, k, v)
@property
def records(self):
return self
def __getitem__(self, n):
if type(n) == type(''):
try:
return getattr(self, n)
except AttributeError, n:
raise KeyError
else:
return list.__getitem__(self, n)
class SObject(object):
def __init__(self, **kw):
for k, v in kw.items():
setattr(self, k, v)
def marshall(self, fieldname, xml):
if self.fields.has_key(fieldname):
field = self.fields[fieldname]
else:
return marshall(DEFAULT_FIELD_TYPE, fieldname, xml)
return field.marshall(xml)
class Client(BaseClient):
cacheTypeDescriptions = False
def __init__(self, serverUrl=None, cacheTypeDescriptions=False):
BaseClient.__init__(self, serverUrl=serverUrl)
self.cacheTypeDescriptions = cacheTypeDescriptions
self.typeDescs = {}
def login(self, username, passwd):
res = BaseClient.login(self, username, passwd)
data = dict()
data['passwordExpired'] = _bool(res[_tPartnerNS.passwordExpired])
data['serverUrl'] = str(res[_tPartnerNS.serverUrl])
data['sessionId'] = str(res[_tPartnerNS.sessionId])
data['userId'] = str(res[_tPartnerNS.userId])
data['userInfo'] = _extractUserInfo(res[_tPartnerNS.userInfo])
return data
def isConnected(self):
""" First pass at a method to check if we're connected or not """
if self.__conn and self.__conn._HTTPConnection__state == 'Idle':
return True
return False
def describeGlobal(self):
res = BaseClient.describeGlobal(self)
data = dict()
data['encoding'] = str(res[_tPartnerNS.encoding])
data['maxBatchSize'] = int(str(res[_tPartnerNS.maxBatchSize]))
sobjects = list()
for r in res[_tPartnerNS.sobjects:]:
d = dict()
d['activateable'] = _bool(r[_tPartnerNS.activateable])
d['createable'] = _bool(r[_tPartnerNS.createable])
d['custom'] = _bool(r[_tPartnerNS.custom])
try:
d['customSetting'] = _bool(r[_tPartnerNS.customSetting])
except KeyError:
pass
d['deletable'] = _bool(r[_tPartnerNS.deletable])
d['deprecatedAndHidden'] = _bool(r[_tPartnerNS.deprecatedAndHidden])
try:
d['feedEnabled'] = _bool(r[_tPartnerNS.feedEnabled])
except KeyError:
pass
d['keyPrefix'] = str(r[_tPartnerNS.keyPrefix])
d['label'] = str(r[_tPartnerNS.label])
d['labelPlural'] = str(r[_tPartnerNS.labelPlural])
d['layoutable'] = _bool(r[_tPartnerNS.layoutable])
d['mergeable'] = _bool(r[_tPartnerNS.mergeable])
d['name'] = str(r[_tPartnerNS.name])
d['queryable'] = _bool(r[_tPartnerNS.queryable])
d['replicateable'] = _bool(r[_tPartnerNS.replicateable])
d['retrieveable'] = _bool(r[_tPartnerNS.retrieveable])
d['searchable'] = _bool(r[_tPartnerNS.searchable])
d['triggerable'] = _bool(r[_tPartnerNS.triggerable])
d['undeletable'] = _bool(r[_tPartnerNS.undeletable])
d['updateable'] = _bool(r[_tPartnerNS.updateable])
sobjects.append(SObject(**d))
data['sobjects'] = sobjects
data['types'] = [str(t) for t in res[_tPartnerNS.types:]]
if not data['types']:
# BBB for code written against API < 17.0
data['types'] = [s.name for s in data['sobjects']]
return data
def describeSObjects(self, sObjectTypes):
res = BaseClient.describeSObjects(self, sObjectTypes)
if type(res) not in (TupleType, ListType):
res = [res]
data = list()
for r in res:
d = dict()
d['activateable'] = _bool(r[_tPartnerNS.activateable])
rawreldata = r[_tPartnerNS.ChildRelationships:]
relinfo = [_extractChildRelInfo(cr) for cr in rawreldata]
d['ChildRelationships'] = relinfo
d['createable'] = _bool(r[_tPartnerNS.createable])
d['custom'] = _bool(r[_tPartnerNS.custom])
try:
d['customSetting'] = _bool(r[_tPartnerNS.customSetting])
except KeyError:
pass
d['deletable'] = _bool(r[_tPartnerNS.deletable])
d['deprecatedAndHidden'] = _bool(r[_tPartnerNS.deprecatedAndHidden])
try:
d['feedEnabled'] = _bool(r[_tPartnerNS.feedEnabled])
except KeyError:
pass
fields = r[_tPartnerNS.fields:]
fields = [_extractFieldInfo(f) for f in fields]
field_map = dict()
for f in fields:
field_map[f.name] = f
d['fields'] = field_map
d['keyPrefix'] = str(r[_tPartnerNS.keyPrefix])
d['label'] = str(r[_tPartnerNS.label])
d['labelPlural'] = str(r[_tPartnerNS.labelPlural])
d['layoutable'] = _bool(r[_tPartnerNS.layoutable])
d['mergeable'] = _bool(r[_tPartnerNS.mergeable])
d['name'] = str(r[_tPartnerNS.name])
d['queryable'] = _bool(r[_tPartnerNS.queryable])
d['recordTypeInfos'] = [_extractRecordTypeInfo(rti) for rti in r[_tPartnerNS.recordTypeInfos:]]
d['replicateable'] = _bool(r[_tPartnerNS.replicateable])
d['retrieveable'] = _bool(r[_tPartnerNS.retrieveable])
d['searchable'] = _bool(r[_tPartnerNS.searchable])
try:
d['triggerable'] = _bool(r[_tPartnerNS.triggerable])
except KeyError:
pass
d['undeletable'] = _bool(r[_tPartnerNS.undeletable])
d['updateable'] = _bool(r[_tPartnerNS.updateable])
d['urlDetail'] = str(r[_tPartnerNS.urlDetail])
d['urlEdit'] = str(r[_tPartnerNS.urlEdit])
d['urlNew'] = str(r[_tPartnerNS.urlNew])
data.append(SObject(**d))
return data
def create(self, sObjects):
preparedObjects = _prepareSObjects(sObjects)
res = BaseClient.create(self, preparedObjects)
if type(res) not in (TupleType, ListType):
res = [res]
data = list()
for r in res:
d = dict()
data.append(d)
d['id'] = str(r[_tPartnerNS.id])
d['success'] = success = _bool(r[_tPartnerNS.success])
if not success:
d['errors'] = [_extractError(e)
for e in r[_tPartnerNS.errors:]]
else:
d['errors'] = list()
return data
def retrieve(self, fields, sObjectType, ids):
resultSet = BaseClient.retrieve(self, fields, sObjectType, ids)
type_data = self.describeSObjects(sObjectType)[0]
if type(resultSet) not in (TupleType, ListType):
if isnil(resultSet):
resultSet = list()
else:
resultSet = [resultSet]
fields = [f.strip() for f in fields.split(',')]
data = list()
for result in resultSet:
d = dict()
data.append(d)
for fname in fields:
d[fname] = type_data.marshall(fname, result)
return data
def update(self, sObjects):
preparedObjects = _prepareSObjects(sObjects)
res = BaseClient.update(self, preparedObjects)
if type(res) not in (TupleType, ListType):
res = [res]
data = list()
for r in res:
d = dict()
data.append(d)
d['id'] = str(r[_tPartnerNS.id])
d['success'] = success = _bool(r[_tPartnerNS.success])
if not success:
d['errors'] = [_extractError(e)
for e in r[_tPartnerNS.errors:]]
else:
d['errors'] = list()
return data
def queryTypesDescriptions(self, types):
"""
"""
types = list(types)
if types:
types_descs = self.describeSObjects(types)
else:
types_descs = []
return dict(map(lambda t, d: (t, d), types, types_descs))
def _extractRecord(self, r):
record = QueryRecord()
if r:
type_data = self.typeDescs[str(r[_tSObjectNS.type])]
for field in r:
fname = str(field._name[1])
if isObject(field):
record[fname] = self._extractRecord(r[field._name:][0])
elif isQueryResult(field):
record[fname] = QueryRecordSet(
records=[self._extractRecord(rec) for rec in field[_tPartnerNS.records:]],
done=field[_tPartnerNS.done],
size=int(str(field[_tPartnerNS.size]))
)
else:
record[fname] = type_data.marshall(fname, r)
return record
def flushTypeDescriptionsCache(self):
self.typeDescs = {}
def query(self, *args, **kw):
if len(args) == 1: # full query string
queryString = args[0]
elif len(args) == 2: # BBB: fields, sObjectType
queryString = 'select %s from %s' % (args[0], args[1])
if 'conditionalExpression' in kw: # BBB: fields, sObjectType, conditionExpression as kwarg
queryString += ' where %s' % (kw['conditionalExpression'])
elif len(args) == 3: # BBB: fields, sObjectType, conditionExpression as positional arg
whereClause = args[2] and (' where %s' % args[2]) or ''
queryString = 'select %s from %s%s' % (args[0], args[1], whereClause)
else:
raise RuntimeError("Wrong number of arguments to query method.")
res = BaseClient.query(self, queryString)
# calculate the union of the sets of record types from each record
types = reduce(lambda a, b: a|b, [getRecordTypes(r) for r in res[_tPartnerNS.records:]], set())
if not self.cacheTypeDescriptions:
self.flushTypeDescriptionsCache()
new_types = types - set(self.typeDescs.keys())
if new_types:
self.typeDescs.update(self.queryTypesDescriptions(new_types))
data = QueryRecordSet(
records=[self._extractRecord(r) for r in res[_tPartnerNS.records:]],
done=_bool(res[_tPartnerNS.done]),
size=int(str(res[_tPartnerNS.size])),
queryLocator=str(res[_tPartnerNS.queryLocator]))
return data
def queryMore(self, queryLocator):
locator = queryLocator
res = BaseClient.queryMore(self, locator)
# calculate the union of the sets of record types from each record
types = reduce(lambda a, b: a | b, [getRecordTypes(r) for r in res[_tPartnerNS.records:]], set())
new_types = types - set(self.typeDescs.keys())
if new_types:
self.typeDescs.update(self.queryTypesDescriptions(new_types))
data = QueryRecordSet(
records=[self._extractRecord(r) for r in res[_tPartnerNS.records:]],
done=_bool(res[_tPartnerNS.done]),
size=int(str(res[_tPartnerNS.size])),
queryLocator=str(res[_tPartnerNS.queryLocator]))
return data
def search(self, sosl):
res = BaseClient.search(self, sosl)
if not self.cacheTypeDescriptions:
self.flushTypeDescriptionsCache()
try:
res = res[_tPartnerNS.searchRecords]
except KeyError:
return []
# calculate the union of the sets of record types from each record
if len(res):
types = reduce(lambda a, b: a | b, [getRecordTypes(r) for r in res], set())
new_types = types - set(self.typeDescs.keys())
if new_types:
self.typeDescs.update(self.queryTypesDescriptions(new_types))
return [self._extractRecord(r) for r in res]
else:
return []
def delete(self, ids):
res = BaseClient.delete(self, ids)
if type(res) not in (TupleType, ListType):
res = [res]
data = list()
for r in res:
d = dict()
data.append(d)
d['id'] = str(r[_tPartnerNS.id])
d['success'] = success = _bool(r[_tPartnerNS.success])
if not success:
d['errors'] = [_extractError(e)
for e in r[_tPartnerNS.errors:]]
else:
d['errors'] = list()
return data
def upsert(self, externalIdName, sObjects):
preparedObjects = _prepareSObjects(sObjects)
res = BaseClient.upsert(self, externalIdName, preparedObjects)
if type(res) not in (TupleType, ListType):
res = [res]
data = list()
for r in res:
d = dict()
data.append(d)
d['id'] = str(r[_tPartnerNS.id])
d['success'] = success = _bool(r[_tPartnerNS.success])
if not success:
d['errors'] = [_extractError(e)
for e in r[_tPartnerNS.errors:]]
else:
d['errors'] = list()
d['isCreated'] = d['created'] = _bool(r[_tPartnerNS.created])
return data
def getDeleted(self, sObjectType, start, end):
res = BaseClient.getDeleted(self, sObjectType, start, end)
res = res[_tPartnerNS.deletedRecords:]
if type(res) not in (TupleType, ListType):
res = [res]
data = list()
for r in res:
d = dict(
id=str(r[_tPartnerNS.id]),
deletedDate=marshall(
'datetime', 'deletedDate', r, ns=_tPartnerNS)
)
data.append(d)
return data
def getUpdated(self, sObjectType, start, end):
res = BaseClient.getUpdated(self, sObjectType, start, end)
res = res[_tPartnerNS.ids:]
if type(res) not in (TupleType, ListType):
res = [res]
return [str(r) for r in res]
def getUserInfo(self):
res = BaseClient.getUserInfo(self)
data = _extractUserInfo(res)
return data
def describeTabs(self):
res = BaseClient.describeTabs(self)
data = list()
for r in res:
tabs = [_extractTab(t) for t in r[_tPartnerNS.tabs:]]
d = dict(
label=str(r[_tPartnerNS.label]),
logoUrl=str(r[_tPartnerNS.logoUrl]),
selected=_bool(r[_tPartnerNS.selected]),
tabs=tabs)
data.append(d)
return data
def describeLayout(self, sObjectType):
raise NotImplementedError
class Field(object):
def __init__(self, **kw):
for k, v in kw.items():
setattr(self, k, v)
def marshall(self, xml):
return marshall(self.type, self.name, xml)
# sObjects can be 1 or a list. If values are python lists or tuples, we
# convert these to strings:
# ['one','two','three'] becomes 'one;two;three'
def _prepareSObjects(sObjects):
def _doPrep(field_dict):
"""Do some prep work converting python types into formats that
Salesforce will accept. This includes converting lists of strings
to "apple;orange;pear" format as well as Null-ing any empty lists
or None values.
"""
fieldsToNull = []
for k, v in field_dict.items():
if v is None:
fieldsToNull.append(k)
field_dict[k] = []
if hasattr(v, '__iter__'):
if len(v) == 0:
fieldsToNull.append(k)
else:
field_dict[k] = ";".join(v)
if 'fieldsToNull' in field_dict:
raise ValueError("fieldsToNull should be populated by the client, not the caller.")
field_dict['fieldsToNull'] = fieldsToNull
sObjectsCopy = copy.deepcopy(sObjects)
if isinstance(sObjectsCopy, dict):
_doPrep(sObjectsCopy)
else:
for listitems in sObjectsCopy:
_doPrep(listitems)
return sObjectsCopy
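# A quick illustration of what _doPrep does to a record before it is sent to
# Salesforce (field names here are hypothetical; multi-value lists become a
# semicolon-joined string and None/empty values are moved into fieldsToNull):
#   _prepareSObjects({'type': 'Contact', 'Interests__c': ['apple', 'orange', 'pear'], 'Phone': None})
#   -> {'type': 'Contact', 'Interests__c': 'apple;orange;pear', 'Phone': [],
#       'fieldsToNull': ['Phone']}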
def _bool(val):
return str(val) == 'true'
def _extractFieldInfo(fdata):
data = dict()
data['autoNumber'] = _bool(fdata[_tPartnerNS.autoNumber])
data['byteLength'] = int(str(fdata[_tPartnerNS.byteLength]))
data['calculated'] = _bool(fdata[_tPartnerNS.calculated])
data['createable'] = _bool(fdata[_tPartnerNS.createable])
data['nillable'] = _bool(fdata[_tPartnerNS.nillable])
data['custom'] = _bool(fdata[_tPartnerNS.custom])
data['defaultedOnCreate'] = _bool(fdata[_tPartnerNS.defaultedOnCreate])
data['digits'] = int(str(fdata[_tPartnerNS.digits]))
data['filterable'] = _bool(fdata[_tPartnerNS.filterable])
try:
data['htmlFormatted'] = _bool(fdata[_tPartnerNS.htmlFormatted])
except KeyError:
data['htmlFormatted'] = False
data['label'] = str(fdata[_tPartnerNS.label])
data['length'] = int(str(fdata[_tPartnerNS.length]))
data['name'] = str(fdata[_tPartnerNS.name])
data['nameField'] = _bool(fdata[_tPartnerNS.nameField])
plValues = fdata[_tPartnerNS.picklistValues:]
data['picklistValues'] = [_extractPicklistEntry(p) for p in plValues]
data['precision'] = int(str(fdata[_tPartnerNS.precision]))
data['referenceTo'] = [str(r) for r in fdata[_tPartnerNS.referenceTo:]]
data['restrictedPicklist'] = _bool(fdata[_tPartnerNS.restrictedPicklist])
data['scale'] = int(str(fdata[_tPartnerNS.scale]))
data['soapType'] = str(fdata[_tPartnerNS.soapType])
data['type'] = str(fdata[_tPartnerNS.type])
data['updateable'] = _bool(fdata[_tPartnerNS.updateable])
try:
data['dependentPicklist'] = _bool(fdata[_tPartnerNS.dependentPicklist])
data['controllerName'] = str(fdata[_tPartnerNS.controllerName])
except KeyError:
data['dependentPicklist'] = False
data['controllerName'] = ''
return Field(**data)
def _extractPicklistEntry(pldata):
data = dict()
data['active'] = _bool(pldata[_tPartnerNS.active])
data['validFor'] = [str(v) for v in pldata[_tPartnerNS.validFor:]]
data['defaultValue'] = _bool(pldata[_tPartnerNS.defaultValue])
data['label'] = str(pldata[_tPartnerNS.label])
data['value'] = str(pldata[_tPartnerNS.value])
return data
def _extractChildRelInfo(crdata):
data = dict()
data['cascadeDelete'] = _bool(crdata[_tPartnerNS.cascadeDelete])
data['childSObject'] = str(crdata[_tPartnerNS.childSObject])
data['field'] = str(crdata[_tPartnerNS.field])
return data
def _extractRecordTypeInfo(rtidata):
data = dict()
data['available'] = _bool(rtidata[_tPartnerNS.available])
data['defaultRecordTypeMapping'] = _bool(rtidata[_tPartnerNS.defaultRecordTypeMapping])
data['name'] = str(rtidata[_tPartnerNS.name])
data['recordTypeId'] = str(rtidata[_tPartnerNS.recordTypeId])
return data
def _extractError(edata):
data = dict()
data['statusCode'] = str(edata[_tPartnerNS.statusCode])
data['message'] = str(edata[_tPartnerNS.message])
data['fields'] = [str(f) for f in edata[_tPartnerNS.fields:]]
return data
def _extractTab(tdata):
data = dict(
custom=_bool(tdata[_tPartnerNS.custom]),
label=str(tdata[_tPartnerNS.label]),
sObjectName=str(tdata[_tPartnerNS.sobjectName]),
url=str(tdata[_tPartnerNS.url]))
return data
def _extractUserInfo(res):
data = dict(
accessibilityMode=_bool(res[_tPartnerNS.accessibilityMode]),
currencySymbol=str(res[_tPartnerNS.currencySymbol]),
organizationId=str(res[_tPartnerNS.organizationId]),
organizationMultiCurrency=_bool(
res[_tPartnerNS.organizationMultiCurrency]),
organizationName=str(res[_tPartnerNS.organizationName]),
userDefaultCurrencyIsoCode=str(
res[_tPartnerNS.userDefaultCurrencyIsoCode]),
userEmail=str(res[_tPartnerNS.userEmail]),
userFullName=str(res[_tPartnerNS.userFullName]),
userId=str(res[_tPartnerNS.userId]),
userLanguage=str(res[_tPartnerNS.userLanguage]),
userLocale=str(res[_tPartnerNS.userLocale]),
userTimeZone=str(res[_tPartnerNS.userTimeZone]),
userUiSkin=str(res[_tPartnerNS.userUiSkin]))
return data
def isObject(xml):
try:
if xml(_tSchemaInstanceNS.type) == 'sf:sObject':
return True
else:
return False
except KeyError:
return False
def isQueryResult(xml):
try:
if xml(_tSchemaInstanceNS.type) == 'QueryResult':
return True
else:
return False
except KeyError:
return False
def isnil(xml):
try:
if xml(_tSchemaInstanceNS.nil) == 'true':
return True
else:
return False
except KeyError:
return False
def getRecordTypes(xml):
record_types = set()
if xml:
record_types.add(str(xml[_tSObjectNS.type]))
for field in xml:
if isObject(field):
record_types.update(getRecordTypes(field))
elif isQueryResult(field):
record_types.update(reduce(lambda x, y: x | y, [getRecordTypes(r) for r in field[_tPartnerNS.records:]]))
return record_types
|
from ft232.wrapper import FT232
import logging
import time
from ft232.dll_h import *
class UART(FT232):
def __init__(self, description, BaudRate, Parity, ByteSize, Stopbits):
FT232.__init__(self, description)
self.BaudRate = BaudRate
self.Parity = Parity
self.ByteSize = ByteSize
self.Stopbits = Stopbits
self.open()
self.config_to_uart()
self.uart_config()
def config_to_uart(self):
self.FT_ResetDevice()
self.check_status()
self.FT_SetBitMode(0, FT_BITMODE_RESET)
self.check_status()
self.FT_SetUSBParameters(65536, 65536)
self.check_status()
number_to_read = self.FT_GetQueueStatus()
self.check_status()
if number_to_read:
number_read = self.FT_Read(number_to_read)
logging.debug('FT_Read, %d, %d, %d' %
(number_read, self.status, self.inbytes))
self.FT_SetChars(0, 0, 0, 0)
self.check_status()
self.FT_SetTimeouts(100, 100)
self.check_status()
self.FT_SetLatencyTimer(1)
self.check_status()
self.FT_SetFlowControl(FT_FLOW_NONE, 0, 0)
self.check_status()
def uart_config(self):
self.FT_SetBaudRate(self.BaudRate)
if self.ByteSize not in [7, 8]:
logging.error('invalid data width')
return False
if self.Stopbits == 1:
ftstopbit = FT_STOP_BITS_1
elif self.Stopbits == 2:
ftstopbit = FT_STOP_BITS_2
else:
logging.error('invalid Stopbits')
return False
if self.Parity in ['n', 'N']:
ftparity = FT_PARITY_NONE
elif self.Parity in ['O', 'o']:
ftparity = FT_PARITY_ODD
elif self.Parity in ['e', 'E']:
ftparity = FT_PARITY_EVEN
else:
logging.error('invalid Parity')
return False
self.FT_SetDataCharacteristics(self.ByteSize, ftstopbit, ftparity)
self.check_status()
def uart_close(self):
self.close()
    def flushinbuff(self):
        number_to_read = self.FT_GetQueueStatus()
        self.check_status()
        if number_to_read:
            number_read = self.FT_Read(number_to_read)
            if number_to_read != number_read:
                logging.warning('buffer free may fail %d in buff, but %d read' % (
                    number_to_read, number_read))
            self.check_status()
            logging.info('flush %s', self.inbytes)
    def uart_read(self, num, mtimeout=100):
        start = time.time()
        while (time.time() - start) < mtimeout / 1000:
            num_in_queue = self.FT_GetQueueStatus()
            if num_in_queue >= num:
                # enough bytes buffered: read exactly the requested amount
                self.FT_Read(num)
                self.check_status()
                return self.inbytes
        # timed out: read whatever is available (may be less than requested)
        num_in_queue = self.FT_GetQueueStatus()
        if num_in_queue:
            self.FT_Read(num_in_queue)
            self.check_status()
        else:
            logging.warning('no data in queue')
        return self.inbytes
def uart_readall(self):
num_in_queue = self.FT_GetQueueStatus()
if num_in_queue:
self.FT_Read(num_in_queue)
self.check_status()
return self.inbytes
def uart_write(self, bdata):
        num_written = self.FT_Write(bdata)
        if num_written != len(bdata):
            logging.warning('TX %d, %d wanted' % (num_written, len(bdata)))
self.check_status()
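# A minimal usage sketch (hedged: the device description string is hypothetical and an
# FTDI adapter with the D2XX driver must be attached, so this is illustrative only):
#
#   uart = UART('FT232R USB UART', BaudRate=115200, Parity='N', ByteSize=8, Stopbits=1)
#   uart.uart_write(b'\x55\xaa')
#   reply = uart.uart_read(2, mtimeout=200)
#   uart.uart_close()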
|
import numpy as np
from sklearn.base import clone
from ._utils_boot import boot_manual, draw_weights
from ._utils import fit_predict, fit_predict_proba, tune_grid_search
def fit_irm(y, x, d,
learner_g, learner_m, all_smpls, dml_procedure, score,
n_rep=1, g0_params=None, g1_params=None, m_params=None,
trimming_threshold=1e-12):
n_obs = len(y)
thetas = np.zeros(n_rep)
ses = np.zeros(n_rep)
all_g_hat0 = list()
all_g_hat1 = list()
all_m_hat = list()
all_p_hat = list()
for i_rep in range(n_rep):
smpls = all_smpls[i_rep]
g_hat0, g_hat1, m_hat, p_hat = fit_nuisance_irm(y, x, d,
learner_g, learner_m, smpls,
score,
g0_params=g0_params, g1_params=g1_params, m_params=m_params,
trimming_threshold=trimming_threshold)
all_g_hat0.append(g_hat0)
all_g_hat1.append(g_hat1)
all_m_hat.append(m_hat)
all_p_hat.append(p_hat)
if dml_procedure == 'dml1':
thetas[i_rep], ses[i_rep] = irm_dml1(y, x, d,
g_hat0, g_hat1, m_hat, p_hat,
smpls, score)
else:
assert dml_procedure == 'dml2'
thetas[i_rep], ses[i_rep] = irm_dml2(y, x, d,
g_hat0, g_hat1, m_hat, p_hat,
smpls, score)
theta = np.median(thetas)
se = np.sqrt(np.median(np.power(ses, 2) * n_obs + np.power(thetas - theta, 2)) / n_obs)
res = {'theta': theta, 'se': se,
'thetas': thetas, 'ses': ses,
'all_g_hat0': all_g_hat0, 'all_g_hat1': all_g_hat1, 'all_m_hat': all_m_hat, 'all_p_hat': all_p_hat}
return res
def fit_nuisance_irm(y, x, d, learner_g, learner_m, smpls, score,
g0_params=None, g1_params=None, m_params=None,
trimming_threshold=1e-12):
ml_g0 = clone(learner_g)
ml_g1 = clone(learner_g)
train_cond0 = np.where(d == 0)[0]
g_hat0_list = fit_predict(y, x, ml_g0, g0_params, smpls,
train_cond=train_cond0)
if score == 'ATE':
train_cond1 = np.where(d == 1)[0]
g_hat1_list = fit_predict(y, x, ml_g1, g1_params, smpls,
train_cond=train_cond1)
else:
assert score == 'ATTE'
g_hat1_list = list()
for idx, _ in enumerate(smpls):
# fill it up, but its not further used
g_hat1_list.append(np.zeros_like(g_hat0_list[idx], dtype='float64'))
ml_m = clone(learner_m)
m_hat_list = fit_predict_proba(d, x, ml_m, m_params, smpls,
trimming_threshold=trimming_threshold)
p_hat_list = []
for (_, test_index) in smpls:
p_hat_list.append(np.mean(d[test_index]))
return g_hat0_list, g_hat1_list, m_hat_list, p_hat_list
def tune_nuisance_irm(y, x, d, ml_g, ml_m, smpls, score, n_folds_tune,
param_grid_g, param_grid_m):
train_cond0 = np.where(d == 0)[0]
g0_tune_res = tune_grid_search(y, x, ml_g, smpls, param_grid_g, n_folds_tune,
train_cond=train_cond0)
if score == 'ATE':
train_cond1 = np.where(d == 1)[0]
g1_tune_res = tune_grid_search(y, x, ml_g, smpls, param_grid_g, n_folds_tune,
train_cond=train_cond1)
g1_best_params = [xx.best_params_ for xx in g1_tune_res]
else:
g1_best_params = None
m_tune_res = tune_grid_search(d, x, ml_m, smpls, param_grid_m, n_folds_tune)
g0_best_params = [xx.best_params_ for xx in g0_tune_res]
m_best_params = [xx.best_params_ for xx in m_tune_res]
return g0_best_params, g1_best_params, m_best_params
def compute_iivm_residuals(y, g_hat0_list, g_hat1_list, m_hat_list, p_hat_list, smpls):
u_hat0 = np.full_like(y, np.nan, dtype='float64')
u_hat1 = np.full_like(y, np.nan, dtype='float64')
g_hat0 = np.full_like(y, np.nan, dtype='float64')
g_hat1 = np.full_like(y, np.nan, dtype='float64')
m_hat = np.full_like(y, np.nan, dtype='float64')
p_hat = np.full_like(y, np.nan, dtype='float64')
for idx, (_, test_index) in enumerate(smpls):
u_hat0[test_index] = y[test_index] - g_hat0_list[idx]
u_hat1[test_index] = y[test_index] - g_hat1_list[idx]
g_hat0[test_index] = g_hat0_list[idx]
g_hat1[test_index] = g_hat1_list[idx]
m_hat[test_index] = m_hat_list[idx]
p_hat[test_index] = p_hat_list[idx]
return u_hat0, u_hat1, g_hat0, g_hat1, m_hat, p_hat
def irm_dml1(y, x, d, g_hat0_list, g_hat1_list, m_hat_list, p_hat_list, smpls, score):
thetas = np.zeros(len(smpls))
n_obs = len(y)
u_hat0, u_hat1, g_hat0, g_hat1, m_hat, p_hat = compute_iivm_residuals(
y, g_hat0_list, g_hat1_list, m_hat_list, p_hat_list, smpls)
for idx, (_, test_index) in enumerate(smpls):
thetas[idx] = irm_orth(g_hat0[test_index], g_hat1[test_index],
m_hat[test_index], p_hat[test_index],
u_hat0[test_index], u_hat1[test_index],
d[test_index], score)
theta_hat = np.mean(thetas)
if len(smpls) > 1:
se = np.sqrt(var_irm(theta_hat, g_hat0, g_hat1,
m_hat, p_hat,
u_hat0, u_hat1,
d, score, n_obs))
else:
assert len(smpls) == 1
test_index = smpls[0][1]
n_obs = len(test_index)
se = np.sqrt(var_irm(theta_hat, g_hat0[test_index], g_hat1[test_index],
m_hat[test_index], p_hat[test_index],
u_hat0[test_index], u_hat1[test_index],
d[test_index], score, n_obs))
return theta_hat, se
def irm_dml2(y, x, d, g_hat0_list, g_hat1_list, m_hat_list, p_hat_list, smpls, score):
n_obs = len(y)
u_hat0, u_hat1, g_hat0, g_hat1, m_hat, p_hat = compute_iivm_residuals(
y, g_hat0_list, g_hat1_list, m_hat_list, p_hat_list, smpls)
theta_hat = irm_orth(g_hat0, g_hat1, m_hat, p_hat,
u_hat0, u_hat1, d, score)
se = np.sqrt(var_irm(theta_hat, g_hat0, g_hat1,
m_hat, p_hat,
u_hat0, u_hat1,
d, score, n_obs))
return theta_hat, se
def var_irm(theta, g_hat0, g_hat1, m_hat, p_hat, u_hat0, u_hat1, d, score, n_obs):
if score == 'ATE':
var = 1/n_obs * np.mean(np.power(g_hat1 - g_hat0
+ np.divide(np.multiply(d, u_hat1), m_hat)
- np.divide(np.multiply(1.-d, u_hat0), 1.-m_hat) - theta, 2))
else:
assert score == 'ATTE'
var = 1/n_obs * np.mean(np.power(np.divide(np.multiply(d, u_hat0), p_hat)
- np.divide(np.multiply(m_hat, np.multiply(1.-d, u_hat0)),
np.multiply(p_hat, (1.-m_hat)))
- theta * np.divide(d, p_hat), 2)) \
/ np.power(np.mean(np.divide(d, p_hat)), 2)
return var
def irm_orth(g_hat0, g_hat1, m_hat, p_hat, u_hat0, u_hat1, d, score):
if score == 'ATE':
res = np.mean(g_hat1 - g_hat0
+ np.divide(np.multiply(d, u_hat1), m_hat)
- np.divide(np.multiply(1.-d, u_hat0), 1.-m_hat))
else:
assert score == 'ATTE'
res = np.mean(np.divide(np.multiply(d, u_hat0), p_hat)
- np.divide(np.multiply(m_hat, np.multiply(1.-d, u_hat0)),
np.multiply(p_hat, (1.-m_hat)))) \
/ np.mean(np.divide(d, p_hat))
return res
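# For reference (added note, derived from the code above): irm_orth() averages the
# doubly robust (AIPW) orthogonal score. For score == 'ATE' the score element is
#   psi = g1(x) - g0(x) + d*(y - g1(x))/m(x) - (1-d)*(y - g0(x))/(1-m(x)) - theta,
# and theta_hat is chosen so that the empirical mean of psi is zero. var_irm() is
# the matching plug-in variance estimate, 1/n * mean(psi^2), with an extra division
# by the squared Jacobian term mean(d/p) for the ATTE score.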
def boot_irm(y, d, thetas, ses, all_g_hat0, all_g_hat1, all_m_hat, all_p_hat,
all_smpls, score, bootstrap, n_rep_boot,
n_rep=1, apply_cross_fitting=True):
all_boot_theta = list()
all_boot_t_stat = list()
for i_rep in range(n_rep):
smpls = all_smpls[i_rep]
if apply_cross_fitting:
n_obs = len(y)
else:
test_index = smpls[0][1]
n_obs = len(test_index)
weights = draw_weights(bootstrap, n_rep_boot, n_obs)
boot_theta, boot_t_stat = boot_irm_single_split(
thetas[i_rep], y, d,
all_g_hat0[i_rep], all_g_hat1[i_rep], all_m_hat[i_rep], all_p_hat[i_rep], smpls,
score, ses[i_rep], weights, n_rep_boot, apply_cross_fitting)
all_boot_theta.append(boot_theta)
all_boot_t_stat.append(boot_t_stat)
boot_theta = np.hstack(all_boot_theta)
boot_t_stat = np.hstack(all_boot_t_stat)
return boot_theta, boot_t_stat
def boot_irm_single_split(theta, y, d, g_hat0_list, g_hat1_list, m_hat_list, p_hat_list,
smpls, score, se, weights, n_rep_boot, apply_cross_fitting):
u_hat0, u_hat1, g_hat0, g_hat1, m_hat, p_hat = compute_iivm_residuals(
y, g_hat0_list, g_hat1_list, m_hat_list, p_hat_list, smpls)
if apply_cross_fitting:
if score == 'ATE':
J = -1.0
else:
assert score == 'ATTE'
J = np.mean(-np.divide(d, p_hat))
else:
test_index = smpls[0][1]
if score == 'ATE':
J = -1.0
else:
assert score == 'ATTE'
J = np.mean(-np.divide(d[test_index], p_hat[test_index]))
if score == 'ATE':
psi = g_hat1 - g_hat0 \
+ np.divide(np.multiply(d, u_hat1), m_hat) \
- np.divide(np.multiply(1.-d, u_hat0), 1.-m_hat) - theta
else:
assert score == 'ATTE'
psi = np.divide(np.multiply(d, u_hat0), p_hat) \
- np.divide(np.multiply(m_hat, np.multiply(1.-d, u_hat0)),
np.multiply(p_hat, (1.-m_hat))) \
- theta * np.divide(d, p_hat)
boot_theta, boot_t_stat = boot_manual(psi, J, smpls, se, weights, n_rep_boot, apply_cross_fitting)
return boot_theta, boot_t_stat
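# Hedged usage sketch (not part of the original test utilities): shows how the
# helpers above could be composed for a single ATE fit. The simulated data,
# learners and fold construction below are illustrative assumptions; it relies on
# the fit_predict / fit_predict_proba helpers defined earlier in this module.
def _example_irm_ate(n_obs=500, n_folds=5, seed=42):
    import numpy as np
    from sklearn.linear_model import LinearRegression, LogisticRegression
    from sklearn.model_selection import KFold

    # toy data-generating process with a constant treatment effect of 0.5
    rng = np.random.default_rng(seed)
    x = rng.normal(size=(n_obs, 5))
    d = rng.binomial(1, 0.5, size=n_obs)
    y = 0.5 * d + x[:, 0] + rng.normal(size=n_obs)

    # sample splits in the (train_index, test_index) format expected by the helpers
    smpls = list(KFold(n_splits=n_folds, shuffle=True, random_state=seed).split(x))
    g_hat0, g_hat1, m_hat, p_hat = fit_nuisance_irm(
        y, x, d, LinearRegression(), LogisticRegression(), smpls, 'ATE')
    theta_hat, se = irm_dml2(y, x, d, g_hat0, g_hat1, m_hat, p_hat, smpls, 'ATE')
    return theta_hat, se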
|
#coding:utf-8
import paramiko
from time import sleep
print '-------------------------------------------------------------'
# Set host and password for the chosen environment
while(True):
environment = raw_input('please enter environment (eg: "98", "99", "96" or "46"):')
if environment != '' and environment != 'exit':
break
else:
exit()
if environment == '98':
host = '192.168.0.98'
password = 'FK~yGKk40'
elif environment == '99':
host = '192.168.0.99'
password = 'AnrdeqtoC'
elif environment == '96':
host = '192.168.0.96'
password = 'AnHvNDD9D'
elif environment == '46':
host = '192.168.0.46'
password = 'JGV1*Tld4'
else:
print 'unknown environment: ' + environment
exit()
# Default username is root
username = 'root'
# Default port is 22
port = 22
project = raw_input('please enter project (eg: uc, wealth or lend):')
if project == 'exit':
exit()
tomcat_port = raw_input('please enter remote tomcat port:')
if tomcat_port == 'exit':
exit()
# Build the remote tomcat path
tomcat_path = '/opt/tomcat-'+project+'-'+tomcat_port+'/'
print 'tomcat_path:'+tomcat_path
# Path of the local war package
while(True):
local_war_path = raw_input('please enter local war path:')
if local_war_path != '' and local_war_path != 'exit':
break
else:
exit()
local_war_path = local_war_path.replace('\\','/')
print 'local_war_path:'+local_war_path
# Create a paramiko SSH client and connect
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(host, port, username, password)
# Shut down tomcat
shutdown_sh = 'sh '+tomcat_path+'/bin/shutdown.sh'
print 'execute shell ----------->'+shutdown_sh
stdin, stdout, stderr = client.exec_command(shutdown_sh)
# wait for the tomcat process to stop
sleep(10)
print stdout.readlines()
print 'tomcat has been shut down'
# Remove the old files under tomcat's webapps directory
rm_sh = 'rm -rf '+tomcat_path+'/webapps/*'
print 'execute shell ---------->'+rm_sh
stdin, stdout, stderr = client.exec_command(rm_sh)
print stdout.readlines()
# Upload the local war package to the remote server via SFTP
t = paramiko.Transport((host,port))
t.connect(username=username,password=password)
sftp = paramiko.SFTPClient.from_transport(t)
remotePath = tomcat_path+'/webapps/puhui-'+project+'.war'
localPath = local_war_path+'/puhui-'+project+'-20140109.war'
s = sftp.put(localPath, remotePath)
print s
# Start tomcat
startup_sh = 'sh '+tomcat_path+'/bin/startup.sh'
print 'execute shell ------------->'+startup_sh
stdin, stdout, stderr = client.exec_command(startup_sh)
print stdout.readlines()
print 'tomcat has been started'
print '-------------------------------------------------------------'
|
import sys
cycle_length = {}
def get_cycle_length(n):
length = 1
numbers = {}
while n != 1:
if n in cycle_length:
length = length + cycle_length[n] - 1
break
numbers[n] = length
n = (n // 2) if (n % 2) == 0 else (3 * n + 1)
length += 1
for i in numbers:
cycle_length[i] = length + 1 - numbers[i]
return length
def process(line):
a, b = [int(n) for n in line.split()]
start, end = (a, b + 1) if a < b else (b, a + 1)
print(a, b, max([get_cycle_length(n) for n in range(start, end)]))
def main():
for line in sys.stdin:
process(line)
if __name__ == '__main__':
main()
|
# Example
def increment(x):
return x + 1
def square(x):
# Replace the line below with the translation of the Java code
raise NotImplementedError()
def are_ordered(x, y, z):
raise NotImplementedError()
# is_divisible_by
|
class Node:
def __init__(self, info):
self.info = info
self.left = None
self.right = None
self.level = None
def __str__(self):
return str(self.info)
class BinarySearchTree:
def __init__(self):
self.root = None
def create(self, val):
if self.root == None:
self.root = Node(val)
else:
current = self.root
while True:
if val < current.info:
if current.left:
current = current.left
else:
current.left = Node(val)
break
elif val > current.info:
if current.right:
current = current.right
else:
current.right = Node(val)
break
else:
break
"""
Node is defined as
self.left (the left child of the node)
self.right (the right child of the node)
self.info (the value of the node)
"""
from collections import deque
l1 =[]
def LevelTraversal(root):
# standard breadth-first traversal, collecting node values into l1
m = deque()
if root is None:
return l1
m.append(root)
while m:
current = m.popleft()
l1.append(current.info)
if current.left is not None:
m.append(current.left)
if current.right is not None:
m.append(current.right)
return l1
def VerticalTraversal(root, hd, m):
# group node values by their horizontal distance hd from the root
if root is None:
return
m.setdefault(hd, []).append(root.info)
VerticalTraversal(root.left, hd-1, m)
VerticalTraversal(root.right, hd+1, m)
def VerticalTraversalList(root):
l2 = []
hd = 0
m = dict()
VerticalTraversal(root, hd, m)
for i in sorted(m):
l2.append(m[i])
return l2
def topView(root):
#Write your code here
l1 = LevelTraversal(root)
#print(l1)
l2 = VerticalTraversalList(root)
#print(l2)
for i in l2:
compare = []
for j in i:
#print(j, l1.index(j))
compare.append(l1.index(j))
min1 = min(compare)
print(l1[min1], end=' ')
tree = BinarySearchTree()
t = int(input())
arr = list(map(int, input().split()))
for i in range(t):
tree.create(arr[i])
topView(tree.root)
|
from django.db import models
# Create your models here.
class User(models.Model):
username = models.CharField(max_length=15)
email = models.CharField(max_length=30)
dob = models.CharField(max_length=15)
password = models.CharField(max_length=30)
# auto add these timestamps
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
# objects
# shirts = models.OneToMany
class Shirt(models.Model):
size = models.CharField(max_length=15)
design = models.CharField(max_length=15)
# auto add these timestamps
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
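# Hedged sketch (not in the original app): Django models one-to-many relations with a
# ForeignKey on the "many" side rather than a OneToMany field, so the commented-out
# `shirts` idea above would typically become a field on Shirt, e.g.
#
#     owner = models.ForeignKey(User, on_delete=models.CASCADE, related_name='shirts')
#
# after which `some_user.shirts.all()` returns that user's shirts.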
|
def sumofint(n):
# return the sum of the decimal digits of n
total = 0
while n != 0:
total += n % 10
n //= 10
return total
n = int(input())
for j in range(20):
# the loop body was left unfinished in the original source (a bare "if")
pass
|
__author__ = 'tonyxufaker'
|
import numpy as np
import matplotlib.pyplot as plt
u=np.linspace(-2,2,3)
v=np.linspace(-1,1,5)
X,Y=np.meshgrid(u,v)
z=X**2/25+Y**2/4
print(X)
print(Y)
print('z:\n',z)
plt.set_cmap('gray')
plt.pcolor(z)
plt.show()
y=np.array([[1,2,3], [4,5,6]])
print('y:\n',y)
plt.pcolor(y)
plt.show()
|
# -*- coding: utf-8 -*-
''' Test the callable method __call__() '''
class SalaryAccount:
'''Salary calculation class'''
def __call__(self, salary):
print("Starting salary calculation")
yearSalary = salary*12
daySalary = salary//22.5
hourSalary = daySalary//8
return dict(yearSalary = yearSalary, monthSalary = salary, daySalary = daySalary, hourSalary = hourSalary)
# Invoke the salary calculation class
s1 = SalaryAccount()
print(s1(14000))
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This temporary script file is located here:
/home/peterb/.spyder2/.temp.py
"""
import numpy as np
import serial
import alsaaudio
import websocket
import json
volFactor=0.1
m=alsaaudio.Mixer()
ws=websocket.create_connection('ws://192.168.13.30:80/mopidy/ws/',timeout=1)
#ser=serial.Serial('/dev/ttyUSB0',115200,timeout=1)
ser=serial.Serial('/dev/ttyUSB1',115200,timeout=1)
volArray=[]
volume=1
volV=0
#ws.send('{"jsonrpc": "2.0", "id": 1, "method": "core.describe"}')
class Mopidy():
def __init__(self,ws):
self.ws=ws
self.vol=0
def listFunction(self):
# query the Mopidy JSON-RPC API for its method catalogue
self.ws.send('{"jsonrpc": "2.0", "id": 1, "method": "core.describe"}')
res = json.loads(self.ws.recv())
print(res.keys())
print(res['result'])
for k in res['result'].keys():
print(k, res['result'][k])
def setVolume(self, vol):
self.ws.send('{"jsonrpc": "2.0", "id": 1, "method": "core.playback.set_volume","params": [%s]}' % vol)
#core.playback.set_volume {'params': [{'name': 'volume'}], 'description': None}
res = json.loads(self.ws.recv())
print(res)
def getVolume(self):
self.ws.send('{"jsonrpc": "2.0", "id": 1, "method": "core.playback.get_volume"}')
res = json.loads(self.ws.recv())
try:
print(int(res['result']))
except (KeyError, TypeError, ValueError):
print(res)
#m=Mopidy(ws)
#m.listFunction()
#m.getVolume()
while True:
line=ser.readline()
line=str(line,'utf-8')
#print(str(vol.strip()))
string=str(line.strip())
string=string.strip("'")
string=string.split('.')
print(string)
#m.setVolume(int(np.round(volume)))
#m.getVolume()
|
from .Action import Action
class Select(Action):
def __init__(self, objects, exclusive):
Action.__init__(self)
self.objects = objects
self.exclusive = exclusive
if exclusive:
self.previousSelections = list(base.selectionMgr.selectedObjects)
def do(self):
if self.exclusive:
base.selectionMgr.multiSelect(self.objects)
else:
for obj in self.objects:
base.selectionMgr.select(obj)
Action.do(self)
def undo(self):
if self.exclusive:
base.selectionMgr.multiSelect(self.previousSelections)
else:
for obj in self.objects:
base.selectionMgr.deselect(obj)
Action.undo(self)
def cleanup(self):
self.objects = None
self.exclusive = None
self.previousSelections = None
Action.cleanup(self)
def modifiesState(self):
return False
class Deselect(Action):
def __init__(self, objects=None, all=False):
Action.__init__(self)
# avoid sharing a mutable default list between instances
self.objects = objects if objects is not None else []
self.all = all
if all:
self.previousSelections = list(base.selectionMgr.selectedObjects)
def do(self):
if self.all:
base.selectionMgr.deselectAll()
else:
for obj in self.objects:
base.selectionMgr.deselect(obj)
Action.do(self)
def undo(self):
if self.all:
base.selectionMgr.multiSelect(self.previousSelections)
else:
for obj in self.objects:
base.selectionMgr.select(obj)
Action.undo(self)
def cleanup(self):
self.objects = None
self.all = None
self.previousSelections = None
Action.cleanup(self)
def modifiesState(self):
return False
|
# coding: utf-8
# In[3]:
import os
os.environ["CUDA_VISIBLE_DEVICES"]="1"
import csv
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import math
from datetime import datetime
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.callbacks import EarlyStopping
import pandas as pd
# In[1]:
def write_3d(X, filename):
X_list = X.tolist()
with open(filename+'.csv', 'w', newline='') as csvfile:
writer = csv.writer(csvfile, delimiter=',')
writer.writerows(X_list)
def transform(data, means_df, scales_df, order, freq = '15min'):
tss = { }
ws = { }
removed_mean = { }
removed_scale = { }
lnk_list = []
for lnk, data_link in data.groupby('link_ref', sort = False):
# Link Data Time Indexed
link_time_ix = pd.DatetimeIndex(data_link.index)
data_link = data_link.set_index(link_time_ix)
# Link Reference Data Index
ix_week = data_link['Weekday'].tolist()
ix_tod = data_link['TOD'].tolist()
## Create multi index for the two lists
mult_ind = pd.MultiIndex.from_arrays([ix_week, ix_tod])
link_travel_time_k = data_link['link_travel_time'].resample(freq).mean()
removed_mean[lnk] = pd.Series(data=means_df[lnk].loc[mult_ind].values,
index = link_time_ix).resample(freq).mean()
removed_scale[lnk] = pd.Series(data =scales_df[lnk].loc[mult_ind].values,
index = link_time_ix).resample(freq).mean()
tss[lnk] = (link_travel_time_k - removed_mean[lnk].values) / removed_scale[lnk].values
ws[lnk] = data_link['link_travel_time'].resample(freq).count()
lnk_list.append(lnk)
ts = pd.DataFrame(data = tss).fillna(method='pad').fillna(0)
df_removed_mean = pd.DataFrame(data = removed_mean, index = ts.index).fillna(method='pad').fillna(method='bfill')
df_removed_scale = pd.DataFrame(data = removed_scale, index = ts.index).fillna(method='pad').fillna(method='bfill')
w = pd.DataFrame(data = ws).fillna(0) # Link Travel Time Weights, e.g. number of measurements
return ts[order], df_removed_mean[order], df_removed_scale[order]
def fit_scale(data, order, ref_freq = '15min'):
means = { }
scales = { }
low = { }
upr = { }
grouping = data[data['link_travel_time'].notnull()].groupby('link_ref', sort = False)
for link_ref, data_link in grouping:
# Fit outlier bounds using MAD
median = data_link.groupby('Weekday')['link_travel_time'].median()
error = pd.concat([data_link['Weekday'], np.abs(data_link['link_travel_time'] - median[data_link['Weekday']].values)], axis = 1)
mad = 1.4826 * error.groupby('Weekday')['link_travel_time'].median()
_low = median - 3 * mad
_upr = median + 3 * mad
mask = (_low[data_link['Weekday']].values < data_link['link_travel_time']) & (data_link['link_travel_time'] < _upr[data_link['Weekday']].values)
data_link_no = data_link[mask]
_mean = data_link_no.groupby(['Weekday', 'TOD'])['link_travel_time'].mean()
means[link_ref] = _mean
scale = data_link_no.groupby(['Weekday', 'TOD'])['link_travel_time'].std()
scales[link_ref] = scale
low[link_ref] = _low
upr[link_ref] = _upr
means_df = pd.DataFrame(data=means).interpolate()
scales_df = pd.DataFrame(data=scales).interpolate()
low_df = pd.DataFrame(data=low).interpolate()
upr_df = pd.DataFrame(data=upr).interpolate()
## Correct order of links
means_df = means_df[order]
scales_df = scales_df[order]
low_df = low_df[order]
upr_df = upr_df[order]
# Fill NaNs
means_df = means_df.fillna(method='pad').fillna(method='bfill')
scales_df = scales_df.fillna(method='pad').fillna(method='bfill')
low_df = low_df.fillna(method='pad').fillna(method='bfill')
upr_df = upr_df.fillna(method='pad').fillna(method='bfill')
return means_df, scales_df
def roll(ix, ts, removed_mean, removed_scale, lags, preds):
X = np.stack([np.roll(ts, i, axis = 0) for i in range(lags, 0, -1)], axis = 1)[lags:-preds,]
Y = np.stack([np.roll(ts, -i, axis = 0) for i in range(0, preds, 1)], axis = 1)[lags:-preds,]
Y_ix = ix[lags:-preds]
Y_mean = np.stack([np.roll(removed_mean, -i, axis = 0) for i in range(0, preds, 1)], axis = 1)[lags:-preds,]
Y_scale = np.stack([np.roll(removed_scale, -i, axis = 0) for i in range(0, preds, 1)], axis = 1)[lags:-preds,]
return X, Y, Y_ix, Y_mean, Y_scale
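# Hedged shape check (not in the original script): illustrates the sliding-window
# layout produced by roll() on toy data; the series length and link count are
# assumptions chosen only for this example.
def _demo_roll_shapes(lags=10, preds=1):
    ix = pd.date_range('2019-01-01', periods=100, freq='15min')
    ts = np.random.randn(100, 4)
    X, Y, Y_ix, Y_mean, Y_scale = roll(ix, ts, np.zeros_like(ts), np.ones_like(ts), lags, preds)
    # X holds `lags` history steps per sample, Y the next `preds` steps:
    # X.shape == (89, 10, 4), Y.shape == (89, 1, 4), len(Y_ix) == 89
    return X.shape, Y.shape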
def sort_links(data, start_link, end_link):
ordered_list = [start_link]
links = data['link_ref'].unique()
stop_end = start_link.rpartition(':')[2]
while True:
stop_start = stop_end
for lnk in links:
if(lnk.rpartition(':')[0] == stop_start):
if( (lnk in ordered_list) or (lnk == end_link) ):
break
else:
ordered_list.append(lnk)
stop_end = lnk.rpartition(':')[2]
if(stop_start == stop_end):
break
ordered_list.append(end_link)
## Only include links in ordered list.
data = data[data['link_ref'].isin(ordered_list)]
return data, ordered_list
def tod_interval(x):
# Map an hour of day (0-23) onto a two-hour time-of-day bucket (0-11)
return x // 2
def split_df(data, start_train, end_train, end_test):
data_train = data[start_train:end_train]
data_test = data[end_train:end_test]
return data_train, data_test
# In[4]:
def load_data(lags, start_train, end_train, end_test):
preds = 1
data = pd.read_csv('data/link_travel_time_local.csv.gz', compression='gzip', parse_dates = True, index_col = 0)
## Sort links by order
data, order = sort_links(data, '1973:1412', '7057:7058')
## Make a link order column e.g here the neighbouring links for link 1 are 0 and 2.
data['link_order'] = data['link_ref'].astype('category')
not_in_list = data['link_order'].cat.categories.difference(order)
data['link_order'] = data['link_order'].cat.set_categories(np.hstack((order, not_in_list)), ordered=True)
data['link_order'] = data['link_order'].cat.codes
## Add week of day column [Monday, ..., Sunday] = [0, ..., 6]
data['Weekday'] = data.index.weekday
## Add hour of the time to dataframe
data['Hour'] = data.index.hour
## Add time of day variables to data frame
data['TOD'] = data.Hour.apply(tod_interval)
data = data.sort_values('link_order')
data_train, data_test = split_df(data, start_train = start_train, end_train = end_train, end_test = end_test)
## Transform train and test set using the mean and std for train set.
means_df_train, scales_df_train = fit_scale(data_train, order)
ts_train_df, mean_train_df, scale_train_df = transform(data_train,
means_df_train,
scales_df_train,
order,
freq = '15min')
ts_test_df, mean_test_df, scale_test_df = transform(data_test,
means_df_train,
scales_df_train,
order,
freq = '15min')
return ts_train_df, mean_train_df, scale_train_df, ts_test_df, mean_test_df, scale_test_df
# In[5]:
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, LSTM, RepeatVector, TimeDistributed, ConvLSTM2D, Activation, BatchNormalization, Flatten, Reshape
sum_all = tf.math.reduce_sum
def tilted_loss(q,y,f):
e = (y[:,:,:,0,0]-f[:,:,:,0,0])
# The term inside K.mean is a one-line form of the pinball (quantile) loss
return K.mean(K.maximum(q*e, (q-1)*e))
def mse_loss(y, f):
return K.mean(K.square(y[:,:,:,0,0]-f[:,:,:,0,0]), axis = -1)
## Tilted loss for both mean and quantiles
def joint_tilted_loss(quantiles, y, f):
loss = K.mean(K.square(y[:,:,:,0,0]-f[:,:,:,0,0]), axis = -1)
for i in range(len(quantiles)):
q = quantiles[i]
e = (y[:,:,:,0,i+1]-f[:,:,:,0,i+1])
loss += K.mean(K.maximum(q*e, (q-1)*e))
return loss
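# Hedged sanity-check helper (not part of the original experiment): evaluates the
# pinball loss on a toy tensor pair shaped like the (batch, time, link, 1, channel)
# targets used below, assuming TF 2.x eager execution.
def _demo_tilted_loss(q=0.9):
    y_toy = tf.zeros((1, 1, 1, 1, 1))
    f_toy = tf.fill((1, 1, 1, 1, 1), -1.0)
    # under-prediction by 1 unit costs q, over-prediction would cost (1 - q)
    return float(tilted_loss(q, y_toy, f_toy))  # 0.9 for q = 0.9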
## Encoder-decoder convolutional LSTM for jointly estimating quantiles and mean predictions.
def joint_convLstm(num_filters, kernel_length, input_timesteps, num_links, output_timesteps, quantiles, loss):
model = Sequential()
model.add(BatchNormalization(name = 'batch_norm_0', input_shape = (input_timesteps, num_links, 1, 1)))
model.add(ConvLSTM2D(name ='conv_lstm_0',
filters = num_filters, kernel_size = (kernel_length, 1),
padding='same',
return_sequences = False))
model.add(Dropout(0.20, name = 'dropout_0'))
model.add(BatchNormalization(name = 'batch_norm_1'))
model.add(Flatten())
model.add(RepeatVector(output_timesteps))
model.add(Reshape((output_timesteps, num_links, 1, num_filters)))
model.add(ConvLSTM2D(name ='conv_lstm_1',filters = num_filters, kernel_size = (kernel_length, 1), padding='same',return_sequences = True))
model.add(Dropout(0.20, name = 'dropout_1'))
model.add(TimeDistributed(Dense(units = len(quantiles) + 1, name = 'dense_1')))
model.compile(loss = loss, optimizer = 'nadam')
return model
## Encoder-decoder LSTM for mean
def convLstm(num_filters, kernel_length, input_timesteps, num_links, output_timesteps, loss):
model = Sequential()
model.add(BatchNormalization(name = 'batch_norm_0', input_shape = (input_timesteps, num_links, 1, 1)))
model.add(ConvLSTM2D(name ='conv_lstm_0',
filters = num_filters, kernel_size = (kernel_length, 1),
padding='same',
return_sequences = False))
model.add(Dropout(0.20, name = 'dropout_0'))
model.add(BatchNormalization(name = 'batch_norm_1'))
model.add(Flatten())
model.add(RepeatVector(output_timesteps))
model.add(Reshape((output_timesteps, num_links, 1, num_filters)))
model.add(ConvLSTM2D(name ='conv_lstm_1',
filters = num_filters, kernel_size = (kernel_length, 1),
padding='same',
return_sequences = True))
model.add(Dropout(0.20, name = 'dropout_1'))
model.add(TimeDistributed(Dense(units = 1, name = 'dense_1')))
model.compile(loss = loss, optimizer = 'nadam')
return model
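# Hedged example (not part of the original run): builds a small joint model with
# the link/lag configuration used below, purely to inspect the layer stack; the
# filter count and kernel length here are illustrative assumptions.
def _demo_build_joint_model(num_links=16, lags=10):
    quantiles_demo = np.array([0.05, 0.95])
    model = joint_convLstm(num_filters=6, kernel_length=3,
                           input_timesteps=lags, num_links=num_links,
                           output_timesteps=1, quantiles=quantiles_demo,
                           loss=lambda y, f: joint_tilted_loss(quantiles_demo, y, f))
    model.summary()  # final layer emits 1 mean + 2 quantile channels per link
    return model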
# In[ ]:
lags = 10
preds = 1
start_train_lst = ['2019-01-01', '2019-01-07', '2019-01-14', '2019-01-21', '2019-02-01']
end_train_lst = ['2019-01-31', '2019-02-07', '2019-02-14', '2019-02-21', '2019-03-01']
end_test_lst = ['2019-02-07', '2019-02-14', '2019-02-21', '2019-03-01', '2019-03-07']
num_partitions = 3
num_links = 16
batch_size = 80
quantiles = np.array([0.05, 0.95])
units_lst = np.arange(6, 72, 12)
kernel_lengths = np.arange(3, 14, 2)
epochs = 200
patience = 5
mse_i = np.empty((num_partitions, len(units_lst), len(kernel_lengths)))
icp_i = np.empty((num_partitions, len(units_lst), len(kernel_lengths)))
mil_i = np.empty((num_partitions, len(units_lst), len(kernel_lengths)))
mse_j = np.empty((num_partitions, len(units_lst), len(kernel_lengths)))
icp_j = np.empty((num_partitions, len(units_lst), len(kernel_lengths)))
mil_j = np.empty((num_partitions, len(units_lst), len(kernel_lengths)))
time_mean = np.empty((num_partitions, len(units_lst), len(kernel_lengths)))
time_quan = np.empty((num_partitions, len(units_lst), len(kernel_lengths)))
time_joint = np.empty((num_partitions, len(units_lst), len(kernel_lengths)))
for u, units in enumerate(units_lst):
print("Units {}".format(units))
for k, kern in enumerate(kernel_lengths):
print("Kernel length {}".format(kern))
## Initialise models
## Model for mean predictions
mod_mean = convLstm(units, kern, lags, num_links, 1, loss=lambda y, f: mse_loss(y, f))
## Models for quantiles; bind the current quantile via a default argument so each
## model keeps its own q instead of the loop's final value
mod_quan = []
for q, quan in enumerate(quantiles):
mod_quan.append(convLstm(units, kern, lags, num_links, 1,
loss=lambda y, f, q=quan: tilted_loss(q, y, f)))
## Joint model
mod_joint = joint_convLstm(units, kern, lags, num_links, 1, quantiles,
loss=lambda y, f: joint_tilted_loss(quantiles, y, f))
for part in range(num_partitions):
start_train = start_train_lst[part]
end_train = end_train_lst[part]
end_test = end_test_lst[part]
ts_train_df, mean_train_df, scale_train_df, ts_test_df, mean_test_df, scale_test_df = load_data(lags, start_train, end_train, end_test)
X_train, y_train, y_ix_train, y_mean_train, y_std_train = roll(ts_train_df.index,
ts_train_df.values,
mean_train_df.values,
scale_train_df.values,
lags,
preds)
X_test, y_test, y_ix_test, y_mean_test, y_std_test = roll(ts_test_df.index,
ts_test_df.values,
mean_test_df.values,
scale_test_df.values,
lags,
preds)
X_train = X_train[:,:,:,np.newaxis,np.newaxis]
y_train = y_train[:,:,:,np.newaxis,np.newaxis]
X_test = X_test[:,:,:,np.newaxis,np.newaxis]
y_test = y_test[:,:,:,np.newaxis,np.newaxis]
y_traink = np.zeros((y_train.shape[0], y_train.shape[1], y_train.shape[2], 1, len(quantiles)+1))
y_testk = np.zeros((y_test.shape[0], y_test.shape[1], y_test.shape[2], 1, len(quantiles)+1))
# channel 0 carries the target for the mean output, channels 1.. carry it for the quantiles
y_traink[:,:,:,:,0] = y_train[:,:,:,:,0]
y_testk[:,:,:,:,0] = y_test[:,:,:,:,0]
for i in range(len(quantiles)):
y_traink[:,:,:,:,i+1] = y_train[:,:,:,:,0]
y_testk[:,:,:,:,i+1] = y_test[:,:,:,:,0]
es = EarlyStopping(monitor='val_loss', mode='min', verbose=0, patience=8)
check_mean = ModelCheckpoint('models/mean_weights_a{}_k{}_p{}.hdf5'.format(units, kern, part), monitor='val_loss', mode='min', save_best_only=True)
check_q = []
for q, quan in enumerate(quantiles):
check_q.append(ModelCheckpoint('models/q{}_weights_a{}_k{}_p{}.hdf5'.format(q, units, kern, part), monitor='val_loss', mode='min', save_best_only=True))
check_joint = ModelCheckpoint('models/joint_weights_a{}_k{}_p{}.hdf5'.format(units, kern, part), monitor='val_loss', mode='min', save_best_only=True)
## If not the first partition initialise weights from last partition
if part != 0:
mod_mean.load_weights('models/mean_weights_a{}_k{}_p{}.hdf5'.format(units, kern, part-1))
for q, mod in enumerate(mod_quan):
mod.load_weights('models/q{}_weights_a{}_k{}_p{}.hdf5'.format(q, units, kern, part-1))
mod_joint.load_weights('models/joint_weights_a{}_k{}_p{}.hdf5'.format(units, kern, part-1))
################### INDEPENDENT ############################
t1 = datetime.now()
mod_mean.fit(X_train, y_train, epochs = epochs,validation_data = (X_test, y_test),batch_size = batch_size, callbacks = [es, check_mean],verbose=0)
mod_mean.load_weights('models/mean_weights_a{}_k{}_p{}.hdf5'.format(units, kern, part))
t2 = datetime.now()
time_mean[part, u, k] = (t2-t1).seconds
y_pred = mod_mean.predict(X_test)
y_pred_q = []
for q, mod in enumerate(mod_quan):
t1 = datetime.now()
mod.fit(X_train, y_train, epochs = epochs,validation_data = (X_test, y_test),batch_size=batch_size,callbacks=[es, check_q[q]],verbose = 0)
mod.load_weights('models/q{}_weights_a{}_k{}_p{}.hdf5'.format(q, units, kern, part))
y_pred_q.append(mod.predict(X_test))
t2 = datetime.now()
time_quan[part, u, k] = (t2-t1).seconds
Y_pred_lwr = (y_pred_q[0][:,:,:,0,0] * y_std_test) + y_mean_test
Y_pred_upr = (y_pred_q[1][:,:,:,0,0] * y_std_test) + y_mean_test
Y_pred_mean = (y_pred[:,:,:,0,0] * y_std_test) + y_mean_test
Y_true = y_test[:,:,:,0,0]* y_std_test + y_mean_test
Y_true_total = np.sum(Y_true, axis = 2)
Y_pred_mean_total = np.sum(Y_pred_mean, axis = 2)
icp_lnks = np.zeros(num_links)
mil_lnks = np.zeros(num_links)
for lnk in range(num_links):
q1 = Y_pred_lwr[:,:,lnk]
q2 = Y_pred_upr[:,:,lnk]
icp_lnks[lnk] = 1 - (np.sum(Y_true[:,:,lnk] < q1) + np.sum(Y_true[:,:,lnk] > q2)) / len(Y_true)
mil_lnks[lnk] = np.sum(np.maximum(0, q2 - q1)) / len(Y_true)
icp_i[part, u, k] = np.mean(icp_lnks)
mil_i[part, u, k] = np.mean(mil_lnks)
mse_i[part, u, k] = np.sum((Y_pred_mean_total - Y_true_total)**2)/len(Y_true_total)
#################### JDQR MODEL #####################
t1 = datetime.now()
mod_joint.fit(X_train,y_traink, epochs = epochs,validation_data = (X_test, y_testk),batch_size = batch_size,callbacks=[es, check_joint],verbose=0)
mod_joint.load_weights('models/joint_weights_a{}_k{}_p{}.hdf5'.format(units, kern, part))
t2 = datetime.now()
time_joint[part, u, k] = (t2-t1).seconds
y_pred = mod_joint.predict(X_test)
Y_true = y_test[:,:,:,0,0]* y_std_test + y_mean_test
Y_pred_mean = (y_pred[:,:,:,0,0] * y_std_test) + y_mean_test
Y_pred_lwr = (y_pred[:,:,:,0,1] * y_std_test) + y_mean_test
Y_pred_upr = (y_pred[:,:,:,0,2] * y_std_test) + y_mean_test
Y_true_total = np.sum(Y_true , axis = 2)
Y_pred_mean_total = np.sum(Y_pred_mean, axis = 2)
icp_lnks = np.zeros(num_links)
mil_lnks = np.zeros(num_links)
for lnk in range(num_links):
q1 = Y_pred_lwr[:,:,lnk]
q2 = Y_pred_upr[:,:,lnk]
icp_lnks[lnk] = 1 - (np.sum(Y_true[:,:,lnk] < q1) + np.sum(Y_true[:,:,lnk] > q2)) / len(Y_true)
mil_lnks[lnk] = np.sum(np.maximum(0, q2 - q1)) / len(Y_true)
icp_j[part, u, k] = np.mean(icp_lnks)
mil_j[part, u, k] = np.mean(mil_lnks)
mse_j[part, u, k] = np.sum((Y_pred_mean_total - Y_true_total)**2)/len(Y_true_total)
write_3d(time_mean, "time_mean")
write_3d(time_quan, "time_quan")
write_3d(time_joint, "time_joint")
write_3d(mse_j, "mse_j")
write_3d(icp_j, "icp_j")
write_3d(mil_j, "mil_j")
write_3d(mse_i, "mse_i")
write_3d(icp_i, "icp_i")
write_3d(mil_i, "mil_i")
|
import pytest
from bromine.utils.wait import Wait
from .. import Mock
@pytest.mark.parametrize('bool_seq,iterations', (
([False, True, False, True], 2),
([False, False, True], 3),
([True, True], 1)
))
def test_wait_until_returns_as_soon_as_condition_is_true(bool_seq, iterations):
condition = Mock(side_effect=bool_seq)
Wait(10, poll_frequency=0.01).until(condition)
assert condition.call_count == iterations
|